Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * common.c
4 : * Catalog routines used by pg_dump; long ago these were shared
5 : * by another dump tool, but not anymore.
6 : *
7 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : *
11 : * IDENTIFICATION
12 : * src/bin/pg_dump/common.c
13 : *
14 : *-------------------------------------------------------------------------
15 : */
16 : #include "postgres_fe.h"
17 :
18 : #include <ctype.h>
19 :
20 : #include "catalog/pg_class_d.h"
21 : #include "catalog/pg_collation_d.h"
22 : #include "catalog/pg_extension_d.h"
23 : #include "catalog/pg_namespace_d.h"
24 : #include "catalog/pg_operator_d.h"
25 : #include "catalog/pg_proc_d.h"
26 : #include "catalog/pg_publication_d.h"
27 : #include "catalog/pg_subscription_d.h"
28 : #include "catalog/pg_type_d.h"
29 : #include "common/hashfn.h"
30 : #include "pg_backup_utils.h"
31 : #include "pg_dump.h"
32 :
33 : /*
34 : * Variables for mapping DumpId to DumpableObject
35 : */
36 : static DumpableObject **dumpIdMap = NULL;
37 : static int allocedDumpIds = 0;
38 : static DumpId lastDumpId = 0; /* Note: 0 is InvalidDumpId */
39 :
40 : /*
41 : * Infrastructure for mapping CatalogId to DumpableObject
42 : *
43 : * We use a hash table generated by simplehash.h. That infrastructure
44 : * requires all the hash table entries to be the same size, and it also
45 : * expects that it can move them around when resizing the table. So we
46 : * cannot make the DumpableObjects be elements of the hash table directly;
47 : * instead, the hash table elements contain pointers to DumpableObjects.
48 : * This does have the advantage of letting us map multiple CatalogIds
49 : * to one DumpableObject, which is useful for blobs.
50 : *
51 : * It turns out to be convenient to also use this data structure to map
52 : * CatalogIds to owning extensions, if any. Since extension membership
53 : * data is read before creating most DumpableObjects, either one of dobj
54 : * and ext could be NULL.
55 : */
56 : typedef struct _catalogIdMapEntry
57 : {
58 : CatalogId catId; /* the indexed CatalogId */
59 : uint32 status; /* hash status */
60 : uint32 hashval; /* hash code for the CatalogId */
61 : DumpableObject *dobj; /* the associated DumpableObject, if any */
62 : ExtensionInfo *ext; /* owning extension, if any */
63 : } CatalogIdMapEntry;
64 :
65 : #define SH_PREFIX catalogid
66 : #define SH_ELEMENT_TYPE CatalogIdMapEntry
67 : #define SH_KEY_TYPE CatalogId
68 : #define SH_KEY catId
69 : #define SH_HASH_KEY(tb, key) hash_bytes((const unsigned char *) &(key), sizeof(CatalogId))
70 : #define SH_EQUAL(tb, a, b) ((a).oid == (b).oid && (a).tableoid == (b).tableoid)
71 : #define SH_STORE_HASH
72 : #define SH_GET_HASH(tb, a) (a)->hashval
73 : #define SH_SCOPE static inline
74 : #define SH_RAW_ALLOCATOR pg_malloc0
75 : #define SH_DECLARE
76 : #define SH_DEFINE
77 : #include "lib/simplehash.h"
78 :
79 : #define CATALOGIDHASH_INITIAL_SIZE 10000
80 :
81 : static catalogid_hash *catalogIdHash = NULL;
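The SH_* macros above cause simplehash.h to emit a dedicated table type (catalogid_hash) plus a family of static inline functions prefixed with "catalogid_". Only three of them are used in this file. The fragment below is an editorial illustration of that generated API, assuming exactly the signatures the real call sites further down rely on; catalogid_demo itself is hypothetical and does not exist in common.c.

    /*
     * Illustrative sketch only: exercise the simplehash-generated API.
     * catalogid_create/insert/lookup come from the macro expansion above.
     */
    static DumpableObject *
    catalogid_demo(CatalogId catId, DumpableObject *dobj)
    {
        CatalogIdMapEntry *entry;
        bool        found;

        if (catalogIdHash == NULL)
            catalogIdHash = catalogid_create(CATALOGIDHASH_INITIAL_SIZE, NULL);

        /* insert returns the entry for this key, whether old or newly created */
        entry = catalogid_insert(catalogIdHash, catId, &found);
        if (!found)
        {
            entry->dobj = NULL;     /* fresh entry: payload starts out empty */
            entry->ext = NULL;
        }
        if (dobj != NULL)
            entry->dobj = dobj;

        /* lookup never creates an entry; it returns NULL for an unknown key */
        entry = catalogid_lookup(catalogIdHash, catId);
        return entry ? entry->dobj : NULL;
    }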
82 :
83 : static void flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
84 : InhInfo *inhinfo, int numInherits);
85 : static void flagInhIndexes(Archive *fout, TableInfo *tblinfo, int numTables);
86 : static void flagInhAttrs(Archive *fout, DumpOptions *dopt, TableInfo *tblinfo,
87 : int numTables);
88 : static int strInArray(const char *pattern, char **arr, int arr_size);
89 : static IndxInfo *findIndexByOid(Oid oid);
90 :
91 :
92 : /*
93 : * getSchemaData
94 : * Collect information about all potentially dumpable objects
95 : */
96 : TableInfo *
97 310 : getSchemaData(Archive *fout, int *numTablesPtr)
98 : {
99 : TableInfo *tblinfo;
100 : ExtensionInfo *extinfo;
101 : InhInfo *inhinfo;
102 : int numTables;
103 : int numExtensions;
104 : int numInherits;
105 :
106 : /*
107 : * We must read extensions and extension membership info first, because
108 : * extension membership needs to be consultable during decisions about
109 : * whether other objects are to be dumped.
110 : */
111 310 : pg_log_info("reading extensions");
112 310 : extinfo = getExtensions(fout, &numExtensions);
113 :
114 310 : pg_log_info("identifying extension members");
115 310 : getExtensionMembership(fout, extinfo, numExtensions);
116 :
117 310 : pg_log_info("reading schemas");
118 310 : getNamespaces(fout);
119 :
120 : /*
121 : * getTables should be done as soon as possible, so as to minimize the
122 : * window between starting our transaction and acquiring per-table locks.
123 : * However, we have to do getNamespaces first because the tables get
124 : * linked to their containing namespaces during getTables.
125 : */
126 310 : pg_log_info("reading user-defined tables");
127 310 : tblinfo = getTables(fout, &numTables);
128 :
129 308 : getOwnedSeqs(fout, tblinfo, numTables);
130 :
131 308 : pg_log_info("reading user-defined functions");
132 308 : getFuncs(fout);
133 :
134 : /* this must be after getTables and getFuncs */
135 308 : pg_log_info("reading user-defined types");
136 308 : getTypes(fout);
137 :
138 : /* this must be after getFuncs, too */
139 308 : pg_log_info("reading procedural languages");
140 308 : getProcLangs(fout);
141 :
142 308 : pg_log_info("reading user-defined aggregate functions");
143 308 : getAggregates(fout);
144 :
145 308 : pg_log_info("reading user-defined operators");
146 308 : getOperators(fout);
147 :
148 308 : pg_log_info("reading user-defined access methods");
149 308 : getAccessMethods(fout);
150 :
151 308 : pg_log_info("reading user-defined operator classes");
152 308 : getOpclasses(fout);
153 :
154 308 : pg_log_info("reading user-defined operator families");
155 308 : getOpfamilies(fout);
156 :
157 308 : pg_log_info("reading user-defined text search parsers");
158 308 : getTSParsers(fout);
159 :
160 308 : pg_log_info("reading user-defined text search templates");
161 308 : getTSTemplates(fout);
162 :
163 308 : pg_log_info("reading user-defined text search dictionaries");
164 308 : getTSDictionaries(fout);
165 :
166 308 : pg_log_info("reading user-defined text search configurations");
167 308 : getTSConfigurations(fout);
168 :
169 308 : pg_log_info("reading user-defined foreign-data wrappers");
170 308 : getForeignDataWrappers(fout);
171 :
172 308 : pg_log_info("reading user-defined foreign servers");
173 308 : getForeignServers(fout);
174 :
175 308 : pg_log_info("reading default privileges");
176 308 : getDefaultACLs(fout);
177 :
178 308 : pg_log_info("reading user-defined collations");
179 308 : getCollations(fout);
180 :
181 308 : pg_log_info("reading user-defined conversions");
182 308 : getConversions(fout);
183 :
184 308 : pg_log_info("reading type casts");
185 308 : getCasts(fout);
186 :
187 308 : pg_log_info("reading transforms");
188 308 : getTransforms(fout);
189 :
190 308 : pg_log_info("reading table inheritance information");
191 308 : inhinfo = getInherits(fout, &numInherits);
192 :
193 308 : pg_log_info("reading event triggers");
194 308 : getEventTriggers(fout);
195 :
196 : /* Identify extension configuration tables that should be dumped */
197 308 : pg_log_info("finding extension tables");
198 308 : processExtensionTables(fout, extinfo, numExtensions);
199 :
200 : /* Link tables to parents, mark parents of target tables interesting */
201 308 : pg_log_info("finding inheritance relationships");
202 308 : flagInhTables(fout, tblinfo, numTables, inhinfo, numInherits);
203 :
204 308 : pg_log_info("reading column info for interesting tables");
205 308 : getTableAttrs(fout, tblinfo, numTables);
206 :
207 308 : pg_log_info("flagging inherited columns in subtables");
208 308 : flagInhAttrs(fout, fout->dopt, tblinfo, numTables);
209 :
210 308 : pg_log_info("reading partitioning data");
211 308 : getPartitioningInfo(fout);
212 :
213 308 : pg_log_info("reading indexes");
214 308 : getIndexes(fout, tblinfo, numTables);
215 :
216 308 : pg_log_info("flagging indexes in partitioned tables");
217 308 : flagInhIndexes(fout, tblinfo, numTables);
218 :
219 308 : pg_log_info("reading extended statistics");
220 308 : getExtendedStatistics(fout);
221 :
222 308 : pg_log_info("reading constraints");
223 308 : getConstraints(fout, tblinfo, numTables);
224 :
225 308 : pg_log_info("reading triggers");
226 308 : getTriggers(fout, tblinfo, numTables);
227 :
228 308 : pg_log_info("reading rewrite rules");
229 308 : getRules(fout);
230 :
231 308 : pg_log_info("reading policies");
232 308 : getPolicies(fout, tblinfo, numTables);
233 :
234 308 : pg_log_info("reading publications");
235 308 : getPublications(fout);
236 :
237 308 : pg_log_info("reading publication membership of tables");
238 308 : getPublicationTables(fout, tblinfo, numTables);
239 :
240 308 : pg_log_info("reading publication membership of schemas");
241 308 : getPublicationNamespaces(fout);
242 :
243 308 : pg_log_info("reading subscriptions");
244 308 : getSubscriptions(fout);
245 :
246 308 : pg_log_info("reading subscription membership of tables");
247 308 : getSubscriptionTables(fout);
248 :
249 308 : free(inhinfo); /* not needed any longer */
250 :
251 308 : *numTablesPtr = numTables;
252 308 : return tblinfo;
253 : }
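As a usage note, getSchemaData() is driven from pg_dump.c rather than from this file; the fragment below is a simplified, hypothetical sketch of that call site (fout is assumed to be the already-opened Archive).

    /* Illustrative caller: collect catalog data for the whole database. */
    int         numTables;
    TableInfo  *tblinfo;

    tblinfo = getSchemaData(fout, &numTables);
    /* tblinfo[0 .. numTables - 1] now describes every table that was read */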
254 :
255 : /* flagInhTables -
256 : * Fill in parent link fields of tables for which we need that information,
257 : * mark parents of target tables as interesting, and create
258 : * TableAttachInfo objects for partitioned tables with appropriate
259 : * dependency links.
260 : *
261 : * Note that only direct ancestors of targets are marked interesting.
262 : * This is sufficient; we don't much care whether they inherited their
263 : * attributes or not.
264 : *
265 : * modifies tblinfo
266 : */
267 : static void
268 308 : flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
269 : InhInfo *inhinfo, int numInherits)
270 : {
271 308 : TableInfo *child = NULL;
272 308 : TableInfo *parent = NULL;
273 : int i,
274 : j;
275 :
276 : /*
277 : * Set up links from child tables to their parents.
278 : *
279 : * We used to attempt to skip this work for tables that are not to be
280 : * dumped; but the optimizable cases are rare in practice, and setting up
281 : * these links in bulk is cheaper than the old way. (Note in particular
282 : * that it's very rare for a child to have more than one parent.)
283 : */
284 6096 : for (i = 0; i < numInherits; i++)
285 : {
286 : /*
287 : * Skip a hashtable lookup if it's the same table as last time. This is
288 : * unlikely for the child, but less so for the parent. (Maybe we
289 : * should ask the backend for a sorted array to make it more likely?
290 : * Not clear the sorting effort would be repaid, though.)
291 : */
292 5788 : if (child == NULL ||
293 4366 : child->dobj.catId.oid != inhinfo[i].inhrelid)
294 : {
295 5690 : child = findTableByOid(inhinfo[i].inhrelid);
296 :
297 : /*
298 : * If we find no TableInfo, assume the pg_inherits entry is for a
299 : * partitioned index, which we don't need to track.
300 : */
301 5690 : if (child == NULL)
302 1384 : continue;
303 : }
304 4404 : if (parent == NULL ||
305 4266 : parent->dobj.catId.oid != inhinfo[i].inhparent)
306 : {
307 2586 : parent = findTableByOid(inhinfo[i].inhparent);
308 2586 : if (parent == NULL)
309 0 : pg_fatal("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
310 : inhinfo[i].inhparent,
311 : child->dobj.name,
312 : child->dobj.catId.oid);
313 : }
314 : /* Add this parent to the child's list of parents. */
315 4404 : if (child->numParents > 0)
316 98 : child->parents = pg_realloc_array(child->parents,
317 : TableInfo *,
318 : child->numParents + 1);
319 : else
320 4306 : child->parents = pg_malloc_array(TableInfo *, 1);
321 4404 : child->parents[child->numParents++] = parent;
322 : }
323 :
324 : /*
325 : * Now consider all child tables and mark parents interesting as needed.
326 : */
327 81216 : for (i = 0; i < numTables; i++)
328 : {
329 : /*
330 : * If needed, mark the parents as interesting for getTableAttrs and
331 : * getIndexes. We only need this for direct parents of dumpable
332 : * tables.
333 : */
334 80908 : if (tblinfo[i].dobj.dump)
335 : {
336 49974 : int numParents = tblinfo[i].numParents;
337 49974 : TableInfo **parents = tblinfo[i].parents;
338 :
339 53230 : for (j = 0; j < numParents; j++)
340 3256 : parents[j]->interesting = true;
341 : }
342 :
343 : /* Create TableAttachInfo object if needed */
344 80908 : if ((tblinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
345 12162 : tblinfo[i].ispartition)
346 : {
347 : TableAttachInfo *attachinfo;
348 :
349 : /* With partitions there can only be one parent */
350 2496 : if (tblinfo[i].numParents != 1)
351 0 : pg_fatal("invalid number of parents %d for table \"%s\"",
352 : tblinfo[i].numParents,
353 : tblinfo[i].dobj.name);
354 :
355 2496 : attachinfo = (TableAttachInfo *) palloc(sizeof(TableAttachInfo));
356 2496 : attachinfo->dobj.objType = DO_TABLE_ATTACH;
357 2496 : attachinfo->dobj.catId.tableoid = 0;
358 2496 : attachinfo->dobj.catId.oid = 0;
359 2496 : AssignDumpId(&attachinfo->dobj);
360 2496 : attachinfo->dobj.name = pg_strdup(tblinfo[i].dobj.name);
361 2496 : attachinfo->dobj.namespace = tblinfo[i].dobj.namespace;
362 2496 : attachinfo->parentTbl = tblinfo[i].parents[0];
363 2496 : attachinfo->partitionTbl = &tblinfo[i];
364 :
365 : /*
366 : * We must state the DO_TABLE_ATTACH object's dependencies
367 : * explicitly, since it will not match anything in pg_depend.
368 : *
369 : * Give it dependencies on both the partition table and the parent
370 : * table, so that it will not be executed till both of those
371 : * exist. (There's no need to care what order those are created
372 : * in.)
373 : */
374 2496 : addObjectDependency(&attachinfo->dobj, tblinfo[i].dobj.dumpId);
375 2496 : addObjectDependency(&attachinfo->dobj, tblinfo[i].parents[0]->dobj.dumpId);
376 : }
377 : }
378 308 : }
379 :
380 : /*
381 : * flagInhIndexes -
382 : * Create IndexAttachInfo objects for partitioned indexes, and add
383 : * appropriate dependency links.
384 : */
385 : static void
386 308 : flagInhIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
387 : {
388 : int i,
389 : j;
390 :
391 81216 : for (i = 0; i < numTables; i++)
392 : {
393 80908 : if (!tblinfo[i].ispartition || tblinfo[i].numParents == 0)
394 77538 : continue;
395 :
396 : Assert(tblinfo[i].numParents == 1);
397 :
398 4566 : for (j = 0; j < tblinfo[i].numIndexes; j++)
399 : {
400 1196 : IndxInfo *index = &(tblinfo[i].indexes[j]);
401 : IndxInfo *parentidx;
402 : IndexAttachInfo *attachinfo;
403 :
404 1196 : if (index->parentidx == 0)
405 100 : continue;
406 :
407 1096 : parentidx = findIndexByOid(index->parentidx);
408 1096 : if (parentidx == NULL)
409 0 : continue;
410 :
411 1096 : attachinfo = pg_malloc_object(IndexAttachInfo);
412 :
413 1096 : attachinfo->dobj.objType = DO_INDEX_ATTACH;
414 1096 : attachinfo->dobj.catId.tableoid = 0;
415 1096 : attachinfo->dobj.catId.oid = 0;
416 1096 : AssignDumpId(&attachinfo->dobj);
417 1096 : attachinfo->dobj.name = pg_strdup(index->dobj.name);
418 1096 : attachinfo->dobj.namespace = index->indextable->dobj.namespace;
419 1096 : attachinfo->parentIdx = parentidx;
420 1096 : attachinfo->partitionIdx = index;
421 :
422 : /*
423 : * We must state the DO_INDEX_ATTACH object's dependencies
424 : * explicitly, since it will not match anything in pg_depend.
425 : *
426 : * Give it dependencies on both the partition index and the parent
427 : * index, so that it will not be executed till both of those
428 : * exist. (There's no need to care what order those are created
429 : * in.)
430 : *
431 : * In addition, give it dependencies on the indexes' underlying
432 : * tables. This does nothing of great value so far as serial
433 : * restore ordering goes, but it ensures that a parallel restore
434 : * will not try to run the ATTACH concurrently with other
435 : * operations on those tables.
436 : */
437 1096 : addObjectDependency(&attachinfo->dobj, index->dobj.dumpId);
438 1096 : addObjectDependency(&attachinfo->dobj, parentidx->dobj.dumpId);
439 1096 : addObjectDependency(&attachinfo->dobj,
440 1096 : index->indextable->dobj.dumpId);
441 1096 : addObjectDependency(&attachinfo->dobj,
442 1096 : parentidx->indextable->dobj.dumpId);
443 :
444 : /* keep track of the list of partitions in the parent index */
445 1096 : simple_ptr_list_append(&parentidx->partattaches, &attachinfo->dobj);
446 : }
447 : }
448 308 : }
449 :
450 : /* flagInhAttrs -
451 : * for each dumpable table in tblinfo, flag its inherited attributes
452 : *
453 : * What we need to do here is:
454 : *
455 : * - Detect child columns that inherit NOT NULL bits from their parents, so
456 : * that we needn't specify that again for the child. (Versions >= 18 no
457 : * longer need this.)
458 : *
459 : * - Detect child columns that have DEFAULT NULL when their parents had some
460 : * non-null default. In this case, we make up a dummy AttrDefInfo object so
461 : * that we'll correctly emit the necessary DEFAULT NULL clause; otherwise
462 : * the backend will apply an inherited default to the column.
463 : *
464 : * - Detect child columns that have a generation expression and all their
465 : * parents also have the same generation expression, and if so suppress the
466 : * child's expression. The child will inherit the generation expression
467 : * automatically, so there's no need to dump it. This improves the dump's
468 : * compatibility with pre-v16 servers, which didn't allow the child's
469 : * expression to be given explicitly. Exceptions: If it's a partition or
470 : * we are in binary upgrade mode, we dump such expressions anyway because
471 : * in those cases inherited tables are recreated standalone first and then
472 : * reattached to the parent. (See also the logic in dumpTableSchema().)
473 : *
474 : * modifies tblinfo
475 : */
476 : static void
477 308 : flagInhAttrs(Archive *fout, DumpOptions *dopt, TableInfo *tblinfo, int numTables)
478 : {
479 : int i,
480 : j,
481 : k;
482 :
483 : /*
484 : * We scan the tables in OID order, since that's how tblinfo[] is sorted.
485 : * Hence we will typically visit parents before their children --- but
486 : * that is *not* guaranteed. Thus this loop must be careful that it does
487 : * not alter table properties in a way that could change decisions made at
488 : * child tables during other iterations.
489 : */
490 81216 : for (i = 0; i < numTables; i++)
491 : {
492 80908 : TableInfo *tbinfo = &(tblinfo[i]);
493 : int numParents;
494 : TableInfo **parents;
495 :
496 : /* Some kinds never have parents */
497 80908 : if (tbinfo->relkind == RELKIND_SEQUENCE ||
498 79724 : tbinfo->relkind == RELKIND_VIEW ||
499 34922 : tbinfo->relkind == RELKIND_MATVIEW)
500 46872 : continue;
501 :
502 : /* Don't bother computing anything for non-target tables, either */
503 34036 : if (!tbinfo->dobj.dump)
504 7298 : continue;
505 :
506 26738 : numParents = tbinfo->numParents;
507 26738 : parents = tbinfo->parents;
508 :
509 26738 : if (numParents == 0)
510 23552 : continue; /* nothing to see here, move along */
511 :
512 : /* For each column, search for matching column names in parent(s) */
513 11490 : for (j = 0; j < tbinfo->numatts; j++)
514 : {
515 : bool foundNotNull; /* Attr was NOT NULL in a parent */
516 : bool foundDefault; /* Found a default in a parent */
517 : bool foundSameGenerated; /* Found matching GENERATED */
518 : bool foundDiffGenerated; /* Found non-matching GENERATED */
519 :
520 : /* no point in examining dropped columns */
521 8304 : if (tbinfo->attisdropped[j])
522 610 : continue;
523 :
524 7694 : foundNotNull = false;
525 7694 : foundDefault = false;
526 7694 : foundSameGenerated = false;
527 7694 : foundDiffGenerated = false;
528 15688 : for (k = 0; k < numParents; k++)
529 : {
530 7994 : TableInfo *parent = parents[k];
531 : int inhAttrInd;
532 :
533 7994 : inhAttrInd = strInArray(tbinfo->attnames[j],
534 : parent->attnames,
535 : parent->numatts);
536 7994 : if (inhAttrInd >= 0)
537 : {
538 7544 : AttrDefInfo *parentDef = parent->attrdefs[inhAttrInd];
539 :
540 : /*
541 : * Account for each parent having a not-null constraint.
542 : * In versions 18 and later, we don't need this (and those
543 : * didn't have NO INHERIT.)
544 : */
545 7544 : if (fout->remoteVersion < 180000 &&
546 0 : parent->notnull_constrs[inhAttrInd] != NULL)
547 0 : foundNotNull = true;
548 :
549 15812 : foundDefault |= (parentDef != NULL &&
550 8168 : strcmp(parentDef->adef_expr, "NULL") != 0 &&
551 624 : !parent->attgenerated[inhAttrInd]);
552 7544 : if (parent->attgenerated[inhAttrInd])
553 : {
554 : /* these pointer nullness checks are just paranoia */
555 244 : if (parentDef != NULL &&
556 232 : tbinfo->attrdefs[j] != NULL &&
557 232 : strcmp(parentDef->adef_expr,
558 232 : tbinfo->attrdefs[j]->adef_expr) == 0)
559 202 : foundSameGenerated = true;
560 : else
561 42 : foundDiffGenerated = true;
562 : }
563 : }
564 : }
565 :
566 : /*
567 : * In versions < 18, for lack of a better system, we arbitrarily
568 : * decide that a not-null constraint is not locally defined if at
569 : * least one of the parents has it.
570 : */
571 7694 : if (fout->remoteVersion < 180000 && foundNotNull)
572 0 : tbinfo->notnull_islocal[j] = false;
573 :
574 : /*
575 : * Manufacture a DEFAULT NULL clause if necessary. This breaks
576 : * the advice given above to avoid changing state that might get
577 : * inspected in other loop iterations. We prevent trouble by
578 : * having the foundDefault test above check whether adef_expr is
579 : * "NULL", so that it will reach the same conclusion before or
580 : * after this is done.
581 : */
582 7694 : if (foundDefault && tbinfo->attrdefs[j] == NULL)
583 : {
584 : AttrDefInfo *attrDef;
585 :
586 80 : attrDef = pg_malloc_object(AttrDefInfo);
587 80 : attrDef->dobj.objType = DO_ATTRDEF;
588 80 : attrDef->dobj.catId.tableoid = 0;
589 80 : attrDef->dobj.catId.oid = 0;
590 80 : AssignDumpId(&attrDef->dobj);
591 80 : attrDef->dobj.name = pg_strdup(tbinfo->dobj.name);
592 80 : attrDef->dobj.namespace = tbinfo->dobj.namespace;
593 80 : attrDef->dobj.dump = tbinfo->dobj.dump;
594 :
595 80 : attrDef->adtable = tbinfo;
596 80 : attrDef->adnum = j + 1;
597 80 : attrDef->adef_expr = pg_strdup("NULL");
598 :
599 : /* Will column be dumped explicitly? */
600 80 : if (shouldPrintColumn(dopt, tbinfo, j))
601 : {
602 80 : attrDef->separate = false;
603 : /* No dependency needed: NULL cannot have dependencies */
604 : }
605 : else
606 : {
607 : /* column will be suppressed, print default separately */
608 0 : attrDef->separate = true;
609 : /* ensure it comes out after the table */
610 0 : addObjectDependency(&attrDef->dobj,
611 : tbinfo->dobj.dumpId);
612 : }
613 :
614 80 : tbinfo->attrdefs[j] = attrDef;
615 : }
616 :
617 : /* No need to dump generation expression if it's inheritable */
618 7694 : if (foundSameGenerated && !foundDiffGenerated &&
619 202 : !tbinfo->ispartition && !dopt->binary_upgrade)
620 160 : tbinfo->attrdefs[j]->dobj.dump = DUMP_COMPONENT_NONE;
621 : }
622 : }
623 308 : }
624 :
625 : /*
626 : * AssignDumpId
627 : * Given a newly-created dumpable object, assign a dump ID,
628 : * and enter the object into the lookup tables.
629 : *
630 : * The caller is expected to have filled in objType and catId,
631 : * but not any of the other standard fields of a DumpableObject.
632 : */
633 : void
634 1126316 : AssignDumpId(DumpableObject *dobj)
635 : {
636 1126316 : dobj->dumpId = ++lastDumpId;
637 1126316 : dobj->name = NULL; /* must be set later */
638 1126316 : dobj->namespace = NULL; /* may be set later */
639 1126316 : dobj->dump = DUMP_COMPONENT_ALL; /* default assumption */
640 1126316 : dobj->dump_contains = DUMP_COMPONENT_ALL; /* default assumption */
641 : /* All objects have definitions; we may set more components bits later */
642 1126316 : dobj->components = DUMP_COMPONENT_DEFINITION;
643 1126316 : dobj->ext_member = false; /* default assumption */
644 1126316 : dobj->depends_on_ext = false; /* default assumption */
645 1126316 : dobj->dependencies = NULL;
646 1126316 : dobj->nDeps = 0;
647 1126316 : dobj->allocDeps = 0;
648 :
649 : /* Add object to dumpIdMap[], enlarging that array if need be */
650 1127884 : while (dobj->dumpId >= allocedDumpIds)
651 : {
652 : int newAlloc;
653 :
654 1568 : if (allocedDumpIds <= 0)
655 : {
656 310 : newAlloc = 256;
657 310 : dumpIdMap = pg_malloc_array(DumpableObject *, newAlloc);
658 : }
659 : else
660 : {
661 1258 : newAlloc = allocedDumpIds * 2;
662 1258 : dumpIdMap = pg_realloc_array(dumpIdMap, DumpableObject *, newAlloc);
663 : }
664 1568 : memset(dumpIdMap + allocedDumpIds, 0,
665 1568 : (newAlloc - allocedDumpIds) * sizeof(DumpableObject *));
666 1568 : allocedDumpIds = newAlloc;
667 : }
668 1126316 : dumpIdMap[dobj->dumpId] = dobj;
669 :
670 : /* If it has a valid CatalogId, enter it into the hash table */
671 1126316 : if (OidIsValid(dobj->catId.tableoid))
672 : {
673 : CatalogIdMapEntry *entry;
674 : bool found;
675 :
676 : /* Initialize CatalogId hash table if not done yet */
677 1099278 : if (catalogIdHash == NULL)
678 310 : catalogIdHash = catalogid_create(CATALOGIDHASH_INITIAL_SIZE, NULL);
679 :
680 1099278 : entry = catalogid_insert(catalogIdHash, dobj->catId, &found);
681 1099278 : if (!found)
682 : {
683 1097686 : entry->dobj = NULL;
684 1097686 : entry->ext = NULL;
685 : }
686 : Assert(entry->dobj == NULL);
687 1099278 : entry->dobj = dobj;
688 : }
689 1126316 : }
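The comment above requires callers to fill in objType and catId before calling AssignDumpId() and to set the remaining fields afterwards. The fragment below is a hypothetical illustration of that order (the values are made up); real callers allocate the concrete subtype, e.g. TableAttachInfo, and pass &info->dobj, as flagInhTables() and flagInhIndexes() do earlier in this file.

    /* Illustrative only: register an object that has no catalog row. */
    DumpableObject *dobj = pg_malloc0(sizeof(DumpableObject));

    dobj->objType = DO_TABLE_ATTACH;    /* caller chooses the object type */
    dobj->catId.tableoid = 0;           /* zero tableoid: skip the CatalogId hash */
    dobj->catId.oid = 0;
    AssignDumpId(dobj);                 /* assigns dumpId and registers the object */
    dobj->name = pg_strdup("illustrative object");  /* filled in afterwards */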
690 :
691 : /*
692 : * recordAdditionalCatalogID
693 : * Record an additional catalog ID for the given DumpableObject
694 : */
695 : void
696 20 : recordAdditionalCatalogID(CatalogId catId, DumpableObject *dobj)
697 : {
698 : CatalogIdMapEntry *entry;
699 : bool found;
700 :
701 : /* CatalogId hash table must exist, if we have a DumpableObject */
702 : Assert(catalogIdHash != NULL);
703 :
704 : /* Add reference to CatalogId hash */
705 20 : entry = catalogid_insert(catalogIdHash, catId, &found);
706 20 : if (!found)
707 : {
708 20 : entry->dobj = NULL;
709 20 : entry->ext = NULL;
710 : }
711 : Assert(entry->dobj == NULL);
712 20 : entry->dobj = dobj;
713 20 : }
714 :
715 : /*
716 : * Assign a DumpId that's not tied to a DumpableObject.
717 : *
718 : * This is used when creating a "fixed" ArchiveEntry that doesn't need to
719 : * participate in the sorting logic.
720 : */
721 : DumpId
722 18420 : createDumpId(void)
723 : {
724 18420 : return ++lastDumpId;
725 : }
726 :
727 : /*
728 : * Return the largest DumpId so far assigned
729 : */
730 : DumpId
731 2108 : getMaxDumpId(void)
732 : {
733 2108 : return lastDumpId;
734 : }
735 :
736 : /*
737 : * Find a DumpableObject by dump ID
738 : *
739 : * Returns NULL for invalid ID
740 : */
741 : DumpableObject *
742 36241874 : findObjectByDumpId(DumpId dumpId)
743 : {
744 36241874 : if (dumpId <= 0 || dumpId >= allocedDumpIds)
745 0 : return NULL; /* out of range? */
746 36241874 : return dumpIdMap[dumpId];
747 : }
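Because dependency arrays store DumpIds rather than pointers, consumers resolve them through this function. The loop below is an illustrative sketch of that traversal, not code from this file; dobj stands for any previously registered DumpableObject.

    /* Illustrative only: walk an object's recorded dependencies. */
    for (int i = 0; i < dobj->nDeps; i++)
    {
        DumpableObject *ref = findObjectByDumpId(dobj->dependencies[i]);

        if (ref == NULL)
            continue;           /* e.g. a DumpId handed out by createDumpId() */
        /* ... inspect ref->objType, ref->name, ref->catId ... */
    }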
748 :
749 : /*
750 : * Find a DumpableObject by catalog ID
751 : *
752 : * Returns NULL for unknown ID
753 : */
754 : DumpableObject *
755 3861546 : findObjectByCatalogId(CatalogId catalogId)
756 : {
757 : CatalogIdMapEntry *entry;
758 :
759 3861546 : if (catalogIdHash == NULL)
760 0 : return NULL; /* no objects exist yet */
761 :
762 3861546 : entry = catalogid_lookup(catalogIdHash, catalogId);
763 3861546 : if (entry == NULL)
764 1100648 : return NULL;
765 2760898 : return entry->dobj;
766 : }
767 :
768 : /*
769 : * Build an array of pointers to all known dumpable objects
770 : *
771 : * This simply creates a modifiable copy of the internal map.
772 : */
773 : void
774 320 : getDumpableObjects(DumpableObject ***objs, int *numObjs)
775 : {
776 : int i,
777 : j;
778 :
779 320 : *objs = pg_malloc_array(DumpableObject *, allocedDumpIds);
780 320 : j = 0;
781 1441792 : for (i = 1; i < allocedDumpIds; i++)
782 : {
783 1441472 : if (dumpIdMap[i])
784 1168550 : (*objs)[j++] = dumpIdMap[i];
785 : }
786 320 : *numObjs = j;
787 320 : }
788 :
789 : /*
790 : * Add a dependency link to a DumpableObject
791 : *
792 : * Note: duplicate dependencies are currently not eliminated
793 : */
794 : void
795 1778728 : addObjectDependency(DumpableObject *dobj, DumpId refId)
796 : {
797 1778728 : if (dobj->nDeps >= dobj->allocDeps)
798 : {
799 297202 : if (dobj->allocDeps <= 0)
800 : {
801 289416 : dobj->allocDeps = 16;
802 289416 : dobj->dependencies = pg_malloc_array(DumpId, dobj->allocDeps);
803 : }
804 : else
805 : {
806 7786 : dobj->allocDeps *= 2;
807 7786 : dobj->dependencies = pg_realloc_array(dobj->dependencies,
808 : DumpId, dobj->allocDeps);
809 : }
810 : }
811 1778728 : dobj->dependencies[dobj->nDeps++] = refId;
812 1778728 : }
813 :
814 : /*
815 : * Remove a dependency link from a DumpableObject
816 : *
817 : * If there are multiple links, all are removed
818 : */
819 : void
820 50562 : removeObjectDependency(DumpableObject *dobj, DumpId refId)
821 : {
822 : int i;
823 50562 : int j = 0;
824 :
825 1226368 : for (i = 0; i < dobj->nDeps; i++)
826 : {
827 1175806 : if (dobj->dependencies[i] != refId)
828 1123178 : dobj->dependencies[j++] = dobj->dependencies[i];
829 : }
830 50562 : dobj->nDeps = j;
831 50562 : }
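Taken together with addObjectDependency() above, the typical pattern looks like the hedged sketch below (attachinfo, tbl and idx are illustrative variables): links are appended freely, duplicates included, and removeObjectDependency() later strips every link to a given referent in one pass.

    /* Illustrative only: record two dependencies, then drop one of them. */
    addObjectDependency(&attachinfo->dobj, tbl->dobj.dumpId);
    addObjectDependency(&attachinfo->dobj, idx->dobj.dumpId);

    /* ... later, if the link to the table turns out to be unwanted ... */
    removeObjectDependency(&attachinfo->dobj, tbl->dobj.dumpId);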
832 :
833 :
834 : /*
835 : * findTableByOid
836 : * finds the DumpableObject for the table with the given oid
837 : * returns NULL if not found
838 : */
839 : TableInfo *
840 138266 : findTableByOid(Oid oid)
841 : {
842 : CatalogId catId;
843 : DumpableObject *dobj;
844 :
845 138266 : catId.tableoid = RelationRelationId;
846 138266 : catId.oid = oid;
847 138266 : dobj = findObjectByCatalogId(catId);
848 : Assert(dobj == NULL || dobj->objType == DO_TABLE);
849 138266 : return (TableInfo *) dobj;
850 : }
851 :
852 : /*
853 : * findIndexByOid
854 : * finds the DumpableObject for the index with the given oid
855 : * returns NULL if not found
856 : */
857 : static IndxInfo *
858 1096 : findIndexByOid(Oid oid)
859 : {
860 : CatalogId catId;
861 : DumpableObject *dobj;
862 :
863 1096 : catId.tableoid = RelationRelationId;
864 1096 : catId.oid = oid;
865 1096 : dobj = findObjectByCatalogId(catId);
866 : Assert(dobj == NULL || dobj->objType == DO_INDEX);
867 1096 : return (IndxInfo *) dobj;
868 : }
869 :
870 : /*
871 : * findTypeByOid
872 : * finds the DumpableObject for the type with the given oid
873 : * returns NULL if not found
874 : */
875 : TypeInfo *
876 144448 : findTypeByOid(Oid oid)
877 : {
878 : CatalogId catId;
879 : DumpableObject *dobj;
880 :
881 144448 : catId.tableoid = TypeRelationId;
882 144448 : catId.oid = oid;
883 144448 : dobj = findObjectByCatalogId(catId);
884 : Assert(dobj == NULL ||
885 : dobj->objType == DO_TYPE || dobj->objType == DO_DUMMY_TYPE);
886 144448 : return (TypeInfo *) dobj;
887 : }
888 :
889 : /*
890 : * findFuncByOid
891 : * finds the DumpableObject for the function with the given oid
892 : * returns NULL if not found
893 : */
894 : FuncInfo *
895 520 : findFuncByOid(Oid oid)
896 : {
897 : CatalogId catId;
898 : DumpableObject *dobj;
899 :
900 520 : catId.tableoid = ProcedureRelationId;
901 520 : catId.oid = oid;
902 520 : dobj = findObjectByCatalogId(catId);
903 : Assert(dobj == NULL || dobj->objType == DO_FUNC);
904 520 : return (FuncInfo *) dobj;
905 : }
906 :
907 : /*
908 : * findOprByOid
909 : * finds the DumpableObject for the operator with the given oid
910 : * returns NULL if not found
911 : */
912 : OprInfo *
913 5648 : findOprByOid(Oid oid)
914 : {
915 : CatalogId catId;
916 : DumpableObject *dobj;
917 :
918 5648 : catId.tableoid = OperatorRelationId;
919 5648 : catId.oid = oid;
920 5648 : dobj = findObjectByCatalogId(catId);
921 : Assert(dobj == NULL || dobj->objType == DO_OPERATOR);
922 5648 : return (OprInfo *) dobj;
923 : }
924 :
925 : /*
926 : * findCollationByOid
927 : * finds the DumpableObject for the collation with the given oid
928 : * returns NULL if not found
929 : */
930 : CollInfo *
931 536 : findCollationByOid(Oid oid)
932 : {
933 : CatalogId catId;
934 : DumpableObject *dobj;
935 :
936 536 : catId.tableoid = CollationRelationId;
937 536 : catId.oid = oid;
938 536 : dobj = findObjectByCatalogId(catId);
939 : Assert(dobj == NULL || dobj->objType == DO_COLLATION);
940 536 : return (CollInfo *) dobj;
941 : }
942 :
943 : /*
944 : * findNamespaceByOid
945 : * finds the DumpableObject for the namespace with the given oid
946 : * returns NULL if not found
947 : */
948 : NamespaceInfo *
949 963656 : findNamespaceByOid(Oid oid)
950 : {
951 : CatalogId catId;
952 : DumpableObject *dobj;
953 :
954 963656 : catId.tableoid = NamespaceRelationId;
955 963656 : catId.oid = oid;
956 963656 : dobj = findObjectByCatalogId(catId);
957 : Assert(dobj == NULL || dobj->objType == DO_NAMESPACE);
958 963656 : return (NamespaceInfo *) dobj;
959 : }
960 :
961 : /*
962 : * findExtensionByOid
963 : * finds the DumpableObject for the extension with the given oid
964 : * returns NULL if not found
965 : */
966 : ExtensionInfo *
967 360 : findExtensionByOid(Oid oid)
968 : {
969 : CatalogId catId;
970 : DumpableObject *dobj;
971 :
972 360 : catId.tableoid = ExtensionRelationId;
973 360 : catId.oid = oid;
974 360 : dobj = findObjectByCatalogId(catId);
975 : Assert(dobj == NULL || dobj->objType == DO_EXTENSION);
976 360 : return (ExtensionInfo *) dobj;
977 : }
978 :
979 : /*
980 : * findPublicationByOid
981 : * finds the DumpableObject for the publication with the given oid
982 : * returns NULL if not found
983 : */
984 : PublicationInfo *
985 774 : findPublicationByOid(Oid oid)
986 : {
987 : CatalogId catId;
988 : DumpableObject *dobj;
989 :
990 774 : catId.tableoid = PublicationRelationId;
991 774 : catId.oid = oid;
992 774 : dobj = findObjectByCatalogId(catId);
993 : Assert(dobj == NULL || dobj->objType == DO_PUBLICATION);
994 774 : return (PublicationInfo *) dobj;
995 : }
996 :
997 : /*
998 : * findSubscriptionByOid
999 : * finds the DumpableObject for the subscription with the given oid
1000 : * returns NULL if not found
1001 : */
1002 : SubscriptionInfo *
1003 4 : findSubscriptionByOid(Oid oid)
1004 : {
1005 : CatalogId catId;
1006 : DumpableObject *dobj;
1007 :
1008 4 : catId.tableoid = SubscriptionRelationId;
1009 4 : catId.oid = oid;
1010 4 : dobj = findObjectByCatalogId(catId);
1011 : Assert(dobj == NULL || dobj->objType == DO_SUBSCRIPTION);
1012 4 : return (SubscriptionInfo *) dobj;
1013 : }
1014 :
1015 :
1016 : /*
1017 : * recordExtensionMembership
1018 : * Record that the object identified by the given catalog ID
1019 : * belongs to the given extension
1020 : */
1021 : void
1022 2440 : recordExtensionMembership(CatalogId catId, ExtensionInfo *ext)
1023 : {
1024 : CatalogIdMapEntry *entry;
1025 : bool found;
1026 :
1027 : /* CatalogId hash table must exist, if we have an ExtensionInfo */
1028 : Assert(catalogIdHash != NULL);
1029 :
1030 : /* Add reference to CatalogId hash */
1031 2440 : entry = catalogid_insert(catalogIdHash, catId, &found);
1032 2440 : if (!found)
1033 : {
1034 2440 : entry->dobj = NULL;
1035 2440 : entry->ext = NULL;
1036 : }
1037 : Assert(entry->ext == NULL);
1038 2440 : entry->ext = ext;
1039 2440 : }
1040 :
1041 : /*
1042 : * findOwningExtension
1043 : * return owning extension for specified catalog ID, or NULL if none
1044 : */
1045 : ExtensionInfo *
1046 959582 : findOwningExtension(CatalogId catalogId)
1047 : {
1048 : CatalogIdMapEntry *entry;
1049 :
1050 959582 : if (catalogIdHash == NULL)
1051 0 : return NULL; /* no objects exist yet */
1052 :
1053 959582 : entry = catalogid_lookup(catalogIdHash, catalogId);
1054 959582 : if (entry == NULL)
1055 0 : return NULL;
1056 959582 : return entry->ext;
1057 : }
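recordExtensionMembership() above and findOwningExtension() are two halves of one mapping: ownership is recorded while reading pg_depend, before most DumpableObjects exist, and queried later when deciding how an object should be dumped. The helper below is hypothetical, shown only to illustrate the lookup side.

    /* Hypothetical helper: does this object's catalog row belong to an extension? */
    static bool
    is_extension_member(const DumpableObject *dobj)
    {
        return findOwningExtension(dobj->catId) != NULL;
    }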
1058 :
1059 :
1060 : /*
1061 : * parseOidArray
1062 : * parse a string of numbers delimited by spaces into an Oid array
1063 : *
1064 : * Note: actually this is used for both Oids and potentially-signed
1065 : * attribute numbers. This should cause no trouble, but we could split
1066 : * the function into two functions with different argument types if it does.
1067 : */
1068 :
1069 : void
1070 11984 : parseOidArray(const char *str, Oid *array, int arraysize)
1071 : {
1072 : int j,
1073 : argNum;
1074 : char temp[100];
1075 : char s;
1076 :
1077 11984 : argNum = 0;
1078 11984 : j = 0;
1079 : for (;;)
1080 : {
1081 56672 : s = *str++;
1082 56672 : if (s == ' ' || s == '\0')
1083 : {
1084 19200 : if (j > 0)
1085 : {
1086 19200 : if (argNum >= arraysize)
1087 0 : pg_fatal("could not parse numeric array \"%s\": too many numbers", str);
1088 19200 : temp[j] = '\0';
1089 19200 : array[argNum++] = atooid(temp);
1090 19200 : j = 0;
1091 : }
1092 19200 : if (s == '\0')
1093 11984 : break;
1094 : }
1095 : else
1096 : {
1097 37472 : if (!(isdigit((unsigned char) s) || s == '-') ||
1098 37472 : j >= sizeof(temp) - 1)
1099 0 : pg_fatal("could not parse numeric array \"%s\": invalid character in number", str);
1100 37472 : temp[j++] = s;
1101 : }
1102 : }
1103 :
1104 11984 : while (argNum < arraysize)
1105 0 : array[argNum++] = InvalidOid;
1106 11984 : }
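A hedged usage sketch (the values are made up): pg_dump typically feeds this function a space-separated vector fetched as text, for example a proargtypes column, and any slots beyond the parsed numbers are padded with InvalidOid.

    /* Illustrative only: parse three OIDs into a five-element array. */
    Oid         argtypes[5];

    parseOidArray("23 25 16", argtypes, 5);
    /* argtypes is now {23, 25, 16, InvalidOid, InvalidOid} */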
1107 :
1108 :
1109 : /*
1110 : * strInArray:
1111 : * takes a string, a string array, and the number of elements in the
1112 : * string array.
1113 : * returns the index of the string if it is found in the array, -1 otherwise
1114 : */
1115 :
1116 : static int
1117 7994 : strInArray(const char *pattern, char **arr, int arr_size)
1118 : {
1119 : int i;
1120 :
1121 16010 : for (i = 0; i < arr_size; i++)
1122 : {
1123 15560 : if (strcmp(pattern, arr[i]) == 0)
1124 7544 : return i;
1125 : }
1126 450 : return -1;
1127 : }
|