Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * common.c
4 : * Catalog routines used by pg_dump; long ago these were shared
5 : * by another dump tool, but not anymore.
6 : *
7 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : *
11 : * IDENTIFICATION
12 : * src/bin/pg_dump/common.c
13 : *
14 : *-------------------------------------------------------------------------
15 : */
16 : #include "postgres_fe.h"
17 :
18 : #include <ctype.h>
19 :
20 : #include "catalog/pg_class_d.h"
21 : #include "catalog/pg_collation_d.h"
22 : #include "catalog/pg_extension_d.h"
23 : #include "catalog/pg_namespace_d.h"
24 : #include "catalog/pg_operator_d.h"
25 : #include "catalog/pg_proc_d.h"
26 : #include "catalog/pg_publication_d.h"
27 : #include "catalog/pg_subscription_d.h"
28 : #include "catalog/pg_type_d.h"
29 : #include "common/hashfn.h"
30 : #include "pg_backup_utils.h"
31 : #include "pg_dump.h"
32 :
33 : /*
34 : * Variables for mapping DumpId to DumpableObject
35 : */
36 : static DumpableObject **dumpIdMap = NULL;
37 : static int allocedDumpIds = 0;
38 : static DumpId lastDumpId = 0; /* Note: 0 is InvalidDumpId */
39 :
40 : /*
41 : * Infrastructure for mapping CatalogId to DumpableObject
42 : *
43 : * We use a hash table generated by simplehash.h. That infrastructure
44 : * requires all the hash table entries to be the same size, and it also
45 : * expects that it can move them around when resizing the table. So we
46 : * cannot make the DumpableObjects be elements of the hash table directly;
47 : * instead, the hash table elements contain pointers to DumpableObjects.
48 : * This does have the advantage of letting us map multiple CatalogIds
49 : * to one DumpableObject, which is useful for blobs.
50 : *
51 : * It turns out to be convenient to also use this data structure to map
52 : * CatalogIds to owning extensions, if any. Since extension membership
53 : * data is read before creating most DumpableObjects, either one of dobj
54 : * and ext could be NULL.
55 : */
56 : typedef struct _catalogIdMapEntry
57 : {
58 : CatalogId catId; /* the indexed CatalogId */
59 : uint32 status; /* hash status */
60 : uint32 hashval; /* hash code for the CatalogId */
61 : DumpableObject *dobj; /* the associated DumpableObject, if any */
62 : ExtensionInfo *ext; /* owning extension, if any */
63 : } CatalogIdMapEntry;
64 :
65 : #define SH_PREFIX catalogid
66 : #define SH_ELEMENT_TYPE CatalogIdMapEntry
67 : #define SH_KEY_TYPE CatalogId
68 : #define SH_KEY catId
69 : #define SH_HASH_KEY(tb, key) hash_bytes((const unsigned char *) &(key), sizeof(CatalogId))
70 : #define SH_EQUAL(tb, a, b) ((a).oid == (b).oid && (a).tableoid == (b).tableoid)
71 : #define SH_STORE_HASH
72 : #define SH_GET_HASH(tb, a) (a)->hashval
73 : #define SH_SCOPE static inline
74 : #define SH_RAW_ALLOCATOR pg_malloc0
75 : #define SH_DECLARE
76 : #define SH_DEFINE
77 : #include "lib/simplehash.h"
78 :
79 : #define CATALOGIDHASH_INITIAL_SIZE 10000
80 :
81 : static catalogid_hash *catalogIdHash = NULL;
82 :
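/*
 * Illustrative sketch (added for exposition; not part of the original file):
 * the SH_* macros above make simplehash.h emit the catalogid_hash type
 * together with catalogid_create(), catalogid_insert(), and
 * catalogid_lookup(), which are the only generated functions this file uses.
 * The insert-or-update pattern below mirrors AssignDumpId() and
 * recordExtensionMembership() further down.
 */
#ifdef CATALOGID_HASH_EXAMPLE	/* never defined; example is compiled out */
static void
catalogid_hash_example(CatalogId catId, DumpableObject *dobj)
{
	CatalogIdMapEntry *entry;
	bool		found;

	/* create the table lazily, as AssignDumpId() does */
	if (catalogIdHash == NULL)
		catalogIdHash = catalogid_create(CATALOGIDHASH_INITIAL_SIZE, NULL);

	/* insert returns the existing entry if the key is already present */
	entry = catalogid_insert(catalogIdHash, catId, &found);
	if (!found)
	{
		/* fresh entry: both payload pointers start out unset */
		entry->dobj = NULL;
		entry->ext = NULL;
	}
	entry->dobj = dobj;

	/* lookups of a never-inserted CatalogId return NULL instead */
	Assert(catalogid_lookup(catalogIdHash, catId) == entry);
}
#endif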
83 : static void flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
84 : InhInfo *inhinfo, int numInherits);
85 : static void flagInhIndexes(Archive *fout, TableInfo *tblinfo, int numTables);
86 : static void flagInhAttrs(Archive *fout, DumpOptions *dopt, TableInfo *tblinfo,
87 : int numTables);
88 : static int strInArray(const char *pattern, char **arr, int arr_size);
89 : static IndxInfo *findIndexByOid(Oid oid);
90 :
91 :
92 : /*
93 : * getSchemaData
94 : * Collect information about all potentially dumpable objects
95 : */
96 : TableInfo *
97 470 : getSchemaData(Archive *fout, int *numTablesPtr)
98 : {
99 : TableInfo *tblinfo;
100 : ExtensionInfo *extinfo;
101 : InhInfo *inhinfo;
102 : int numTables;
103 : int numExtensions;
104 : int numInherits;
105 :
106 : /*
107 : * We must read extensions and extension membership info first, because
108 : * extension membership needs to be consultable during decisions about
109 : * whether other objects are to be dumped.
110 : */
111 470 : pg_log_info("reading extensions");
112 470 : extinfo = getExtensions(fout, &numExtensions);
113 :
114 470 : pg_log_info("identifying extension members");
115 470 : getExtensionMembership(fout, extinfo, numExtensions);
116 :
117 470 : pg_log_info("reading schemas");
118 470 : getNamespaces(fout);
119 :
120 : /*
121 : * getTables should be done as soon as possible, so as to minimize the
122 : * window between starting our transaction and acquiring per-table locks.
123 : * However, we have to do getNamespaces first because the tables get
124 : * linked to their containing namespaces during getTables.
125 : */
126 470 : pg_log_info("reading user-defined tables");
127 470 : tblinfo = getTables(fout, &numTables);
128 :
129 468 : getOwnedSeqs(fout, tblinfo, numTables);
130 :
131 468 : pg_log_info("reading user-defined functions");
132 468 : getFuncs(fout);
133 :
134 : /* this must be after getTables and getFuncs */
135 468 : pg_log_info("reading user-defined types");
136 468 : getTypes(fout);
137 :
138 : /* this must be after getFuncs, too */
139 468 : pg_log_info("reading procedural languages");
140 468 : getProcLangs(fout);
141 :
142 468 : pg_log_info("reading user-defined aggregate functions");
143 468 : getAggregates(fout);
144 :
145 468 : pg_log_info("reading user-defined operators");
146 468 : getOperators(fout);
147 :
148 468 : pg_log_info("reading user-defined access methods");
149 468 : getAccessMethods(fout);
150 :
151 468 : pg_log_info("reading user-defined operator classes");
152 468 : getOpclasses(fout);
153 :
154 468 : pg_log_info("reading user-defined operator families");
155 468 : getOpfamilies(fout);
156 :
157 468 : pg_log_info("reading user-defined text search parsers");
158 468 : getTSParsers(fout);
159 :
160 468 : pg_log_info("reading user-defined text search templates");
161 468 : getTSTemplates(fout);
162 :
163 468 : pg_log_info("reading user-defined text search dictionaries");
164 468 : getTSDictionaries(fout);
165 :
166 468 : pg_log_info("reading user-defined text search configurations");
167 468 : getTSConfigurations(fout);
168 :
169 468 : pg_log_info("reading user-defined foreign-data wrappers");
170 468 : getForeignDataWrappers(fout);
171 :
172 468 : pg_log_info("reading user-defined foreign servers");
173 468 : getForeignServers(fout);
174 :
175 468 : pg_log_info("reading default privileges");
176 468 : getDefaultACLs(fout);
177 :
178 468 : pg_log_info("reading user-defined collations");
179 468 : getCollations(fout);
180 :
181 468 : pg_log_info("reading user-defined conversions");
182 468 : getConversions(fout);
183 :
184 468 : pg_log_info("reading type casts");
185 468 : getCasts(fout);
186 :
187 468 : pg_log_info("reading transforms");
188 468 : getTransforms(fout);
189 :
190 468 : pg_log_info("reading table inheritance information");
191 468 : inhinfo = getInherits(fout, &numInherits);
192 :
193 468 : pg_log_info("reading event triggers");
194 468 : getEventTriggers(fout);
195 :
196 : /* Identify extension configuration tables that should be dumped */
197 468 : pg_log_info("finding extension tables");
198 468 : processExtensionTables(fout, extinfo, numExtensions);
199 :
200 : /* Link tables to parents, mark parents of target tables interesting */
201 468 : pg_log_info("finding inheritance relationships");
202 468 : flagInhTables(fout, tblinfo, numTables, inhinfo, numInherits);
203 :
204 468 : pg_log_info("reading column info for interesting tables");
205 468 : getTableAttrs(fout, tblinfo, numTables);
206 :
207 468 : pg_log_info("flagging inherited columns in subtables");
208 468 : flagInhAttrs(fout, fout->dopt, tblinfo, numTables);
209 :
210 468 : pg_log_info("reading partitioning data");
211 468 : getPartitioningInfo(fout);
212 :
213 468 : pg_log_info("reading indexes");
214 468 : getIndexes(fout, tblinfo, numTables);
215 :
216 468 : pg_log_info("flagging indexes in partitioned tables");
217 468 : flagInhIndexes(fout, tblinfo, numTables);
218 :
219 468 : pg_log_info("reading extended statistics");
220 468 : getExtendedStatistics(fout);
221 :
222 468 : pg_log_info("reading constraints");
223 468 : getConstraints(fout, tblinfo, numTables);
224 :
225 468 : pg_log_info("reading triggers");
226 468 : getTriggers(fout, tblinfo, numTables);
227 :
228 468 : pg_log_info("reading rewrite rules");
229 468 : getRules(fout);
230 :
231 468 : pg_log_info("reading policies");
232 468 : getPolicies(fout, tblinfo, numTables);
233 :
234 468 : pg_log_info("reading publications");
235 468 : getPublications(fout);
236 :
237 468 : pg_log_info("reading publication membership of tables");
238 468 : getPublicationTables(fout, tblinfo, numTables);
239 :
240 468 : pg_log_info("reading publication membership of schemas");
241 468 : getPublicationNamespaces(fout);
242 :
243 468 : pg_log_info("reading subscriptions");
244 468 : getSubscriptions(fout);
245 :
246 468 : pg_log_info("reading subscription membership of tables");
247 468 : getSubscriptionTables(fout);
248 :
249 468 : free(inhinfo); /* not needed any longer */
250 :
251 468 : *numTablesPtr = numTables;
252 468 : return tblinfo;
253 : }
254 :
255 : /* flagInhTables -
256 : * Fill in parent link fields of tables for which we need that information,
257 : * mark parents of target tables as interesting, and create
258 : * TableAttachInfo objects for partitioned tables with appropriate
259 : * dependency links.
260 : *
261 : * Note that only direct ancestors of targets are marked interesting.
262 : * This is sufficient; we don't much care whether they inherited their
263 : * attributes or not.
264 : *
265 : * modifies tblinfo
266 : */
267 : static void
268 468 : flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
269 : InhInfo *inhinfo, int numInherits)
270 : {
271 468 : TableInfo *child = NULL;
272 468 : TableInfo *parent = NULL;
273 : int i,
274 : j;
275 :
276 : /*
277 : * Set up links from child tables to their parents.
278 : *
279 : * We used to attempt to skip this work for tables that are not to be
280 : * dumped; but the optimizable cases are rare in practice, and setting up
281 : * these links in bulk is cheaper than the old way. (Note in particular
282 : * that it's very rare for a child to have more than one parent.)
283 : */
284 9066 : for (i = 0; i < numInherits; i++)
285 : {
286 : /*
 287              :          * Skip a hashtable lookup if it's the same table as last time.  This is
288 : * unlikely for the child, but less so for the parent. (Maybe we
289 : * should ask the backend for a sorted array to make it more likely?
290 : * Not clear the sorting effort would be repaid, though.)
291 : */
292 8598 : if (child == NULL ||
293 6682 : child->dobj.catId.oid != inhinfo[i].inhrelid)
294 : {
295 8328 : child = findTableByOid(inhinfo[i].inhrelid);
296 :
297 : /*
298 : * If we find no TableInfo, assume the pg_inherits entry is for a
299 : * partitioned index, which we don't need to track.
300 : */
301 8328 : if (child == NULL)
302 1768 : continue;
303 : }
304 6830 : if (parent == NULL ||
305 6674 : parent->dobj.catId.oid != inhinfo[i].inhparent)
306 : {
307 3856 : parent = findTableByOid(inhinfo[i].inhparent);
308 3856 : if (parent == NULL)
309 0 : pg_fatal("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
310 : inhinfo[i].inhparent,
311 : child->dobj.name,
312 : child->dobj.catId.oid);
313 : }
314 : /* Add this parent to the child's list of parents. */
315 6830 : if (child->numParents > 0)
316 278 : child->parents = pg_realloc_array(child->parents,
317 : TableInfo *,
318 : child->numParents + 1);
319 : else
320 6552 : child->parents = pg_malloc_array(TableInfo *, 1);
321 6830 : child->parents[child->numParents++] = parent;
322 : }
323 :
324 : /*
325 : * Now consider all child tables and mark parents interesting as needed.
326 : */
327 123652 : for (i = 0; i < numTables; i++)
328 : {
329 : /*
330 : * If needed, mark the parents as interesting for getTableAttrs and
331 : * getIndexes. We only need this for direct parents of dumpable
332 : * tables.
333 : */
334 123184 : if (tblinfo[i].dobj.dump)
335 : {
336 80640 : int numParents = tblinfo[i].numParents;
337 80640 : TableInfo **parents = tblinfo[i].parents;
338 :
339 86182 : for (j = 0; j < numParents; j++)
340 5542 : parents[j]->interesting = true;
341 : }
342 :
343 : /* Create TableAttachInfo object if needed */
344 123184 : if ((tblinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
345 19260 : tblinfo[i].ispartition)
346 : {
347 : TableAttachInfo *attachinfo;
348 :
349 : /* With partitions there can only be one parent */
350 3900 : if (tblinfo[i].numParents != 1)
351 0 : pg_fatal("invalid number of parents %d for table \"%s\"",
352 : tblinfo[i].numParents,
353 : tblinfo[i].dobj.name);
354 :
355 3900 : attachinfo = (TableAttachInfo *) palloc(sizeof(TableAttachInfo));
356 3900 : attachinfo->dobj.objType = DO_TABLE_ATTACH;
357 3900 : attachinfo->dobj.catId.tableoid = 0;
358 3900 : attachinfo->dobj.catId.oid = 0;
359 3900 : AssignDumpId(&attachinfo->dobj);
360 3900 : attachinfo->dobj.name = pg_strdup(tblinfo[i].dobj.name);
361 3900 : attachinfo->dobj.namespace = tblinfo[i].dobj.namespace;
362 3900 : attachinfo->parentTbl = tblinfo[i].parents[0];
363 3900 : attachinfo->partitionTbl = &tblinfo[i];
364 :
365 : /*
366 : * We must state the DO_TABLE_ATTACH object's dependencies
367 : * explicitly, since it will not match anything in pg_depend.
368 : *
369 : * Give it dependencies on both the partition table and the parent
370 : * table, so that it will not be executed till both of those
371 : * exist. (There's no need to care what order those are created
372 : * in.)
373 : */
374 3900 : addObjectDependency(&attachinfo->dobj, tblinfo[i].dobj.dumpId);
375 3900 : addObjectDependency(&attachinfo->dobj, tblinfo[i].parents[0]->dobj.dumpId);
376 : }
377 : }
378 468 : }
379 :
380 : /*
381 : * flagInhIndexes -
382 : * Create IndexAttachInfo objects for partitioned indexes, and add
383 : * appropriate dependency links.
384 : */
385 : static void
386 468 : flagInhIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
387 : {
388 : int i,
389 : j;
390 :
391 123652 : for (i = 0; i < numTables; i++)
392 : {
393 123184 : if (!tblinfo[i].ispartition || tblinfo[i].numParents == 0)
394 118374 : continue;
395 :
396 : Assert(tblinfo[i].numParents == 1);
397 :
398 6466 : for (j = 0; j < tblinfo[i].numIndexes; j++)
399 : {
400 1656 : IndxInfo *index = &(tblinfo[i].indexes[j]);
401 : IndxInfo *parentidx;
402 : IndexAttachInfo *attachinfo;
403 :
404 1656 : if (index->parentidx == 0)
405 176 : continue;
406 :
407 1480 : parentidx = findIndexByOid(index->parentidx);
408 1480 : if (parentidx == NULL)
409 0 : continue;
410 :
411 1480 : attachinfo = pg_malloc_object(IndexAttachInfo);
412 :
413 1480 : attachinfo->dobj.objType = DO_INDEX_ATTACH;
414 1480 : attachinfo->dobj.catId.tableoid = 0;
415 1480 : attachinfo->dobj.catId.oid = 0;
416 1480 : AssignDumpId(&attachinfo->dobj);
417 1480 : attachinfo->dobj.name = pg_strdup(index->dobj.name);
418 1480 : attachinfo->dobj.namespace = index->indextable->dobj.namespace;
419 1480 : attachinfo->parentIdx = parentidx;
420 1480 : attachinfo->partitionIdx = index;
421 :
422 : /*
423 : * We must state the DO_INDEX_ATTACH object's dependencies
424 : * explicitly, since it will not match anything in pg_depend.
425 : *
426 : * Give it dependencies on both the partition index and the parent
427 : * index, so that it will not be executed till both of those
428 : * exist. (There's no need to care what order those are created
429 : * in.)
430 : *
431 : * In addition, give it dependencies on the indexes' underlying
432 : * tables. This does nothing of great value so far as serial
433 : * restore ordering goes, but it ensures that a parallel restore
434 : * will not try to run the ATTACH concurrently with other
435 : * operations on those tables.
436 : */
437 1480 : addObjectDependency(&attachinfo->dobj, index->dobj.dumpId);
438 1480 : addObjectDependency(&attachinfo->dobj, parentidx->dobj.dumpId);
439 1480 : addObjectDependency(&attachinfo->dobj,
440 1480 : index->indextable->dobj.dumpId);
441 1480 : addObjectDependency(&attachinfo->dobj,
442 1480 : parentidx->indextable->dobj.dumpId);
443 :
444 : /* keep track of the list of partitions in the parent index */
445 1480 : simple_ptr_list_append(&parentidx->partattaches, &attachinfo->dobj);
446 : }
447 : }
448 468 : }
449 :
450 : /* flagInhAttrs -
451 : * for each dumpable table in tblinfo, flag its inherited attributes
452 : *
453 : * What we need to do here is:
454 : *
455 : * - Detect child columns that inherit NOT NULL bits from their parents, so
456 : * that we needn't specify that again for the child. For versions 18 and
457 : * up, this is needed when the parent is NOT VALID and the child isn't.
458 : *
459 : * - Detect child columns that have DEFAULT NULL when their parents had some
460 : * non-null default. In this case, we make up a dummy AttrDefInfo object so
461 : * that we'll correctly emit the necessary DEFAULT NULL clause; otherwise
462 : * the backend will apply an inherited default to the column.
463 : *
464 : * - Detect child columns that have a generation expression and all their
465 : * parents also have the same generation expression, and if so suppress the
466 : * child's expression. The child will inherit the generation expression
467 : * automatically, so there's no need to dump it. This improves the dump's
468 : * compatibility with pre-v16 servers, which didn't allow the child's
469 : * expression to be given explicitly. Exceptions: If it's a partition or
470 : * we are in binary upgrade mode, we dump such expressions anyway because
471 : * in those cases inherited tables are recreated standalone first and then
472 : * reattached to the parent. (See also the logic in dumpTableSchema().)
473 : *
474 : * modifies tblinfo
475 : */
476 : static void
477 468 : flagInhAttrs(Archive *fout, DumpOptions *dopt, TableInfo *tblinfo, int numTables)
478 : {
479 : int i,
480 : j,
481 : k;
482 :
483 : /*
484 : * We scan the tables in OID order, since that's how tblinfo[] is sorted.
485 : * Hence we will typically visit parents before their children --- but
486 : * that is *not* guaranteed. Thus this loop must be careful that it does
487 : * not alter table properties in a way that could change decisions made at
488 : * child tables during other iterations.
489 : */
490 123652 : for (i = 0; i < numTables; i++)
491 : {
492 123184 : TableInfo *tbinfo = &(tblinfo[i]);
493 : int numParents;
494 : TableInfo **parents;
495 :
496 : /* Some kinds never have parents */
497 123184 : if (tbinfo->relkind == RELKIND_SEQUENCE ||
498 121552 : tbinfo->relkind == RELKIND_VIEW ||
499 52594 : tbinfo->relkind == RELKIND_MATVIEW)
500 71730 : continue;
501 :
502 : /* Don't bother computing anything for non-target tables, either */
503 51454 : if (!tbinfo->dobj.dump)
504 8390 : continue;
505 :
506 43064 : numParents = tbinfo->numParents;
507 43064 : parents = tbinfo->parents;
508 :
509 43064 : if (numParents == 0)
510 37750 : continue; /* nothing to see here, move along */
511 :
512 : /* For each column, search for matching column names in parent(s) */
513 18782 : for (j = 0; j < tbinfo->numatts; j++)
514 : {
515 : bool foundNotNull; /* Attr was NOT NULL in a parent */
516 : bool foundDefault; /* Found a default in a parent */
517 : bool foundSameGenerated; /* Found matching GENERATED */
518 : bool foundDiffGenerated; /* Found non-matching GENERATED */
519 13468 : bool allNotNullsInvalid = true; /* is NOT NULL NOT VALID
520 : * on all parents? */
521 :
522 : /* no point in examining dropped columns */
523 13468 : if (tbinfo->attisdropped[j])
524 854 : continue;
525 :
526 12614 : foundNotNull = false;
527 12614 : foundDefault = false;
528 12614 : foundSameGenerated = false;
529 12614 : foundDiffGenerated = false;
530 25856 : for (k = 0; k < numParents; k++)
531 : {
532 13242 : TableInfo *parent = parents[k];
533 : int inhAttrInd;
534 :
535 13242 : inhAttrInd = strInArray(tbinfo->attnames[j],
536 : parent->attnames,
537 : parent->numatts);
538 13242 : if (inhAttrInd >= 0)
539 : {
540 12474 : AttrDefInfo *parentDef = parent->attrdefs[inhAttrInd];
541 :
542 : /*
543 : * Account for each parent having a not-null constraint.
544 : * In versions 18 and later, we don't need this (and those
545 : * didn't have NO INHERIT.)
546 : */
547 12474 : if (fout->remoteVersion < 180000 &&
548 0 : parent->notnull_constrs[inhAttrInd] != NULL)
549 0 : foundNotNull = true;
550 :
551 : /*
552 : * Keep track of whether all the parents that have a
553 : * not-null constraint on this column have it as NOT
554 : * VALID; if they all are, arrange to have it printed for
555 : * this column. If at least one parent has it as valid,
556 : * there's no need.
557 : */
558 12474 : if (fout->remoteVersion >= 180000 &&
559 12474 : parent->notnull_constrs[inhAttrInd] &&
560 2304 : !parent->notnull_invalid[inhAttrInd])
561 2304 : allNotNullsInvalid = false;
562 :
563 26268 : foundDefault |= (parentDef != NULL &&
564 13634 : strcmp(parentDef->adef_expr, "NULL") != 0 &&
565 1160 : !parent->attgenerated[inhAttrInd]);
566 12474 : if (parent->attgenerated[inhAttrInd])
567 : {
568 : /* these pointer nullness checks are just paranoia */
569 720 : if (parentDef != NULL &&
570 672 : tbinfo->attrdefs[j] != NULL &&
571 672 : strcmp(parentDef->adef_expr,
572 672 : tbinfo->attrdefs[j]->adef_expr) == 0)
573 576 : foundSameGenerated = true;
574 : else
575 144 : foundDiffGenerated = true;
576 : }
577 : }
578 : }
579 :
580 : /*
581 : * In versions < 18, for lack of a better system, we arbitrarily
582 : * decide that a not-null constraint is not locally defined if at
583 : * least one of the parents has it.
584 : */
585 12614 : if (fout->remoteVersion < 180000 && foundNotNull)
586 0 : tbinfo->notnull_islocal[j] = false;
587 :
588 : /*
 589              :              * For versions >= 18, we must print the not-null constraint locally
 590              :              * for this table even if it isn't really locally defined, provided it
 591              :              * is valid for the child and no parent has it as valid.
592 : */
593 12614 : if (fout->remoteVersion >= 180000 && allNotNullsInvalid)
594 10326 : tbinfo->notnull_islocal[j] = true;
595 :
596 : /*
597 : * Manufacture a DEFAULT NULL clause if necessary. This breaks
598 : * the advice given above to avoid changing state that might get
599 : * inspected in other loop iterations. We prevent trouble by
600 : * having the foundDefault test above check whether adef_expr is
601 : * "NULL", so that it will reach the same conclusion before or
602 : * after this is done.
603 : */
604 12614 : if (foundDefault && tbinfo->attrdefs[j] == NULL)
605 : {
606 : AttrDefInfo *attrDef;
607 :
608 128 : attrDef = pg_malloc_object(AttrDefInfo);
609 128 : attrDef->dobj.objType = DO_ATTRDEF;
610 128 : attrDef->dobj.catId.tableoid = 0;
611 128 : attrDef->dobj.catId.oid = 0;
612 128 : AssignDumpId(&attrDef->dobj);
613 128 : attrDef->dobj.name = pg_strdup(tbinfo->dobj.name);
614 128 : attrDef->dobj.namespace = tbinfo->dobj.namespace;
615 128 : attrDef->dobj.dump = tbinfo->dobj.dump;
616 :
617 128 : attrDef->adtable = tbinfo;
618 128 : attrDef->adnum = j + 1;
619 128 : attrDef->adef_expr = pg_strdup("NULL");
620 :
621 : /* Will column be dumped explicitly? */
622 128 : if (shouldPrintColumn(dopt, tbinfo, j))
623 : {
624 128 : attrDef->separate = false;
625 : /* No dependency needed: NULL cannot have dependencies */
626 : }
627 : else
628 : {
629 : /* column will be suppressed, print default separately */
630 0 : attrDef->separate = true;
631 : /* ensure it comes out after the table */
632 0 : addObjectDependency(&attrDef->dobj,
633 : tbinfo->dobj.dumpId);
634 : }
635 :
636 128 : tbinfo->attrdefs[j] = attrDef;
637 : }
638 :
639 : /* No need to dump generation expression if it's inheritable */
640 12614 : if (foundSameGenerated && !foundDiffGenerated &&
641 576 : !tbinfo->ispartition && !dopt->binary_upgrade)
642 392 : tbinfo->attrdefs[j]->dobj.dump = DUMP_COMPONENT_NONE;
643 : }
644 : }
645 468 : }
646 :
647 : /*
648 : * AssignDumpId
649 : * Given a newly-created dumpable object, assign a dump ID,
650 : * and enter the object into the lookup tables.
651 : *
652 : * The caller is expected to have filled in objType and catId,
653 : * but not any of the other standard fields of a DumpableObject.
654 : */
655 : void
656 1734682 : AssignDumpId(DumpableObject *dobj)
657 : {
658 1734682 : dobj->dumpId = ++lastDumpId;
659 1734682 : dobj->name = NULL; /* must be set later */
660 1734682 : dobj->namespace = NULL; /* may be set later */
661 1734682 : dobj->dump = DUMP_COMPONENT_ALL; /* default assumption */
662 1734682 : dobj->dump_contains = DUMP_COMPONENT_ALL; /* default assumption */
663 : /* All objects have definitions; we may set more components bits later */
664 1734682 : dobj->components = DUMP_COMPONENT_DEFINITION;
665 1734682 : dobj->ext_member = false; /* default assumption */
666 1734682 : dobj->depends_on_ext = false; /* default assumption */
667 1734682 : dobj->dependencies = NULL;
668 1734682 : dobj->nDeps = 0;
669 1734682 : dobj->allocDeps = 0;
670 :
671 : /* Add object to dumpIdMap[], enlarging that array if need be */
672 1737064 : while (dobj->dumpId >= allocedDumpIds)
673 : {
674 : int newAlloc;
675 :
676 2382 : if (allocedDumpIds <= 0)
677 : {
678 470 : newAlloc = 256;
679 470 : dumpIdMap = pg_malloc_array(DumpableObject *, newAlloc);
680 : }
681 : else
682 : {
683 1912 : newAlloc = allocedDumpIds * 2;
684 1912 : dumpIdMap = pg_realloc_array(dumpIdMap, DumpableObject *, newAlloc);
685 : }
686 2382 : memset(dumpIdMap + allocedDumpIds, 0,
687 2382 : (newAlloc - allocedDumpIds) * sizeof(DumpableObject *));
688 2382 : allocedDumpIds = newAlloc;
689 : }
690 1734682 : dumpIdMap[dobj->dumpId] = dobj;
691 :
692 : /* If it has a valid CatalogId, enter it into the hash table */
693 1734682 : if (OidIsValid(dobj->catId.tableoid))
694 : {
695 : CatalogIdMapEntry *entry;
696 : bool found;
697 :
698 : /* Initialize CatalogId hash table if not done yet */
699 1673438 : if (catalogIdHash == NULL)
700 470 : catalogIdHash = catalogid_create(CATALOGIDHASH_INITIAL_SIZE, NULL);
701 :
702 1673438 : entry = catalogid_insert(catalogIdHash, dobj->catId, &found);
703 1673438 : if (!found)
704 : {
705 1671584 : entry->dobj = NULL;
706 1671584 : entry->ext = NULL;
707 : }
708 : Assert(entry->dobj == NULL);
709 1673438 : entry->dobj = dobj;
710 : }
711 1734682 : }
712 :
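/*
 * Illustrative sketch (added for exposition; not part of the original file):
 * per the contract above, a caller fills in objType and catId, calls
 * AssignDumpId(), and only afterwards sets name, namespace, and the
 * type-specific fields.  This mirrors the TableAttachInfo construction in
 * flagInhTables() earlier in this file, abridged here.
 */
#ifdef ASSIGN_DUMP_ID_EXAMPLE	/* never defined; example is compiled out */
static void
assign_dump_id_example(TableInfo *tbinfo)
{
	TableAttachInfo *attachinfo = pg_malloc_object(TableAttachInfo);

	attachinfo->dobj.objType = DO_TABLE_ATTACH;
	attachinfo->dobj.catId.tableoid = 0;	/* synthetic object, no catalog row */
	attachinfo->dobj.catId.oid = 0;
	AssignDumpId(&attachinfo->dobj);	/* assigns dumpId, resets other fields */

	/* name and namespace must be filled in after AssignDumpId() */
	attachinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
	attachinfo->dobj.namespace = tbinfo->dobj.namespace;
	/* type-specific fields (parentTbl, partitionTbl, ...) omitted here */
}
#endif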
713 : /*
714 : * recordAdditionalCatalogID
715 : * Record an additional catalog ID for the given DumpableObject
716 : */
717 : void
718 32 : recordAdditionalCatalogID(CatalogId catId, DumpableObject *dobj)
719 : {
720 : CatalogIdMapEntry *entry;
721 : bool found;
722 :
723 : /* CatalogId hash table must exist, if we have a DumpableObject */
724 : Assert(catalogIdHash != NULL);
725 :
726 : /* Add reference to CatalogId hash */
727 32 : entry = catalogid_insert(catalogIdHash, catId, &found);
728 32 : if (!found)
729 : {
730 32 : entry->dobj = NULL;
731 32 : entry->ext = NULL;
732 : }
733 : Assert(entry->dobj == NULL);
734 32 : entry->dobj = dobj;
735 32 : }
736 :
737 : /*
738 : * Assign a DumpId that's not tied to a DumpableObject.
739 : *
740 : * This is used when creating a "fixed" ArchiveEntry that doesn't need to
741 : * participate in the sorting logic.
742 : */
743 : DumpId
744 39360 : createDumpId(void)
745 : {
746 39360 : return ++lastDumpId;
747 : }
748 :
749 : /*
750 : * Return the largest DumpId so far assigned
751 : */
752 : DumpId
753 3054 : getMaxDumpId(void)
754 : {
755 3054 : return lastDumpId;
756 : }
757 :
758 : /*
759 : * Find a DumpableObject by dump ID
760 : *
761 : * Returns NULL for invalid ID
762 : */
763 : DumpableObject *
764 23090018 : findObjectByDumpId(DumpId dumpId)
765 : {
766 23090018 : if (dumpId <= 0 || dumpId >= allocedDumpIds)
767 0 : return NULL; /* out of range? */
768 23090018 : return dumpIdMap[dumpId];
769 : }
770 :
771 : /*
772 : * Find a DumpableObject by catalog ID
773 : *
774 : * Returns NULL for unknown ID
775 : */
776 : DumpableObject *
777 5886742 : findObjectByCatalogId(CatalogId catalogId)
778 : {
779 : CatalogIdMapEntry *entry;
780 :
781 5886742 : if (catalogIdHash == NULL)
782 0 : return NULL; /* no objects exist yet */
783 :
784 5886742 : entry = catalogid_lookup(catalogIdHash, catalogId);
785 5886742 : if (entry == NULL)
786 1686600 : return NULL;
787 4200142 : return entry->dobj;
788 : }
789 :
790 : /*
791 : * Build an array of pointers to all known dumpable objects
792 : *
793 : * This simply creates a modifiable copy of the internal map.
794 : */
795 : void
796 482 : getDumpableObjects(DumpableObject ***objs, int *numObjs)
797 : {
798 : int i,
799 : j;
800 :
801 482 : *objs = pg_malloc_array(DumpableObject *, allocedDumpIds);
802 482 : j = 0;
803 2195456 : for (i = 1; i < allocedDumpIds; i++)
804 : {
805 2194974 : if (dumpIdMap[i])
806 1784940 : (*objs)[j++] = dumpIdMap[i];
807 : }
808 482 : *numObjs = j;
809 482 : }
810 :
811 : /*
812 : * Add a dependency link to a DumpableObject
813 : *
814 : * Note: duplicate dependencies are currently not eliminated
815 : */
816 : void
817 2746386 : addObjectDependency(DumpableObject *dobj, DumpId refId)
818 : {
819 2746386 : if (dobj->nDeps >= dobj->allocDeps)
820 : {
821 475976 : if (dobj->allocDeps <= 0)
822 : {
823 441300 : dobj->allocDeps = 16;
824 441300 : dobj->dependencies = pg_malloc_array(DumpId, dobj->allocDeps);
825 : }
826 : else
827 : {
828 34676 : dobj->allocDeps *= 2;
829 34676 : dobj->dependencies = pg_realloc_array(dobj->dependencies,
830 : DumpId, dobj->allocDeps);
831 : }
832 : }
833 2746386 : dobj->dependencies[dobj->nDeps++] = refId;
834 2746386 : }
835 :
836 : /*
837 : * Remove a dependency link from a DumpableObject
838 : *
839 : * If there are multiple links, all are removed
840 : */
841 : void
842 77874 : removeObjectDependency(DumpableObject *dobj, DumpId refId)
843 : {
844 : int i;
845 77874 : int j = 0;
846 :
847 1535308 : for (i = 0; i < dobj->nDeps; i++)
848 : {
849 1457434 : if (dobj->dependencies[i] != refId)
850 1375746 : dobj->dependencies[j++] = dobj->dependencies[i];
851 : }
852 77874 : dobj->nDeps = j;
853 77874 : }
854 :
855 :
856 : /*
857 : * findTableByOid
858 : * finds the DumpableObject for the table with the given oid
859 : * returns NULL if not found
860 : */
861 : TableInfo *
862 210060 : findTableByOid(Oid oid)
863 : {
864 : CatalogId catId;
865 : DumpableObject *dobj;
866 :
867 210060 : catId.tableoid = RelationRelationId;
868 210060 : catId.oid = oid;
869 210060 : dobj = findObjectByCatalogId(catId);
870 : Assert(dobj == NULL || dobj->objType == DO_TABLE);
871 210060 : return (TableInfo *) dobj;
872 : }
873 :
874 : /*
875 : * findIndexByOid
876 : * finds the DumpableObject for the index with the given oid
877 : * returns NULL if not found
878 : */
879 : static IndxInfo *
880 1480 : findIndexByOid(Oid oid)
881 : {
882 : CatalogId catId;
883 : DumpableObject *dobj;
884 :
885 1480 : catId.tableoid = RelationRelationId;
886 1480 : catId.oid = oid;
887 1480 : dobj = findObjectByCatalogId(catId);
888 : Assert(dobj == NULL || dobj->objType == DO_INDEX);
889 1480 : return (IndxInfo *) dobj;
890 : }
891 :
892 : /*
893 : * findTypeByOid
894 : * finds the DumpableObject for the type with the given oid
895 : * returns NULL if not found
896 : */
897 : TypeInfo *
898 224204 : findTypeByOid(Oid oid)
899 : {
900 : CatalogId catId;
901 : DumpableObject *dobj;
902 :
903 224204 : catId.tableoid = TypeRelationId;
904 224204 : catId.oid = oid;
905 224204 : dobj = findObjectByCatalogId(catId);
906 : Assert(dobj == NULL ||
907 : dobj->objType == DO_TYPE || dobj->objType == DO_DUMMY_TYPE);
908 224204 : return (TypeInfo *) dobj;
909 : }
910 :
911 : /*
912 : * findFuncByOid
913 : * finds the DumpableObject for the function with the given oid
914 : * returns NULL if not found
915 : */
916 : FuncInfo *
917 580 : findFuncByOid(Oid oid)
918 : {
919 : CatalogId catId;
920 : DumpableObject *dobj;
921 :
922 580 : catId.tableoid = ProcedureRelationId;
923 580 : catId.oid = oid;
924 580 : dobj = findObjectByCatalogId(catId);
925 : Assert(dobj == NULL || dobj->objType == DO_FUNC);
926 580 : return (FuncInfo *) dobj;
927 : }
928 :
929 : /*
930 : * findOprByOid
931 : * finds the DumpableObject for the operator with the given oid
932 : * returns NULL if not found
933 : */
934 : OprInfo *
935 5678 : findOprByOid(Oid oid)
936 : {
937 : CatalogId catId;
938 : DumpableObject *dobj;
939 :
940 5678 : catId.tableoid = OperatorRelationId;
941 5678 : catId.oid = oid;
942 5678 : dobj = findObjectByCatalogId(catId);
943 : Assert(dobj == NULL || dobj->objType == DO_OPERATOR);
944 5678 : return (OprInfo *) dobj;
945 : }
946 :
947 : /*
948 : * findCollationByOid
949 : * finds the DumpableObject for the collation with the given oid
950 : * returns NULL if not found
951 : */
952 : CollInfo *
953 560 : findCollationByOid(Oid oid)
954 : {
955 : CatalogId catId;
956 : DumpableObject *dobj;
957 :
958 560 : catId.tableoid = CollationRelationId;
959 560 : catId.oid = oid;
960 560 : dobj = findObjectByCatalogId(catId);
961 : Assert(dobj == NULL || dobj->objType == DO_COLLATION);
962 560 : return (CollInfo *) dobj;
963 : }
964 :
965 : /*
966 : * findNamespaceByOid
967 : * finds the DumpableObject for the namespace with the given oid
968 : * returns NULL if not found
969 : */
970 : NamespaceInfo *
971 1464656 : findNamespaceByOid(Oid oid)
972 : {
973 : CatalogId catId;
974 : DumpableObject *dobj;
975 :
976 1464656 : catId.tableoid = NamespaceRelationId;
977 1464656 : catId.oid = oid;
978 1464656 : dobj = findObjectByCatalogId(catId);
979 : Assert(dobj == NULL || dobj->objType == DO_NAMESPACE);
980 1464656 : return (NamespaceInfo *) dobj;
981 : }
982 :
983 : /*
984 : * findExtensionByOid
985 : * finds the DumpableObject for the extension with the given oid
986 : * returns NULL if not found
987 : */
988 : ExtensionInfo *
989 520 : findExtensionByOid(Oid oid)
990 : {
991 : CatalogId catId;
992 : DumpableObject *dobj;
993 :
994 520 : catId.tableoid = ExtensionRelationId;
995 520 : catId.oid = oid;
996 520 : dobj = findObjectByCatalogId(catId);
997 : Assert(dobj == NULL || dobj->objType == DO_EXTENSION);
998 520 : return (ExtensionInfo *) dobj;
999 : }
1000 :
1001 : /*
1002 : * findPublicationByOid
1003 : * finds the DumpableObject for the publication with the given oid
1004 : * returns NULL if not found
1005 : */
1006 : PublicationInfo *
1007 882 : findPublicationByOid(Oid oid)
1008 : {
1009 : CatalogId catId;
1010 : DumpableObject *dobj;
1011 :
1012 882 : catId.tableoid = PublicationRelationId;
1013 882 : catId.oid = oid;
1014 882 : dobj = findObjectByCatalogId(catId);
1015 : Assert(dobj == NULL || dobj->objType == DO_PUBLICATION);
1016 882 : return (PublicationInfo *) dobj;
1017 : }
1018 :
1019 : /*
1020 : * findSubscriptionByOid
1021 : * finds the DumpableObject for the subscription with the given oid
1022 : * returns NULL if not found
1023 : */
1024 : SubscriptionInfo *
1025 4 : findSubscriptionByOid(Oid oid)
1026 : {
1027 : CatalogId catId;
1028 : DumpableObject *dobj;
1029 :
1030 4 : catId.tableoid = SubscriptionRelationId;
1031 4 : catId.oid = oid;
1032 4 : dobj = findObjectByCatalogId(catId);
1033 : Assert(dobj == NULL || dobj->objType == DO_SUBSCRIPTION);
1034 4 : return (SubscriptionInfo *) dobj;
1035 : }
1036 :
1037 :
1038 : /*
1039 : * recordExtensionMembership
1040 : * Record that the object identified by the given catalog ID
1041 : * belongs to the given extension
1042 : */
1043 : void
1044 3080 : recordExtensionMembership(CatalogId catId, ExtensionInfo *ext)
1045 : {
1046 : CatalogIdMapEntry *entry;
1047 : bool found;
1048 :
1049 : /* CatalogId hash table must exist, if we have an ExtensionInfo */
1050 : Assert(catalogIdHash != NULL);
1051 :
1052 : /* Add reference to CatalogId hash */
1053 3080 : entry = catalogid_insert(catalogIdHash, catId, &found);
1054 3080 : if (!found)
1055 : {
1056 3080 : entry->dobj = NULL;
1057 3080 : entry->ext = NULL;
1058 : }
1059 : Assert(entry->ext == NULL);
1060 3080 : entry->ext = ext;
1061 3080 : }
1062 :
1063 : /*
1064 : * findOwningExtension
1065 : * return owning extension for specified catalog ID, or NULL if none
1066 : */
1067 : ExtensionInfo *
1068 1460248 : findOwningExtension(CatalogId catalogId)
1069 : {
1070 : CatalogIdMapEntry *entry;
1071 :
1072 1460248 : if (catalogIdHash == NULL)
1073 0 : return NULL; /* no objects exist yet */
1074 :
1075 1460248 : entry = catalogid_lookup(catalogIdHash, catalogId);
1076 1460248 : if (entry == NULL)
1077 0 : return NULL;
1078 1460248 : return entry->ext;
1079 : }
1080 :
1081 :
1082 : /*
1083 : * parseOidArray
1084 : * parse a string of numbers delimited by spaces into a character array
1085 : *
1086 : * Note: actually this is used for both Oids and potentially-signed
1087 : * attribute numbers. This should cause no trouble, but we could split
1088 : * the function into two functions with different argument types if it does.
1089 : */
1090 :
1091 : void
1092 17414 : parseOidArray(const char *str, Oid *array, int arraysize)
1093 : {
1094 : int j,
1095 : argNum;
1096 : char temp[100];
1097 : char s;
1098 :
1099 17414 : argNum = 0;
1100 17414 : j = 0;
1101 : for (;;)
1102 : {
1103 82060 : s = *str++;
1104 82060 : if (s == ' ' || s == '\0')
1105 : {
1106 27832 : if (j > 0)
1107 : {
1108 27832 : if (argNum >= arraysize)
1109 0 : pg_fatal("could not parse numeric array \"%s\": too many numbers", str);
1110 27832 : temp[j] = '\0';
1111 27832 : array[argNum++] = atooid(temp);
1112 27832 : j = 0;
1113 : }
1114 27832 : if (s == '\0')
1115 17414 : break;
1116 : }
1117 : else
1118 : {
1119 54228 : if (!(isdigit((unsigned char) s) || s == '-') ||
1120 54228 : j >= sizeof(temp) - 1)
1121 0 : pg_fatal("could not parse numeric array \"%s\": invalid character in number", str);
1122 54228 : temp[j++] = s;
1123 : }
1124 : }
1125 :
1126 17414 : while (argNum < arraysize)
1127 0 : array[argNum++] = InvalidOid;
1128 17414 : }
1129 :
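/*
 * Illustrative sketch (added for exposition; not part of the original file):
 * callers pass a space-separated catalog column value (for example an
 * index's indkey) together with the expected element count; any unused
 * trailing slots are filled with InvalidOid.  The literal input below is
 * made up purely for illustration.
 */
#ifdef PARSE_OID_ARRAY_EXAMPLE	/* never defined; example is compiled out */
static void
parse_oid_array_example(void)
{
	Oid			keys[3];

	/* yields keys[0] = 2, keys[1] = 5, keys[2] = InvalidOid */
	parseOidArray("2 5", keys, 3);
}
#endif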
1130 :
1131 : /*
1132 : * strInArray:
1133              :  *    takes a string, an array of strings, and the number of elements in
1134              :  *    the array.
1135              :  *    returns the index of the string in the array, or -1 if it is not found
1136 : */
1137 :
1138 : static int
1139 13242 : strInArray(const char *pattern, char **arr, int arr_size)
1140 : {
1141 : int i;
1142 :
1143 26208 : for (i = 0; i < arr_size; i++)
1144 : {
1145 25440 : if (strcmp(pattern, arr[i]) == 0)
1146 12474 : return i;
1147 : }
1148 768 : return -1;
1149 : }