Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * common.c
4 : * Catalog routines used by pg_dump; long ago these were shared
5 : * by another dump tool, but not anymore.
6 : *
7 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : *
11 : * IDENTIFICATION
12 : * src/bin/pg_dump/common.c
13 : *
14 : *-------------------------------------------------------------------------
15 : */
16 : #include "postgres_fe.h"
17 :
18 : #include <ctype.h>
19 :
20 : #include "catalog/pg_am_d.h"
21 : #include "catalog/pg_class_d.h"
22 : #include "catalog/pg_collation_d.h"
23 : #include "catalog/pg_extension_d.h"
24 : #include "catalog/pg_namespace_d.h"
25 : #include "catalog/pg_operator_d.h"
26 : #include "catalog/pg_proc_d.h"
27 : #include "catalog/pg_publication_d.h"
28 : #include "catalog/pg_subscription_d.h"
29 : #include "catalog/pg_type_d.h"
30 : #include "common/hashfn.h"
31 : #include "pg_backup_utils.h"
32 : #include "pg_dump.h"
33 :
34 : /*
35 : * Variables for mapping DumpId to DumpableObject
36 : */
37 : static DumpableObject **dumpIdMap = NULL;
38 : static int allocedDumpIds = 0;
39 : static DumpId lastDumpId = 0; /* Note: 0 is InvalidDumpId */
40 :
41 : /*
42 : * Infrastructure for mapping CatalogId to DumpableObject
43 : *
44 : * We use a hash table generated by simplehash.h. That infrastructure
45 : * requires all the hash table entries to be the same size, and it also
46 : * expects that it can move them around when resizing the table. So we
47 : * cannot make the DumpableObjects be elements of the hash table directly;
48 : * instead, the hash table elements contain pointers to DumpableObjects.
49 : * This does have the advantage of letting us map multiple CatalogIds
50 : * to one DumpableObject, which is useful for blobs.
51 : *
52 : * It turns out to be convenient to also use this data structure to map
53 : * CatalogIds to owning extensions, if any. Since extension membership
54 : * data is read before creating most DumpableObjects, either one of dobj
55 : * and ext could be NULL.
56 : */
typedef struct _catalogIdMapEntry
{
	CatalogId	catId;			/* the indexed CatalogId (hash key) */
	uint32		status;			/* hash status (managed by simplehash) */
	uint32		hashval;		/* cached hash code for the CatalogId */
	DumpableObject *dobj;		/* the associated DumpableObject, if any */
	ExtensionInfo *ext;			/* owning extension, if any */
} CatalogIdMapEntry;
65 :
66 : #define SH_PREFIX catalogid
67 : #define SH_ELEMENT_TYPE CatalogIdMapEntry
68 : #define SH_KEY_TYPE CatalogId
69 : #define SH_KEY catId
70 : #define SH_HASH_KEY(tb, key) hash_bytes((const unsigned char *) &(key), sizeof(CatalogId))
71 : #define SH_EQUAL(tb, a, b) ((a).oid == (b).oid && (a).tableoid == (b).tableoid)
72 : #define SH_STORE_HASH
73 : #define SH_GET_HASH(tb, a) (a)->hashval
74 : #define SH_SCOPE static inline
75 : #define SH_RAW_ALLOCATOR pg_malloc0
76 : #define SH_DECLARE
77 : #define SH_DEFINE
78 : #include "lib/simplehash.h"
79 :
80 : #define CATALOGIDHASH_INITIAL_SIZE 10000
81 :
82 : static catalogid_hash *catalogIdHash = NULL;
83 :
84 : static void flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
85 : InhInfo *inhinfo, int numInherits);
86 : static void flagInhIndexes(Archive *fout, TableInfo *tblinfo, int numTables);
87 : static void flagInhAttrs(Archive *fout, DumpOptions *dopt, TableInfo *tblinfo,
88 : int numTables);
89 : static int strInArray(const char *pattern, char **arr, int arr_size);
90 : static IndxInfo *findIndexByOid(Oid oid);
91 :
92 :
/*
 * getSchemaData
 *	  Collect information about all potentially dumpable objects
 *
 * Drives the whole catalog-reading phase: each get* call below queries the
 * source server and registers DumpableObjects.  The call order matters; see
 * the inline comments for the ordering constraints that are known.
 *
 * Returns the array of TableInfo structs (owned by the caller) and sets
 * *numTablesPtr to its length.
 */
TableInfo *
getSchemaData(Archive *fout, int *numTablesPtr)
{
	TableInfo  *tblinfo;
	ExtensionInfo *extinfo;
	InhInfo    *inhinfo;
	int			numTables;
	int			numExtensions;
	int			numInherits;

	/*
	 * We must read extensions and extension membership info first, because
	 * extension membership needs to be consultable during decisions about
	 * whether other objects are to be dumped.
	 */
	pg_log_info("reading extensions");
	extinfo = getExtensions(fout, &numExtensions);

	pg_log_info("identifying extension members");
	getExtensionMembership(fout, extinfo, numExtensions);

	pg_log_info("reading schemas");
	getNamespaces(fout);

	/*
	 * getTables should be done as soon as possible, so as to minimize the
	 * window between starting our transaction and acquiring per-table locks.
	 * However, we have to do getNamespaces first because the tables get
	 * linked to their containing namespaces during getTables.
	 */
	pg_log_info("reading user-defined tables");
	tblinfo = getTables(fout, &numTables);

	getOwnedSeqs(fout, tblinfo, numTables);

	pg_log_info("reading user-defined functions");
	getFuncs(fout);

	/* this must be after getTables and getFuncs */
	pg_log_info("reading user-defined types");
	getTypes(fout);

	/* this must be after getFuncs, too */
	pg_log_info("reading procedural languages");
	getProcLangs(fout);

	pg_log_info("reading user-defined aggregate functions");
	getAggregates(fout);

	pg_log_info("reading user-defined operators");
	getOperators(fout);

	pg_log_info("reading user-defined access methods");
	getAccessMethods(fout);

	pg_log_info("reading user-defined operator classes");
	getOpclasses(fout);

	pg_log_info("reading user-defined operator families");
	getOpfamilies(fout);

	pg_log_info("reading user-defined text search parsers");
	getTSParsers(fout);

	pg_log_info("reading user-defined text search templates");
	getTSTemplates(fout);

	pg_log_info("reading user-defined text search dictionaries");
	getTSDictionaries(fout);

	pg_log_info("reading user-defined text search configurations");
	getTSConfigurations(fout);

	pg_log_info("reading user-defined foreign-data wrappers");
	getForeignDataWrappers(fout);

	pg_log_info("reading user-defined foreign servers");
	getForeignServers(fout);

	pg_log_info("reading default privileges");
	getDefaultACLs(fout);

	pg_log_info("reading user-defined collations");
	getCollations(fout);

	pg_log_info("reading user-defined conversions");
	getConversions(fout);

	pg_log_info("reading type casts");
	getCasts(fout);

	pg_log_info("reading transforms");
	getTransforms(fout);

	pg_log_info("reading table inheritance information");
	inhinfo = getInherits(fout, &numInherits);

	pg_log_info("reading event triggers");
	getEventTriggers(fout);

	/* Identify extension configuration tables that should be dumped */
	pg_log_info("finding extension tables");
	processExtensionTables(fout, extinfo, numExtensions);

	/* Link tables to parents, mark parents of target tables interesting */
	pg_log_info("finding inheritance relationships");
	flagInhTables(fout, tblinfo, numTables, inhinfo, numInherits);

	/* getTableAttrs only reads columns for tables marked "interesting" */
	pg_log_info("reading column info for interesting tables");
	getTableAttrs(fout, tblinfo, numTables);

	pg_log_info("flagging inherited columns in subtables");
	flagInhAttrs(fout, fout->dopt, tblinfo, numTables);

	pg_log_info("reading partitioning data");
	getPartitioningInfo(fout);

	pg_log_info("reading indexes");
	getIndexes(fout, tblinfo, numTables);

	/* must follow getIndexes, since it matches up partition index OIDs */
	pg_log_info("flagging indexes in partitioned tables");
	flagInhIndexes(fout, tblinfo, numTables);

	pg_log_info("reading extended statistics");
	getExtendedStatistics(fout);

	pg_log_info("reading constraints");
	getConstraints(fout, tblinfo, numTables);

	pg_log_info("reading triggers");
	getTriggers(fout, tblinfo, numTables);

	pg_log_info("reading rewrite rules");
	getRules(fout);

	pg_log_info("reading policies");
	getPolicies(fout, tblinfo, numTables);

	pg_log_info("reading publications");
	getPublications(fout);

	pg_log_info("reading publication membership of tables");
	getPublicationTables(fout, tblinfo, numTables);

	pg_log_info("reading publication membership of schemas");
	getPublicationNamespaces(fout);

	pg_log_info("reading subscriptions");
	getSubscriptions(fout);

	pg_log_info("reading subscription membership of relations");
	getSubscriptionRelations(fout);

	free(inhinfo);				/* not needed any longer */

	*numTablesPtr = numTables;
	return tblinfo;
}
255 :
/* flagInhTables -
 *	 Fill in parent link fields of tables for which we need that information,
 *	 mark parents of target tables as interesting, and create
 *	 TableAttachInfo objects for partitioned tables with appropriate
 *	 dependency links.
 *
 * Note that only direct ancestors of targets are marked interesting.
 * This is sufficient; we don't much care whether they inherited their
 * attributes or not.
 *
 * inhinfo[] holds one entry per pg_inherits row (child OID, parent OID).
 *
 * modifies tblinfo
 */
static void
flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
			  InhInfo *inhinfo, int numInherits)
{
	/* one-entry caches for the last child/parent looked up */
	TableInfo  *child = NULL;
	TableInfo  *parent = NULL;
	int			i,
				j;

	/*
	 * Set up links from child tables to their parents.
	 *
	 * We used to attempt to skip this work for tables that are not to be
	 * dumped; but the optimizable cases are rare in practice, and setting up
	 * these links in bulk is cheaper than the old way.  (Note in particular
	 * that it's very rare for a child to have more than one parent.)
	 */
	for (i = 0; i < numInherits; i++)
	{
		/*
		 * Skip a hashtable lookup if it's same table as last time.  This is
		 * unlikely for the child, but less so for the parent.  (Maybe we
		 * should ask the backend for a sorted array to make it more likely?
		 * Not clear the sorting effort would be repaid, though.)
		 */
		if (child == NULL ||
			child->dobj.catId.oid != inhinfo[i].inhrelid)
		{
			child = findTableByOid(inhinfo[i].inhrelid);

			/*
			 * If we find no TableInfo, assume the pg_inherits entry is for a
			 * partitioned index, which we don't need to track.
			 */
			if (child == NULL)
				continue;
		}
		if (parent == NULL ||
			parent->dobj.catId.oid != inhinfo[i].inhparent)
		{
			parent = findTableByOid(inhinfo[i].inhparent);
			if (parent == NULL)
				pg_fatal("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
						 inhinfo[i].inhparent,
						 child->dobj.name,
						 child->dobj.catId.oid);
		}
		/* Add this parent to the child's list of parents. */
		if (child->numParents > 0)
			child->parents = pg_realloc_array(child->parents,
											  TableInfo *,
											  child->numParents + 1);
		else
			child->parents = pg_malloc_array(TableInfo *, 1);
		child->parents[child->numParents++] = parent;
	}

	/*
	 * Now consider all child tables and mark parents interesting as needed.
	 */
	for (i = 0; i < numTables; i++)
	{
		/*
		 * If needed, mark the parents as interesting for getTableAttrs and
		 * getIndexes.  We only need this for direct parents of dumpable
		 * tables.
		 */
		if (tblinfo[i].dobj.dump)
		{
			int			numParents = tblinfo[i].numParents;
			TableInfo **parents = tblinfo[i].parents;

			for (j = 0; j < numParents; j++)
				parents[j]->interesting = true;
		}

		/* Create TableAttachInfo object if needed */
		if ((tblinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
			tblinfo[i].ispartition)
		{
			TableAttachInfo *attachinfo;

			/* With partitions there can only be one parent */
			if (tblinfo[i].numParents != 1)
				pg_fatal("invalid number of parents %d for table \"%s\"",
						 tblinfo[i].numParents,
						 tblinfo[i].dobj.name);

			/* synthetic object: no catalog identity of its own */
			attachinfo = palloc_object(TableAttachInfo);
			attachinfo->dobj.objType = DO_TABLE_ATTACH;
			attachinfo->dobj.catId.tableoid = 0;
			attachinfo->dobj.catId.oid = 0;
			AssignDumpId(&attachinfo->dobj);
			attachinfo->dobj.name = pg_strdup(tblinfo[i].dobj.name);
			attachinfo->dobj.namespace = tblinfo[i].dobj.namespace;
			attachinfo->parentTbl = tblinfo[i].parents[0];
			attachinfo->partitionTbl = &tblinfo[i];

			/*
			 * We must state the DO_TABLE_ATTACH object's dependencies
			 * explicitly, since it will not match anything in pg_depend.
			 *
			 * Give it dependencies on both the partition table and the parent
			 * table, so that it will not be executed till both of those
			 * exist.  (There's no need to care what order those are created
			 * in.)
			 */
			addObjectDependency(&attachinfo->dobj, tblinfo[i].dobj.dumpId);
			addObjectDependency(&attachinfo->dobj, tblinfo[i].parents[0]->dobj.dumpId);
		}
	}
}
380 :
/*
 * flagInhIndexes -
 *	 Create IndexAttachInfo objects for partitioned indexes, and add
 *	 appropriate dependency links.
 *
 * For every index on a partition that has a parent index (parentidx != 0),
 * a synthetic DO_INDEX_ATTACH object is created so that the ATTACH can be
 * scheduled correctly at restore time.
 */
static void
flagInhIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
{
	int			i,
				j;

	for (i = 0; i < numTables; i++)
	{
		/* only partitions with a known parent can have attached indexes */
		if (!tblinfo[i].ispartition || tblinfo[i].numParents == 0)
			continue;

		Assert(tblinfo[i].numParents == 1);

		for (j = 0; j < tblinfo[i].numIndexes; j++)
		{
			IndxInfo   *index = &(tblinfo[i].indexes[j]);
			IndxInfo   *parentidx;
			IndexAttachInfo *attachinfo;

			/* skip indexes that are not attached to a parent index */
			if (index->parentidx == 0)
				continue;

			/* parent index may not have been collected; ignore if so */
			parentidx = findIndexByOid(index->parentidx);
			if (parentidx == NULL)
				continue;

			attachinfo = pg_malloc_object(IndexAttachInfo);

			/* synthetic object: no catalog identity of its own */
			attachinfo->dobj.objType = DO_INDEX_ATTACH;
			attachinfo->dobj.catId.tableoid = 0;
			attachinfo->dobj.catId.oid = 0;
			AssignDumpId(&attachinfo->dobj);
			attachinfo->dobj.name = pg_strdup(index->dobj.name);
			attachinfo->dobj.namespace = index->indextable->dobj.namespace;
			attachinfo->parentIdx = parentidx;
			attachinfo->partitionIdx = index;

			/*
			 * We must state the DO_INDEX_ATTACH object's dependencies
			 * explicitly, since it will not match anything in pg_depend.
			 *
			 * Give it dependencies on both the partition index and the parent
			 * index, so that it will not be executed till both of those
			 * exist.  (There's no need to care what order those are created
			 * in.)
			 *
			 * In addition, give it dependencies on the indexes' underlying
			 * tables.  This does nothing of great value so far as serial
			 * restore ordering goes, but it ensures that a parallel restore
			 * will not try to run the ATTACH concurrently with other
			 * operations on those tables.
			 */
			addObjectDependency(&attachinfo->dobj, index->dobj.dumpId);
			addObjectDependency(&attachinfo->dobj, parentidx->dobj.dumpId);
			addObjectDependency(&attachinfo->dobj,
								index->indextable->dobj.dumpId);
			addObjectDependency(&attachinfo->dobj,
								parentidx->indextable->dobj.dumpId);

			/* keep track of the list of partitions in the parent index */
			simple_ptr_list_append(&parentidx->partattaches, &attachinfo->dobj);
		}
	}
}
450 :
/* flagInhAttrs -
 *	 for each dumpable table in tblinfo, flag its inherited attributes
 *
 * What we need to do here is:
 *
 * - Detect child columns that inherit NOT NULL bits from their parents, so
 *	 that we needn't specify that again for the child.  For versions 18 and
 *	 up, this is needed when the parent is NOT VALID and the child isn't.
 *
 * - Detect child columns that have DEFAULT NULL when their parents had some
 *	 non-null default.  In this case, we make up a dummy AttrDefInfo object so
 *	 that we'll correctly emit the necessary DEFAULT NULL clause; otherwise
 *	 the backend will apply an inherited default to the column.
 *
 * - Detect child columns that have a generation expression and all their
 *	 parents also have the same generation expression, and if so suppress the
 *	 child's expression.  The child will inherit the generation expression
 *	 automatically, so there's no need to dump it.  This improves the dump's
 *	 compatibility with pre-v16 servers, which didn't allow the child's
 *	 expression to be given explicitly.  Exceptions: If it's a partition or
 *	 we are in binary upgrade mode, we dump such expressions anyway because
 *	 in those cases inherited tables are recreated standalone first and then
 *	 reattached to the parent.  (See also the logic in dumpTableSchema().)
 *
 * modifies tblinfo
 */
static void
flagInhAttrs(Archive *fout, DumpOptions *dopt, TableInfo *tblinfo, int numTables)
{
	int			i,
				j,
				k;

	/*
	 * We scan the tables in OID order, since that's how tblinfo[] is sorted.
	 * Hence we will typically visit parents before their children --- but
	 * that is *not* guaranteed.  Thus this loop must be careful that it does
	 * not alter table properties in a way that could change decisions made at
	 * child tables during other iterations.
	 */
	for (i = 0; i < numTables; i++)
	{
		TableInfo  *tbinfo = &(tblinfo[i]);
		int			numParents;
		TableInfo **parents;

		/* Some kinds never have parents */
		if (tbinfo->relkind == RELKIND_SEQUENCE ||
			tbinfo->relkind == RELKIND_VIEW ||
			tbinfo->relkind == RELKIND_MATVIEW ||
			tbinfo->relkind == RELKIND_PROPGRAPH)
			continue;

		/* Don't bother computing anything for non-target tables, either */
		if (!tbinfo->dobj.dump)
			continue;

		numParents = tbinfo->numParents;
		parents = tbinfo->parents;

		if (numParents == 0)
			continue;			/* nothing to see here, move along */

		/* For each column, search for matching column names in parent(s) */
		for (j = 0; j < tbinfo->numatts; j++)
		{
			bool		foundNotNull;	/* Attr was NOT NULL in a parent */
			bool		foundDefault;	/* Found a default in a parent */
			bool		foundSameGenerated; /* Found matching GENERATED */
			bool		foundDiffGenerated; /* Found non-matching GENERATED */
			bool		allNotNullsInvalid = true;	/* is NOT NULL NOT VALID
													 * on all parents? */

			/* no point in examining dropped columns */
			if (tbinfo->attisdropped[j])
				continue;

			foundNotNull = false;
			foundDefault = false;
			foundSameGenerated = false;
			foundDiffGenerated = false;
			for (k = 0; k < numParents; k++)
			{
				TableInfo  *parent = parents[k];
				int			inhAttrInd;

				/* match child column to parent column by name, if present */
				inhAttrInd = strInArray(tbinfo->attnames[j],
										parent->attnames,
										parent->numatts);
				if (inhAttrInd >= 0)
				{
					AttrDefInfo *parentDef = parent->attrdefs[inhAttrInd];

					/*
					 * Account for each parent having a not-null constraint.
					 * In versions 18 and later, we don't need this (and those
					 * didn't have NO INHERIT.)
					 */
					if (fout->remoteVersion < 180000 &&
						parent->notnull_constrs[inhAttrInd] != NULL)
						foundNotNull = true;

					/*
					 * Keep track of whether all the parents that have a
					 * not-null constraint on this column have it as NOT
					 * VALID; if they all are, arrange to have it printed for
					 * this column.  If at least one parent has it as valid,
					 * there's no need.
					 */
					if (fout->remoteVersion >= 180000 &&
						parent->notnull_constrs[inhAttrInd] &&
						!parent->notnull_invalid[inhAttrInd])
						allNotNullsInvalid = false;

					/* a parent default of literally "NULL" doesn't count */
					foundDefault |= (parentDef != NULL &&
									 strcmp(parentDef->adef_expr, "NULL") != 0 &&
									 !parent->attgenerated[inhAttrInd]);
					if (parent->attgenerated[inhAttrInd])
					{
						/* these pointer nullness checks are just paranoia */
						if (parentDef != NULL &&
							tbinfo->attrdefs[j] != NULL &&
							strcmp(parentDef->adef_expr,
								   tbinfo->attrdefs[j]->adef_expr) == 0)
							foundSameGenerated = true;
						else
							foundDiffGenerated = true;
					}
				}
			}

			/*
			 * In versions < 18, for lack of a better system, we arbitrarily
			 * decide that a not-null constraint is not locally defined if at
			 * least one of the parents has it.
			 */
			if (fout->remoteVersion < 180000 && foundNotNull)
				tbinfo->notnull_islocal[j] = false;

			/*
			 * For versions >18, we must print the not-null constraint locally
			 * for this table even if it isn't really locally defined, but is
			 * valid for the child and no parent has it as valid.
			 */
			if (fout->remoteVersion >= 180000 && allNotNullsInvalid)
				tbinfo->notnull_islocal[j] = true;

			/*
			 * Manufacture a DEFAULT NULL clause if necessary.  This breaks
			 * the advice given above to avoid changing state that might get
			 * inspected in other loop iterations.  We prevent trouble by
			 * having the foundDefault test above check whether adef_expr is
			 * "NULL", so that it will reach the same conclusion before or
			 * after this is done.
			 */
			if (foundDefault && tbinfo->attrdefs[j] == NULL)
			{
				AttrDefInfo *attrDef;

				attrDef = pg_malloc_object(AttrDefInfo);
				attrDef->dobj.objType = DO_ATTRDEF;
				attrDef->dobj.catId.tableoid = 0;
				attrDef->dobj.catId.oid = 0;
				AssignDumpId(&attrDef->dobj);
				attrDef->dobj.name = pg_strdup(tbinfo->dobj.name);
				attrDef->dobj.namespace = tbinfo->dobj.namespace;
				attrDef->dobj.dump = tbinfo->dobj.dump;

				attrDef->adtable = tbinfo;
				attrDef->adnum = j + 1;
				attrDef->adef_expr = pg_strdup("NULL");

				/* Will column be dumped explicitly? */
				if (shouldPrintColumn(dopt, tbinfo, j))
				{
					attrDef->separate = false;
					/* No dependency needed: NULL cannot have dependencies */
				}
				else
				{
					/* column will be suppressed, print default separately */
					attrDef->separate = true;
					/* ensure it comes out after the table */
					addObjectDependency(&attrDef->dobj,
										tbinfo->dobj.dumpId);
				}

				tbinfo->attrdefs[j] = attrDef;
			}

			/* No need to dump generation expression if it's inheritable */
			if (foundSameGenerated && !foundDiffGenerated &&
				!tbinfo->ispartition && !dopt->binary_upgrade)
				tbinfo->attrdefs[j]->dobj.dump = DUMP_COMPONENT_NONE;
		}
	}
}
648 :
/*
 * AssignDumpId
 *	  Given a newly-created dumpable object, assign a dump ID,
 *	  and enter the object into the lookup tables.
 *
 * The caller is expected to have filled in objType and catId,
 * but not any of the other standard fields of a DumpableObject.
 * All other standard fields are reset to their defaults here;
 * the object is registered in dumpIdMap[] and, when it has a valid
 * catalog ID, in the CatalogId hash table as well.
 */
void
AssignDumpId(DumpableObject *dobj)
{
	dobj->dumpId = ++lastDumpId;
	dobj->name = NULL;			/* must be set later */
	dobj->namespace = NULL;		/* may be set later */
	dobj->dump = DUMP_COMPONENT_ALL;	/* default assumption */
	dobj->dump_contains = DUMP_COMPONENT_ALL;	/* default assumption */
	/* All objects have definitions; we may set more components bits later */
	dobj->components = DUMP_COMPONENT_DEFINITION;
	dobj->ext_member = false;	/* default assumption */
	dobj->depends_on_ext = false;	/* default assumption */
	dobj->dependencies = NULL;
	dobj->nDeps = 0;
	dobj->allocDeps = 0;

	/* Add object to dumpIdMap[], enlarging that array if need be */
	while (dobj->dumpId >= allocedDumpIds)
	{
		int			newAlloc;

		if (allocedDumpIds <= 0)
		{
			/* first allocation */
			newAlloc = 256;
			dumpIdMap = pg_malloc_array(DumpableObject *, newAlloc);
		}
		else
		{
			/* grow geometrically to amortize realloc cost */
			newAlloc = allocedDumpIds * 2;
			dumpIdMap = pg_realloc_array(dumpIdMap, DumpableObject *, newAlloc);
		}
		/* zero the newly-added slots so unused IDs read as NULL */
		memset(dumpIdMap + allocedDumpIds, 0,
			   (newAlloc - allocedDumpIds) * sizeof(DumpableObject *));
		allocedDumpIds = newAlloc;
	}
	dumpIdMap[dobj->dumpId] = dobj;

	/* If it has a valid CatalogId, enter it into the hash table */
	if (OidIsValid(dobj->catId.tableoid))
	{
		CatalogIdMapEntry *entry;
		bool		found;

		/* Initialize CatalogId hash table if not done yet */
		if (catalogIdHash == NULL)
			catalogIdHash = catalogid_create(CATALOGIDHASH_INITIAL_SIZE, NULL);

		entry = catalogid_insert(catalogIdHash, dobj->catId, &found);
		if (!found)
		{
			entry->dobj = NULL;
			entry->ext = NULL;
		}
		/* the entry may pre-exist (extension membership), but not its dobj */
		Assert(entry->dobj == NULL);
		entry->dobj = dobj;
	}
}
714 :
715 : /*
716 : * recordAdditionalCatalogID
717 : * Record an additional catalog ID for the given DumpableObject
718 : */
719 : void
720 14 : recordAdditionalCatalogID(CatalogId catId, DumpableObject *dobj)
721 : {
722 : CatalogIdMapEntry *entry;
723 : bool found;
724 :
725 : /* CatalogId hash table must exist, if we have a DumpableObject */
726 : Assert(catalogIdHash != NULL);
727 :
728 : /* Add reference to CatalogId hash */
729 14 : entry = catalogid_insert(catalogIdHash, catId, &found);
730 14 : if (!found)
731 : {
732 14 : entry->dobj = NULL;
733 14 : entry->ext = NULL;
734 : }
735 : Assert(entry->dobj == NULL);
736 14 : entry->dobj = dobj;
737 14 : }
738 :
739 : /*
740 : * Assign a DumpId that's not tied to a DumpableObject.
741 : *
742 : * This is used when creating a "fixed" ArchiveEntry that doesn't need to
743 : * participate in the sorting logic.
744 : */
745 : DumpId
746 13895 : createDumpId(void)
747 : {
748 13895 : return ++lastDumpId;
749 : }
750 :
/*
 * Return the largest DumpId so far assigned
 *
 * Returns 0 (InvalidDumpId) if no IDs have been assigned yet.
 */
DumpId
getMaxDumpId(void)
{
	return lastDumpId;
}
759 :
760 : /*
761 : * Find a DumpableObject by dump ID
762 : *
763 : * Returns NULL for invalid ID
764 : */
765 : DumpableObject *
766 25150931 : findObjectByDumpId(DumpId dumpId)
767 : {
768 25150931 : if (dumpId <= 0 || dumpId >= allocedDumpIds)
769 0 : return NULL; /* out of range? */
770 25150931 : return dumpIdMap[dumpId];
771 : }
772 :
773 : /*
774 : * Find a DumpableObject by catalog ID
775 : *
776 : * Returns NULL for unknown ID
777 : */
778 : DumpableObject *
779 5291532 : findObjectByCatalogId(CatalogId catalogId)
780 : {
781 : CatalogIdMapEntry *entry;
782 :
783 5291532 : if (catalogIdHash == NULL)
784 0 : return NULL; /* no objects exist yet */
785 :
786 5291532 : entry = catalogid_lookup(catalogIdHash, catalogId);
787 5291532 : if (entry == NULL)
788 949154 : return NULL;
789 4342378 : return entry->dobj;
790 : }
791 :
792 : /*
793 : * Build an array of pointers to all known dumpable objects
794 : *
795 : * This simply creates a modifiable copy of the internal map.
796 : */
797 : void
798 266 : getDumpableObjects(DumpableObject ***objs, int *numObjs)
799 : {
800 : int i,
801 : j;
802 :
803 266 : *objs = pg_malloc_array(DumpableObject *, allocedDumpIds);
804 266 : j = 0;
805 1245184 : for (i = 1; i < allocedDumpIds; i++)
806 : {
807 1244918 : if (dumpIdMap[i])
808 1010756 : (*objs)[j++] = dumpIdMap[i];
809 : }
810 266 : *numObjs = j;
811 266 : }
812 :
813 : /*
814 : * Add a dependency link to a DumpableObject
815 : *
816 : * Note: duplicate dependencies are currently not eliminated
817 : */
818 : void
819 1558895 : addObjectDependency(DumpableObject *dobj, DumpId refId)
820 : {
821 1558895 : if (dobj->nDeps >= dobj->allocDeps)
822 : {
823 256423 : if (dobj->allocDeps <= 0)
824 : {
825 245334 : dobj->allocDeps = 16;
826 245334 : dobj->dependencies = pg_malloc_array(DumpId, dobj->allocDeps);
827 : }
828 : else
829 : {
830 11089 : dobj->allocDeps *= 2;
831 11089 : dobj->dependencies = pg_realloc_array(dobj->dependencies,
832 : DumpId, dobj->allocDeps);
833 : }
834 : }
835 1558895 : dobj->dependencies[dobj->nDeps++] = refId;
836 1558895 : }
837 :
838 : /*
839 : * Remove a dependency link from a DumpableObject
840 : *
841 : * If there are multiple links, all are removed
842 : */
843 : void
844 46046 : removeObjectDependency(DumpableObject *dobj, DumpId refId)
845 : {
846 : int i;
847 46046 : int j = 0;
848 :
849 779371 : for (i = 0; i < dobj->nDeps; i++)
850 : {
851 733325 : if (dobj->dependencies[i] != refId)
852 685504 : dobj->dependencies[j++] = dobj->dependencies[i];
853 : }
854 46046 : dobj->nDeps = j;
855 46046 : }
856 :
857 :
858 : /*
859 : * findTableByOid
860 : * finds the DumpableObject for the table with the given oid
861 : * returns NULL if not found
862 : */
863 : TableInfo *
864 121136 : findTableByOid(Oid oid)
865 : {
866 : CatalogId catId;
867 : DumpableObject *dobj;
868 :
869 121136 : catId.tableoid = RelationRelationId;
870 121136 : catId.oid = oid;
871 121136 : dobj = findObjectByCatalogId(catId);
872 : Assert(dobj == NULL || dobj->objType == DO_TABLE);
873 121136 : return (TableInfo *) dobj;
874 : }
875 :
876 : /*
877 : * findIndexByOid
878 : * finds the DumpableObject for the index with the given oid
879 : * returns NULL if not found
880 : */
881 : static IndxInfo *
882 594 : findIndexByOid(Oid oid)
883 : {
884 : CatalogId catId;
885 : DumpableObject *dobj;
886 :
887 594 : catId.tableoid = RelationRelationId;
888 594 : catId.oid = oid;
889 594 : dobj = findObjectByCatalogId(catId);
890 : Assert(dobj == NULL || dobj->objType == DO_INDEX);
891 594 : return (IndxInfo *) dobj;
892 : }
893 :
894 : /*
895 : * findTypeByOid
896 : * finds the DumpableObject for the type with the given oid
897 : * returns NULL if not found
898 : */
899 : TypeInfo *
900 1972520 : findTypeByOid(Oid oid)
901 : {
902 : CatalogId catId;
903 : DumpableObject *dobj;
904 :
905 1972520 : catId.tableoid = TypeRelationId;
906 1972520 : catId.oid = oid;
907 1972520 : dobj = findObjectByCatalogId(catId);
908 : Assert(dobj == NULL ||
909 : dobj->objType == DO_TYPE || dobj->objType == DO_DUMMY_TYPE);
910 1972520 : return (TypeInfo *) dobj;
911 : }
912 :
913 : /*
914 : * findFuncByOid
915 : * finds the DumpableObject for the function with the given oid
916 : * returns NULL if not found
917 : */
918 : FuncInfo *
919 253 : findFuncByOid(Oid oid)
920 : {
921 : CatalogId catId;
922 : DumpableObject *dobj;
923 :
924 253 : catId.tableoid = ProcedureRelationId;
925 253 : catId.oid = oid;
926 253 : dobj = findObjectByCatalogId(catId);
927 : Assert(dobj == NULL || dobj->objType == DO_FUNC);
928 253 : return (FuncInfo *) dobj;
929 : }
930 :
931 : /*
932 : * findOprByOid
933 : * finds the DumpableObject for the operator with the given oid
934 : * returns NULL if not found
935 : */
936 : OprInfo *
937 2860 : findOprByOid(Oid oid)
938 : {
939 : CatalogId catId;
940 : DumpableObject *dobj;
941 :
942 2860 : catId.tableoid = OperatorRelationId;
943 2860 : catId.oid = oid;
944 2860 : dobj = findObjectByCatalogId(catId);
945 : Assert(dobj == NULL || dobj->objType == DO_OPERATOR);
946 2860 : return (OprInfo *) dobj;
947 : }
948 :
949 : /*
950 : * findAccessMethodByOid
951 : * finds the DumpableObject for the access method with the given oid
952 : * returns NULL if not found
953 : */
954 : AccessMethodInfo *
955 71866 : findAccessMethodByOid(Oid oid)
956 : {
957 : CatalogId catId;
958 : DumpableObject *dobj;
959 :
960 71866 : catId.tableoid = AccessMethodRelationId;
961 71866 : catId.oid = oid;
962 71866 : dobj = findObjectByCatalogId(catId);
963 : Assert(dobj == NULL || dobj->objType == DO_ACCESS_METHOD);
964 71866 : return (AccessMethodInfo *) dobj;
965 : }
966 :
967 : /*
968 : * findCollationByOid
969 : * finds the DumpableObject for the collation with the given oid
970 : * returns NULL if not found
971 : */
972 : CollInfo *
973 282 : findCollationByOid(Oid oid)
974 : {
975 : CatalogId catId;
976 : DumpableObject *dobj;
977 :
978 282 : catId.tableoid = CollationRelationId;
979 282 : catId.oid = oid;
980 282 : dobj = findObjectByCatalogId(catId);
981 : Assert(dobj == NULL || dobj->objType == DO_COLLATION);
982 282 : return (CollInfo *) dobj;
983 : }
984 :
985 : /*
986 : * findNamespaceByOid
987 : * finds the DumpableObject for the namespace with the given oid
988 : * returns NULL if not found
989 : */
990 : NamespaceInfo *
991 843945 : findNamespaceByOid(Oid oid)
992 : {
993 : CatalogId catId;
994 : DumpableObject *dobj;
995 :
996 843945 : catId.tableoid = NamespaceRelationId;
997 843945 : catId.oid = oid;
998 843945 : dobj = findObjectByCatalogId(catId);
999 : Assert(dobj == NULL || dobj->objType == DO_NAMESPACE);
1000 843945 : return (NamespaceInfo *) dobj;
1001 : }
1002 :
1003 : /*
1004 : * findExtensionByOid
1005 : * finds the DumpableObject for the extension with the given oid
1006 : * returns NULL if not found
1007 : */
1008 : ExtensionInfo *
1009 291 : findExtensionByOid(Oid oid)
1010 : {
1011 : CatalogId catId;
1012 : DumpableObject *dobj;
1013 :
1014 291 : catId.tableoid = ExtensionRelationId;
1015 291 : catId.oid = oid;
1016 291 : dobj = findObjectByCatalogId(catId);
1017 : Assert(dobj == NULL || dobj->objType == DO_EXTENSION);
1018 291 : return (ExtensionInfo *) dobj;
1019 : }
1020 :
1021 : /*
1022 : * findPublicationByOid
1023 : * finds the DumpableObject for the publication with the given oid
1024 : * returns NULL if not found
1025 : */
1026 : PublicationInfo *
1027 475 : findPublicationByOid(Oid oid)
1028 : {
1029 : CatalogId catId;
1030 : DumpableObject *dobj;
1031 :
1032 475 : catId.tableoid = PublicationRelationId;
1033 475 : catId.oid = oid;
1034 475 : dobj = findObjectByCatalogId(catId);
1035 : Assert(dobj == NULL || dobj->objType == DO_PUBLICATION);
1036 475 : return (PublicationInfo *) dobj;
1037 : }
1038 :
1039 : /*
1040 : * findSubscriptionByOid
1041 : * finds the DumpableObject for the subscription with the given oid
1042 : * returns NULL if not found
1043 : */
1044 : SubscriptionInfo *
1045 2 : findSubscriptionByOid(Oid oid)
1046 : {
1047 : CatalogId catId;
1048 : DumpableObject *dobj;
1049 :
1050 2 : catId.tableoid = SubscriptionRelationId;
1051 2 : catId.oid = oid;
1052 2 : dobj = findObjectByCatalogId(catId);
1053 : Assert(dobj == NULL || dobj->objType == DO_SUBSCRIPTION);
1054 2 : return (SubscriptionInfo *) dobj;
1055 : }
1056 :
1057 :
1058 : /*
1059 : * recordExtensionMembership
1060 : * Record that the object identified by the given catalog ID
1061 : * belongs to the given extension
1062 : */
1063 : void
1064 1646 : recordExtensionMembership(CatalogId catId, ExtensionInfo *ext)
1065 : {
1066 : CatalogIdMapEntry *entry;
1067 : bool found;
1068 :
1069 : /* CatalogId hash table must exist, if we have an ExtensionInfo */
1070 : Assert(catalogIdHash != NULL);
1071 :
1072 : /* Add reference to CatalogId hash */
1073 1646 : entry = catalogid_insert(catalogIdHash, catId, &found);
1074 1646 : if (!found)
1075 : {
1076 1646 : entry->dobj = NULL;
1077 1646 : entry->ext = NULL;
1078 : }
1079 : Assert(entry->ext == NULL);
1080 1646 : entry->ext = ext;
1081 1646 : }
1082 :
1083 : /*
1084 : * findOwningExtension
1085 : * return owning extension for specified catalog ID, or NULL if none
1086 : */
1087 : ExtensionInfo *
1088 842030 : findOwningExtension(CatalogId catalogId)
1089 : {
1090 : CatalogIdMapEntry *entry;
1091 :
1092 842030 : if (catalogIdHash == NULL)
1093 0 : return NULL; /* no objects exist yet */
1094 :
1095 842030 : entry = catalogid_lookup(catalogIdHash, catalogId);
1096 842030 : if (entry == NULL)
1097 0 : return NULL;
1098 842030 : return entry->ext;
1099 : }
1100 :
1101 :
1102 : /*
1103 : * parseOidArray
1104 : * parse a string of numbers delimited by spaces into a character array
1105 : *
1106 : * Note: actually this is used for both Oids and potentially-signed
1107 : * attribute numbers. This should cause no trouble, but we could split
1108 : * the function into two functions with different argument types if it does.
1109 : */
1110 :
1111 : void
1112 7679 : parseOidArray(const char *str, Oid *array, int arraysize)
1113 : {
1114 : int j,
1115 : argNum;
1116 : char temp[100];
1117 : char s;
1118 :
1119 7679 : argNum = 0;
1120 7679 : j = 0;
1121 : for (;;)
1122 : {
1123 36952 : s = *str++;
1124 36952 : if (s == ' ' || s == '\0')
1125 : {
1126 12489 : if (j > 0)
1127 : {
1128 12489 : if (argNum >= arraysize)
1129 0 : pg_fatal("could not parse numeric array \"%s\": too many numbers", str);
1130 12489 : temp[j] = '\0';
1131 12489 : array[argNum++] = atooid(temp);
1132 12489 : j = 0;
1133 : }
1134 12489 : if (s == '\0')
1135 7679 : break;
1136 : }
1137 : else
1138 : {
1139 24463 : if (!(isdigit((unsigned char) s) || s == '-') ||
1140 24463 : j >= sizeof(temp) - 1)
1141 0 : pg_fatal("could not parse numeric array \"%s\": invalid character in number", str);
1142 24463 : temp[j++] = s;
1143 : }
1144 : }
1145 :
1146 7679 : while (argNum < arraysize)
1147 0 : array[argNum++] = InvalidOid;
1148 7679 : }
1149 :
1150 :
/*
 * strInArray:
 *	  Search a string array of arr_size elements for pattern.
 *	  Returns the matching element's index, or -1 if absent.
 */

static int
strInArray(const char *pattern, char **arr, int arr_size)
{
	for (int idx = 0; idx < arr_size; idx++)
	{
		if (!strcmp(pattern, arr[idx]))
			return idx;
	}
	return -1;
}
|