Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * common.c
4 : * Catalog routines used by pg_dump; long ago these were shared
5 : * by another dump tool, but not anymore.
6 : *
7 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : *
11 : * IDENTIFICATION
12 : * src/bin/pg_dump/common.c
13 : *
14 : *-------------------------------------------------------------------------
15 : */
16 : #include "postgres_fe.h"
17 :
18 : #include <ctype.h>
19 :
20 : #include "catalog/pg_class_d.h"
21 : #include "catalog/pg_collation_d.h"
22 : #include "catalog/pg_extension_d.h"
23 : #include "catalog/pg_namespace_d.h"
24 : #include "catalog/pg_operator_d.h"
25 : #include "catalog/pg_proc_d.h"
26 : #include "catalog/pg_publication_d.h"
27 : #include "catalog/pg_subscription_d.h"
28 : #include "catalog/pg_type_d.h"
29 : #include "common/hashfn.h"
30 : #include "fe_utils/string_utils.h"
31 : #include "pg_backup_archiver.h"
32 : #include "pg_backup_utils.h"
33 : #include "pg_dump.h"
34 :
35 : /*
36 : * Variables for mapping DumpId to DumpableObject
37 : */
38 : static DumpableObject **dumpIdMap = NULL;
39 : static int allocedDumpIds = 0;
40 : static DumpId lastDumpId = 0; /* Note: 0 is InvalidDumpId */
41 :
42 : /*
43 : * Infrastructure for mapping CatalogId to DumpableObject
44 : *
45 : * We use a hash table generated by simplehash.h. That infrastructure
46 : * requires all the hash table entries to be the same size, and it also
47 : * expects that it can move them around when resizing the table. So we
48 : * cannot make the DumpableObjects be elements of the hash table directly;
49 : * instead, the hash table elements contain pointers to DumpableObjects.
50 : * This does have the advantage of letting us map multiple CatalogIds
51 : * to one DumpableObject, which is useful for blobs.
52 : *
53 : * It turns out to be convenient to also use this data structure to map
54 : * CatalogIds to owning extensions, if any. Since extension membership
55 : * data is read before creating most DumpableObjects, either one of dobj
56 : * and ext could be NULL.
57 : */
58 : typedef struct _catalogIdMapEntry
59 : {
60 : CatalogId catId; /* the indexed CatalogId */
61 : uint32 status; /* hash status */
62 : uint32 hashval; /* hash code for the CatalogId */
63 : DumpableObject *dobj; /* the associated DumpableObject, if any */
64 : ExtensionInfo *ext; /* owning extension, if any */
65 : } CatalogIdMapEntry;
66 :
67 : #define SH_PREFIX catalogid
68 : #define SH_ELEMENT_TYPE CatalogIdMapEntry
69 : #define SH_KEY_TYPE CatalogId
70 : #define SH_KEY catId
71 : #define SH_HASH_KEY(tb, key) hash_bytes((const unsigned char *) &(key), sizeof(CatalogId))
72 : #define SH_EQUAL(tb, a, b) ((a).oid == (b).oid && (a).tableoid == (b).tableoid)
73 : #define SH_STORE_HASH
74 : #define SH_GET_HASH(tb, a) (a)->hashval
75 : #define SH_SCOPE static inline
76 : #define SH_RAW_ALLOCATOR pg_malloc0
77 : #define SH_DECLARE
78 : #define SH_DEFINE
79 : #include "lib/simplehash.h"
80 :
81 : #define CATALOGIDHASH_INITIAL_SIZE 10000
82 :
83 : static catalogid_hash *catalogIdHash = NULL;
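The SH_ macro block above makes simplehash.h emit a catalogid_hash table type together with catalogid_create(), catalogid_insert(), and catalogid_lookup() functions, which the routines later in this file use to maintain the CatalogId map. A minimal sketch of that usage pattern follows; the helper name is hypothetical, but the calls mirror what AssignDumpId() and findObjectByCatalogId() do below.

static DumpableObject *
catalogid_probe(CatalogId catId)	/* hypothetical helper, for illustration */
{
	CatalogIdMapEntry *entry;
	bool		found;

	/* create the table lazily, exactly as AssignDumpId() does */
	if (catalogIdHash == NULL)
		catalogIdHash = catalogid_create(CATALOGIDHASH_INITIAL_SIZE, NULL);

	/* insert returns the pre-existing entry when the key is already present */
	entry = catalogid_insert(catalogIdHash, catId, &found);
	if (!found)
	{
		entry->dobj = NULL;		/* fresh entry: no object or extension yet */
		entry->ext = NULL;
	}
	return entry->dobj;
}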
84 :
85 : static void flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
86 : InhInfo *inhinfo, int numInherits);
87 : static void flagInhIndexes(Archive *fout, TableInfo *tblinfo, int numTables);
88 : static void flagInhAttrs(Archive *fout, TableInfo *tblinfo, int numTables);
89 : static int strInArray(const char *pattern, char **arr, int arr_size);
90 : static IndxInfo *findIndexByOid(Oid oid);
91 :
92 :
93 : /*
94 : * getSchemaData
95 : * Collect information about all potentially dumpable objects
96 : */
97 : TableInfo *
98 310 : getSchemaData(Archive *fout, int *numTablesPtr)
99 : {
100 : TableInfo *tblinfo;
101 : ExtensionInfo *extinfo;
102 : InhInfo *inhinfo;
103 : int numTables;
104 : int numExtensions;
105 : int numInherits;
106 :
107 : /*
108 : * We must read extensions and extension membership info first, because
109 : * extension membership needs to be consultable during decisions about
110 : * whether other objects are to be dumped.
111 : */
112 310 : pg_log_info("reading extensions");
113 310 : extinfo = getExtensions(fout, &numExtensions);
114 :
115 310 : pg_log_info("identifying extension members");
116 310 : getExtensionMembership(fout, extinfo, numExtensions);
117 :
118 310 : pg_log_info("reading schemas");
119 310 : getNamespaces(fout);
120 :
121 : /*
122 : * getTables should be done as soon as possible, so as to minimize the
123 : * window between starting our transaction and acquiring per-table locks.
124 : * However, we have to do getNamespaces first because the tables get
125 : * linked to their containing namespaces during getTables.
126 : */
127 310 : pg_log_info("reading user-defined tables");
128 310 : tblinfo = getTables(fout, &numTables);
129 :
130 308 : getOwnedSeqs(fout, tblinfo, numTables);
131 :
132 308 : pg_log_info("reading user-defined functions");
133 308 : getFuncs(fout);
134 :
135 : /* this must be after getTables and getFuncs */
136 308 : pg_log_info("reading user-defined types");
137 308 : getTypes(fout);
138 :
139 : /* this must be after getFuncs, too */
140 308 : pg_log_info("reading procedural languages");
141 308 : getProcLangs(fout);
142 :
143 308 : pg_log_info("reading user-defined aggregate functions");
144 308 : getAggregates(fout);
145 :
146 308 : pg_log_info("reading user-defined operators");
147 308 : getOperators(fout);
148 :
149 308 : pg_log_info("reading user-defined access methods");
150 308 : getAccessMethods(fout);
151 :
152 308 : pg_log_info("reading user-defined operator classes");
153 308 : getOpclasses(fout);
154 :
155 308 : pg_log_info("reading user-defined operator families");
156 308 : getOpfamilies(fout);
157 :
158 308 : pg_log_info("reading user-defined text search parsers");
159 308 : getTSParsers(fout);
160 :
161 308 : pg_log_info("reading user-defined text search templates");
162 308 : getTSTemplates(fout);
163 :
164 308 : pg_log_info("reading user-defined text search dictionaries");
165 308 : getTSDictionaries(fout);
166 :
167 308 : pg_log_info("reading user-defined text search configurations");
168 308 : getTSConfigurations(fout);
169 :
170 308 : pg_log_info("reading user-defined foreign-data wrappers");
171 308 : getForeignDataWrappers(fout);
172 :
173 308 : pg_log_info("reading user-defined foreign servers");
174 308 : getForeignServers(fout);
175 :
176 308 : pg_log_info("reading default privileges");
177 308 : getDefaultACLs(fout);
178 :
179 308 : pg_log_info("reading user-defined collations");
180 308 : getCollations(fout);
181 :
182 308 : pg_log_info("reading user-defined conversions");
183 308 : getConversions(fout);
184 :
185 308 : pg_log_info("reading type casts");
186 308 : getCasts(fout);
187 :
188 308 : pg_log_info("reading transforms");
189 308 : getTransforms(fout);
190 :
191 308 : pg_log_info("reading table inheritance information");
192 308 : inhinfo = getInherits(fout, &numInherits);
193 :
194 308 : pg_log_info("reading event triggers");
195 308 : getEventTriggers(fout);
196 :
197 : /* Identify extension configuration tables that should be dumped */
198 308 : pg_log_info("finding extension tables");
199 308 : processExtensionTables(fout, extinfo, numExtensions);
200 :
201 : /* Link tables to parents, mark parents of target tables interesting */
202 308 : pg_log_info("finding inheritance relationships");
203 308 : flagInhTables(fout, tblinfo, numTables, inhinfo, numInherits);
204 :
205 308 : pg_log_info("reading column info for interesting tables");
206 308 : getTableAttrs(fout, tblinfo, numTables);
207 :
208 308 : pg_log_info("flagging inherited columns in subtables");
209 308 : flagInhAttrs(fout, tblinfo, numTables);
210 :
211 308 : pg_log_info("reading partitioning data");
212 308 : getPartitioningInfo(fout);
213 :
214 308 : pg_log_info("reading indexes");
215 308 : getIndexes(fout, tblinfo, numTables);
216 :
217 308 : pg_log_info("flagging indexes in partitioned tables");
218 308 : flagInhIndexes(fout, tblinfo, numTables);
219 :
220 308 : pg_log_info("reading extended statistics");
221 308 : getExtendedStatistics(fout);
222 :
223 308 : pg_log_info("reading constraints");
224 308 : getConstraints(fout, tblinfo, numTables);
225 :
226 308 : pg_log_info("reading triggers");
227 308 : getTriggers(fout, tblinfo, numTables);
228 :
229 308 : pg_log_info("reading rewrite rules");
230 308 : getRules(fout);
231 :
232 308 : pg_log_info("reading policies");
233 308 : getPolicies(fout, tblinfo, numTables);
234 :
235 308 : pg_log_info("reading publications");
236 308 : getPublications(fout);
237 :
238 308 : pg_log_info("reading publication membership of tables");
239 308 : getPublicationTables(fout, tblinfo, numTables);
240 :
241 308 : pg_log_info("reading publication membership of schemas");
242 308 : getPublicationNamespaces(fout);
243 :
244 308 : pg_log_info("reading subscriptions");
245 308 : getSubscriptions(fout);
246 :
247 308 : pg_log_info("reading subscription membership of tables");
248 308 : getSubscriptionTables(fout);
249 :
250 308 : free(inhinfo); /* not needed any longer */
251 :
252 308 : *numTablesPtr = numTables;
253 308 : return tblinfo;
254 : }
255 :
256 : /* flagInhTables -
257 : * Fill in parent link fields of tables for which we need that information,
258 : * mark parents of target tables as interesting, and create
259 : * TableAttachInfo objects for partitioned tables with appropriate
260 : * dependency links.
261 : *
262 : * Note that only direct ancestors of targets are marked interesting.
263 : * This is sufficient; we don't much care whether they inherited their
264 : * attributes or not.
265 : *
266 : * modifies tblinfo
267 : */
268 : static void
269 308 : flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
270 : InhInfo *inhinfo, int numInherits)
271 : {
272 308 : TableInfo *child = NULL;
273 308 : TableInfo *parent = NULL;
274 : int i,
275 : j;
276 :
277 : /*
278 : * Set up links from child tables to their parents.
279 : *
280 : * We used to attempt to skip this work for tables that are not to be
281 : * dumped; but the optimizable cases are rare in practice, and setting up
282 : * these links in bulk is cheaper than the old way. (Note in particular
283 : * that it's very rare for a child to have more than one parent.)
284 : */
285 5984 : for (i = 0; i < numInherits; i++)
286 : {
287 : /*
288 : * Skip a hashtable lookup if it's the same table as last time. This is
289 : * unlikely for the child, but less so for the parent. (Maybe we
290 : * should ask the backend for a sorted array to make it more likely?
291 : * Not clear the sorting effort would be repaid, though.)
292 : */
293 5676 : if (child == NULL ||
294 4246 : child->dobj.catId.oid != inhinfo[i].inhrelid)
295 : {
296 5592 : child = findTableByOid(inhinfo[i].inhrelid);
297 :
298 : /*
299 : * If we find no TableInfo, assume the pg_inherits entry is for a
300 : * partitioned index, which we don't need to track.
301 : */
302 5592 : if (child == NULL)
303 1384 : continue;
304 : }
305 4292 : if (parent == NULL ||
306 4154 : parent->dobj.catId.oid != inhinfo[i].inhparent)
307 : {
308 2616 : parent = findTableByOid(inhinfo[i].inhparent);
309 2616 : if (parent == NULL)
310 0 : pg_fatal("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
311 : inhinfo[i].inhparent,
312 : child->dobj.name,
313 : child->dobj.catId.oid);
314 : }
315 : /* Add this parent to the child's list of parents. */
316 4292 : if (child->numParents > 0)
317 84 : child->parents = pg_realloc_array(child->parents,
318 : TableInfo *,
319 : child->numParents + 1);
320 : else
321 4208 : child->parents = pg_malloc_array(TableInfo *, 1);
322 4292 : child->parents[child->numParents++] = parent;
323 : }
324 :
325 : /*
326 : * Now consider all child tables and mark parents interesting as needed.
327 : */
328 80978 : for (i = 0; i < numTables; i++)
329 : {
330 : /*
331 : * If needed, mark the parents as interesting for getTableAttrs and
332 : * getIndexes. We only need this for direct parents of dumpable
333 : * tables.
334 : */
335 80670 : if (tblinfo[i].dobj.dump)
336 : {
337 49804 : int numParents = tblinfo[i].numParents;
338 49804 : TableInfo **parents = tblinfo[i].parents;
339 :
340 52980 : for (j = 0; j < numParents; j++)
341 3176 : parents[j]->interesting = true;
342 : }
343 :
344 : /* Create TableAttachInfo object if needed */
345 80670 : if ((tblinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
346 11992 : tblinfo[i].ispartition)
347 : {
348 : TableAttachInfo *attachinfo;
349 :
350 : /* With partitions there can only be one parent */
351 2486 : if (tblinfo[i].numParents != 1)
352 0 : pg_fatal("invalid number of parents %d for table \"%s\"",
353 : tblinfo[i].numParents,
354 : tblinfo[i].dobj.name);
355 :
356 2486 : attachinfo = (TableAttachInfo *) palloc(sizeof(TableAttachInfo));
357 2486 : attachinfo->dobj.objType = DO_TABLE_ATTACH;
358 2486 : attachinfo->dobj.catId.tableoid = 0;
359 2486 : attachinfo->dobj.catId.oid = 0;
360 2486 : AssignDumpId(&attachinfo->dobj);
361 2486 : attachinfo->dobj.name = pg_strdup(tblinfo[i].dobj.name);
362 2486 : attachinfo->dobj.namespace = tblinfo[i].dobj.namespace;
363 2486 : attachinfo->parentTbl = tblinfo[i].parents[0];
364 2486 : attachinfo->partitionTbl = &tblinfo[i];
365 :
366 : /*
367 : * We must state the DO_TABLE_ATTACH object's dependencies
368 : * explicitly, since it will not match anything in pg_depend.
369 : *
370 : * Give it dependencies on both the partition table and the parent
371 : * table, so that it will not be executed till both of those
372 : * exist. (There's no need to care what order those are created
373 : * in.)
374 : */
375 2486 : addObjectDependency(&attachinfo->dobj, tblinfo[i].dobj.dumpId);
376 2486 : addObjectDependency(&attachinfo->dobj, tblinfo[i].parents[0]->dobj.dumpId);
377 : }
378 : }
379 308 : }
380 :
381 : /*
382 : * flagInhIndexes -
383 : * Create IndexAttachInfo objects for partitioned indexes, and add
384 : * appropriate dependency links.
385 : */
386 : static void
387 308 : flagInhIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
388 : {
389 : int i,
390 : j;
391 :
392 80978 : for (i = 0; i < numTables; i++)
393 : {
394 80670 : if (!tblinfo[i].ispartition || tblinfo[i].numParents == 0)
395 77314 : continue;
396 :
397 : Assert(tblinfo[i].numParents == 1);
398 :
399 4552 : for (j = 0; j < tblinfo[i].numIndexes; j++)
400 : {
401 1196 : IndxInfo *index = &(tblinfo[i].indexes[j]);
402 : IndxInfo *parentidx;
403 : IndexAttachInfo *attachinfo;
404 :
405 1196 : if (index->parentidx == 0)
406 100 : continue;
407 :
408 1096 : parentidx = findIndexByOid(index->parentidx);
409 1096 : if (parentidx == NULL)
410 0 : continue;
411 :
412 1096 : attachinfo = pg_malloc_object(IndexAttachInfo);
413 :
414 1096 : attachinfo->dobj.objType = DO_INDEX_ATTACH;
415 1096 : attachinfo->dobj.catId.tableoid = 0;
416 1096 : attachinfo->dobj.catId.oid = 0;
417 1096 : AssignDumpId(&attachinfo->dobj);
418 1096 : attachinfo->dobj.name = pg_strdup(index->dobj.name);
419 1096 : attachinfo->dobj.namespace = index->indextable->dobj.namespace;
420 1096 : attachinfo->parentIdx = parentidx;
421 1096 : attachinfo->partitionIdx = index;
422 :
423 : /*
424 : * We must state the DO_INDEX_ATTACH object's dependencies
425 : * explicitly, since it will not match anything in pg_depend.
426 : *
427 : * Give it dependencies on both the partition index and the parent
428 : * index, so that it will not be executed till both of those
429 : * exist. (There's no need to care what order those are created
430 : * in.)
431 : *
432 : * In addition, give it dependencies on the indexes' underlying
433 : * tables. This does nothing of great value so far as serial
434 : * restore ordering goes, but it ensures that a parallel restore
435 : * will not try to run the ATTACH concurrently with other
436 : * operations on those tables.
437 : */
438 1096 : addObjectDependency(&attachinfo->dobj, index->dobj.dumpId);
439 1096 : addObjectDependency(&attachinfo->dobj, parentidx->dobj.dumpId);
440 1096 : addObjectDependency(&attachinfo->dobj,
441 1096 : index->indextable->dobj.dumpId);
442 1096 : addObjectDependency(&attachinfo->dobj,
443 1096 : parentidx->indextable->dobj.dumpId);
444 :
445 : /* keep track of the list of partitions in the parent index */
446 1096 : simple_ptr_list_append(&parentidx->partattaches, &attachinfo->dobj);
447 : }
448 : }
449 308 : }
450 :
451 : /* flagInhAttrs -
452 : * for each dumpable table in tblinfo, flag its inherited attributes
453 : *
454 : * What we need to do here is:
455 : *
456 : * - Detect child columns that inherit NOT NULL bits from their parents, so
457 : * that we needn't specify that again for the child.
458 : *
459 : * - Detect child columns that have DEFAULT NULL when their parents had some
460 : * non-null default. In this case, we make up a dummy AttrDefInfo object so
461 : * that we'll correctly emit the necessary DEFAULT NULL clause; otherwise
462 : * the backend will apply an inherited default to the column.
463 : *
464 : * - Detect child columns that have a generation expression and all their
465 : * parents also have the same generation expression, and if so suppress the
466 : * child's expression. The child will inherit the generation expression
467 : * automatically, so there's no need to dump it. This improves the dump's
468 : * compatibility with pre-v16 servers, which didn't allow the child's
469 : * expression to be given explicitly. Exceptions: If it's a partition or
470 : * we are in binary upgrade mode, we dump such expressions anyway because
471 : * in those cases inherited tables are recreated standalone first and then
472 : * reattached to the parent. (See also the logic in dumpTableSchema().)
473 : *
474 : * modifies tblinfo
475 : */
476 : static void
477 308 : flagInhAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
478 : {
479 308 : DumpOptions *dopt = fout->dopt;
480 : int i,
481 : j,
482 : k;
483 :
484 : /*
485 : * We scan the tables in OID order, since that's how tblinfo[] is sorted.
486 : * Hence we will typically visit parents before their children --- but
487 : * that is *not* guaranteed. Thus this loop must be careful that it does
488 : * not alter table properties in a way that could change decisions made at
489 : * child tables during other iterations.
490 : */
491 80978 : for (i = 0; i < numTables; i++)
492 : {
493 80670 : TableInfo *tbinfo = &(tblinfo[i]);
494 : int numParents;
495 : TableInfo **parents;
496 :
497 : /* Some kinds never have parents */
498 80670 : if (tbinfo->relkind == RELKIND_SEQUENCE ||
499 79486 : tbinfo->relkind == RELKIND_VIEW ||
500 34684 : tbinfo->relkind == RELKIND_MATVIEW)
501 46872 : continue;
502 :
503 : /* Don't bother computing anything for non-target tables, either */
504 33798 : if (!tbinfo->dobj.dump)
505 7230 : continue;
506 :
507 26568 : numParents = tbinfo->numParents;
508 26568 : parents = tbinfo->parents;
509 :
510 26568 : if (numParents == 0)
511 23452 : continue; /* nothing to see here, move along */
512 :
513 : /* For each column, search for matching column names in parent(s) */
514 11260 : for (j = 0; j < tbinfo->numatts; j++)
515 : {
516 : bool foundNotNull; /* Attr was NOT NULL in a parent */
517 : bool foundDefault; /* Found a default in a parent */
518 : bool foundSameGenerated; /* Found matching GENERATED */
519 : bool foundDiffGenerated; /* Found non-matching GENERATED */
520 :
521 : /* no point in examining dropped columns */
522 8144 : if (tbinfo->attisdropped[j])
523 610 : continue;
524 :
525 7534 : foundNotNull = false;
526 7534 : foundDefault = false;
527 7534 : foundSameGenerated = false;
528 7534 : foundDiffGenerated = false;
529 15318 : for (k = 0; k < numParents; k++)
530 : {
531 7784 : TableInfo *parent = parents[k];
532 : int inhAttrInd;
533 :
534 7784 : inhAttrInd = strInArray(tbinfo->attnames[j],
535 : parent->attnames,
536 : parent->numatts);
537 7784 : if (inhAttrInd >= 0)
538 : {
539 7414 : AttrDefInfo *parentDef = parent->attrdefs[inhAttrInd];
540 :
541 7414 : foundNotNull |= parent->notnull[inhAttrInd];
542 15552 : foundDefault |= (parentDef != NULL &&
543 8038 : strcmp(parentDef->adef_expr, "NULL") != 0 &&
544 624 : !parent->attgenerated[inhAttrInd]);
545 7414 : if (parent->attgenerated[inhAttrInd])
546 : {
547 : /* these pointer nullness checks are just paranoia */
548 244 : if (parentDef != NULL &&
549 232 : tbinfo->attrdefs[j] != NULL &&
550 232 : strcmp(parentDef->adef_expr,
551 232 : tbinfo->attrdefs[j]->adef_expr) == 0)
552 202 : foundSameGenerated = true;
553 : else
554 42 : foundDiffGenerated = true;
555 : }
556 : }
557 : }
558 :
559 : /* Remember if we found inherited NOT NULL */
560 7534 : tbinfo->inhNotNull[j] = foundNotNull;
561 :
562 : /*
563 : * Manufacture a DEFAULT NULL clause if necessary. This breaks
564 : * the advice given above to avoid changing state that might get
565 : * inspected in other loop iterations. We prevent trouble by
566 : * having the foundDefault test above check whether adef_expr is
567 : * "NULL", so that it will reach the same conclusion before or
568 : * after this is done.
569 : */
570 7534 : if (foundDefault && tbinfo->attrdefs[j] == NULL)
571 : {
572 : AttrDefInfo *attrDef;
573 :
574 80 : attrDef = pg_malloc_object(AttrDefInfo);
575 80 : attrDef->dobj.objType = DO_ATTRDEF;
576 80 : attrDef->dobj.catId.tableoid = 0;
577 80 : attrDef->dobj.catId.oid = 0;
578 80 : AssignDumpId(&attrDef->dobj);
579 80 : attrDef->dobj.name = pg_strdup(tbinfo->dobj.name);
580 80 : attrDef->dobj.namespace = tbinfo->dobj.namespace;
581 80 : attrDef->dobj.dump = tbinfo->dobj.dump;
582 :
583 80 : attrDef->adtable = tbinfo;
584 80 : attrDef->adnum = j + 1;
585 80 : attrDef->adef_expr = pg_strdup("NULL");
586 :
587 : /* Will column be dumped explicitly? */
588 80 : if (shouldPrintColumn(dopt, tbinfo, j))
589 : {
590 80 : attrDef->separate = false;
591 : /* No dependency needed: NULL cannot have dependencies */
592 : }
593 : else
594 : {
595 : /* column will be suppressed, print default separately */
596 0 : attrDef->separate = true;
597 : /* ensure it comes out after the table */
598 0 : addObjectDependency(&attrDef->dobj,
599 : tbinfo->dobj.dumpId);
600 : }
601 :
602 80 : tbinfo->attrdefs[j] = attrDef;
603 : }
604 :
605 : /* No need to dump generation expression if it's inheritable */
606 7534 : if (foundSameGenerated && !foundDiffGenerated &&
607 202 : !tbinfo->ispartition && !dopt->binary_upgrade)
608 160 : tbinfo->attrdefs[j]->dobj.dump = DUMP_COMPONENT_NONE;
609 : }
610 : }
611 308 : }
612 :
613 : /*
614 : * AssignDumpId
615 : * Given a newly-created dumpable object, assign a dump ID,
616 : * and enter the object into the lookup tables.
617 : *
618 : * The caller is expected to have filled in objType and catId,
619 : * but not any of the other standard fields of a DumpableObject.
620 : */
621 : void
622 1125014 : AssignDumpId(DumpableObject *dobj)
623 : {
624 1125014 : dobj->dumpId = ++lastDumpId;
625 1125014 : dobj->name = NULL; /* must be set later */
626 1125014 : dobj->namespace = NULL; /* may be set later */
627 1125014 : dobj->dump = DUMP_COMPONENT_ALL; /* default assumption */
628 1125014 : dobj->dump_contains = DUMP_COMPONENT_ALL; /* default assumption */
629 : /* All objects have definitions; we may set more components bits later */
630 1125014 : dobj->components = DUMP_COMPONENT_DEFINITION;
631 1125014 : dobj->ext_member = false; /* default assumption */
632 1125014 : dobj->depends_on_ext = false; /* default assumption */
633 1125014 : dobj->dependencies = NULL;
634 1125014 : dobj->nDeps = 0;
635 1125014 : dobj->allocDeps = 0;
636 :
637 : /* Add object to dumpIdMap[], enlarging that array if need be */
638 1126582 : while (dobj->dumpId >= allocedDumpIds)
639 : {
640 : int newAlloc;
641 :
642 1568 : if (allocedDumpIds <= 0)
643 : {
644 310 : newAlloc = 256;
645 310 : dumpIdMap = pg_malloc_array(DumpableObject *, newAlloc);
646 : }
647 : else
648 : {
649 1258 : newAlloc = allocedDumpIds * 2;
650 1258 : dumpIdMap = pg_realloc_array(dumpIdMap, DumpableObject *, newAlloc);
651 : }
652 1568 : memset(dumpIdMap + allocedDumpIds, 0,
653 1568 : (newAlloc - allocedDumpIds) * sizeof(DumpableObject *));
654 1568 : allocedDumpIds = newAlloc;
655 : }
656 1125014 : dumpIdMap[dobj->dumpId] = dobj;
657 :
658 : /* If it has a valid CatalogId, enter it into the hash table */
659 1125014 : if (OidIsValid(dobj->catId.tableoid))
660 : {
661 : CatalogIdMapEntry *entry;
662 : bool found;
663 :
664 : /* Initialize CatalogId hash table if not done yet */
665 1098284 : if (catalogIdHash == NULL)
666 310 : catalogIdHash = catalogid_create(CATALOGIDHASH_INITIAL_SIZE, NULL);
667 :
668 1098284 : entry = catalogid_insert(catalogIdHash, dobj->catId, &found);
669 1098284 : if (!found)
670 : {
671 1096692 : entry->dobj = NULL;
672 1096692 : entry->ext = NULL;
673 : }
674 : Assert(entry->dobj == NULL);
675 1098284 : entry->dobj = dobj;
676 : }
677 1125014 : }
678 :
679 : /*
680 : * recordAdditionalCatalogID
681 : * Record an additional catalog ID for the given DumpableObject
682 : */
683 : void
684 20 : recordAdditionalCatalogID(CatalogId catId, DumpableObject *dobj)
685 : {
686 : CatalogIdMapEntry *entry;
687 : bool found;
688 :
689 : /* CatalogId hash table must exist, if we have a DumpableObject */
690 : Assert(catalogIdHash != NULL);
691 :
692 : /* Add reference to CatalogId hash */
693 20 : entry = catalogid_insert(catalogIdHash, catId, &found);
694 20 : if (!found)
695 : {
696 20 : entry->dobj = NULL;
697 20 : entry->ext = NULL;
698 : }
699 : Assert(entry->dobj == NULL);
700 20 : entry->dobj = dobj;
701 20 : }
702 :
703 : /*
704 : * Assign a DumpId that's not tied to a DumpableObject.
705 : *
706 : * This is used when creating a "fixed" ArchiveEntry that doesn't need to
707 : * participate in the sorting logic.
708 : */
709 : DumpId
710 18420 : createDumpId(void)
711 : {
712 18420 : return ++lastDumpId;
713 : }
714 :
715 : /*
716 : * Return the largest DumpId so far assigned
717 : */
718 : DumpId
719 2108 : getMaxDumpId(void)
720 : {
721 2108 : return lastDumpId;
722 : }
723 :
724 : /*
725 : * Find a DumpableObject by dump ID
726 : *
727 : * Returns NULL for invalid ID
728 : */
729 : DumpableObject *
730 35000592 : findObjectByDumpId(DumpId dumpId)
731 : {
732 35000592 : if (dumpId <= 0 || dumpId >= allocedDumpIds)
733 0 : return NULL; /* out of range? */
734 35000592 : return dumpIdMap[dumpId];
735 : }
736 :
737 : /*
738 : * Find a DumpableObject by catalog ID
739 : *
740 : * Returns NULL for unknown ID
741 : */
742 : DumpableObject *
743 3850258 : findObjectByCatalogId(CatalogId catalogId)
744 : {
745 : CatalogIdMapEntry *entry;
746 :
747 3850258 : if (catalogIdHash == NULL)
748 0 : return NULL; /* no objects exist yet */
749 :
750 3850258 : entry = catalogid_lookup(catalogIdHash, catalogId);
751 3850258 : if (entry == NULL)
752 1092346 : return NULL;
753 2757912 : return entry->dobj;
754 : }
755 :
756 : /*
757 : * Build an array of pointers to all known dumpable objects
758 : *
759 : * This simply creates a modifiable copy of the internal map.
760 : */
761 : void
762 320 : getDumpableObjects(DumpableObject ***objs, int *numObjs)
763 : {
764 : int i,
765 : j;
766 :
767 320 : *objs = pg_malloc_array(DumpableObject *, allocedDumpIds);
768 320 : j = 0;
769 1441792 : for (i = 1; i < allocedDumpIds; i++)
770 : {
771 1441472 : if (dumpIdMap[i])
772 1167240 : (*objs)[j++] = dumpIdMap[i];
773 : }
774 320 : *numObjs = j;
775 320 : }
776 :
777 : /*
778 : * Add a dependency link to a DumpableObject
779 : *
780 : * Note: duplicate dependencies are currently not eliminated
781 : */
782 : void
783 1776110 : addObjectDependency(DumpableObject *dobj, DumpId refId)
784 : {
785 1776110 : if (dobj->nDeps >= dobj->allocDeps)
786 : {
787 296094 : if (dobj->allocDeps <= 0)
788 : {
789 288308 : dobj->allocDeps = 16;
790 288308 : dobj->dependencies = pg_malloc_array(DumpId, dobj->allocDeps);
791 : }
792 : else
793 : {
794 7786 : dobj->allocDeps *= 2;
795 7786 : dobj->dependencies = pg_realloc_array(dobj->dependencies,
796 : DumpId, dobj->allocDeps);
797 : }
798 : }
799 1776110 : dobj->dependencies[dobj->nDeps++] = refId;
800 1776110 : }
801 :
802 : /*
803 : * Remove a dependency link from a DumpableObject
804 : *
805 : * If there are multiple links, all are removed
806 : */
807 : void
808 50518 : removeObjectDependency(DumpableObject *dobj, DumpId refId)
809 : {
810 : int i;
811 50518 : int j = 0;
812 :
813 1226226 : for (i = 0; i < dobj->nDeps; i++)
814 : {
815 1175708 : if (dobj->dependencies[i] != refId)
816 1123154 : dobj->dependencies[j++] = dobj->dependencies[i];
817 : }
818 50518 : dobj->nDeps = j;
819 50518 : }
820 :
821 :
822 : /*
823 : * findTableByOid
824 : * finds the DumpableObject for the table with the given oid
825 : * returns NULL if not found
826 : */
827 : TableInfo *
828 137960 : findTableByOid(Oid oid)
829 : {
830 : CatalogId catId;
831 : DumpableObject *dobj;
832 :
833 137960 : catId.tableoid = RelationRelationId;
834 137960 : catId.oid = oid;
835 137960 : dobj = findObjectByCatalogId(catId);
836 : Assert(dobj == NULL || dobj->objType == DO_TABLE);
837 137960 : return (TableInfo *) dobj;
838 : }
839 :
840 : /*
841 : * findIndexByOid
842 : * finds the DumpableObject for the index with the given oid
843 : * returns NULL if not found
844 : */
845 : static IndxInfo *
846 1096 : findIndexByOid(Oid oid)
847 : {
848 : CatalogId catId;
849 : DumpableObject *dobj;
850 :
851 1096 : catId.tableoid = RelationRelationId;
852 1096 : catId.oid = oid;
853 1096 : dobj = findObjectByCatalogId(catId);
854 : Assert(dobj == NULL || dobj->objType == DO_INDEX);
855 1096 : return (IndxInfo *) dobj;
856 : }
857 :
858 : /*
859 : * findTypeByOid
860 : * finds the DumpableObject for the type with the given oid
861 : * returns NULL if not found
862 : */
863 : TypeInfo *
864 144406 : findTypeByOid(Oid oid)
865 : {
866 : CatalogId catId;
867 : DumpableObject *dobj;
868 :
869 144406 : catId.tableoid = TypeRelationId;
870 144406 : catId.oid = oid;
871 144406 : dobj = findObjectByCatalogId(catId);
872 : Assert(dobj == NULL ||
873 : dobj->objType == DO_TYPE || dobj->objType == DO_DUMMY_TYPE);
874 144406 : return (TypeInfo *) dobj;
875 : }
876 :
877 : /*
878 : * findFuncByOid
879 : * finds the DumpableObject for the function with the given oid
880 : * returns NULL if not found
881 : */
882 : FuncInfo *
883 520 : findFuncByOid(Oid oid)
884 : {
885 : CatalogId catId;
886 : DumpableObject *dobj;
887 :
888 520 : catId.tableoid = ProcedureRelationId;
889 520 : catId.oid = oid;
890 520 : dobj = findObjectByCatalogId(catId);
891 : Assert(dobj == NULL || dobj->objType == DO_FUNC);
892 520 : return (FuncInfo *) dobj;
893 : }
894 :
895 : /*
896 : * findOprByOid
897 : * finds the DumpableObject for the operator with the given oid
898 : * returns NULL if not found
899 : */
900 : OprInfo *
901 5648 : findOprByOid(Oid oid)
902 : {
903 : CatalogId catId;
904 : DumpableObject *dobj;
905 :
906 5648 : catId.tableoid = OperatorRelationId;
907 5648 : catId.oid = oid;
908 5648 : dobj = findObjectByCatalogId(catId);
909 : Assert(dobj == NULL || dobj->objType == DO_OPERATOR);
910 5648 : return (OprInfo *) dobj;
911 : }
912 :
913 : /*
914 : * findCollationByOid
915 : * finds the DumpableObject for the collation with the given oid
916 : * returns NULL if not found
917 : */
918 : CollInfo *
919 536 : findCollationByOid(Oid oid)
920 : {
921 : CatalogId catId;
922 : DumpableObject *dobj;
923 :
924 536 : catId.tableoid = CollationRelationId;
925 536 : catId.oid = oid;
926 536 : dobj = findObjectByCatalogId(catId);
927 : Assert(dobj == NULL || dobj->objType == DO_COLLATION);
928 536 : return (CollInfo *) dobj;
929 : }
930 :
931 : /*
932 : * findNamespaceByOid
933 : * finds the DumpableObject for the namespace with the given oid
934 : * returns NULL if not found
935 : */
936 : NamespaceInfo *
937 962942 : findNamespaceByOid(Oid oid)
938 : {
939 : CatalogId catId;
940 : DumpableObject *dobj;
941 :
942 962942 : catId.tableoid = NamespaceRelationId;
943 962942 : catId.oid = oid;
944 962942 : dobj = findObjectByCatalogId(catId);
945 : Assert(dobj == NULL || dobj->objType == DO_NAMESPACE);
946 962942 : return (NamespaceInfo *) dobj;
947 : }
948 :
949 : /*
950 : * findExtensionByOid
951 : * finds the DumpableObject for the extension with the given oid
952 : * returns NULL if not found
953 : */
954 : ExtensionInfo *
955 360 : findExtensionByOid(Oid oid)
956 : {
957 : CatalogId catId;
958 : DumpableObject *dobj;
959 :
960 360 : catId.tableoid = ExtensionRelationId;
961 360 : catId.oid = oid;
962 360 : dobj = findObjectByCatalogId(catId);
963 : Assert(dobj == NULL || dobj->objType == DO_EXTENSION);
964 360 : return (ExtensionInfo *) dobj;
965 : }
966 :
967 : /*
968 : * findPublicationByOid
969 : * finds the DumpableObject for the publication with the given oid
970 : * returns NULL if not found
971 : */
972 : PublicationInfo *
973 774 : findPublicationByOid(Oid oid)
974 : {
975 : CatalogId catId;
976 : DumpableObject *dobj;
977 :
978 774 : catId.tableoid = PublicationRelationId;
979 774 : catId.oid = oid;
980 774 : dobj = findObjectByCatalogId(catId);
981 : Assert(dobj == NULL || dobj->objType == DO_PUBLICATION);
982 774 : return (PublicationInfo *) dobj;
983 : }
984 :
985 : /*
986 : * findSubscriptionByOid
987 : * finds the DumpableObject for the subscription with the given oid
988 : * returns NULL if not found
989 : */
990 : SubscriptionInfo *
991 4 : findSubscriptionByOid(Oid oid)
992 : {
993 : CatalogId catId;
994 : DumpableObject *dobj;
995 :
996 4 : catId.tableoid = SubscriptionRelationId;
997 4 : catId.oid = oid;
998 4 : dobj = findObjectByCatalogId(catId);
999 : Assert(dobj == NULL || dobj->objType == DO_SUBSCRIPTION);
1000 4 : return (SubscriptionInfo *) dobj;
1001 : }
1002 :
1003 :
1004 : /*
1005 : * recordExtensionMembership
1006 : * Record that the object identified by the given catalog ID
1007 : * belongs to the given extension
1008 : */
1009 : void
1010 2440 : recordExtensionMembership(CatalogId catId, ExtensionInfo *ext)
1011 : {
1012 : CatalogIdMapEntry *entry;
1013 : bool found;
1014 :
1015 : /* CatalogId hash table must exist, if we have an ExtensionInfo */
1016 : Assert(catalogIdHash != NULL);
1017 :
1018 : /* Add reference to CatalogId hash */
1019 2440 : entry = catalogid_insert(catalogIdHash, catId, &found);
1020 2440 : if (!found)
1021 : {
1022 2440 : entry->dobj = NULL;
1023 2440 : entry->ext = NULL;
1024 : }
1025 : Assert(entry->ext == NULL);
1026 2440 : entry->ext = ext;
1027 2440 : }
1028 :
1029 : /*
1030 : * findOwningExtension
1031 : * return owning extension for specified catalog ID, or NULL if none
1032 : */
1033 : ExtensionInfo *
1034 958996 : findOwningExtension(CatalogId catalogId)
1035 : {
1036 : CatalogIdMapEntry *entry;
1037 :
1038 958996 : if (catalogIdHash == NULL)
1039 0 : return NULL; /* no objects exist yet */
1040 :
1041 958996 : entry = catalogid_lookup(catalogIdHash, catalogId);
1042 958996 : if (entry == NULL)
1043 0 : return NULL;
1044 958996 : return entry->ext;
1045 : }
1046 :
1047 :
1048 : /*
1049 : * parseOidArray
1050 : * parse a string of numbers delimited by spaces into an Oid array
1051 : *
1052 : * Note: actually this is used for both Oids and potentially-signed
1053 : * attribute numbers. This should cause no trouble, but we could split
1054 : * the function into two functions with different argument types if it does.
1055 : */
1056 :
1057 : void
1058 11914 : parseOidArray(const char *str, Oid *array, int arraysize)
1059 : {
1060 : int j,
1061 : argNum;
1062 : char temp[100];
1063 : char s;
1064 :
1065 11914 : argNum = 0;
1066 11914 : j = 0;
1067 : for (;;)
1068 : {
1069 56532 : s = *str++;
1070 56532 : if (s == ' ' || s == '\0')
1071 : {
1072 19130 : if (j > 0)
1073 : {
1074 19130 : if (argNum >= arraysize)
1075 0 : pg_fatal("could not parse numeric array \"%s\": too many numbers", str);
1076 19130 : temp[j] = '\0';
1077 19130 : array[argNum++] = atooid(temp);
1078 19130 : j = 0;
1079 : }
1080 19130 : if (s == '\0')
1081 11914 : break;
1082 : }
1083 : else
1084 : {
1085 37402 : if (!(isdigit((unsigned char) s) || s == '-') ||
1086 37402 : j >= sizeof(temp) - 1)
1087 0 : pg_fatal("could not parse numeric array \"%s\": invalid character in number", str);
1088 37402 : temp[j++] = s;
1089 : }
1090 : }
1091 :
1092 11914 : while (argNum < arraysize)
1093 0 : array[argNum++] = InvalidOid;
1094 11914 : }
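A brief usage sketch (the OID values are made up): the caller passes a buffer of exactly arraysize slots; "0" parses to InvalidOid, and if the string holds fewer numbers than arraysize the trailing slots are likewise set to InvalidOid.

Oid			argtypes[4];

parseOidArray("16384 0 16385", argtypes, 4);
/* argtypes[0] == 16384, argtypes[1] == InvalidOid (0), argtypes[2] == 16385,
 * argtypes[3] == InvalidOid because the string supplied only three numbers */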
1095 :
1096 :
1097 : /*
1098 : * strInArray:
1099 : * takes a string, a string array, and the number of elements in the
1100 : * string array;
1101 : * returns the index if the string is found in the array, -1 otherwise
1102 : */
1103 :
1104 : static int
1105 7784 : strInArray(const char *pattern, char **arr, int arr_size)
1106 : {
1107 : int i;
1108 :
1109 15610 : for (i = 0; i < arr_size; i++)
1110 : {
1111 15240 : if (strcmp(pattern, arr[i]) == 0)
1112 7414 : return i;
1113 : }
1114 370 : return -1;
1115 : }