/*-------------------------------------------------------------------------
 *
 * array_typanalyze.c
 *    Functions for gathering statistics from array columns
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/utils/adt/array_typanalyze.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/detoast.h"
#include "commands/vacuum.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"
#include "utils/typcache.h"


/*
 * To avoid consuming too much memory, IO and CPU load during analysis, and/or
 * too much space in the resulting pg_statistic rows, we ignore arrays that
 * are wider than ARRAY_WIDTH_THRESHOLD (after detoasting!).  Note that this
 * number is considerably more than the similar WIDTH_THRESHOLD limit used
 * in analyze.c's standard typanalyze code.
 */
#define ARRAY_WIDTH_THRESHOLD 0x10000
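
/*
 * Illustrative arithmetic (added commentary): 0x10000 is 65536 bytes, so,
 * ignoring header overhead, a one-dimensional int4[] of roughly 16000
 * elements is still analyzed, while a multi-megabyte array value is
 * skipped entirely.
 */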

/* Extra data for compute_array_stats function */
typedef struct
{
    /* Information about array element type */
    Oid         type_id;        /* element type's OID */
    Oid         eq_opr;         /* default equality operator's OID */
    Oid         coll_id;        /* collation to use */
    bool        typbyval;       /* physical properties of element type */
    int16       typlen;
    char        typalign;

    /*
     * Lookup data for element type's comparison and hash functions (these
     * are in the type's typcache entry, which we expect to remain valid
     * over the lifespan of the ANALYZE run)
     */
    FmgrInfo   *cmp;
    FmgrInfo   *hash;

    /* Saved state from std_typanalyze() */
    AnalyzeAttrComputeStatsFunc std_compute_stats;
    void       *std_extra_data;
} ArrayAnalyzeExtraData;

/*
 * While compute_array_stats is running, we keep a pointer to the extra data
 * here for use by assorted subroutines.  compute_array_stats doesn't
 * currently need to be re-entrant, so avoiding this is not worth the extra
 * notational cruft that would be needed.
 */
static ArrayAnalyzeExtraData *array_extra_data;

/* A hash table entry for the Lossy Counting algorithm */
typedef struct
{
    Datum       key;            /* This is 'e' from the LC algorithm. */
    int         frequency;      /* This is 'f'. */
    int         delta;          /* And this is 'delta'. */
    int         last_container; /* For de-duplication of array elements. */
} TrackItem;

/* A hash table entry for distinct-elements counts */
typedef struct
{
    int         count;          /* Count of distinct elements in an array */
    int         frequency;      /* Number of arrays seen with this count */
} DECountItem;

static void compute_array_stats(VacAttrStats *stats,
                                AnalyzeAttrFetchFunc fetchfunc, int samplerows, double totalrows);
static void prune_element_hashtable(HTAB *elements_tab, int b_current);
static uint32 element_hash(const void *key, Size keysize);
static int  element_match(const void *key1, const void *key2, Size keysize);
static int  element_compare(const void *key1, const void *key2);
static int  trackitem_compare_frequencies_desc(const void *e1, const void *e2);
static int  trackitem_compare_element(const void *e1, const void *e2);
static int  countitem_compare_count(const void *e1, const void *e2);


/*
 * array_typanalyze -- typanalyze function for array columns
 */
Datum
array_typanalyze(PG_FUNCTION_ARGS)
{
    VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);
    Oid         element_typeid;
    TypeCacheEntry *typentry;
    ArrayAnalyzeExtraData *extra_data;

    /*
     * Call the standard typanalyze function.  It may fail to find needed
     * operators, in which case we also can't do anything, so just fail.
     */
    if (!std_typanalyze(stats))
        PG_RETURN_BOOL(false);

    /*
     * Check attribute data type is a varlena array (or a domain over one).
     */
    element_typeid = get_base_element_type(stats->attrtypid);
    if (!OidIsValid(element_typeid))
        elog(ERROR, "array_typanalyze was invoked for non-array type %u",
             stats->attrtypid);

    /*
     * Gather information about the element type.  If we fail to find
     * something, return leaving the state from std_typanalyze() in place.
     */
    typentry = lookup_type_cache(element_typeid,
                                 TYPECACHE_EQ_OPR |
                                 TYPECACHE_CMP_PROC_FINFO |
                                 TYPECACHE_HASH_PROC_FINFO);

    if (!OidIsValid(typentry->eq_opr) ||
        !OidIsValid(typentry->cmp_proc_finfo.fn_oid) ||
        !OidIsValid(typentry->hash_proc_finfo.fn_oid))
        PG_RETURN_BOOL(true);

    /* Store our findings for use by compute_array_stats() */
    extra_data = (ArrayAnalyzeExtraData *) palloc(sizeof(ArrayAnalyzeExtraData));
    extra_data->type_id = typentry->type_id;
    extra_data->eq_opr = typentry->eq_opr;
    extra_data->coll_id = stats->attrcollid;    /* collation we should use */
    extra_data->typbyval = typentry->typbyval;
    extra_data->typlen = typentry->typlen;
    extra_data->typalign = typentry->typalign;
    extra_data->cmp = &typentry->cmp_proc_finfo;
    extra_data->hash = &typentry->hash_proc_finfo;

    /* Save old compute_stats and extra_data for scalar statistics ... */
    extra_data->std_compute_stats = stats->compute_stats;
    extra_data->std_extra_data = stats->extra_data;

    /* ... and replace with our info */
    stats->compute_stats = compute_array_stats;
    stats->extra_data = extra_data;

    /*
     * Note we leave stats->minrows set as std_typanalyze set it.  Should it
     * be increased for array analysis purposes?
     */

    PG_RETURN_BOOL(true);
}
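
/*
 * Context (added commentary): ANALYZE reaches this function through
 * pg_type.typanalyze, which points at array_typanalyze for the built-in
 * array types.  As a hedged sketch, a user-defined type could attach its
 * own typanalyze function the same way ("mytype" and "mytype_typanalyze"
 * are hypothetical names):
 *
 *     CREATE TYPE mytype (
 *         INPUT = mytype_in,
 *         OUTPUT = mytype_out,
 *         ANALYZE = mytype_typanalyze
 *     );
 *
 * Such a function receives a VacAttrStats pointer and, like the code above,
 * may install a custom compute_stats callback before returning true.
 */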

/*
 * compute_array_stats() -- compute statistics for an array column
 *
 * This function computes statistics useful for determining selectivity of
 * the array operators <@, &&, and @>.  It is invoked by ANALYZE via the
 * compute_stats hook after sample rows have been collected.
 *
 * We also invoke the standard compute_stats function, which will compute
 * "scalar" statistics relevant to the btree-style array comparison operators.
 * However, exact duplicates of an entire array may be rare despite many
 * arrays sharing individual elements.  This especially afflicts long arrays,
 * which are also liable to lack all scalar statistics due to the low
 * WIDTH_THRESHOLD used in analyze.c.  So, in addition to the standard stats,
 * we find the most common array elements and compute a histogram of distinct
 * element counts.
 *
 * The algorithm used is Lossy Counting, as proposed in the paper "Approximate
 * frequency counts over data streams" by G. S. Manku and R. Motwani, in
 * Proceedings of the 28th International Conference on Very Large Data Bases,
 * Hong Kong, China, August 2002, section 4.2.  The paper is available at
 * http://www.vldb.org/conf/2002/S10P03.pdf
 *
 * The Lossy Counting (aka LC) algorithm goes like this:
 * Let s be the threshold frequency for an item (the minimum frequency we
 * are interested in) and epsilon the error margin for the frequency.  Let D
 * be a set of triples (e, f, delta), where e is an element value, f is that
 * element's frequency (actually, its current occurrence count) and delta is
 * the maximum error in f.  We start with D empty and process the elements in
 * batches of size w.  (The batch size is also known as "bucket size" and is
 * equal to 1/epsilon.)  Let the current batch number be b_current, starting
 * with 1.  For each element e we either increment its f count, if it's
 * already in D, or insert a new triple into D with values (e, 1, b_current
 * - 1).  After processing each batch we prune D, by removing from it all
 * elements with f + delta <= b_current.  After the algorithm finishes we
 * suppress all elements from D that do not satisfy f >= (s - epsilon) * N,
 * where N is the total number of elements in the input.  We emit the
 * remaining elements with estimated frequency f/N.  The LC paper proves
 * that this algorithm finds all elements with true frequency at least s,
 * and that no frequency is overestimated or is underestimated by more than
 * epsilon.  Furthermore, given reasonable assumptions about the input
 * distribution, the required table size is no more than about 7 times w.
 *
 * In the absence of a principled basis for other particular values, we
 * follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
 * But we leave out the correction for stopwords, which do not apply to
 * arrays.  These parameters give bucket width w = K/0.007 and maximum
 * expected hashtable size of about 1000 * K.
 *
 * Elements may repeat within an array.  Since duplicates do not change the
 * behavior of <@, && or @>, we want to count each element only once per
 * array.  Therefore, we store in the finished pg_statistic entry each
 * element's frequency as the fraction of all non-null rows that contain it.
 * We divide the raw counts by nonnull_cnt to get those figures.
 */
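/*
 * A tiny worked trace of the LC bookkeeping (added commentary, illustrative
 * numbers only): let w = 3 and the element stream be A A B A C A.  Bucket 1
 * processes A A B, giving D = {(A,2,0), (B,1,0)}; the prune with
 * b_current = 1 removes B (1 + 0 <= 1) but keeps A.  Bucket 2 processes
 * A C A, giving D = {(A,4,0), (C,1,1)}; the prune with b_current = 2
 * removes C (1 + 1 <= 2) and again keeps A.  With N = 6, A is reported
 * with estimated frequency 4/6, which is exact here since its delta is 0.
 */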
static void
compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
                    int samplerows, double totalrows)
{
    ArrayAnalyzeExtraData *extra_data;
    int         num_mcelem;
    int         null_cnt = 0;
    int         null_elem_cnt = 0;
    int         analyzed_rows = 0;

    /* This is D from the LC algorithm. */
    HTAB       *elements_tab;
    HASHCTL     elem_hash_ctl;
    HASH_SEQ_STATUS scan_status;

    /* This is the current bucket number from the LC algorithm */
    int         b_current;

    /* This is 'w' from the LC algorithm */
    int         bucket_width;
    int         array_no;
    int64       element_no;
    TrackItem  *item;
    int         slot_idx;
    HTAB       *count_tab;
    HASHCTL     count_hash_ctl;
    DECountItem *count_item;

    extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;

    /*
     * Invoke analyze.c's standard analysis function to create scalar-style
     * stats for the column.  It will expect its own extra_data pointer, so
     * temporarily install that.
     */
    stats->extra_data = extra_data->std_extra_data;
    extra_data->std_compute_stats(stats, fetchfunc, samplerows, totalrows);
    stats->extra_data = extra_data;

    /*
     * Set up static pointer for use by subroutines.  We wait till here in
     * case std_compute_stats somehow recursively invokes us (probably not
     * possible, but ...)
     */
    array_extra_data = extra_data;

    /*
     * We want statistics_target * 10 elements in the MCELEM array.  This
     * multiplier is pretty arbitrary, but is meant to reflect the fact that
     * the number of individual elements tracked in pg_statistic ought to be
     * more than the number of values for a simple scalar column.
     */
    num_mcelem = stats->attr->attstattarget * 10;

    /*
     * We set bucket width equal to num_mcelem / 0.007 as per the comment
     * above.
     */
    bucket_width = num_mcelem * 1000 / 7;
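
    /*
     * Worked numbers (added commentary, assuming the default statistics
     * target of 100): num_mcelem = 1000, so bucket_width = 1000 * 1000 / 7
     * = 142857, i.e. about num_mcelem / 0.007.  The cutoff applied further
     * below, 9 * element_no / bucket_width, then discards elements seen
     * fewer than about 63 times per million elements processed.
     */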

    /*
     * Create the hashtable.  It will be in local memory, so we don't need
     * to worry about overflowing the initial size.  Also we don't need to
     * pay any attention to locking and memory management.
     */
    elem_hash_ctl.keysize = sizeof(Datum);
    elem_hash_ctl.entrysize = sizeof(TrackItem);
    elem_hash_ctl.hash = element_hash;
    elem_hash_ctl.match = element_match;
    elem_hash_ctl.hcxt = CurrentMemoryContext;
    elements_tab = hash_create("Analyzed elements table",
                               num_mcelem,
                               &elem_hash_ctl,
                               HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);

    /* hashtable for array distinct elements counts */
    count_hash_ctl.keysize = sizeof(int);
    count_hash_ctl.entrysize = sizeof(DECountItem);
    count_hash_ctl.hcxt = CurrentMemoryContext;
    count_tab = hash_create("Array distinct element count table",
                            64,
                            &count_hash_ctl,
                            HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
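
    /*
     * Aside (added commentary): HASH_BLOBS makes dynahash hash the int key
     * as a fixed-size byte blob with its built-in hash support, whereas
     * elements_tab above needed HASH_FUNCTION | HASH_COMPARE because Datum
     * keys must be hashed and compared with the element type's own support
     * functions.
     */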

    /* Initialize counters. */
    b_current = 1;
    element_no = 0;

    /* Loop over the arrays. */
    for (array_no = 0; array_no < samplerows; array_no++)
    {
        Datum       value;
        bool        isnull;
        ArrayType  *array;
        int         num_elems;
        Datum      *elem_values;
        bool       *elem_nulls;
        bool        null_present;
        int         j;
        int64       prev_element_no = element_no;
        int         distinct_count;
        bool        count_item_found;

        vacuum_delay_point();

        value = fetchfunc(stats, array_no, &isnull);
        if (isnull)
        {
            /* array is null, just count that */
            null_cnt++;
            continue;
        }

        /* Skip too-large values. */
        if (toast_raw_datum_size(value) > ARRAY_WIDTH_THRESHOLD)
            continue;
        else
            analyzed_rows++;

        /*
         * Now detoast the array if needed, and deconstruct into datums.
         */
        array = DatumGetArrayTypeP(value);

        Assert(ARR_ELEMTYPE(array) == extra_data->type_id);
        deconstruct_array(array,
                          extra_data->type_id,
                          extra_data->typlen,
                          extra_data->typbyval,
                          extra_data->typalign,
                          &elem_values, &elem_nulls, &num_elems);

        /*
         * We loop through the elements in the array and add them to our
         * tracking hashtable.
         */
        null_present = false;
        for (j = 0; j < num_elems; j++)
        {
            Datum       elem_value;
            bool        found;

            /* No null element processing other than flag setting here */
            if (elem_nulls[j])
            {
                null_present = true;
                continue;
            }

            /* Lookup current element in hashtable, adding it if new */
            elem_value = elem_values[j];
            item = (TrackItem *) hash_search(elements_tab,
                                             (const void *) &elem_value,
                                             HASH_ENTER, &found);

            if (found)
            {
                /* The element value is already on the tracking list */

                /*
                 * The operators we assist ignore duplicate array elements,
                 * so count a given distinct element only once per array.
                 */
                if (item->last_container == array_no)
                    continue;

                item->frequency++;
                item->last_container = array_no;
            }
            else
            {
                /* Initialize new tracking list element */

                /*
                 * If element type is pass-by-reference, we must copy it
                 * into palloc'd space, so that we can release the array
                 * below.  (We do this so that the space needed for element
                 * values is limited by the size of the hashtable; if we
                 * kept all the array values around, it could be much more.)
                 */
                item->key = datumCopy(elem_value,
                                      extra_data->typbyval,
                                      extra_data->typlen);

                item->frequency = 1;
                item->delta = b_current - 1;
                item->last_container = array_no;
            }

            /* element_no is the number of elements processed (ie N) */
            element_no++;

            /* We prune the D structure after processing each bucket */
            if (element_no % bucket_width == 0)
            {
                prune_element_hashtable(elements_tab, b_current);
                b_current++;
            }
        }

        /* Count null element presence once per array. */
        if (null_present)
            null_elem_cnt++;

        /* Update frequency of the particular array distinct element count. */
        distinct_count = (int) (element_no - prev_element_no);
        count_item = (DECountItem *) hash_search(count_tab, &distinct_count,
                                                 HASH_ENTER,
                                                 &count_item_found);

        if (count_item_found)
            count_item->frequency++;
        else
            count_item->frequency = 1;

        /* Free memory allocated while detoasting. */
        if (PointerGetDatum(array) != value)
            pfree(array);
        pfree(elem_values);
        pfree(elem_nulls);
    }

    /* Skip pg_statistic slots occupied by standard statistics */
    slot_idx = 0;
    while (slot_idx < STATISTIC_NUM_SLOTS && stats->stakind[slot_idx] != 0)
        slot_idx++;
    if (slot_idx > STATISTIC_NUM_SLOTS - 2)
        elog(ERROR, "insufficient pg_statistic slots for array stats");

    /* We can only compute real stats if we found some non-null values. */
    if (analyzed_rows > 0)
    {
        int         nonnull_cnt = analyzed_rows;
        int         count_items_count;
        int         i;
        TrackItem **sort_table;
        int         track_len;
        int64       cutoff_freq;
        int64       minfreq,
                    maxfreq;

        /*
         * We assume the standard stats code already took care of setting
         * stats_valid, stanullfrac, stawidth, stadistinct.  We'd have to
         * re-compute those values if we wanted to not store the standard
         * stats.
         */

        /*
         * Construct an array of the interesting hashtable items, that is,
         * those meeting the cutoff frequency (s - epsilon)*N.  Also
         * identify the minimum and maximum frequencies among these items.
         *
         * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
         * frequency is 9*N / bucket_width.
         */
        cutoff_freq = 9 * element_no / bucket_width;

        i = hash_get_num_entries(elements_tab); /* surely enough space */
        sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);

        hash_seq_init(&scan_status, elements_tab);
        track_len = 0;
        minfreq = element_no;
        maxfreq = 0;
        while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
        {
            if (item->frequency > cutoff_freq)
            {
                sort_table[track_len++] = item;
                minfreq = Min(minfreq, item->frequency);
                maxfreq = Max(maxfreq, item->frequency);
            }
        }
        Assert(track_len <= i);

        /* emit some statistics for debug purposes */
        elog(DEBUG3, "compute_array_stats: target # mces = %d, "
             "bucket width = %d, "
             "# elements = " INT64_FORMAT ", hashtable size = %d, "
             "usable entries = %d",
             num_mcelem, bucket_width, element_no, i, track_len);

        /*
         * If we obtained more elements than we really want, get rid of
         * those with least frequencies.  The easiest way is to qsort the
         * array into descending frequency order and truncate the array.
         */
        if (num_mcelem < track_len)
        {
            qsort(sort_table, track_len, sizeof(TrackItem *),
                  trackitem_compare_frequencies_desc);
            /* reset minfreq to the smallest frequency we're keeping */
            minfreq = sort_table[num_mcelem - 1]->frequency;
        }
        else
            num_mcelem = track_len;

        /* Generate MCELEM slot entry */
        if (num_mcelem > 0)
        {
            MemoryContext old_context;
            Datum      *mcelem_values;
            float4     *mcelem_freqs;

            /*
             * We want to store statistics sorted on the element value
             * using the element type's default comparison function.  This
             * permits fast binary searches in selectivity estimation
             * functions.
             */
            qsort(sort_table, num_mcelem, sizeof(TrackItem *),
                  trackitem_compare_element);

            /* Must copy the target values into anl_context */
            old_context = MemoryContextSwitchTo(stats->anl_context);

            /*
             * We sorted statistics on the element value, but we want to be
             * able to find the minimal and maximal frequencies without
             * going through all the values.  We also want the frequency of
             * null elements.  Store these three values at the end of
             * mcelem_freqs.
             */
            mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
            mcelem_freqs = (float4 *) palloc((num_mcelem + 3) * sizeof(float4));

            /*
             * See comments above about use of nonnull_cnt as the divisor
             * for the final frequency estimates.
             */
            for (i = 0; i < num_mcelem; i++)
            {
                TrackItem  *item = sort_table[i];

                mcelem_values[i] = datumCopy(item->key,
                                             extra_data->typbyval,
                                             extra_data->typlen);
                mcelem_freqs[i] = (double) item->frequency /
                    (double) nonnull_cnt;
            }
            mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
            mcelem_freqs[i++] = (double) maxfreq / (double) nonnull_cnt;
            mcelem_freqs[i++] = (double) null_elem_cnt / (double) nonnull_cnt;

            MemoryContextSwitchTo(old_context);

            stats->stakind[slot_idx] = STATISTIC_KIND_MCELEM;
            stats->staop[slot_idx] = extra_data->eq_opr;
            stats->stacoll[slot_idx] = extra_data->coll_id;
            stats->stanumbers[slot_idx] = mcelem_freqs;
            /* See above comment about extra stanumber entries */
            stats->numnumbers[slot_idx] = num_mcelem + 3;
            stats->stavalues[slot_idx] = mcelem_values;
            stats->numvalues[slot_idx] = num_mcelem;
            /* We are storing values of element type */
            stats->statypid[slot_idx] = extra_data->type_id;
            stats->statyplen[slot_idx] = extra_data->typlen;
            stats->statypbyval[slot_idx] = extra_data->typbyval;
            stats->statypalign[slot_idx] = extra_data->typalign;
            slot_idx++;
        }
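
        /*
         * Aside (added commentary): the MCELEM slot filled above surfaces
         * in the pg_stats view as most_common_elems and
         * most_common_elem_freqs, and the DECHIST slot built below as
         * elem_count_histogram.  A sketch of inspecting the result, where
         * "mytable" and "mycol" are placeholder names:
         *
         *     SELECT most_common_elems, most_common_elem_freqs,
         *            elem_count_histogram
         *     FROM pg_stats
         *     WHERE tablename = 'mytable' AND attname = 'mycol';
         */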

        /* Generate DECHIST slot entry */
        count_items_count = hash_get_num_entries(count_tab);
        if (count_items_count > 0)
        {
            int         num_hist = stats->attr->attstattarget;
            DECountItem **sorted_count_items;
            int         j;
            int         delta;
            int64       frac;
            float4     *hist;

            /* num_hist must be at least 2 for the loop below to work */
            num_hist = Max(num_hist, 2);

            /*
             * Create an array of DECountItem pointers, and sort them into
             * increasing count order.
             */
            sorted_count_items = (DECountItem **)
                palloc(sizeof(DECountItem *) * count_items_count);
            hash_seq_init(&scan_status, count_tab);
            j = 0;
            while ((count_item = (DECountItem *) hash_seq_search(&scan_status)) != NULL)
            {
                sorted_count_items[j++] = count_item;
            }
            qsort(sorted_count_items, count_items_count,
                  sizeof(DECountItem *), countitem_compare_count);

            /*
             * Prepare to fill stanumbers with the histogram, followed by
             * the average count.  This array must be stored in anl_context.
             */
            hist = (float4 *)
                MemoryContextAlloc(stats->anl_context,
                                   sizeof(float4) * (num_hist + 1));
            hist[num_hist] = (double) element_no / (double) nonnull_cnt;

            /*----------
             * Construct the histogram of distinct-element counts (DECs).
             *
             * The object of this loop is to copy the min and max DECs to
             * hist[0] and hist[num_hist - 1], along with evenly-spaced DECs
             * in between (where "evenly-spaced" is with reference to the
             * whole input population of arrays).  If we had a complete
             * sorted array of DECs, one per analyzed row, the i'th hist
             * value would come from
             * DECs[i * (analyzed_rows - 1) / (num_hist - 1)]
             * (compare the histogram-making loop in compute_scalar_stats()).
             * But instead of that we have the sorted_count_items[] array,
             * which holds unique DEC values with their frequencies (that
             * is, a run-length-compressed version of the full array).  So
             * we control advancing through sorted_count_items[] with the
             * variable "frac", which is defined as (x - y) * (num_hist - 1),
             * where x is the index in the notional DECs array corresponding
             * to the start of the next sorted_count_items[] element's run,
             * and y is the index in DECs from which we should take the next
             * histogram value.  We have to advance whenever x <= y, that is
             * frac <= 0.  The x component is the sum of the frequencies
             * seen so far (up through the current sorted_count_items[]
             * element), and of course y * (num_hist - 1) =
             * i * (analyzed_rows - 1), per the subscript calculation above.
             * (The subscript calculation implies dropping any fractional
             * part of y; in this formulation that's handled by not
             * advancing until frac reaches 1.)
             *
             * Even though frac has a bounded range, it could overflow int32
             * when working with very large statistics targets, so we do
             * that math in int64.
             *----------
             */
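            /*
             * A small worked instance of the loop below (added commentary,
             * illustrative numbers only): suppose analyzed_rows = 5,
             * num_hist = 3, and sorted_count_items holds counts {2 x4, 7 x1},
             * i.e. four arrays with 2 distinct elements and one with 7.
             * Then delta = 4 and frac starts at 4 * 2 = 8.  i = 0 emits 2
             * (frac -> 4); i = 1 emits 2 again (frac -> 0); at i = 2,
             * frac <= 0, so we advance to the "7" item (frac -> 2) and
             * emit 7.  Result: hist[] = {2, 2, 7}, matching the notional
             * DECs array [2, 2, 2, 2, 7] sampled at positions 0, 2, 4.
             */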
            delta = analyzed_rows - 1;
            j = 0;              /* current index in sorted_count_items */
            /* Initialize frac for sorted_count_items[0]; y is initially 0 */
            frac = (int64) sorted_count_items[0]->frequency * (num_hist - 1);
            for (i = 0; i < num_hist; i++)
            {
                while (frac <= 0)
                {
                    /* Advance, and update x component of frac */
                    j++;
                    frac += (int64) sorted_count_items[j]->frequency * (num_hist - 1);
                }
                hist[i] = sorted_count_items[j]->count;
                frac -= delta;  /* update y for upcoming i increment */
            }
            Assert(j == count_items_count - 1);

            stats->stakind[slot_idx] = STATISTIC_KIND_DECHIST;
            stats->staop[slot_idx] = extra_data->eq_opr;
            stats->stacoll[slot_idx] = extra_data->coll_id;
            stats->stanumbers[slot_idx] = hist;
            stats->numnumbers[slot_idx] = num_hist + 1;
            slot_idx++;
        }
    }

    /*
     * We don't need to bother cleaning up any of our temporary palloc's.
     * The hashtable should also go away, as it used a child memory context.
     */
}

/*
 * A function to prune the D structure from the Lossy Counting algorithm.
 * Consult compute_tsvector_stats() for wider explanation.
 */
static void
prune_element_hashtable(HTAB *elements_tab, int b_current)
{
    HASH_SEQ_STATUS scan_status;
    TrackItem  *item;

    hash_seq_init(&scan_status, elements_tab);
    while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
    {
        if (item->frequency + item->delta <= b_current)
        {
            Datum       value = item->key;

            if (hash_search(elements_tab, (const void *) &item->key,
                            HASH_REMOVE, NULL) == NULL)
                elog(ERROR, "hash table corrupted");
            /* We should free memory if element is not passed by value */
            if (!array_extra_data->typbyval)
                pfree(DatumGetPointer(value));
        }
    }
}
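
/*
 * Illustrative check of the pruning rule above (added commentary, made-up
 * numbers): when bucket 3 is pruned (b_current = 3), an entry created
 * during that bucket has delta = 2 and survives with f >= 2, while an
 * entry tracked since bucket 1 has delta = 0 and needs f >= 4.  An entry
 * must keep accumulating counts at roughly one per bucket to stay in the
 * table, which is what bounds the table size.
 */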

/*
 * Hash function for elements.
 *
 * We use the element type's default hash opclass, and the column collation
 * if the type is collation-sensitive.
 */
static uint32
element_hash(const void *key, Size keysize)
{
    Datum       d = *((const Datum *) key);
    Datum       h;

    h = FunctionCall1Coll(array_extra_data->hash,
                          array_extra_data->coll_id,
                          d);
    return DatumGetUInt32(h);
}

/*
 * Matching function for elements, to be used in hashtable lookups.
 */
static int
element_match(const void *key1, const void *key2, Size keysize)
{
    /* The keysize parameter is superfluous here */
    return element_compare(key1, key2);
}

/*
 * Comparison function for elements.
 *
 * We use the element type's default btree opclass, and the column collation
 * if the type is collation-sensitive.
 *
 * XXX consider using SortSupport infrastructure
 */
static int
element_compare(const void *key1, const void *key2)
{
    Datum       d1 = *((const Datum *) key1);
    Datum       d2 = *((const Datum *) key2);
    Datum       c;

    c = FunctionCall2Coll(array_extra_data->cmp,
                          array_extra_data->coll_id,
                          d1, d2);
    return DatumGetInt32(c);
}

/*
 * qsort() comparator for sorting TrackItems by frequencies (descending sort)
 */
static int
trackitem_compare_frequencies_desc(const void *e1, const void *e2)
{
    const TrackItem *const *t1 = (const TrackItem *const *) e1;
    const TrackItem *const *t2 = (const TrackItem *const *) e2;

    return (*t2)->frequency - (*t1)->frequency;
}

/*
 * qsort() comparator for sorting TrackItems by element values
 */
static int
trackitem_compare_element(const void *e1, const void *e2)
{
    const TrackItem *const *t1 = (const TrackItem *const *) e1;
    const TrackItem *const *t2 = (const TrackItem *const *) e2;

    return element_compare(&(*t1)->key, &(*t2)->key);
}

/*
 * qsort() comparator for sorting DECountItems by count
 */
static int
countitem_compare_count(const void *e1, const void *e2)
{
    const DECountItem *const *t1 = (const DECountItem *const *) e1;
    const DECountItem *const *t2 = (const DECountItem *const *) e2;

    if ((*t1)->count < (*t2)->count)
        return -1;
    else if ((*t1)->count == (*t2)->count)
        return 0;
    else
        return 1;
}