LCOV - code coverage report
Current view: top level - src/backend/tsearch - ts_typanalyze.c (source / functions)
Test:       PostgreSQL 19devel
Test Date:  2026-03-01 21:15:06
Coverage:   Lines: 82.4 % (108 of 131 hit)    Functions: 87.5 % (7 of 8 hit)
Legend:     Lines: hit | not hit

            Line data    Source code
       1              : /*-------------------------------------------------------------------------
       2              :  *
       3              :  * ts_typanalyze.c
       4              :  *    functions for gathering statistics from tsvector columns
       5              :  *
       6              :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
       7              :  *
       8              :  *
       9              :  * IDENTIFICATION
      10              :  *    src/backend/tsearch/ts_typanalyze.c
      11              :  *
      12              :  *-------------------------------------------------------------------------
      13              :  */
      14              : #include "postgres.h"
      15              : 
      16              : #include "catalog/pg_collation.h"
      17              : #include "catalog/pg_operator.h"
      18              : #include "commands/vacuum.h"
      19              : #include "common/hashfn.h"
      20              : #include "tsearch/ts_type.h"
      21              : #include "utils/builtins.h"
      22              : #include "varatt.h"
      23              : 
      24              : 
      25              : /* A hash key for lexemes */
      26              : typedef struct
      27              : {
      28              :     char       *lexeme;         /* lexeme (not NULL terminated!) */
      29              :     int         length;         /* its length in bytes */
      30              : } LexemeHashKey;
      31              : 
      32              : /* A hash table entry for the Lossy Counting algorithm */
      33              : typedef struct
      34              : {
      35              :     LexemeHashKey key;          /* This is 'e' from the LC algorithm. */
      36              :     int         frequency;      /* This is 'f'. */
      37              :     int         delta;          /* And this is 'delta'. */
      38              : } TrackItem;
      39              : 
      40              : static void compute_tsvector_stats(VacAttrStats *stats,
      41              :                                    AnalyzeAttrFetchFunc fetchfunc,
      42              :                                    int samplerows,
      43              :                                    double totalrows);
      44              : static void prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current);
      45              : static uint32 lexeme_hash(const void *key, Size keysize);
      46              : static int  lexeme_match(const void *key1, const void *key2, Size keysize);
      47              : static int  lexeme_compare(const void *key1, const void *key2);
      48              : static int  trackitem_compare_frequencies_desc(const void *e1, const void *e2,
      49              :                                                void *arg);
      50              : static int  trackitem_compare_lexemes(const void *e1, const void *e2,
      51              :                                       void *arg);
      52              : 
      53              : 
      54              : /*
      55              :  *  ts_typanalyze -- a custom typanalyze function for tsvector columns
      56              :  */
      57              : Datum
      58            4 : ts_typanalyze(PG_FUNCTION_ARGS)
      59              : {
      60            4 :     VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);
      61              : 
      62              :     /* If the attstattarget column is negative, use the default value */
      63            4 :     if (stats->attstattarget < 0)
      64            4 :         stats->attstattarget = default_statistics_target;
      65              : 
      66            4 :     stats->compute_stats = compute_tsvector_stats;
      67              :     /* see comment about the choice of minrows in commands/analyze.c */
      68            4 :     stats->minrows = 300 * stats->attstattarget;
      69              : 
      70            4 :     PG_RETURN_BOOL(true);
      71              : }
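/*
 * Worked numbers for the defaults, as a sanity check (arithmetic only,
 * derived from the two assignments above): with default_statistics_target
 * = 100, ANALYZE samples minrows = 300 * 100 = 30000 rows, and the MCELEM
 * target computed below in compute_tsvector_stats() becomes
 * num_mcelem = 100 * 10 = 1000 lexemes.
 */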
      72              : 
      73              : /*
      74              :  *  compute_tsvector_stats() -- compute statistics for a tsvector column
      75              :  *
      76              :  *  This function computes statistics that are useful for determining @@
      77              :  *  operations' selectivity, along with the fraction of non-null rows and
      78              :  *  average width.
      79              :  *
      80              :  *  Instead of finding the most common values, as we do for most datatypes,
      81              :  *  we're looking for the most common lexemes. This is more useful, because
       82              :  *  most probably no two rows will share the same tsvector, and thus the
       83              :  *  notion of an MCV is a bit bogus with this datatype. With a list of the
      84              :  *  most common lexemes we can do a better job at figuring out @@ selectivity.
      85              :  *
      86              :  *  For the same reasons we assume that tsvector columns are unique when
      87              :  *  determining the number of distinct values.
      88              :  *
      89              :  *  The algorithm used is Lossy Counting, as proposed in the paper "Approximate
      90              :  *  frequency counts over data streams" by G. S. Manku and R. Motwani, in
      91              :  *  Proceedings of the 28th International Conference on Very Large Data Bases,
      92              :  *  Hong Kong, China, August 2002, section 4.2. The paper is available at
      93              :  *  http://www.vldb.org/conf/2002/S10P03.pdf
      94              :  *
      95              :  *  The Lossy Counting (aka LC) algorithm goes like this:
      96              :  *  Let s be the threshold frequency for an item (the minimum frequency we
      97              :  *  are interested in) and epsilon the error margin for the frequency. Let D
      98              :  *  be a set of triples (e, f, delta), where e is an element value, f is that
      99              :  *  element's frequency (actually, its current occurrence count) and delta is
     100              :  *  the maximum error in f. We start with D empty and process the elements in
     101              :  *  batches of size w. (The batch size is also known as "bucket size" and is
     102              :  *  equal to 1/epsilon.) Let the current batch number be b_current, starting
     103              :  *  with 1. For each element e we either increment its f count, if it's
     104              :  *  already in D, or insert a new triple into D with values (e, 1, b_current
     105              :  *  - 1). After processing each batch we prune D, by removing from it all
     106              :  *  elements with f + delta <= b_current.  After the algorithm finishes we
     107              :  *  suppress all elements from D that do not satisfy f >= (s - epsilon) * N,
     108              :  *  where N is the total number of elements in the input.  We emit the
     109              :  *  remaining elements with estimated frequency f/N.  The LC paper proves
     110              :  *  that this algorithm finds all elements with true frequency at least s,
     111              :  *  and that no frequency is overestimated or is underestimated by more than
     112              :  *  epsilon.  Furthermore, given reasonable assumptions about the input
     113              :  *  distribution, the required table size is no more than about 7 times w.
     114              :  *
     115              :  *  We set s to be the estimated frequency of the K'th word in a natural
     116              :  *  language's frequency table, where K is the target number of entries in
     117              :  *  the MCELEM array plus an arbitrary constant, meant to reflect the fact
     118              :  *  that the most common words in any language would usually be stopwords
     119              :  *  so we will not actually see them in the input.  We assume that the
     120              :  *  distribution of word frequencies (including the stopwords) follows Zipf's
     121              :  *  law with an exponent of 1.
     122              :  *
     123              :  *  Assuming Zipfian distribution, the frequency of the K'th word is equal
     124              :  *  to 1/(K * H(W)) where H(n) is 1/2 + 1/3 + ... + 1/n and W is the number of
      125              :  *  words in the language.  Taking W to be one million, we get roughly 0.07/K.
     126              :  *  Assuming top 10 words are stopwords gives s = 0.07/(K + 10).  We set
     127              :  *  epsilon = s/10, which gives bucket width w = (K + 10)/0.007 and
     128              :  *  maximum expected hashtable size of about 1000 * (K + 10).
     129              :  *
     130              :  *  Note: in the above discussion, s, epsilon, and f/N are in terms of a
     131              :  *  lexeme's frequency as a fraction of all lexemes seen in the input.
     132              :  *  However, what we actually want to store in the finished pg_statistic
     133              :  *  entry is each lexeme's frequency as a fraction of all rows that it occurs
     134              :  *  in.  Assuming that the input tsvectors are correctly constructed, no
     135              :  *  lexeme occurs more than once per tsvector, so the final count f is a
     136              :  *  correct estimate of the number of input tsvectors it occurs in, and we
     137              :  *  need only change the divisor from N to nonnull_cnt to get the number we
     138              :  *  want.
     139              :  */
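/*
 * Below is a minimal, self-contained sketch of the Lossy Counting loop just
 * described, compilable on its own outside the backend.  All names here
 * (Triple, lookup, lossy_count, MAX_TRACKED) are illustrative only, and a
 * fixed-size array with linear search stands in for the hash table D; the
 * real implementation that follows uses a dynahash table keyed by lexeme.
 */
#include <stdio.h>
#include <string.h>

#define MAX_TRACKED 1024        /* capacity of the toy D structure */

typedef struct
{
    const char *e;              /* element value */
    int         f;              /* occurrence count */
    int         delta;          /* maximum error in f */
    int         used;           /* slot occupied? */
} Triple;

static Triple D[MAX_TRACKED];

/* Linear-scan stand-in for the hash lookup. */
static Triple *
lookup(const char *e)
{
    for (int i = 0; i < MAX_TRACKED; i++)
        if (D[i].used && strcmp(D[i].e, e) == 0)
            return &D[i];
    return NULL;
}

static void
lossy_count(const char **stream, int n, double s, double epsilon)
{
    int         w = (int) (1.0 / epsilon + 0.5);    /* bucket width, rounded */
    int         b_current = 1;

    for (int i = 0; i < n; i++)
    {
        Triple     *t = lookup(stream[i]);

        if (t)
            t->f++;             /* element already in D: bump its count */
        else
        {
            /* insert (e, 1, b_current - 1); overflow is silently dropped */
            for (int j = 0; j < MAX_TRACKED; j++)
                if (!D[j].used)
                {
                    D[j] = (Triple) {stream[i], 1, b_current - 1, 1};
                    break;
                }
        }

        /* After each full bucket, prune items with f + delta <= b_current. */
        if ((i + 1) % w == 0)
        {
            for (int j = 0; j < MAX_TRACKED; j++)
                if (D[j].used && D[j].f + D[j].delta <= b_current)
                    D[j].used = 0;
            b_current++;
        }
    }

    /* Emit the survivors whose count clears (s - epsilon) * N. */
    for (int j = 0; j < MAX_TRACKED; j++)
        if (D[j].used && D[j].f >= (s - epsilon) * n)
            printf("%s: estimated frequency %.3f\n",
                   D[j].e, (double) D[j].f / n);
}

int
main(void)
{
    const char *stream[] = {"the", "cat", "the", "dog", "the",
                            "cat", "fish", "the", "cat", "the"};

    /* s = 0.3, epsilon = 0.1, so w = 10 and the cutoff is 2 occurrences:
     * "dog" and "fish" are pruned, "the" and "cat" are reported. */
    lossy_count(stream, 10, 0.3, 0.1);
    return 0;
}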
     140              : static void
     141            4 : compute_tsvector_stats(VacAttrStats *stats,
     142              :                        AnalyzeAttrFetchFunc fetchfunc,
     143              :                        int samplerows,
     144              :                        double totalrows)
     145              : {
     146              :     int         num_mcelem;
     147            4 :     int         null_cnt = 0;
     148            4 :     double      total_width = 0;
     149              : 
     150              :     /* This is D from the LC algorithm. */
     151              :     HTAB       *lexemes_tab;
     152              :     HASHCTL     hash_ctl;
     153              :     HASH_SEQ_STATUS scan_status;
     154              : 
     155              :     /* This is the current bucket number from the LC algorithm */
     156              :     int         b_current;
     157              : 
     158              :     /* This is 'w' from the LC algorithm */
     159              :     int         bucket_width;
     160              :     int         vector_no,
     161              :                 lexeme_no;
     162              :     LexemeHashKey hash_key;
     163              : 
     164              :     /*
     165              :      * We want statistics_target * 10 lexemes in the MCELEM array.  This
     166              :      * multiplier is pretty arbitrary, but is meant to reflect the fact that
     167              :      * the number of individual lexeme values tracked in pg_statistic ought to
     168              :      * be more than the number of values for a simple scalar column.
     169              :      */
     170            4 :     num_mcelem = stats->attstattarget * 10;
     171              : 
     172              :     /*
     173              :      * We set bucket width equal to (num_mcelem + 10) / 0.007 as per the
     174              :      * comment above.
     175              :      */
     176            4 :     bucket_width = (num_mcelem + 10) * 1000 / 7;
     177              : 
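/*
 * Worked numbers (arithmetic only): with the default target of 100,
 * num_mcelem = 1000, so bucket_width = (1000 + 10) * 1000 / 7 = 144285
 * (integer division), i.e. epsilon ~= 1/144285 ~= 6.9e-6 and
 * s = 10 * epsilon ~= 0.07/1010.  The expected worst-case hashtable size
 * of about 7 * w is then roughly 1000 * (1000 + 10) ~= one million
 * entries, matching the comment above.
 */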
     178              :     /*
     179              :      * Create the hashtable. It will be in local memory, so we don't need to
     180              :      * worry about overflowing the initial size. Also we don't need to pay any
     181              :      * attention to locking and memory management.
     182              :      */
     183            4 :     hash_ctl.keysize = sizeof(LexemeHashKey);
     184            4 :     hash_ctl.entrysize = sizeof(TrackItem);
     185            4 :     hash_ctl.hash = lexeme_hash;
     186            4 :     hash_ctl.match = lexeme_match;
     187            4 :     hash_ctl.hcxt = CurrentMemoryContext;
     188            4 :     lexemes_tab = hash_create("Analyzed lexemes table",
     189              :                               num_mcelem,
     190              :                               &hash_ctl,
     191              :                               HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
     192              : 
     193              :     /* Initialize counters. */
     194            4 :     b_current = 1;
     195            4 :     lexeme_no = 0;
     196              : 
     197              :     /* Loop over the tsvectors. */
     198         2039 :     for (vector_no = 0; vector_no < samplerows; vector_no++)
     199              :     {
     200              :         Datum       value;
     201              :         bool        isnull;
     202              :         TSVector    vector;
     203              :         WordEntry  *curentryptr;
     204              :         char       *lexemesptr;
     205              :         int         j;
     206              : 
     207         2035 :         vacuum_delay_point(true);
     208              : 
     209         2035 :         value = fetchfunc(stats, vector_no, &isnull);
     210              : 
     211              :         /*
     212              :          * Check for null/nonnull.
     213              :          */
     214         2035 :         if (isnull)
     215              :         {
     216            0 :             null_cnt++;
     217            0 :             continue;
     218              :         }
     219              : 
     220              :         /*
     221              :          * Add up widths for average-width calculation.  Since it's a
     222              :          * tsvector, we know it's varlena.  As in the regular
     223              :          * compute_minimal_stats function, we use the toasted width for this
     224              :          * calculation.
     225              :          */
     226         2035 :         total_width += VARSIZE_ANY(DatumGetPointer(value));
     227              : 
     228              :         /*
     229              :          * Now detoast the tsvector if needed.
     230              :          */
     231         2035 :         vector = DatumGetTSVector(value);
     232              : 
     233              :         /*
     234              :          * We loop through the lexemes in the tsvector and add them to our
     235              :          * tracking hashtable.
     236              :          */
     237         2035 :         lexemesptr = STRPTR(vector);
     238         2035 :         curentryptr = ARRPTR(vector);
     239       117239 :         for (j = 0; j < vector->size; j++)
     240              :         {
     241              :             TrackItem  *item;
     242              :             bool        found;
     243              : 
     244              :             /*
     245              :              * Construct a hash key.  The key points into the (detoasted)
     246              :              * tsvector value at this point, but if a new entry is created, we
     247              :              * make a copy of it.  This way we can free the tsvector value
     248              :              * once we've processed all its lexemes.
     249              :              */
     250       115204 :             hash_key.lexeme = lexemesptr + curentryptr->pos;
     251       115204 :             hash_key.length = curentryptr->len;
     252              : 
     253              :             /* Lookup current lexeme in hashtable, adding it if new */
     254       115204 :             item = (TrackItem *) hash_search(lexemes_tab,
     255              :                                              &hash_key,
     256              :                                              HASH_ENTER, &found);
     257              : 
     258       115204 :             if (found)
     259              :             {
     260              :                 /* The lexeme is already on the tracking list */
     261       110636 :                 item->frequency++;
     262              :             }
     263              :             else
     264              :             {
     265              :                 /* Initialize new tracking list element */
     266         4568 :                 item->frequency = 1;
     267         4568 :                 item->delta = b_current - 1;
     268              : 
     269         4568 :                 item->key.lexeme = palloc(hash_key.length);
     270         4568 :                 memcpy(item->key.lexeme, hash_key.lexeme, hash_key.length);
     271              :             }
     272              : 
     273              :             /* lexeme_no is the number of elements processed (ie N) */
     274       115204 :             lexeme_no++;
     275              : 
     276              :             /* We prune the D structure after processing each bucket */
     277       115204 :             if (lexeme_no % bucket_width == 0)
     278              :             {
     279            0 :                 prune_lexemes_hashtable(lexemes_tab, b_current);
     280            0 :                 b_current++;
     281              :             }
     282              : 
     283              :             /* Advance to the next WordEntry in the tsvector */
     284       115204 :             curentryptr++;
     285              :         }
     286              : 
     287              :         /* If the vector was toasted, free the detoasted copy. */
     288         2035 :         if (TSVectorGetDatum(vector) != value)
     289          259 :             pfree(vector);
     290              :     }
     291              : 
     292              :     /* We can only compute real stats if we found some non-null values. */
     293            4 :     if (null_cnt < samplerows)
     294              :     {
     295            4 :         int         nonnull_cnt = samplerows - null_cnt;
     296              :         int         i;
     297              :         TrackItem **sort_table;
     298              :         TrackItem  *item;
     299              :         int         track_len;
     300              :         int         cutoff_freq;
     301              :         int         minfreq,
     302              :                     maxfreq;
     303              : 
     304            4 :         stats->stats_valid = true;
     305              :         /* Do the simple null-frac and average width stats */
     306            4 :         stats->stanullfrac = (double) null_cnt / (double) samplerows;
     307            4 :         stats->stawidth = total_width / (double) nonnull_cnt;
     308              : 
     309              :         /* Assume it's a unique column (see notes above) */
     310            4 :         stats->stadistinct = -1.0 * (1.0 - stats->stanullfrac);
     311              : 
     312              :         /*
     313              :          * Construct an array of the interesting hashtable items, that is,
     314              :          * those meeting the cutoff frequency (s - epsilon)*N.  Also identify
     315              :          * the maximum frequency among these items.
     316              :          *
     317              :          * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
     318              :          * frequency is 9*N / bucket_width.
     319              :          */
     320            4 :         cutoff_freq = 9 * lexeme_no / bucket_width;
     321              : 
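        /*
         * Hypothetical worked example: if this ANALYZE pass saw
         * N = lexeme_no = 1,000,000 lexemes with the default bucket_width
         * of 144285, then cutoff_freq = 9 * 1000000 / 144285 = 62, so only
         * lexemes seen at least 63 times become MCELEM candidates below.
         */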
     322            4 :         i = hash_get_num_entries(lexemes_tab);  /* surely enough space */
     323            4 :         sort_table = palloc_array(TrackItem *, i);
     324              : 
     325            4 :         hash_seq_init(&scan_status, lexemes_tab);
     326            4 :         track_len = 0;
     327            4 :         maxfreq = 0;
     328         4576 :         while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
     329              :         {
     330         4568 :             if (item->frequency > cutoff_freq)
     331              :             {
     332         4212 :                 sort_table[track_len++] = item;
     333         4212 :                 maxfreq = Max(maxfreq, item->frequency);
     334              :             }
     335              :         }
     336              :         Assert(track_len <= i);
     337              : 
     338              :         /* emit some statistics for debug purposes */
     339            4 :         elog(DEBUG3, "tsvector_stats: target # mces = %d, bucket width = %d, "
     340              :              "# lexemes = %d, hashtable size = %d, usable entries = %d",
     341              :              num_mcelem, bucket_width, lexeme_no, i, track_len);
     342              : 
     343              :         /*
     344              :          * If we obtained more lexemes than we really want, get rid of those
      345              :          * with the lowest frequencies.  The easiest way is to qsort the array into
     346              :          * descending frequency order and truncate the array.
     347              :          *
     348              :          * If we did not find more elements than we want, then it is safe to
     349              :          * assume that the stored MCE array will contain every element with
     350              :          * frequency above the cutoff.  In that case, rather than storing the
     351              :          * smallest frequency we are keeping, we want to store the minimum
     352              :          * frequency that would have been accepted as a valid MCE.  The
     353              :          * selectivity functions can assume that that is an upper bound on the
     354              :          * frequency of elements not present in the array.
     355              :          *
     356              :          * If we found no candidate MCEs at all, we still want to record the
     357              :          * cutoff frequency, since it's still valid to assume that no element
     358              :          * has frequency more than that.
     359              :          */
     360            4 :         if (num_mcelem < track_len)
     361              :         {
     362            4 :             qsort_interruptible(sort_table, track_len, sizeof(TrackItem *),
     363              :                                 trackitem_compare_frequencies_desc, NULL);
     364              :             /* set minfreq to the smallest frequency we're keeping */
     365            4 :             minfreq = sort_table[num_mcelem - 1]->frequency;
     366              :         }
     367              :         else
     368              :         {
     369            0 :             num_mcelem = track_len;
     370              :             /* set minfreq to the minimum frequency above the cutoff */
     371            0 :             minfreq = cutoff_freq + 1;
     372              :             /* ensure maxfreq is nonzero, too */
     373            0 :             if (track_len == 0)
     374            0 :                 maxfreq = minfreq;
     375              :         }
     376              : 
     377              :         /* Generate MCELEM slot entry */
     378            4 :         if (num_mcelem >= 0)
     379              :         {
     380              :             MemoryContext old_context;
     381              :             Datum      *mcelem_values;
     382              :             float4     *mcelem_freqs;
     383              : 
     384              :             /*
     385              :              * We want to store statistics sorted on the lexeme value using
     386              :              * first length, then byte-for-byte comparison. The reason for
     387              :              * doing length comparison first is that we don't care about the
     388              :              * ordering so long as it's consistent, and comparing lengths
     389              :              * first gives us a chance to avoid a strncmp() call.
     390              :              *
     391              :              * This is different from what we do with scalar statistics --
     392              :              * they get sorted on frequencies. The rationale is that we
     393              :              * usually search through most common elements looking for a
     394              :              * specific value, so we can grab its frequency.  When values are
     395              :              * presorted we can employ binary search for that.  See
     396              :              * ts_selfuncs.c for a real usage scenario.
     397              :              */
     398            4 :             qsort_interruptible(sort_table, num_mcelem, sizeof(TrackItem *),
     399              :                                 trackitem_compare_lexemes, NULL);
     400              : 
     401              :             /* Must copy the target values into anl_context */
     402            4 :             old_context = MemoryContextSwitchTo(stats->anl_context);
     403              : 
     404              :             /*
     405              :              * We sorted statistics on the lexeme value, but we want to be
     406              :              * able to find out the minimal and maximal frequency without
     407              :              * going through all the values.  We keep those two extra
     408              :              * frequencies in two extra cells in mcelem_freqs.
     409              :              *
     410              :              * (Note: the MCELEM statistics slot definition allows for a third
     411              :              * extra number containing the frequency of nulls, but we don't
     412              :              * create that for a tsvector column, since null elements aren't
     413              :              * possible.)
     414              :              */
     415            4 :             mcelem_values = palloc_array(Datum, num_mcelem);
     416            4 :             mcelem_freqs = palloc_array(float4, num_mcelem + 2);
     417              : 
     418              :             /*
     419              :              * See comments above about use of nonnull_cnt as the divisor for
     420              :              * the final frequency estimates.
     421              :              */
     422         4004 :             for (i = 0; i < num_mcelem; i++)
     423              :             {
     424         4000 :                 TrackItem  *titem = sort_table[i];
     425              : 
     426         8000 :                 mcelem_values[i] =
     427         4000 :                     PointerGetDatum(cstring_to_text_with_len(titem->key.lexeme,
     428              :                                                              titem->key.length));
     429         4000 :                 mcelem_freqs[i] = (double) titem->frequency / (double) nonnull_cnt;
     430              :             }
     431            4 :             mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
     432            4 :             mcelem_freqs[i] = (double) maxfreq / (double) nonnull_cnt;
     433            4 :             MemoryContextSwitchTo(old_context);
     434              : 
     435            4 :             stats->stakind[0] = STATISTIC_KIND_MCELEM;
     436            4 :             stats->staop[0] = TextEqualOperator;
     437            4 :             stats->stacoll[0] = DEFAULT_COLLATION_OID;
     438            4 :             stats->stanumbers[0] = mcelem_freqs;
     439              :             /* See above comment about two extra frequency fields */
     440            4 :             stats->numnumbers[0] = num_mcelem + 2;
     441            4 :             stats->stavalues[0] = mcelem_values;
     442            4 :             stats->numvalues[0] = num_mcelem;
     443              :             /* We are storing text values */
     444            4 :             stats->statypid[0] = TEXTOID;
     445            4 :             stats->statyplen[0] = -1;    /* typlen, -1 for varlena */
     446            4 :             stats->statypbyval[0] = false;
     447            4 :             stats->statypalign[0] = TYPALIGN_INT;
     448              :         }
     449              :     }
     450              :     else
     451              :     {
     452              :         /* We found only nulls; assume the column is entirely null */
     453            0 :         stats->stats_valid = true;
     454            0 :         stats->stanullfrac = 1.0;
     455            0 :         stats->stawidth = 0; /* "unknown" */
     456            0 :         stats->stadistinct = 0.0;    /* "unknown" */
     457              :     }
     458              : 
     459              :     /*
     460              :      * We don't need to bother cleaning up any of our temporary palloc's. The
     461              :      * hashtable should also go away, as it used a child memory context.
     462              :      */
     463            4 : }
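/*
 * To make the finished MCELEM slot concrete, a hypothetical example (the
 * lexemes and numbers are invented): if "cat" and "dog" were the only kept
 * lexemes, with per-row frequencies 0.40 and 0.65, and the cutoff-based
 * minimum was 0.02, the slot would hold
 *
 *     stavalues[0]  = { "cat", "dog" }             (sorted by lexeme)
 *     stanumbers[0] = { 0.40, 0.65, 0.02, 0.65 }
 *
 * where the two trailing cells are the extra minimum and maximum
 * frequencies described above; the optional third extra cell (null-element
 * frequency) is never stored for tsvector.
 */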
     464              : 
     465              : /*
     466              :  *  A function to prune the D structure from the Lossy Counting algorithm.
      467              :  *  Consult compute_tsvector_stats() for a wider explanation.
     468              :  */
     469              : static void
     470            0 : prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current)
     471              : {
     472              :     HASH_SEQ_STATUS scan_status;
     473              :     TrackItem  *item;
     474              : 
     475            0 :     hash_seq_init(&scan_status, lexemes_tab);
     476            0 :     while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
     477              :     {
     478            0 :         if (item->frequency + item->delta <= b_current)
     479              :         {
     480            0 :             char       *lexeme = item->key.lexeme;
     481              : 
     482            0 :             if (hash_search(lexemes_tab, &item->key,
     483              :                             HASH_REMOVE, NULL) == NULL)
     484            0 :                 elog(ERROR, "hash table corrupted");
     485            0 :             pfree(lexeme);
     486              :         }
     487              :     }
     488            0 : }
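/*
 * A small consequence of the rule above, spelled out: an item first seen
 * during bucket b_current enters D as (e, 1, b_current - 1), so at the
 * prune ending that same bucket it survives only if f + delta > b_current,
 * i.e. only if it occurred at least twice within the bucket.
 */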
     489              : 
     490              : /*
      491              :  * Hash function for lexemes. They are strings, but not NULL-terminated,
     492              :  * so we need a special hash function.
     493              :  */
     494              : static uint32
     495       115204 : lexeme_hash(const void *key, Size keysize)
     496              : {
     497       115204 :     const LexemeHashKey *l = (const LexemeHashKey *) key;
     498              : 
     499       115204 :     return DatumGetUInt32(hash_any((const unsigned char *) l->lexeme,
     500       115204 :                                    l->length));
     501              : }
     502              : 
     503              : /*
     504              :  *  Matching function for lexemes, to be used in hashtable lookups.
     505              :  */
     506              : static int
     507       110636 : lexeme_match(const void *key1, const void *key2, Size keysize)
     508              : {
     509              :     /* The keysize parameter is superfluous, the keys store their lengths */
     510       110636 :     return lexeme_compare(key1, key2);
     511              : }
     512              : 
     513              : /*
     514              :  *  Comparison function for lexemes.
     515              :  */
     516              : static int
     517       151272 : lexeme_compare(const void *key1, const void *key2)
     518              : {
     519       151272 :     const LexemeHashKey *d1 = (const LexemeHashKey *) key1;
     520       151272 :     const LexemeHashKey *d2 = (const LexemeHashKey *) key2;
     521              : 
     522              :     /* First, compare by length */
     523       151272 :     if (d1->length > d2->length)
     524            0 :         return 1;
     525       151272 :     else if (d1->length < d2->length)
     526            0 :         return -1;
     527              :     /* Lengths are equal, do a byte-by-byte comparison */
     528       151272 :     return strncmp(d1->lexeme, d2->lexeme, d1->length);
     529              : }
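/*
 * Note that the resulting order is length-first rather than purely
 * byte-wise: for two hypothetical keys a = {"z", 1} and b = {"aa", 2},
 * lexeme_compare(&a, &b) returns -1 because 1 < 2, even though 'z' > 'a'.
 * Any consistent total order works here, as the comment in
 * compute_tsvector_stats() explains.
 */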
     530              : 
     531              : /*
     532              :  *  Comparator for sorting TrackItems on frequencies (descending sort)
     533              :  */
     534              : static int
     535        25241 : trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg)
     536              : {
     537        25241 :     const TrackItem *const *t1 = (const TrackItem *const *) e1;
     538        25241 :     const TrackItem *const *t2 = (const TrackItem *const *) e2;
     539              : 
     540        25241 :     return (*t2)->frequency - (*t1)->frequency;
     541              : }
     542              : 
     543              : /*
     544              :  *  Comparator for sorting TrackItems on lexemes
     545              :  */
     546              : static int
     547        40636 : trackitem_compare_lexemes(const void *e1, const void *e2, void *arg)
     548              : {
     549        40636 :     const TrackItem *const *t1 = (const TrackItem *const *) e1;
     550        40636 :     const TrackItem *const *t2 = (const TrackItem *const *) e2;
     551              : 
     552        40636 :     return lexeme_compare(&(*t1)->key, &(*t2)->key);
     553              : }
        

Generated by: LCOV version 2.0-1