Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * pg_stat_statements.c
4 : * Track statement planning and execution times as well as resource
5 : * usage across a whole database cluster.
6 : *
7 : * Execution costs are totaled for each distinct source query, and kept in
8 : * a shared hashtable. (We track only as many distinct queries as will fit
9 : * in the designated amount of shared memory.)
10 : *
11 : * Starting in Postgres 9.2, this module normalized query entries. As of
12 : * Postgres 14, the normalization is done by the core if compute_query_id is
13 : * enabled, or optionally by third-party modules.
14 : *
15 : * To facilitate presenting entries to users, we create "representative" query
16 : * strings in which constants are replaced with parameter symbols ($n), to
17 : * make it clearer what a normalized entry can represent. To save on shared
18 : * memory, and to avoid having to truncate oversized query strings, we store
19 : * these strings in a temporary external query-texts file. Offsets into this
20 : * file are kept in shared memory.
21 : *
22 : * Note about locking issues: to create or delete an entry in the shared
23 : * hashtable, one must hold pgss->lock exclusively. Modifying any field
24 : * in an entry except the counters requires the same. To look up an entry,
25 : * one must hold the lock shared. To read or update the counters within
26 : * an entry, one must hold the lock shared or exclusive (so the entry doesn't
27 : * disappear!) and also take the entry's mutex spinlock.
28 : * The shared state variable pgss->extent (the next free spot in the external
29 : * query-text file) should be accessed only while holding either the
30 : * pgss->mutex spinlock, or exclusive lock on pgss->lock. We use the mutex to
31 : * allow reserving file space while holding only shared lock on pgss->lock.
32 : * Rewriting the entire external query-text file, eg for garbage collection,
33 : * requires holding pgss->lock exclusively; this allows individual entries
34 : * in the file to be read or written while holding only shared lock.
35 : *
36 : *
37 : * Copyright (c) 2008-2023, PostgreSQL Global Development Group
38 : *
39 : * IDENTIFICATION
40 : * contrib/pg_stat_statements/pg_stat_statements.c
41 : *
42 : *-------------------------------------------------------------------------
43 : */
44 : #include "postgres.h"
45 :
46 : #include <math.h>
47 : #include <sys/stat.h>
48 : #include <unistd.h>
49 :
50 : #include "access/parallel.h"
51 : #include "catalog/pg_authid.h"
52 : #include "common/hashfn.h"
53 : #include "executor/instrument.h"
54 : #include "funcapi.h"
55 : #include "jit/jit.h"
56 : #include "mb/pg_wchar.h"
57 : #include "miscadmin.h"
58 : #include "nodes/queryjumble.h"
59 : #include "optimizer/planner.h"
60 : #include "parser/analyze.h"
61 : #include "parser/parsetree.h"
62 : #include "parser/scanner.h"
63 : #include "parser/scansup.h"
64 : #include "pgstat.h"
65 : #include "storage/fd.h"
66 : #include "storage/ipc.h"
67 : #include "storage/lwlock.h"
68 : #include "storage/shmem.h"
69 : #include "storage/spin.h"
70 : #include "tcop/utility.h"
71 : #include "utils/acl.h"
72 : #include "utils/builtins.h"
73 : #include "utils/memutils.h"
74 : #include "utils/timestamp.h"
75 :
76 8 : PG_MODULE_MAGIC;
77 :
/* Location of permanent stats file (valid when database is shut down) */
#define PGSS_DUMP_FILE	PGSTAT_STAT_PERMANENT_DIRECTORY "/pg_stat_statements.stat"

/*
 * Location of external query text file.
 *
 * Unlike PGSS_DUMP_FILE, this file exists only while the server is running:
 * pgss_shmem_startup() recreates it and pgss_shmem_shutdown() unlinks it.
 */
#define PGSS_TEXT_FILE	PG_STAT_TMP_DIR "/pgss_query_texts.stat"

/* Magic number identifying the stats file format */
static const uint32 PGSS_FILE_HEADER = 0x20220408;

/* PostgreSQL major version number, changes in which invalidate all entries */
static const uint32 PGSS_PG_MAJOR_VERSION = PG_VERSION_NUM / 100;

/* XXX: Should USAGE_EXEC reflect execution time and/or buffer usage? */
#define USAGE_EXEC(duration)	(1.0)
#define USAGE_INIT				(1.0)	/* including initial planning */
#define ASSUMED_MEDIAN_INIT		(10.0)	/* initial assumed median usage */
#define ASSUMED_LENGTH_INIT		1024	/* initial assumed mean query length */
#define USAGE_DECREASE_FACTOR	(0.99)	/* decreased every entry_dealloc */
#define STICKY_DECREASE_FACTOR	(0.50)	/* factor for sticky entries */
#define USAGE_DEALLOC_PERCENT	5	/* free this % of entries at once */

/*
 * An entry is "sticky" while it has recorded no completed plans or
 * executions; such entries exist only to hold a normalized query text and
 * are skipped when persisting stats at shutdown.
 */
#define IS_STICKY(c)	((c.calls[PGSS_PLAN] + c.calls[PGSS_EXEC]) == 0)
101 :
/*
 * Extension version number, for supporting older extension versions' objects
 *
 * Each value corresponds to a SQL-level extension version whose C entry
 * points (pg_stat_statements_1_2() etc., below) are still supported.
 */
typedef enum pgssVersion
{
	PGSS_V1_0 = 0,
	PGSS_V1_1,
	PGSS_V1_2,
	PGSS_V1_3,
	PGSS_V1_8,
	PGSS_V1_9,
	PGSS_V1_10,
	PGSS_V1_11,
} pgssVersion;
116 :
/*
 * Kind of statistics being stored by pgss_store().
 */
typedef enum pgssStoreKind
{
	PGSS_INVALID = -1,			/* not a counter slot; used when storing only
								 * a normalized query text (see
								 * pgss_post_parse_analyze) */

	/*
	 * PGSS_PLAN and PGSS_EXEC must be respectively 0 and 1 as they're used to
	 * reference the underlying values in the arrays in the Counters struct,
	 * and this order is required in pg_stat_statements_internal().
	 */
	PGSS_PLAN = 0,
	PGSS_EXEC,

	PGSS_NUMKIND				/* Must be last value of this enum */
} pgssStoreKind;
131 :
/*
 * Hashtable key that defines the identity of a hashtable entry.  We separate
 * queries by user and by database even if they are otherwise identical.
 *
 * If you add a new key to this struct, make sure to teach pgss_store() to
 * zero the padding bytes.  Otherwise, things will break, because pgss_hash is
 * created using HASH_BLOBS, and thus tag_hash is used to hash this.
 */
typedef struct pgssHashKey
{
	Oid			userid;			/* user OID */
	Oid			dbid;			/* database OID */
	uint64		queryid;		/* query identifier */
	bool		toplevel;		/* query executed at top level */
} pgssHashKey;
148 :
/*
 * The actual stats counters kept within pgssEntry.
 *
 * Per the locking rules described at the top of this file, these fields are
 * read and updated while holding the entry's mutex spinlock (plus at least
 * shared lock on pgss->lock, so the entry can't disappear).
 */
typedef struct Counters
{
	int64		calls[PGSS_NUMKIND];	/* # of times planned/executed */
	double		total_time[PGSS_NUMKIND];	/* total planning/execution time,
											 * in msec */
	double		min_time[PGSS_NUMKIND]; /* minimum planning/execution time in
										 * msec since min/max reset */
	double		max_time[PGSS_NUMKIND]; /* maximum planning/execution time in
										 * msec since min/max reset */
	double		mean_time[PGSS_NUMKIND];	/* mean planning/execution time in
											 * msec */
	double		sum_var_time[PGSS_NUMKIND]; /* sum of variances in
											 * planning/execution time in msec */
	int64		rows;			/* total # of retrieved or affected rows */
	int64		shared_blks_hit;	/* # of shared buffer hits */
	int64		shared_blks_read;	/* # of shared disk blocks read */
	int64		shared_blks_dirtied;	/* # of shared disk blocks dirtied */
	int64		shared_blks_written;	/* # of shared disk blocks written */
	int64		local_blks_hit; /* # of local buffer hits */
	int64		local_blks_read;	/* # of local disk blocks read */
	int64		local_blks_dirtied; /* # of local disk blocks dirtied */
	int64		local_blks_written; /* # of local disk blocks written */
	int64		temp_blks_read; /* # of temp blocks read */
	int64		temp_blks_written;	/* # of temp blocks written */
	double		shared_blk_read_time;	/* time spent reading shared blocks,
										 * in msec */
	double		shared_blk_write_time;	/* time spent writing shared blocks,
										 * in msec */
	double		local_blk_read_time;	/* time spent reading local blocks, in
										 * msec */
	double		local_blk_write_time;	/* time spent writing local blocks, in
										 * msec */
	double		temp_blk_read_time; /* time spent reading temp blocks, in msec */
	double		temp_blk_write_time;	/* time spent writing temp blocks, in
										 * msec */
	double		usage;			/* usage factor */
	int64		wal_records;	/* # of WAL records generated */
	int64		wal_fpi;		/* # of WAL full page images generated */
	uint64		wal_bytes;		/* total amount of WAL generated in bytes */
	int64		jit_functions;	/* total number of JIT functions emitted */
	double		jit_generation_time;	/* total time to generate jit code */
	int64		jit_inlining_count; /* number of times inlining time has been
									 * > 0 */
	double		jit_deform_time;	/* total time to deform tuples in jit code */
	int64		jit_deform_count;	/* number of times deform time has been >
									 * 0 */

	double		jit_inlining_time;	/* total time to inline jit code */
	int64		jit_optimization_count; /* number of times optimization time
										 * has been > 0 */
	double		jit_optimization_time;	/* total time to optimize jit code */
	int64		jit_emission_count; /* number of times emission time has been
									 * > 0 */
	double		jit_emission_time;	/* total time to emit jit code */
} Counters;
207 :
/*
 * Global statistics for pg_stat_statements
 *
 * Lives in pgssSharedState.stats and is persisted to / restored from the
 * dump file along with the per-entry counters.
 */
typedef struct pgssGlobalStats
{
	int64		dealloc;		/* # of times entries were deallocated */
	TimestampTz stats_reset;	/* timestamp with all stats reset */
} pgssGlobalStats;
216 :
/*
 * Statistics per statement
 *
 * Note: in event of a failure in garbage collection of the query text file,
 * we reset query_offset to zero and query_len to -1.  This will be seen as
 * an invalid state by qtext_fetch().
 */
typedef struct pgssEntry
{
	pgssHashKey key;			/* hash key of entry - MUST BE FIRST */
	Counters	counters;		/* the statistics for this query */
	Size		query_offset;	/* query text offset in external file */
	int			query_len;		/* # of valid bytes in query string, or -1 */
	int			encoding;		/* query text encoding */
	TimestampTz stats_since;	/* timestamp of entry allocation */
	TimestampTz minmax_stats_since; /* timestamp of last min/max values reset */
	slock_t		mutex;			/* protects the counters only */
} pgssEntry;
235 :
/*
 * Global shared state
 *
 * See the locking discussion at the head of this file: the LWLock guards
 * hashtable structure, while the spinlock guards the small scalar fields
 * noted below.
 */
typedef struct pgssSharedState
{
	LWLock	   *lock;			/* protects hashtable search/modification */
	double		cur_median_usage;	/* current median usage in hashtable */
	Size		mean_query_len; /* current mean entry text length */
	slock_t		mutex;			/* protects following fields only: */
	Size		extent;			/* current extent of query file */
	int			n_writers;		/* number of active writers to query file */
	int			gc_count;		/* query file garbage collection cycle count */
	pgssGlobalStats stats;		/* global statistics for pgss */
} pgssSharedState;
250 :
/*---- Local variables ----*/

/* Current nesting depth of planner/ExecutorRun/ProcessUtility calls */
static int	nesting_level = 0;

/* Saved hook values in case of unload; pgss_* hooks chain to these */
static shmem_request_hook_type prev_shmem_request_hook = NULL;
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
static post_parse_analyze_hook_type prev_post_parse_analyze_hook = NULL;
static planner_hook_type prev_planner_hook = NULL;
static ExecutorStart_hook_type prev_ExecutorStart = NULL;
static ExecutorRun_hook_type prev_ExecutorRun = NULL;
static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
static ProcessUtility_hook_type prev_ProcessUtility = NULL;

/* Links to shared memory state; NULL until pgss_shmem_startup() has run */
static pgssSharedState *pgss = NULL;
static HTAB *pgss_hash = NULL;

/*---- GUC variables ----*/

/* Values for the pg_stat_statements.track GUC (see track_options) */
typedef enum
{
	PGSS_TRACK_NONE,			/* track no statements */
	PGSS_TRACK_TOP,				/* only top level statements */
	PGSS_TRACK_ALL,				/* all statements, including nested ones */
} PGSSTrackLevel;

/* NULL-terminated mapping of GUC strings to PGSSTrackLevel values */
static const struct config_enum_entry track_options[] =
{
	{"none", PGSS_TRACK_NONE, false},
	{"top", PGSS_TRACK_TOP, false},
	{"all", PGSS_TRACK_ALL, false},
	{NULL, 0, false}
};

static int	pgss_max = 5000;	/* max # statements to track */
static int	pgss_track = PGSS_TRACK_TOP;	/* tracking level */
static bool pgss_track_utility = true;	/* whether to track utility commands */
static bool pgss_track_planning = false;	/* whether to track planning
											 * duration */
static bool pgss_save = true;	/* whether to save stats across shutdown */
295 :
/*
 * Should a statement at the given nesting level be tracked?  Parallel
 * workers are never tracked; otherwise it depends on the "track" GUC:
 * "all" tracks everything, "top" tracks only level-0 statements.
 */
#define pgss_enabled(level) \
	(!IsParallelWorker() && \
	 (pgss_track == PGSS_TRACK_ALL || \
	  (pgss_track == PGSS_TRACK_TOP && (level) == 0)))

/*
 * Bump the query-text-file garbage-collection cycle counter.  gc_count is
 * one of the fields protected by pgss->mutex, hence the spinlock.
 */
#define record_gc_qtexts() \
	do { \
		volatile pgssSharedState *s = (volatile pgssSharedState *) pgss; \
		SpinLockAcquire(&s->mutex); \
		s->gc_count++; \
		SpinLockRelease(&s->mutex); \
	} while(0)
308 :
309 : /*---- Function declarations ----*/
310 :
311 10 : PG_FUNCTION_INFO_V1(pg_stat_statements_reset);
312 8 : PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_7);
313 26 : PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_11);
314 0 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_2);
315 10 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_3);
316 8 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_8);
317 10 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_9);
318 10 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_10);
319 30 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_11);
320 0 : PG_FUNCTION_INFO_V1(pg_stat_statements);
321 12 : PG_FUNCTION_INFO_V1(pg_stat_statements_info);
322 :
323 : static void pgss_shmem_request(void);
324 : static void pgss_shmem_startup(void);
325 : static void pgss_shmem_shutdown(int code, Datum arg);
326 : static void pgss_post_parse_analyze(ParseState *pstate, Query *query,
327 : JumbleState *jstate);
328 : static PlannedStmt *pgss_planner(Query *parse,
329 : const char *query_string,
330 : int cursorOptions,
331 : ParamListInfo boundParams);
332 : static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
333 : static void pgss_ExecutorRun(QueryDesc *queryDesc,
334 : ScanDirection direction,
335 : uint64 count, bool execute_once);
336 : static void pgss_ExecutorFinish(QueryDesc *queryDesc);
337 : static void pgss_ExecutorEnd(QueryDesc *queryDesc);
338 : static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
339 : bool readOnlyTree,
340 : ProcessUtilityContext context, ParamListInfo params,
341 : QueryEnvironment *queryEnv,
342 : DestReceiver *dest, QueryCompletion *qc);
343 : static void pgss_store(const char *query, uint64 queryId,
344 : int query_location, int query_len,
345 : pgssStoreKind kind,
346 : double total_time, uint64 rows,
347 : const BufferUsage *bufusage,
348 : const WalUsage *walusage,
349 : const struct JitInstrumentation *jitusage,
350 : JumbleState *jstate);
351 : static void pg_stat_statements_internal(FunctionCallInfo fcinfo,
352 : pgssVersion api_version,
353 : bool showtext);
354 : static Size pgss_memsize(void);
355 : static pgssEntry *entry_alloc(pgssHashKey *key, Size query_offset, int query_len,
356 : int encoding, bool sticky);
357 : static void entry_dealloc(void);
358 : static bool qtext_store(const char *query, int query_len,
359 : Size *query_offset, int *gc_count);
360 : static char *qtext_load_file(Size *buffer_size);
361 : static char *qtext_fetch(Size query_offset, int query_len,
362 : char *buffer, Size buffer_size);
363 : static bool need_gc_qtexts(void);
364 : static void gc_qtexts(void);
365 : static TimestampTz entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only);
366 : static char *generate_normalized_query(JumbleState *jstate, const char *query,
367 : int query_loc, int *query_len_p);
368 : static void fill_in_constant_lengths(JumbleState *jstate, const char *query,
369 : int query_loc);
370 : static int comp_location(const void *a, const void *b);
371 :
372 :
/*
 * Module load callback
 *
 * Registers GUCs and installs all hooks.  Does nothing unless the library
 * was loaded via shared_preload_libraries.
 */
void
_PG_init(void)
{
	/*
	 * In order to create our shared memory area, we have to be loaded via
	 * shared_preload_libraries.  If not, fall out without hooking into any of
	 * the main system.  (We don't throw error here because it seems useful to
	 * allow the pg_stat_statements functions to be created even when the
	 * module isn't active.  The functions must protect themselves against
	 * being called then, however.)
	 */
	if (!process_shared_preload_libraries_in_progress)
		return;

	/*
	 * Inform the postmaster that we want to enable query_id calculation if
	 * compute_query_id is set to auto.
	 */
	EnableQueryId();

	/*
	 * Define (or redefine) custom GUC variables.
	 */
	DefineCustomIntVariable("pg_stat_statements.max",
							"Sets the maximum number of statements tracked by pg_stat_statements.",
							NULL,
							&pgss_max,
							5000,	/* boot value */
							100,	/* min */
							INT_MAX / 2,	/* max */
							PGC_POSTMASTER, /* fixed at server start; sizes
											 * shared memory */
							0,
							NULL,
							NULL,
							NULL);

	DefineCustomEnumVariable("pg_stat_statements.track",
							 "Selects which statements are tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track,
							 PGSS_TRACK_TOP,
							 track_options,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.track_utility",
							 "Selects whether utility commands are tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track_utility,
							 true,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.track_planning",
							 "Selects whether planning duration is tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track_planning,
							 false,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.save",
							 "Save pg_stat_statements statistics across server shutdowns.",
							 NULL,
							 &pgss_save,
							 true,
							 PGC_SIGHUP,
							 0,
							 NULL,
							 NULL,
							 NULL);

	MarkGUCPrefixReserved("pg_stat_statements");

	/*
	 * Install hooks.  In each case we save the previous hook value so that
	 * the corresponding pgss_* function can chain to it.
	 */
	prev_shmem_request_hook = shmem_request_hook;
	shmem_request_hook = pgss_shmem_request;
	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = pgss_shmem_startup;
	prev_post_parse_analyze_hook = post_parse_analyze_hook;
	post_parse_analyze_hook = pgss_post_parse_analyze;
	prev_planner_hook = planner_hook;
	planner_hook = pgss_planner;
	prev_ExecutorStart = ExecutorStart_hook;
	ExecutorStart_hook = pgss_ExecutorStart;
	prev_ExecutorRun = ExecutorRun_hook;
	ExecutorRun_hook = pgss_ExecutorRun;
	prev_ExecutorFinish = ExecutorFinish_hook;
	ExecutorFinish_hook = pgss_ExecutorFinish;
	prev_ExecutorEnd = ExecutorEnd_hook;
	ExecutorEnd_hook = pgss_ExecutorEnd;
	prev_ProcessUtility = ProcessUtility_hook;
	ProcessUtility_hook = pgss_ProcessUtility;
}
481 :
/*
 * shmem_request hook: request additional shared resources.  We'll allocate or
 * attach to the shared resources in pgss_shmem_startup().
 *
 * Reserves space for our shared state plus hash table (pgss_memsize()) and
 * one named LWLock.
 */
static void
pgss_shmem_request(void)
{
	/* Chain to any earlier shmem_request hook first */
	if (prev_shmem_request_hook)
		prev_shmem_request_hook();

	RequestAddinShmemSpace(pgss_memsize());
	RequestNamedLWLockTranche("pg_stat_statements", 1);
}
495 :
/*
 * shmem_startup hook: allocate or attach to shared memory,
 * then load any pre-existing statistics from file.
 * Also create and load the query-texts file, which is expected to exist
 * (even if empty) while the module is enabled.
 *
 * Error handling: the read_error/data_error/write_error labels log the
 * problem and fall through to "fail", which releases whatever resources
 * were acquired and discards the (possibly bogus) dump file.
 */
static void
pgss_shmem_startup(void)
{
	bool		found;
	HASHCTL		info;
	FILE	   *file = NULL;
	FILE	   *qfile = NULL;
	uint32		header;
	int32		num;
	int32		pgver;
	int32		i;
	int			buffer_size;
	char	   *buffer = NULL;

	if (prev_shmem_startup_hook)
		prev_shmem_startup_hook();

	/* reset in case this is a restart within the postmaster */
	pgss = NULL;
	pgss_hash = NULL;

	/*
	 * Create or attach to the shared memory state, including hash table
	 */
	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

	pgss = ShmemInitStruct("pg_stat_statements",
						   sizeof(pgssSharedState),
						   &found);

	if (!found)
	{
		/* First time through ... initialize all shared state fields */
		pgss->lock = &(GetNamedLWLockTranche("pg_stat_statements"))->lock;
		pgss->cur_median_usage = ASSUMED_MEDIAN_INIT;
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;
		SpinLockInit(&pgss->mutex);
		pgss->extent = 0;
		pgss->n_writers = 0;
		pgss->gc_count = 0;
		pgss->stats.dealloc = 0;
		pgss->stats.stats_reset = GetCurrentTimestamp();
	}

	info.keysize = sizeof(pgssHashKey);
	info.entrysize = sizeof(pgssEntry);
	pgss_hash = ShmemInitHash("pg_stat_statements hash",
							  pgss_max, pgss_max,
							  &info,
							  HASH_ELEM | HASH_BLOBS);

	LWLockRelease(AddinShmemInitLock);

	/*
	 * If we're in the postmaster (or a standalone backend...), set up a shmem
	 * exit hook to dump the statistics to disk.
	 */
	if (!IsUnderPostmaster)
		on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);

	/*
	 * Done if some other process already completed our initialization.
	 */
	if (found)
		return;

	/*
	 * Note: we don't bother with locks here, because there should be no other
	 * processes running when this code is reached.
	 */

	/* Unlink query text file possibly left over from crash */
	unlink(PGSS_TEXT_FILE);

	/* Allocate new query text temp file */
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
		goto write_error;

	/*
	 * If we were told not to load old statistics, we're done.  (Note we do
	 * not try to unlink any old dump file in this case.  This seems a bit
	 * questionable but it's the historical behavior.)
	 */
	if (!pgss_save)
	{
		FreeFile(qfile);
		return;
	}

	/*
	 * Attempt to load old statistics from the dump file.
	 */
	file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
	if (file == NULL)
	{
		if (errno != ENOENT)
			goto read_error;
		/* No existing persisted stats file, so we're done */
		FreeFile(qfile);
		return;
	}

	buffer_size = 2048;
	buffer = (char *) palloc(buffer_size);

	/* File layout: header magic, PG major version, entry count, entries */
	if (fread(&header, sizeof(uint32), 1, file) != 1 ||
		fread(&pgver, sizeof(uint32), 1, file) != 1 ||
		fread(&num, sizeof(int32), 1, file) != 1)
		goto read_error;

	if (header != PGSS_FILE_HEADER ||
		pgver != PGSS_PG_MAJOR_VERSION)
		goto data_error;

	for (i = 0; i < num; i++)
	{
		pgssEntry	temp;
		pgssEntry  *entry;
		Size		query_offset;

		if (fread(&temp, sizeof(pgssEntry), 1, file) != 1)
			goto read_error;

		/* Encoding is the only field we can easily sanity-check */
		if (!PG_VALID_BE_ENCODING(temp.encoding))
			goto data_error;

		/* Resize buffer as needed */
		if (temp.query_len >= buffer_size)
		{
			buffer_size = Max(buffer_size * 2, temp.query_len + 1);
			buffer = repalloc(buffer, buffer_size);
		}

		/* Each entry is followed by its query text plus trailing null */
		if (fread(buffer, 1, temp.query_len + 1, file) != temp.query_len + 1)
			goto read_error;

		/* Should have a trailing null, but let's make sure */
		buffer[temp.query_len] = '\0';

		/* Skip loading "sticky" entries */
		if (IS_STICKY(temp.counters))
			continue;

		/* Store the query text */
		query_offset = pgss->extent;
		if (fwrite(buffer, 1, temp.query_len + 1, qfile) != temp.query_len + 1)
			goto write_error;
		pgss->extent += temp.query_len + 1;

		/* make the hashtable entry (discards old entries if too many) */
		entry = entry_alloc(&temp.key, query_offset, temp.query_len,
							temp.encoding,
							false);

		/* copy in the actual stats */
		entry->counters = temp.counters;
		entry->stats_since = temp.stats_since;
		entry->minmax_stats_since = temp.minmax_stats_since;
	}

	/* Read global statistics for pg_stat_statements */
	if (fread(&pgss->stats, sizeof(pgssGlobalStats), 1, file) != 1)
		goto read_error;

	pfree(buffer);
	FreeFile(file);
	FreeFile(qfile);

	/*
	 * Remove the persisted stats file so it's not included in
	 * backups/replication standbys, etc.  A new file will be written on next
	 * shutdown.
	 *
	 * Note: it's okay if the PGSS_TEXT_FILE is included in a basebackup,
	 * because we remove that file on startup; it acts inversely to
	 * PGSS_DUMP_FILE, in that it is only supposed to be around when the
	 * server is running, whereas PGSS_DUMP_FILE is only supposed to be around
	 * when the server is not running.  Leaving the file creates no danger of
	 * a newly restored database having a spurious record of execution costs,
	 * which is what we're really concerned about here.
	 */
	unlink(PGSS_DUMP_FILE);

	return;

read_error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not read file \"%s\": %m",
					PGSS_DUMP_FILE)));
	goto fail;
data_error:
	ereport(LOG,
			(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
			 errmsg("ignoring invalid data in file \"%s\"",
					PGSS_DUMP_FILE)));
	goto fail;
write_error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_TEXT_FILE)));
fail:
	/* Common cleanup; the NULL initializations above make these safe */
	if (buffer)
		pfree(buffer);
	if (file)
		FreeFile(file);
	if (qfile)
		FreeFile(qfile);
	/* If possible, throw away the bogus file; ignore any error */
	unlink(PGSS_DUMP_FILE);

	/*
	 * Don't unlink PGSS_TEXT_FILE here; it should always be around while the
	 * server is running with pg_stat_statements enabled
	 */
}
721 :
/*
 * shmem_shutdown hook: Dump statistics into file.
 *
 * "code" is the shmem-exit code (nonzero on abnormal exit, in which case we
 * skip the dump); "arg" is unused.
 *
 * Note: we don't bother with acquiring lock, because there should be no
 * other processes running when this is called.
 */
static void
pgss_shmem_shutdown(int code, Datum arg)
{
	FILE	   *file;
	char	   *qbuffer = NULL;
	Size		qbuffer_size = 0;
	HASH_SEQ_STATUS hash_seq;
	int32		num_entries;
	pgssEntry  *entry;

	/* Don't try to dump during a crash. */
	if (code)
		return;

	/* Safety check ... shouldn't get here unless shmem is set up. */
	if (!pgss || !pgss_hash)
		return;

	/* Don't dump if told not to. */
	if (!pgss_save)
		return;

	/* Write to a .tmp file first; it's renamed into place at the end */
	file = AllocateFile(PGSS_DUMP_FILE ".tmp", PG_BINARY_W);
	if (file == NULL)
		goto error;

	if (fwrite(&PGSS_FILE_HEADER, sizeof(uint32), 1, file) != 1)
		goto error;
	if (fwrite(&PGSS_PG_MAJOR_VERSION, sizeof(uint32), 1, file) != 1)
		goto error;
	num_entries = hash_get_num_entries(pgss_hash);
	if (fwrite(&num_entries, sizeof(int32), 1, file) != 1)
		goto error;

	qbuffer = qtext_load_file(&qbuffer_size);
	if (qbuffer == NULL)
		goto error;

	/*
	 * When serializing to disk, we store query texts immediately after their
	 * entry data.  Any orphaned query texts are thereby excluded.
	 */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		int			len = entry->query_len;
		char	   *qstr = qtext_fetch(entry->query_offset, len,
									   qbuffer, qbuffer_size);

		if (qstr == NULL)
			continue;			/* Ignore any entries with bogus texts */

		if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 ||
			fwrite(qstr, 1, len + 1, file) != len + 1)
		{
			/* note: we assume hash_seq_term won't change errno */
			hash_seq_term(&hash_seq);
			goto error;
		}
	}

	/* Dump global statistics for pg_stat_statements */
	if (fwrite(&pgss->stats, sizeof(pgssGlobalStats), 1, file) != 1)
		goto error;

	/* qbuffer is released with free(), not pfree (see qtext_load_file) */
	free(qbuffer);
	qbuffer = NULL;

	/* FreeFile flushes; a nonzero result means the write failed */
	if (FreeFile(file))
	{
		file = NULL;
		goto error;
	}

	/*
	 * Rename file into place, so we atomically replace any old one.
	 */
	(void) durable_rename(PGSS_DUMP_FILE ".tmp", PGSS_DUMP_FILE, LOG);

	/* Unlink query-texts file; it's not needed while shutdown */
	unlink(PGSS_TEXT_FILE);

	return;

error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_DUMP_FILE ".tmp")));
	free(qbuffer);
	if (file)
		FreeFile(file);
	unlink(PGSS_DUMP_FILE ".tmp");
	unlink(PGSS_TEXT_FILE);
}
823 :
824 : /*
825 : * Post-parse-analysis hook: mark query with a queryId
826 : */
827 : static void
828 124028 : pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate)
829 : {
830 124028 : if (prev_post_parse_analyze_hook)
831 0 : prev_post_parse_analyze_hook(pstate, query, jstate);
832 :
833 : /* Safety check... */
834 124028 : if (!pgss || !pgss_hash || !pgss_enabled(nesting_level))
835 23710 : return;
836 :
837 : /*
838 : * If it's EXECUTE, clear the queryId so that stats will accumulate for
839 : * the underlying PREPARE. But don't do this if we're not tracking
840 : * utility statements, to avoid messing up another extension that might be
841 : * tracking them.
842 : */
843 100318 : if (query->utilityStmt)
844 : {
845 46984 : if (pgss_track_utility && IsA(query->utilityStmt, ExecuteStmt))
846 : {
847 2722 : query->queryId = UINT64CONST(0);
848 2722 : return;
849 : }
850 : }
851 :
852 : /*
853 : * If query jumbling were able to identify any ignorable constants, we
854 : * immediately create a hash table entry for the query, so that we can
855 : * record the normalized form of the query string. If there were no such
856 : * constants, the normalized string would be the same as the query text
857 : * anyway, so there's no need for an early entry.
858 : */
859 97596 : if (jstate && jstate->clocations_count > 0)
860 49260 : pgss_store(pstate->p_sourcetext,
861 : query->queryId,
862 : query->stmt_location,
863 : query->stmt_len,
864 : PGSS_INVALID,
865 : 0,
866 : 0,
867 : NULL,
868 : NULL,
869 : NULL,
870 : jstate);
871 : }
872 :
873 : /*
874 : * Planner hook: forward to regular planner, but measure planning time
875 : * if needed.
876 : */
877 : static PlannedStmt *
878 75650 : pgss_planner(Query *parse,
879 : const char *query_string,
880 : int cursorOptions,
881 : ParamListInfo boundParams)
882 : {
883 : PlannedStmt *result;
884 :
885 : /*
886 : * We can't process the query if no query_string is provided, as
887 : * pgss_store needs it. We also ignore query without queryid, as it would
888 : * be treated as a utility statement, which may not be the case.
889 : *
890 : * Note that planner_hook can be called from the planner itself, so we
891 : * have a specific nesting level for the planner. However, utility
892 : * commands containing optimizable statements can also call the planner,
893 : * same for regular DML (for instance for underlying foreign key queries).
894 : * So testing the planner nesting level only is not enough to detect real
895 : * top level planner call.
896 : */
897 75650 : if (pgss_enabled(nesting_level)
898 53560 : && pgss_track_planning && query_string
899 146 : && parse->queryId != UINT64CONST(0))
900 146 : {
901 : instr_time start;
902 : instr_time duration;
903 : BufferUsage bufusage_start,
904 : bufusage;
905 : WalUsage walusage_start,
906 : walusage;
907 :
908 : /* We need to track buffer usage as the planner can access them. */
909 146 : bufusage_start = pgBufferUsage;
910 :
911 : /*
912 : * Similarly the planner could write some WAL records in some cases
913 : * (e.g. setting a hint bit with those being WAL-logged)
914 : */
915 146 : walusage_start = pgWalUsage;
916 146 : INSTR_TIME_SET_CURRENT(start);
917 :
918 146 : nesting_level++;
919 146 : PG_TRY();
920 : {
921 146 : if (prev_planner_hook)
922 0 : result = prev_planner_hook(parse, query_string, cursorOptions,
923 : boundParams);
924 : else
925 146 : result = standard_planner(parse, query_string, cursorOptions,
926 : boundParams);
927 : }
928 0 : PG_FINALLY();
929 : {
930 146 : nesting_level--;
931 : }
932 146 : PG_END_TRY();
933 :
934 146 : INSTR_TIME_SET_CURRENT(duration);
935 146 : INSTR_TIME_SUBTRACT(duration, start);
936 :
937 : /* calc differences of buffer counters. */
938 146 : memset(&bufusage, 0, sizeof(BufferUsage));
939 146 : BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
940 :
941 : /* calc differences of WAL counters. */
942 146 : memset(&walusage, 0, sizeof(WalUsage));
943 146 : WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);
944 :
945 146 : pgss_store(query_string,
946 : parse->queryId,
947 : parse->stmt_location,
948 : parse->stmt_len,
949 : PGSS_PLAN,
950 146 : INSTR_TIME_GET_MILLISEC(duration),
951 : 0,
952 : &bufusage,
953 : &walusage,
954 : NULL,
955 : NULL);
956 : }
957 : else
958 : {
959 : /*
960 : * Even though we're not tracking plan time for this statement, we
961 : * must still increment the nesting level, to ensure that functions
962 : * evaluated during planning are not seen as top-level calls.
963 : */
964 75504 : nesting_level++;
965 75504 : PG_TRY();
966 : {
967 75504 : if (prev_planner_hook)
968 0 : result = prev_planner_hook(parse, query_string, cursorOptions,
969 : boundParams);
970 : else
971 75504 : result = standard_planner(parse, query_string, cursorOptions,
972 : boundParams);
973 : }
974 1172 : PG_FINALLY();
975 : {
976 75504 : nesting_level--;
977 : }
978 75504 : PG_END_TRY();
979 : }
980 :
981 74478 : return result;
982 : }
983 :
984 : /*
985 : * ExecutorStart hook: start up tracking if needed
986 : */
987 : static void
988 88808 : pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
989 : {
990 88808 : if (prev_ExecutorStart)
991 0 : prev_ExecutorStart(queryDesc, eflags);
992 : else
993 88808 : standard_ExecutorStart(queryDesc, eflags);
994 :
995 : /*
996 : * If query has queryId zero, don't track it. This prevents double
997 : * counting of optimizable statements that are directly contained in
998 : * utility statements.
999 : */
1000 88282 : if (pgss_enabled(nesting_level) && queryDesc->plannedstmt->queryId != UINT64CONST(0))
1001 : {
1002 : /*
1003 : * Set up to track total elapsed time in ExecutorRun. Make sure the
1004 : * space is allocated in the per-query context so it will go away at
1005 : * ExecutorEnd.
1006 : */
1007 54168 : if (queryDesc->totaltime == NULL)
1008 : {
1009 : MemoryContext oldcxt;
1010 :
1011 54168 : oldcxt = MemoryContextSwitchTo(queryDesc->estate->es_query_cxt);
1012 54168 : queryDesc->totaltime = InstrAlloc(1, INSTRUMENT_ALL, false);
1013 54168 : MemoryContextSwitchTo(oldcxt);
1014 : }
1015 : }
1016 88282 : }
1017 :
1018 : /*
1019 : * ExecutorRun hook: all we need do is track nesting depth
1020 : */
1021 : static void
1022 86558 : pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count,
1023 : bool execute_once)
1024 : {
1025 86558 : nesting_level++;
1026 86558 : PG_TRY();
1027 : {
1028 86558 : if (prev_ExecutorRun)
1029 0 : prev_ExecutorRun(queryDesc, direction, count, execute_once);
1030 : else
1031 86558 : standard_ExecutorRun(queryDesc, direction, count, execute_once);
1032 : }
1033 6258 : PG_FINALLY();
1034 : {
1035 86558 : nesting_level--;
1036 : }
1037 86558 : PG_END_TRY();
1038 80300 : }
1039 :
1040 : /*
1041 : * ExecutorFinish hook: all we need do is track nesting depth
1042 : */
1043 : static void
1044 77040 : pgss_ExecutorFinish(QueryDesc *queryDesc)
1045 : {
1046 77040 : nesting_level++;
1047 77040 : PG_TRY();
1048 : {
1049 77040 : if (prev_ExecutorFinish)
1050 0 : prev_ExecutorFinish(queryDesc);
1051 : else
1052 77040 : standard_ExecutorFinish(queryDesc);
1053 : }
1054 254 : PG_FINALLY();
1055 : {
1056 77040 : nesting_level--;
1057 : }
1058 77040 : PG_END_TRY();
1059 76786 : }
1060 :
1061 : /*
1062 : * ExecutorEnd hook: store results if needed
1063 : */
1064 : static void
1065 81690 : pgss_ExecutorEnd(QueryDesc *queryDesc)
1066 : {
1067 81690 : uint64 queryId = queryDesc->plannedstmt->queryId;
1068 :
1069 81690 : if (queryId != UINT64CONST(0) && queryDesc->totaltime &&
1070 51986 : pgss_enabled(nesting_level))
1071 : {
1072 : /*
1073 : * Make sure stats accumulation is done. (Note: it's okay if several
1074 : * levels of hook all do this.)
1075 : */
1076 51986 : InstrEndLoop(queryDesc->totaltime);
1077 :
1078 51800 : pgss_store(queryDesc->sourceText,
1079 : queryId,
1080 51986 : queryDesc->plannedstmt->stmt_location,
1081 51986 : queryDesc->plannedstmt->stmt_len,
1082 : PGSS_EXEC,
1083 51986 : queryDesc->totaltime->total * 1000.0, /* convert to msec */
1084 51986 : queryDesc->estate->es_total_processed,
1085 51986 : &queryDesc->totaltime->bufusage,
1086 51986 : &queryDesc->totaltime->walusage,
1087 51986 : queryDesc->estate->es_jit ? &queryDesc->estate->es_jit->instr : NULL,
1088 : NULL);
1089 : }
1090 :
1091 81690 : if (prev_ExecutorEnd)
1092 0 : prev_ExecutorEnd(queryDesc);
1093 : else
1094 81690 : standard_ExecutorEnd(queryDesc);
1095 81690 : }
1096 :
1097 : /*
1098 : * ProcessUtility hook
1099 : */
1100 : static void
1101 56452 : pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
1102 : bool readOnlyTree,
1103 : ProcessUtilityContext context,
1104 : ParamListInfo params, QueryEnvironment *queryEnv,
1105 : DestReceiver *dest, QueryCompletion *qc)
1106 : {
1107 56452 : Node *parsetree = pstmt->utilityStmt;
1108 56452 : uint64 saved_queryId = pstmt->queryId;
1109 56452 : int saved_stmt_location = pstmt->stmt_location;
1110 56452 : int saved_stmt_len = pstmt->stmt_len;
1111 56452 : bool enabled = pgss_track_utility && pgss_enabled(nesting_level);
1112 :
1113 : /*
1114 : * Force utility statements to get queryId zero. We do this even in cases
1115 : * where the statement contains an optimizable statement for which a
1116 : * queryId could be derived (such as EXPLAIN or DECLARE CURSOR). For such
1117 : * cases, runtime control will first go through ProcessUtility and then
1118 : * the executor, and we don't want the executor hooks to do anything,
1119 : * since we are already measuring the statement's costs at the utility
1120 : * level.
1121 : *
1122 : * Note that this is only done if pg_stat_statements is enabled and
1123 : * configured to track utility statements, in the unlikely possibility
1124 : * that user configured another extension to handle utility statements
1125 : * only.
1126 : */
1127 56452 : if (enabled)
1128 46882 : pstmt->queryId = UINT64CONST(0);
1129 :
1130 : /*
1131 : * If it's an EXECUTE statement, we don't track it and don't increment the
1132 : * nesting level. This allows the cycles to be charged to the underlying
1133 : * PREPARE instead (by the Executor hooks), which is much more useful.
1134 : *
1135 : * We also don't track execution of PREPARE. If we did, we would get one
1136 : * hash table entry for the PREPARE (with hash calculated from the query
1137 : * string), and then a different one with the same query string (but hash
1138 : * calculated from the query tree) would be used to accumulate costs of
1139 : * ensuing EXECUTEs. This would be confusing. Since PREPARE doesn't
1140 : * actually run the planner (only parse+rewrite), its costs are generally
1141 : * pretty negligible and it seems okay to just ignore it.
1142 : */
1143 56452 : if (enabled &&
1144 46882 : !IsA(parsetree, ExecuteStmt) &&
1145 44162 : !IsA(parsetree, PrepareStmt))
1146 39958 : {
1147 : instr_time start;
1148 : instr_time duration;
1149 : uint64 rows;
1150 : BufferUsage bufusage_start,
1151 : bufusage;
1152 : WalUsage walusage_start,
1153 : walusage;
1154 :
1155 43964 : bufusage_start = pgBufferUsage;
1156 43964 : walusage_start = pgWalUsage;
1157 43964 : INSTR_TIME_SET_CURRENT(start);
1158 :
1159 43964 : nesting_level++;
1160 43964 : PG_TRY();
1161 : {
1162 43964 : if (prev_ProcessUtility)
1163 0 : prev_ProcessUtility(pstmt, queryString, readOnlyTree,
1164 : context, params, queryEnv,
1165 : dest, qc);
1166 : else
1167 43964 : standard_ProcessUtility(pstmt, queryString, readOnlyTree,
1168 : context, params, queryEnv,
1169 : dest, qc);
1170 : }
1171 4006 : PG_FINALLY();
1172 : {
1173 43964 : nesting_level--;
1174 : }
1175 43964 : PG_END_TRY();
1176 :
1177 : /*
1178 : * CAUTION: do not access the *pstmt data structure again below here.
1179 : * If it was a ROLLBACK or similar, that data structure may have been
1180 : * freed. We must copy everything we still need into local variables,
1181 : * which we did above.
1182 : *
1183 : * For the same reason, we can't risk restoring pstmt->queryId to its
1184 : * former value, which'd otherwise be a good idea.
1185 : */
1186 :
1187 39958 : INSTR_TIME_SET_CURRENT(duration);
1188 39958 : INSTR_TIME_SUBTRACT(duration, start);
1189 :
1190 : /*
1191 : * Track the total number of rows retrieved or affected by the utility
1192 : * statements of COPY, FETCH, CREATE TABLE AS, CREATE MATERIALIZED
1193 : * VIEW, REFRESH MATERIALIZED VIEW and SELECT INTO.
1194 : */
1195 39958 : rows = (qc && (qc->commandTag == CMDTAG_COPY ||
1196 37322 : qc->commandTag == CMDTAG_FETCH ||
1197 36808 : qc->commandTag == CMDTAG_SELECT ||
1198 36466 : qc->commandTag == CMDTAG_REFRESH_MATERIALIZED_VIEW)) ?
1199 79916 : qc->nprocessed : 0;
1200 :
1201 : /* calc differences of buffer counters. */
1202 39958 : memset(&bufusage, 0, sizeof(BufferUsage));
1203 39958 : BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
1204 :
1205 : /* calc differences of WAL counters. */
1206 39958 : memset(&walusage, 0, sizeof(WalUsage));
1207 39958 : WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);
1208 :
1209 39958 : pgss_store(queryString,
1210 : saved_queryId,
1211 : saved_stmt_location,
1212 : saved_stmt_len,
1213 : PGSS_EXEC,
1214 39958 : INSTR_TIME_GET_MILLISEC(duration),
1215 : rows,
1216 : &bufusage,
1217 : &walusage,
1218 : NULL,
1219 : NULL);
1220 : }
1221 : else
1222 : {
1223 : /*
1224 : * Even though we're not tracking execution time for this statement,
1225 : * we must still increment the nesting level, to ensure that functions
1226 : * evaluated within it are not seen as top-level calls. But don't do
1227 : * so for EXECUTE; that way, when control reaches pgss_planner or
1228 : * pgss_ExecutorStart, we will treat the costs as top-level if
1229 : * appropriate. Likewise, don't bump for PREPARE, so that parse
1230 : * analysis will treat the statement as top-level if appropriate.
1231 : *
1232 : * To be absolutely certain we don't mess up the nesting level,
1233 : * evaluate the bump_level condition just once.
1234 : */
1235 12488 : bool bump_level =
1236 22254 : !IsA(parsetree, ExecuteStmt) &&
1237 9766 : !IsA(parsetree, PrepareStmt);
1238 :
1239 12488 : if (bump_level)
1240 9566 : nesting_level++;
1241 12488 : PG_TRY();
1242 : {
1243 12488 : if (prev_ProcessUtility)
1244 0 : prev_ProcessUtility(pstmt, queryString, readOnlyTree,
1245 : context, params, queryEnv,
1246 : dest, qc);
1247 : else
1248 12488 : standard_ProcessUtility(pstmt, queryString, readOnlyTree,
1249 : context, params, queryEnv,
1250 : dest, qc);
1251 : }
1252 198 : PG_FINALLY();
1253 : {
1254 12488 : if (bump_level)
1255 9566 : nesting_level--;
1256 : }
1257 12488 : PG_END_TRY();
1258 : }
1259 52248 : }
1260 :
1261 : /*
1262 : * Store some statistics for a statement.
1263 : *
1264 : * If jstate is not NULL then we're trying to create an entry for which
1265 : * we have no statistics as yet; we just want to record the normalized
1266 : * query string. total_time, rows, bufusage and walusage are ignored in this
1267 : * case.
1268 : *
1269 : * If kind is PGSS_PLAN or PGSS_EXEC, its value is used as the array position
1270 : * for the arrays in the Counters field.
1271 : */
1272 : static void
1273 141350 : pgss_store(const char *query, uint64 queryId,
1274 : int query_location, int query_len,
1275 : pgssStoreKind kind,
1276 : double total_time, uint64 rows,
1277 : const BufferUsage *bufusage,
1278 : const WalUsage *walusage,
1279 : const struct JitInstrumentation *jitusage,
1280 : JumbleState *jstate)
1281 : {
1282 : pgssHashKey key;
1283 : pgssEntry *entry;
1284 141350 : char *norm_query = NULL;
1285 141350 : int encoding = GetDatabaseEncoding();
1286 :
1287 : Assert(query != NULL);
1288 :
1289 : /* Safety check... */
1290 141350 : if (!pgss || !pgss_hash)
1291 0 : return;
1292 :
1293 : /*
1294 : * Nothing to do if compute_query_id isn't enabled and no other module
1295 : * computed a query identifier.
1296 : */
1297 141350 : if (queryId == UINT64CONST(0))
1298 0 : return;
1299 :
1300 : /*
1301 : * Confine our attention to the relevant part of the string, if the query
1302 : * is a portion of a multi-statement source string, and update query
1303 : * location and length if needed.
1304 : */
1305 141350 : query = CleanQuerytext(query, &query_location, &query_len);
1306 :
1307 : /* Set up key for hashtable search */
1308 :
1309 : /* clear padding */
1310 141350 : memset(&key, 0, sizeof(pgssHashKey));
1311 :
1312 141350 : key.userid = GetUserId();
1313 141350 : key.dbid = MyDatabaseId;
1314 141350 : key.queryid = queryId;
1315 141350 : key.toplevel = (nesting_level == 0);
1316 :
1317 : /* Lookup the hash table entry with shared lock. */
1318 141350 : LWLockAcquire(pgss->lock, LW_SHARED);
1319 :
1320 141350 : entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);
1321 :
1322 : /* Create new entry, if not present */
1323 141350 : if (!entry)
1324 : {
1325 : Size query_offset;
1326 : int gc_count;
1327 : bool stored;
1328 : bool do_gc;
1329 :
1330 : /*
1331 : * Create a new, normalized query string if caller asked. We don't
1332 : * need to hold the lock while doing this work. (Note: in any case,
1333 : * it's possible that someone else creates a duplicate hashtable entry
1334 : * in the interval where we don't hold the lock below. That case is
1335 : * handled by entry_alloc.)
1336 : */
1337 49918 : if (jstate)
1338 : {
1339 17940 : LWLockRelease(pgss->lock);
1340 17940 : norm_query = generate_normalized_query(jstate, query,
1341 : query_location,
1342 : &query_len);
1343 17940 : LWLockAcquire(pgss->lock, LW_SHARED);
1344 : }
1345 :
1346 : /* Append new query text to file with only shared lock held */
1347 49918 : stored = qtext_store(norm_query ? norm_query : query, query_len,
1348 : &query_offset, &gc_count);
1349 :
1350 : /*
1351 : * Determine whether we need to garbage collect external query texts
1352 : * while the shared lock is still held. This micro-optimization
1353 : * avoids taking the time to decide this while holding exclusive lock.
1354 : */
1355 49918 : do_gc = need_gc_qtexts();
1356 :
1357 : /* Need exclusive lock to make a new hashtable entry - promote */
1358 49918 : LWLockRelease(pgss->lock);
1359 49918 : LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
1360 :
1361 : /*
1362 : * A garbage collection may have occurred while we weren't holding the
1363 : * lock. In the unlikely event that this happens, the query text we
1364 : * stored above will have been garbage collected, so write it again.
1365 : * This should be infrequent enough that doing it while holding
1366 : * exclusive lock isn't a performance problem.
1367 : */
1368 49918 : if (!stored || pgss->gc_count != gc_count)
1369 0 : stored = qtext_store(norm_query ? norm_query : query, query_len,
1370 : &query_offset, NULL);
1371 :
1372 : /* If we failed to write to the text file, give up */
1373 49918 : if (!stored)
1374 0 : goto done;
1375 :
1376 : /* OK to create a new hashtable entry */
1377 49918 : entry = entry_alloc(&key, query_offset, query_len, encoding,
1378 : jstate != NULL);
1379 :
1380 : /* If needed, perform garbage collection while exclusive lock held */
1381 49918 : if (do_gc)
1382 0 : gc_qtexts();
1383 : }
1384 :
1385 : /* Increment the counts, except when jstate is not NULL */
1386 141350 : if (!jstate)
1387 : {
1388 : /*
1389 : * Grab the spinlock while updating the counters (see comment about
1390 : * locking rules at the head of the file)
1391 : */
1392 92090 : volatile pgssEntry *e = (volatile pgssEntry *) entry;
1393 :
1394 : Assert(kind == PGSS_PLAN || kind == PGSS_EXEC);
1395 :
1396 92090 : SpinLockAcquire(&e->mutex);
1397 :
1398 : /* "Unstick" entry if it was previously sticky */
1399 92090 : if (IS_STICKY(e->counters))
1400 48800 : e->counters.usage = USAGE_INIT;
1401 :
1402 92090 : e->counters.calls[kind] += 1;
1403 92090 : e->counters.total_time[kind] += total_time;
1404 :
1405 92090 : if (e->counters.calls[kind] == 1)
1406 : {
1407 48908 : e->counters.min_time[kind] = total_time;
1408 48908 : e->counters.max_time[kind] = total_time;
1409 48908 : e->counters.mean_time[kind] = total_time;
1410 : }
1411 : else
1412 : {
1413 : /*
1414 : * Welford's method for accurately computing variance. See
1415 : * <http://www.johndcook.com/blog/standard_deviation/>
1416 : */
1417 43182 : double old_mean = e->counters.mean_time[kind];
1418 :
1419 43182 : e->counters.mean_time[kind] +=
1420 43182 : (total_time - old_mean) / e->counters.calls[kind];
1421 43182 : e->counters.sum_var_time[kind] +=
1422 43182 : (total_time - old_mean) * (total_time - e->counters.mean_time[kind]);
1423 :
1424 : /*
1425 : * Calculate min and max time. min = 0 and max = 0 means that the
1426 : * min/max statistics were reset
1427 : */
1428 43182 : if (e->counters.min_time[kind] == 0
1429 4 : && e->counters.max_time[kind] == 0)
1430 : {
1431 4 : e->counters.min_time[kind] = total_time;
1432 4 : e->counters.max_time[kind] = total_time;
1433 : }
1434 : else
1435 : {
1436 43178 : if (e->counters.min_time[kind] > total_time)
1437 11098 : e->counters.min_time[kind] = total_time;
1438 43178 : if (e->counters.max_time[kind] < total_time)
1439 5056 : e->counters.max_time[kind] = total_time;
1440 : }
1441 : }
1442 92090 : e->counters.rows += rows;
1443 92090 : e->counters.shared_blks_hit += bufusage->shared_blks_hit;
1444 92090 : e->counters.shared_blks_read += bufusage->shared_blks_read;
1445 92090 : e->counters.shared_blks_dirtied += bufusage->shared_blks_dirtied;
1446 92090 : e->counters.shared_blks_written += bufusage->shared_blks_written;
1447 92090 : e->counters.local_blks_hit += bufusage->local_blks_hit;
1448 92090 : e->counters.local_blks_read += bufusage->local_blks_read;
1449 92090 : e->counters.local_blks_dirtied += bufusage->local_blks_dirtied;
1450 92090 : e->counters.local_blks_written += bufusage->local_blks_written;
1451 92090 : e->counters.temp_blks_read += bufusage->temp_blks_read;
1452 92090 : e->counters.temp_blks_written += bufusage->temp_blks_written;
1453 92090 : e->counters.shared_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->shared_blk_read_time);
1454 92090 : e->counters.shared_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->shared_blk_write_time);
1455 92090 : e->counters.local_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->local_blk_read_time);
1456 92090 : e->counters.local_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->local_blk_write_time);
1457 92090 : e->counters.temp_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->temp_blk_read_time);
1458 92090 : e->counters.temp_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->temp_blk_write_time);
1459 92090 : e->counters.usage += USAGE_EXEC(total_time);
1460 92090 : e->counters.wal_records += walusage->wal_records;
1461 92090 : e->counters.wal_fpi += walusage->wal_fpi;
1462 92090 : e->counters.wal_bytes += walusage->wal_bytes;
1463 92090 : if (jitusage)
1464 : {
1465 186 : e->counters.jit_functions += jitusage->created_functions;
1466 186 : e->counters.jit_generation_time += INSTR_TIME_GET_MILLISEC(jitusage->generation_counter);
1467 :
1468 186 : if (INSTR_TIME_GET_MILLISEC(jitusage->deform_counter))
1469 170 : e->counters.jit_deform_count++;
1470 186 : e->counters.jit_deform_time += INSTR_TIME_GET_MILLISEC(jitusage->deform_counter);
1471 :
1472 186 : if (INSTR_TIME_GET_MILLISEC(jitusage->inlining_counter))
1473 124 : e->counters.jit_inlining_count++;
1474 186 : e->counters.jit_inlining_time += INSTR_TIME_GET_MILLISEC(jitusage->inlining_counter);
1475 :
1476 186 : if (INSTR_TIME_GET_MILLISEC(jitusage->optimization_counter))
1477 182 : e->counters.jit_optimization_count++;
1478 186 : e->counters.jit_optimization_time += INSTR_TIME_GET_MILLISEC(jitusage->optimization_counter);
1479 :
1480 186 : if (INSTR_TIME_GET_MILLISEC(jitusage->emission_counter))
1481 182 : e->counters.jit_emission_count++;
1482 186 : e->counters.jit_emission_time += INSTR_TIME_GET_MILLISEC(jitusage->emission_counter);
1483 : }
1484 :
1485 92090 : SpinLockRelease(&e->mutex);
1486 : }
1487 :
1488 49260 : done:
1489 141350 : LWLockRelease(pgss->lock);
1490 :
1491 : /* We postpone this clean-up until we're out of the lock */
1492 141350 : if (norm_query)
1493 17940 : pfree(norm_query);
1494 : }
1495 :
1496 : /*
1497 : * Reset statement statistics corresponding to userid, dbid, and queryid.
1498 : */
1499 : Datum
1500 0 : pg_stat_statements_reset_1_7(PG_FUNCTION_ARGS)
1501 : {
1502 : Oid userid;
1503 : Oid dbid;
1504 : uint64 queryid;
1505 :
1506 0 : userid = PG_GETARG_OID(0);
1507 0 : dbid = PG_GETARG_OID(1);
1508 0 : queryid = (uint64) PG_GETARG_INT64(2);
1509 :
1510 0 : entry_reset(userid, dbid, queryid, false);
1511 :
1512 0 : PG_RETURN_VOID();
1513 : }
1514 :
1515 : Datum
1516 96 : pg_stat_statements_reset_1_11(PG_FUNCTION_ARGS)
1517 : {
1518 : Oid userid;
1519 : Oid dbid;
1520 : uint64 queryid;
1521 : bool minmax_only;
1522 :
1523 96 : userid = PG_GETARG_OID(0);
1524 96 : dbid = PG_GETARG_OID(1);
1525 96 : queryid = (uint64) PG_GETARG_INT64(2);
1526 96 : minmax_only = PG_GETARG_BOOL(3);
1527 :
1528 96 : PG_RETURN_TIMESTAMPTZ(entry_reset(userid, dbid, queryid, minmax_only));
1529 : }
1530 :
1531 : /*
1532 : * Reset statement statistics.
1533 : */
1534 : Datum
1535 2 : pg_stat_statements_reset(PG_FUNCTION_ARGS)
1536 : {
1537 2 : entry_reset(0, 0, 0, false);
1538 :
1539 2 : PG_RETURN_VOID();
1540 : }
1541 :
/*
 * Number of output arguments (columns) for various API versions.
 *
 * pg_stat_statements_internal() checks these against the natts of the
 * result tuple descriptor to verify the SQL declaration matches the
 * requested C-level API version (and to distinguish 1.0 from 1.1).
 */
#define PG_STAT_STATEMENTS_COLS_V1_0	14
#define PG_STAT_STATEMENTS_COLS_V1_1	18
#define PG_STAT_STATEMENTS_COLS_V1_2	19
#define PG_STAT_STATEMENTS_COLS_V1_3	23
#define PG_STAT_STATEMENTS_COLS_V1_8	32
#define PG_STAT_STATEMENTS_COLS_V1_9	33
#define PG_STAT_STATEMENTS_COLS_V1_10	43
#define PG_STAT_STATEMENTS_COLS_V1_11	49
#define PG_STAT_STATEMENTS_COLS			49	/* maximum of above */
1552 :
1553 : /*
1554 : * Retrieve statement statistics.
1555 : *
1556 : * The SQL API of this function has changed multiple times, and will likely
1557 : * do so again in future. To support the case where a newer version of this
1558 : * loadable module is being used with an old SQL declaration of the function,
1559 : * we continue to support the older API versions. For 1.2 and later, the
1560 : * expected API version is identified by embedding it in the C name of the
1561 : * function. Unfortunately we weren't bright enough to do that for 1.1.
1562 : */
1563 : Datum
1564 110 : pg_stat_statements_1_11(PG_FUNCTION_ARGS)
1565 : {
1566 110 : bool showtext = PG_GETARG_BOOL(0);
1567 :
1568 110 : pg_stat_statements_internal(fcinfo, PGSS_V1_11, showtext);
1569 :
1570 110 : return (Datum) 0;
1571 : }
1572 :
1573 : Datum
1574 2 : pg_stat_statements_1_10(PG_FUNCTION_ARGS)
1575 : {
1576 2 : bool showtext = PG_GETARG_BOOL(0);
1577 :
1578 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_10, showtext);
1579 :
1580 2 : return (Datum) 0;
1581 : }
1582 :
1583 : Datum
1584 2 : pg_stat_statements_1_9(PG_FUNCTION_ARGS)
1585 : {
1586 2 : bool showtext = PG_GETARG_BOOL(0);
1587 :
1588 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_9, showtext);
1589 :
1590 2 : return (Datum) 0;
1591 : }
1592 :
1593 : Datum
1594 0 : pg_stat_statements_1_8(PG_FUNCTION_ARGS)
1595 : {
1596 0 : bool showtext = PG_GETARG_BOOL(0);
1597 :
1598 0 : pg_stat_statements_internal(fcinfo, PGSS_V1_8, showtext);
1599 :
1600 0 : return (Datum) 0;
1601 : }
1602 :
1603 : Datum
1604 2 : pg_stat_statements_1_3(PG_FUNCTION_ARGS)
1605 : {
1606 2 : bool showtext = PG_GETARG_BOOL(0);
1607 :
1608 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_3, showtext);
1609 :
1610 2 : return (Datum) 0;
1611 : }
1612 :
1613 : Datum
1614 0 : pg_stat_statements_1_2(PG_FUNCTION_ARGS)
1615 : {
1616 0 : bool showtext = PG_GETARG_BOOL(0);
1617 :
1618 0 : pg_stat_statements_internal(fcinfo, PGSS_V1_2, showtext);
1619 :
1620 0 : return (Datum) 0;
1621 : }
1622 :
1623 : /*
1624 : * Legacy entry point for pg_stat_statements() API versions 1.0 and 1.1.
1625 : * This can be removed someday, perhaps.
1626 : */
1627 : Datum
1628 0 : pg_stat_statements(PG_FUNCTION_ARGS)
1629 : {
1630 : /* If it's really API 1.1, we'll figure that out below */
1631 0 : pg_stat_statements_internal(fcinfo, PGSS_V1_0, true);
1632 :
1633 0 : return (Datum) 0;
1634 : }
1635 :
1636 : /* Common code for all versions of pg_stat_statements() */
1637 : static void
1638 116 : pg_stat_statements_internal(FunctionCallInfo fcinfo,
1639 : pgssVersion api_version,
1640 : bool showtext)
1641 : {
1642 116 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1643 116 : Oid userid = GetUserId();
1644 116 : bool is_allowed_role = false;
1645 116 : char *qbuffer = NULL;
1646 116 : Size qbuffer_size = 0;
1647 116 : Size extent = 0;
1648 116 : int gc_count = 0;
1649 : HASH_SEQ_STATUS hash_seq;
1650 : pgssEntry *entry;
1651 :
1652 : /*
1653 : * Superusers or roles with the privileges of pg_read_all_stats members
1654 : * are allowed
1655 : */
1656 116 : is_allowed_role = has_privs_of_role(userid, ROLE_PG_READ_ALL_STATS);
1657 :
1658 : /* hash table must exist already */
1659 116 : if (!pgss || !pgss_hash)
1660 0 : ereport(ERROR,
1661 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1662 : errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
1663 :
1664 116 : InitMaterializedSRF(fcinfo, 0);
1665 :
1666 : /*
1667 : * Check we have the expected number of output arguments. Aside from
1668 : * being a good safety check, we need a kluge here to detect API version
1669 : * 1.1, which was wedged into the code in an ill-considered way.
1670 : */
1671 116 : switch (rsinfo->setDesc->natts)
1672 : {
1673 0 : case PG_STAT_STATEMENTS_COLS_V1_0:
1674 0 : if (api_version != PGSS_V1_0)
1675 0 : elog(ERROR, "incorrect number of output arguments");
1676 0 : break;
1677 0 : case PG_STAT_STATEMENTS_COLS_V1_1:
1678 : /* pg_stat_statements() should have told us 1.0 */
1679 0 : if (api_version != PGSS_V1_0)
1680 0 : elog(ERROR, "incorrect number of output arguments");
1681 0 : api_version = PGSS_V1_1;
1682 0 : break;
1683 0 : case PG_STAT_STATEMENTS_COLS_V1_2:
1684 0 : if (api_version != PGSS_V1_2)
1685 0 : elog(ERROR, "incorrect number of output arguments");
1686 0 : break;
1687 2 : case PG_STAT_STATEMENTS_COLS_V1_3:
1688 2 : if (api_version != PGSS_V1_3)
1689 0 : elog(ERROR, "incorrect number of output arguments");
1690 2 : break;
1691 0 : case PG_STAT_STATEMENTS_COLS_V1_8:
1692 0 : if (api_version != PGSS_V1_8)
1693 0 : elog(ERROR, "incorrect number of output arguments");
1694 0 : break;
1695 2 : case PG_STAT_STATEMENTS_COLS_V1_9:
1696 2 : if (api_version != PGSS_V1_9)
1697 0 : elog(ERROR, "incorrect number of output arguments");
1698 2 : break;
1699 2 : case PG_STAT_STATEMENTS_COLS_V1_10:
1700 2 : if (api_version != PGSS_V1_10)
1701 0 : elog(ERROR, "incorrect number of output arguments");
1702 2 : break;
1703 110 : case PG_STAT_STATEMENTS_COLS_V1_11:
1704 110 : if (api_version != PGSS_V1_11)
1705 0 : elog(ERROR, "incorrect number of output arguments");
1706 110 : break;
1707 0 : default:
1708 0 : elog(ERROR, "incorrect number of output arguments");
1709 : }
1710 :
1711 : /*
1712 : * We'd like to load the query text file (if needed) while not holding any
1713 : * lock on pgss->lock. In the worst case we'll have to do this again
1714 : * after we have the lock, but it's unlikely enough to make this a win
1715 : * despite occasional duplicated work. We need to reload if anybody
1716 : * writes to the file (either a retail qtext_store(), or a garbage
1717 : * collection) between this point and where we've gotten shared lock. If
1718 : * a qtext_store is actually in progress when we look, we might as well
1719 : * skip the speculative load entirely.
1720 : */
1721 116 : if (showtext)
1722 : {
1723 : int n_writers;
1724 :
1725 : /* Take the mutex so we can examine variables */
1726 : {
1727 116 : volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
1728 :
1729 116 : SpinLockAcquire(&s->mutex);
1730 116 : extent = s->extent;
1731 116 : n_writers = s->n_writers;
1732 116 : gc_count = s->gc_count;
1733 116 : SpinLockRelease(&s->mutex);
1734 : }
1735 :
1736 : /* No point in loading file now if there are active writers */
1737 116 : if (n_writers == 0)
1738 116 : qbuffer = qtext_load_file(&qbuffer_size);
1739 : }
1740 :
1741 : /*
1742 : * Get shared lock, load or reload the query text file if we must, and
1743 : * iterate over the hashtable entries.
1744 : *
1745 : * With a large hash table, we might be holding the lock rather longer
1746 : * than one could wish. However, this only blocks creation of new hash
1747 : * table entries, and the larger the hash table the less likely that is to
1748 : * be needed. So we can hope this is okay. Perhaps someday we'll decide
1749 : * we need to partition the hash table to limit the time spent holding any
1750 : * one lock.
1751 : */
1752 116 : LWLockAcquire(pgss->lock, LW_SHARED);
1753 :
1754 116 : if (showtext)
1755 : {
1756 : /*
1757 : * Here it is safe to examine extent and gc_count without taking the
1758 : * mutex. Note that although other processes might change
1759 : * pgss->extent just after we look at it, the strings they then write
1760 : * into the file cannot yet be referenced in the hashtable, so we
1761 : * don't care whether we see them or not.
1762 : *
1763 : * If qtext_load_file fails, we just press on; we'll return NULL for
1764 : * every query text.
1765 : */
1766 116 : if (qbuffer == NULL ||
1767 116 : pgss->extent != extent ||
1768 116 : pgss->gc_count != gc_count)
1769 : {
1770 0 : free(qbuffer);
1771 0 : qbuffer = qtext_load_file(&qbuffer_size);
1772 : }
1773 : }
1774 :
1775 116 : hash_seq_init(&hash_seq, pgss_hash);
1776 48650 : while ((entry = hash_seq_search(&hash_seq)) != NULL)
1777 : {
1778 : Datum values[PG_STAT_STATEMENTS_COLS];
1779 : bool nulls[PG_STAT_STATEMENTS_COLS];
1780 48534 : int i = 0;
1781 : Counters tmp;
1782 : double stddev;
1783 48534 : int64 queryid = entry->key.queryid;
1784 : TimestampTz stats_since;
1785 : TimestampTz minmax_stats_since;
1786 :
1787 48534 : memset(values, 0, sizeof(values));
1788 48534 : memset(nulls, 0, sizeof(nulls));
1789 :
1790 48534 : values[i++] = ObjectIdGetDatum(entry->key.userid);
1791 48534 : values[i++] = ObjectIdGetDatum(entry->key.dbid);
1792 48534 : if (api_version >= PGSS_V1_9)
1793 48514 : values[i++] = BoolGetDatum(entry->key.toplevel);
1794 :
1795 48534 : if (is_allowed_role || entry->key.userid == userid)
1796 : {
1797 48534 : if (api_version >= PGSS_V1_2)
1798 48534 : values[i++] = Int64GetDatumFast(queryid);
1799 :
1800 48534 : if (showtext)
1801 : {
1802 48534 : char *qstr = qtext_fetch(entry->query_offset,
1803 : entry->query_len,
1804 : qbuffer,
1805 : qbuffer_size);
1806 :
1807 48534 : if (qstr)
1808 : {
1809 : char *enc;
1810 :
1811 48534 : enc = pg_any_to_server(qstr,
1812 : entry->query_len,
1813 : entry->encoding);
1814 :
1815 48534 : values[i++] = CStringGetTextDatum(enc);
1816 :
1817 48534 : if (enc != qstr)
1818 0 : pfree(enc);
1819 : }
1820 : else
1821 : {
1822 : /* Just return a null if we fail to find the text */
1823 0 : nulls[i++] = true;
1824 : }
1825 : }
1826 : else
1827 : {
1828 : /* Query text not requested */
1829 0 : nulls[i++] = true;
1830 : }
1831 : }
1832 : else
1833 : {
1834 : /* Don't show queryid */
1835 0 : if (api_version >= PGSS_V1_2)
1836 0 : nulls[i++] = true;
1837 :
1838 : /*
1839 : * Don't show query text, but hint as to the reason for not doing
1840 : * so if it was requested
1841 : */
1842 0 : if (showtext)
1843 0 : values[i++] = CStringGetTextDatum("<insufficient privilege>");
1844 : else
1845 0 : nulls[i++] = true;
1846 : }
1847 :
1848 : /* copy counters to a local variable to keep locking time short */
1849 : {
1850 48534 : volatile pgssEntry *e = (volatile pgssEntry *) entry;
1851 :
1852 48534 : SpinLockAcquire(&e->mutex);
1853 48534 : tmp = e->counters;
1854 48534 : stats_since = e->stats_since;
1855 48534 : minmax_stats_since = e->minmax_stats_since;
1856 48534 : SpinLockRelease(&e->mutex);
1857 : }
1858 :
1859 : /* Skip entry if unexecuted (ie, it's a pending "sticky" entry) */
1860 48534 : if (IS_STICKY(tmp))
1861 1140 : continue;
1862 :
1863 : /* Note that we rely on PGSS_PLAN being 0 and PGSS_EXEC being 1. */
1864 142182 : for (int kind = 0; kind < PGSS_NUMKIND; kind++)
1865 : {
1866 94788 : if (kind == PGSS_EXEC || api_version >= PGSS_V1_8)
1867 : {
1868 94770 : values[i++] = Int64GetDatumFast(tmp.calls[kind]);
1869 94770 : values[i++] = Float8GetDatumFast(tmp.total_time[kind]);
1870 : }
1871 :
1872 94788 : if ((kind == PGSS_EXEC && api_version >= PGSS_V1_3) ||
1873 : api_version >= PGSS_V1_8)
1874 : {
1875 94770 : values[i++] = Float8GetDatumFast(tmp.min_time[kind]);
1876 94770 : values[i++] = Float8GetDatumFast(tmp.max_time[kind]);
1877 94770 : values[i++] = Float8GetDatumFast(tmp.mean_time[kind]);
1878 :
1879 : /*
1880 : * Note we are calculating the population variance here, not
1881 : * the sample variance, as we have data for the whole
1882 : * population, so Bessel's correction is not used, and we
1883 : * don't divide by tmp.calls - 1.
1884 : */
1885 94770 : if (tmp.calls[kind] > 1)
1886 8430 : stddev = sqrt(tmp.sum_var_time[kind] / tmp.calls[kind]);
1887 : else
1888 86340 : stddev = 0.0;
1889 94770 : values[i++] = Float8GetDatumFast(stddev);
1890 : }
1891 : }
1892 47394 : values[i++] = Int64GetDatumFast(tmp.rows);
1893 47394 : values[i++] = Int64GetDatumFast(tmp.shared_blks_hit);
1894 47394 : values[i++] = Int64GetDatumFast(tmp.shared_blks_read);
1895 47394 : if (api_version >= PGSS_V1_1)
1896 47394 : values[i++] = Int64GetDatumFast(tmp.shared_blks_dirtied);
1897 47394 : values[i++] = Int64GetDatumFast(tmp.shared_blks_written);
1898 47394 : values[i++] = Int64GetDatumFast(tmp.local_blks_hit);
1899 47394 : values[i++] = Int64GetDatumFast(tmp.local_blks_read);
1900 47394 : if (api_version >= PGSS_V1_1)
1901 47394 : values[i++] = Int64GetDatumFast(tmp.local_blks_dirtied);
1902 47394 : values[i++] = Int64GetDatumFast(tmp.local_blks_written);
1903 47394 : values[i++] = Int64GetDatumFast(tmp.temp_blks_read);
1904 47394 : values[i++] = Int64GetDatumFast(tmp.temp_blks_written);
1905 47394 : if (api_version >= PGSS_V1_1)
1906 : {
1907 47394 : values[i++] = Float8GetDatumFast(tmp.shared_blk_read_time);
1908 47394 : values[i++] = Float8GetDatumFast(tmp.shared_blk_write_time);
1909 : }
1910 47394 : if (api_version >= PGSS_V1_11)
1911 : {
1912 47324 : values[i++] = Float8GetDatumFast(tmp.local_blk_read_time);
1913 47324 : values[i++] = Float8GetDatumFast(tmp.local_blk_write_time);
1914 : }
1915 47394 : if (api_version >= PGSS_V1_10)
1916 : {
1917 47352 : values[i++] = Float8GetDatumFast(tmp.temp_blk_read_time);
1918 47352 : values[i++] = Float8GetDatumFast(tmp.temp_blk_write_time);
1919 : }
1920 47394 : if (api_version >= PGSS_V1_8)
1921 : {
1922 : char buf[256];
1923 : Datum wal_bytes;
1924 :
1925 47376 : values[i++] = Int64GetDatumFast(tmp.wal_records);
1926 47376 : values[i++] = Int64GetDatumFast(tmp.wal_fpi);
1927 :
1928 47376 : snprintf(buf, sizeof buf, UINT64_FORMAT, tmp.wal_bytes);
1929 :
1930 : /* Convert to numeric. */
1931 47376 : wal_bytes = DirectFunctionCall3(numeric_in,
1932 : CStringGetDatum(buf),
1933 : ObjectIdGetDatum(0),
1934 : Int32GetDatum(-1));
1935 47376 : values[i++] = wal_bytes;
1936 : }
1937 47394 : if (api_version >= PGSS_V1_10)
1938 : {
1939 47352 : values[i++] = Int64GetDatumFast(tmp.jit_functions);
1940 47352 : values[i++] = Float8GetDatumFast(tmp.jit_generation_time);
1941 47352 : values[i++] = Int64GetDatumFast(tmp.jit_inlining_count);
1942 47352 : values[i++] = Float8GetDatumFast(tmp.jit_inlining_time);
1943 47352 : values[i++] = Int64GetDatumFast(tmp.jit_optimization_count);
1944 47352 : values[i++] = Float8GetDatumFast(tmp.jit_optimization_time);
1945 47352 : values[i++] = Int64GetDatumFast(tmp.jit_emission_count);
1946 47352 : values[i++] = Float8GetDatumFast(tmp.jit_emission_time);
1947 : }
1948 47394 : if (api_version >= PGSS_V1_11)
1949 : {
1950 47324 : values[i++] = Int64GetDatumFast(tmp.jit_deform_count);
1951 47324 : values[i++] = Float8GetDatumFast(tmp.jit_deform_time);
1952 47324 : values[i++] = TimestampTzGetDatum(stats_since);
1953 47324 : values[i++] = TimestampTzGetDatum(minmax_stats_since);
1954 : }
1955 :
1956 : Assert(i == (api_version == PGSS_V1_0 ? PG_STAT_STATEMENTS_COLS_V1_0 :
1957 : api_version == PGSS_V1_1 ? PG_STAT_STATEMENTS_COLS_V1_1 :
1958 : api_version == PGSS_V1_2 ? PG_STAT_STATEMENTS_COLS_V1_2 :
1959 : api_version == PGSS_V1_3 ? PG_STAT_STATEMENTS_COLS_V1_3 :
1960 : api_version == PGSS_V1_8 ? PG_STAT_STATEMENTS_COLS_V1_8 :
1961 : api_version == PGSS_V1_9 ? PG_STAT_STATEMENTS_COLS_V1_9 :
1962 : api_version == PGSS_V1_10 ? PG_STAT_STATEMENTS_COLS_V1_10 :
1963 : api_version == PGSS_V1_11 ? PG_STAT_STATEMENTS_COLS_V1_11 :
1964 : -1 /* fail if you forget to update this assert */ ));
1965 :
1966 47394 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
1967 : }
1968 :
1969 116 : LWLockRelease(pgss->lock);
1970 :
1971 116 : free(qbuffer);
1972 116 : }
1973 :
1974 : /* Number of output arguments (columns) for pg_stat_statements_info */
1975 : #define PG_STAT_STATEMENTS_INFO_COLS 2
1976 :
1977 : /*
1978 : * Return statistics of pg_stat_statements.
1979 : */
Datum
pg_stat_statements_info(PG_FUNCTION_ARGS)
{
	pgssGlobalStats stats;
	TupleDesc	tupdesc;
	Datum		values[PG_STAT_STATEMENTS_INFO_COLS] = {0};
	bool		nulls[PG_STAT_STATEMENTS_INFO_COLS] = {0};

	/* Shared state exists only if the library was preloaded. */
	if (!pgss || !pgss_hash)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	/* Read global statistics for pg_stat_statements */
	{
		/* Copy under the spinlock so we get a consistent snapshot. */
		volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;

		SpinLockAcquire(&s->mutex);
		stats = s->stats;
		SpinLockRelease(&s->mutex);
	}

	/* Column order must match the pg_stat_statements_info() SQL definition. */
	values[0] = Int64GetDatum(stats.dealloc);
	values[1] = TimestampTzGetDatum(stats.stats_reset);

	PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
}
2011 :
2012 : /*
2013 : * Estimate shared memory space needed.
2014 : */
2015 : static Size
2016 6 : pgss_memsize(void)
2017 : {
2018 : Size size;
2019 :
2020 6 : size = MAXALIGN(sizeof(pgssSharedState));
2021 6 : size = add_size(size, hash_estimate_size(pgss_max, sizeof(pgssEntry)));
2022 :
2023 6 : return size;
2024 : }
2025 :
2026 : /*
2027 : * Allocate a new hashtable entry.
2028 : * caller must hold an exclusive lock on pgss->lock
2029 : *
2030 : * "query" need not be null-terminated; we rely on query_len instead
2031 : *
2032 : * If "sticky" is true, make the new entry artificially sticky so that it will
2033 : * probably still be there when the query finishes execution. We do this by
2034 : * giving it a median usage value rather than the normal value. (Strictly
2035 : * speaking, query strings are normalized on a best effort basis, though it
2036 : * would be difficult to demonstrate this even under artificial conditions.)
2037 : *
2038 : * Note: despite needing exclusive lock, it's not an error for the target
2039 : * entry to already exist. This is because pgss_store releases and
2040 : * reacquires lock after failing to find a match; so someone else could
2041 : * have made the entry while we waited to get exclusive lock.
2042 : */
2043 : static pgssEntry *
2044 49918 : entry_alloc(pgssHashKey *key, Size query_offset, int query_len, int encoding,
2045 : bool sticky)
2046 : {
2047 : pgssEntry *entry;
2048 : bool found;
2049 :
2050 : /* Make space if needed */
2051 49918 : while (hash_get_num_entries(pgss_hash) >= pgss_max)
2052 0 : entry_dealloc();
2053 :
2054 : /* Find or create an entry with desired hash code */
2055 49918 : entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found);
2056 :
2057 49918 : if (!found)
2058 : {
2059 : /* New entry, initialize it */
2060 :
2061 : /* reset the statistics */
2062 49918 : memset(&entry->counters, 0, sizeof(Counters));
2063 : /* set the appropriate initial usage count */
2064 49918 : entry->counters.usage = sticky ? pgss->cur_median_usage : USAGE_INIT;
2065 : /* re-initialize the mutex each time ... we assume no one using it */
2066 49918 : SpinLockInit(&entry->mutex);
2067 : /* ... and don't forget the query text metadata */
2068 : Assert(query_len >= 0);
2069 49918 : entry->query_offset = query_offset;
2070 49918 : entry->query_len = query_len;
2071 49918 : entry->encoding = encoding;
2072 49918 : entry->stats_since = GetCurrentTimestamp();
2073 49918 : entry->minmax_stats_since = entry->stats_since;
2074 : }
2075 :
2076 49918 : return entry;
2077 : }
2078 :
2079 : /*
2080 : * qsort comparator for sorting into increasing usage order
2081 : */
2082 : static int
2083 0 : entry_cmp(const void *lhs, const void *rhs)
2084 : {
2085 0 : double l_usage = (*(pgssEntry *const *) lhs)->counters.usage;
2086 0 : double r_usage = (*(pgssEntry *const *) rhs)->counters.usage;
2087 :
2088 0 : if (l_usage < r_usage)
2089 0 : return -1;
2090 0 : else if (l_usage > r_usage)
2091 0 : return +1;
2092 : else
2093 0 : return 0;
2094 : }
2095 :
2096 : /*
2097 : * Deallocate least-used entries.
2098 : *
2099 : * Caller must hold an exclusive lock on pgss->lock.
2100 : */
static void
entry_dealloc(void)
{
	HASH_SEQ_STATUS hash_seq;
	pgssEntry **entries;
	pgssEntry  *entry;
	int			nvictims;
	int			i;
	Size		tottextlen;
	int			nvalidtexts;

	/*
	 * Sort entries by usage and deallocate USAGE_DEALLOC_PERCENT of them.
	 * While we're scanning the table, apply the decay factor to the usage
	 * values, and update the mean query length.
	 *
	 * Note that the mean query length is almost immediately obsolete, since
	 * we compute it before not after discarding the least-used entries.
	 * Hopefully, that doesn't affect the mean too much; it doesn't seem worth
	 * making two passes to get a more current result.  Likewise, the new
	 * cur_median_usage includes the entries we're about to zap.
	 */

	entries = palloc(hash_get_num_entries(pgss_hash) * sizeof(pgssEntry *));

	i = 0;
	tottextlen = 0;
	nvalidtexts = 0;

	/* Pass 1: collect pointers to all entries, decaying usage as we go. */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		entries[i++] = entry;
		/* "Sticky" entries get a different usage decay rate. */
		if (IS_STICKY(entry->counters))
			entry->counters.usage *= STICKY_DECREASE_FACTOR;
		else
			entry->counters.usage *= USAGE_DECREASE_FACTOR;
		/* In the mean length computation, ignore dropped texts. */
		if (entry->query_len >= 0)
		{
			/* +1 accounts for the NUL terminator stored with each text */
			tottextlen += entry->query_len + 1;
			nvalidtexts++;
		}
	}

	/* Sort into increasing order by usage */
	qsort(entries, i, sizeof(pgssEntry *), entry_cmp);

	/* Record the (approximate) median usage */
	if (i > 0)
		pgss->cur_median_usage = entries[i / 2]->counters.usage;
	/* Record the mean query length */
	if (nvalidtexts > 0)
		pgss->mean_query_len = tottextlen / nvalidtexts;
	else
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	/* Now zap an appropriate fraction of lowest-usage entries */
	nvictims = Max(10, i * USAGE_DEALLOC_PERCENT / 100);
	nvictims = Min(nvictims, i);

	/* Pass 2: remove the nvictims lowest-usage entries from the hash. */
	for (i = 0; i < nvictims; i++)
	{
		hash_search(pgss_hash, &entries[i]->key, HASH_REMOVE, NULL);
	}

	pfree(entries);

	/* Increment the number of times entries are deallocated */
	{
		volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;

		SpinLockAcquire(&s->mutex);
		s->stats.dealloc += 1;
		SpinLockRelease(&s->mutex);
	}
}
2179 :
2180 : /*
2181 : * Given a query string (not necessarily null-terminated), allocate a new
2182 : * entry in the external query text file and store the string there.
2183 : *
2184 : * If successful, returns true, and stores the new entry's offset in the file
2185 : * into *query_offset. Also, if gc_count isn't NULL, *gc_count is set to the
2186 : * number of garbage collections that have occurred so far.
2187 : *
2188 : * On failure, returns false.
2189 : *
2190 : * At least a shared lock on pgss->lock must be held by the caller, so as
2191 : * to prevent a concurrent garbage collection. Share-lock-holding callers
2192 : * should pass a gc_count pointer to obtain the number of garbage collections,
2193 : * so that they can recheck the count after obtaining exclusive lock to
2194 : * detect whether a garbage collection occurred (and removed this entry).
2195 : */
static bool
qtext_store(const char *query, int query_len,
			Size *query_offset, int *gc_count)
{
	Size		off;
	int			fd;

	/*
	 * We use a spinlock to protect extent/n_writers/gc_count, so that
	 * multiple processes may execute this function concurrently.
	 */
	{
		volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;

		SpinLockAcquire(&s->mutex);
		off = s->extent;
		s->extent += query_len + 1; /* +1 for the terminating NUL byte */
		s->n_writers++;
		if (gc_count)
			*gc_count = s->gc_count;
		SpinLockRelease(&s->mutex);
	}

	*query_offset = off;

	/*
	 * Don't allow the file to grow larger than what qtext_load_file can
	 * (theoretically) handle.  This has been seen to be reachable on 32-bit
	 * platforms.
	 */
	if (unlikely(query_len >= MaxAllocHugeSize - off))
	{
		errno = EFBIG;			/* not quite right, but it'll do */
		fd = -1;
		goto error;
	}

	/* Now write the data into the successfully-reserved part of the file */
	fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDWR | O_CREAT | PG_BINARY);
	if (fd < 0)
		goto error;

	/* pwrite at our reserved offset, so concurrent writers don't collide */
	if (pg_pwrite(fd, query, query_len, off) != query_len)
		goto error;
	if (pg_pwrite(fd, "\0", 1, off + query_len) != 1)
		goto error;

	CloseTransientFile(fd);

	/* Mark our write complete */
	{
		volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;

		SpinLockAcquire(&s->mutex);
		s->n_writers--;
		SpinLockRelease(&s->mutex);
	}

	return true;

error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_TEXT_FILE)));

	if (fd >= 0)
		CloseTransientFile(fd);

	/* Mark our write complete (must decrement n_writers even on failure) */
	{
		volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;

		SpinLockAcquire(&s->mutex);
		s->n_writers--;
		SpinLockRelease(&s->mutex);
	}

	return false;
}
2276 :
2277 : /*
2278 : * Read the external query text file into a malloc'd buffer.
2279 : *
2280 : * Returns NULL (without throwing an error) if unable to read, eg
2281 : * file not there or insufficient memory.
2282 : *
2283 : * On success, the buffer size is also returned into *buffer_size.
2284 : *
2285 : * This can be called without any lock on pgss->lock, but in that case
2286 : * the caller is responsible for verifying that the result is sane.
2287 : */
2288 : static char *
2289 122 : qtext_load_file(Size *buffer_size)
2290 : {
2291 : char *buf;
2292 : int fd;
2293 : struct stat stat;
2294 : Size nread;
2295 :
2296 122 : fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDONLY | PG_BINARY);
2297 122 : if (fd < 0)
2298 : {
2299 0 : if (errno != ENOENT)
2300 0 : ereport(LOG,
2301 : (errcode_for_file_access(),
2302 : errmsg("could not read file \"%s\": %m",
2303 : PGSS_TEXT_FILE)));
2304 0 : return NULL;
2305 : }
2306 :
2307 : /* Get file length */
2308 122 : if (fstat(fd, &stat))
2309 : {
2310 0 : ereport(LOG,
2311 : (errcode_for_file_access(),
2312 : errmsg("could not stat file \"%s\": %m",
2313 : PGSS_TEXT_FILE)));
2314 0 : CloseTransientFile(fd);
2315 0 : return NULL;
2316 : }
2317 :
2318 : /* Allocate buffer; beware that off_t might be wider than size_t */
2319 122 : if (stat.st_size <= MaxAllocHugeSize)
2320 122 : buf = (char *) malloc(stat.st_size);
2321 : else
2322 0 : buf = NULL;
2323 122 : if (buf == NULL)
2324 : {
2325 0 : ereport(LOG,
2326 : (errcode(ERRCODE_OUT_OF_MEMORY),
2327 : errmsg("out of memory"),
2328 : errdetail("Could not allocate enough memory to read file \"%s\".",
2329 : PGSS_TEXT_FILE)));
2330 0 : CloseTransientFile(fd);
2331 0 : return NULL;
2332 : }
2333 :
2334 : /*
2335 : * OK, slurp in the file. Windows fails if we try to read more than
2336 : * INT_MAX bytes at once, and other platforms might not like that either,
2337 : * so read a very large file in 1GB segments.
2338 : */
2339 122 : nread = 0;
2340 242 : while (nread < stat.st_size)
2341 : {
2342 120 : int toread = Min(1024 * 1024 * 1024, stat.st_size - nread);
2343 :
2344 : /*
2345 : * If we get a short read and errno doesn't get set, the reason is
2346 : * probably that garbage collection truncated the file since we did
2347 : * the fstat(), so we don't log a complaint --- but we don't return
2348 : * the data, either, since it's most likely corrupt due to concurrent
2349 : * writes from garbage collection.
2350 : */
2351 120 : errno = 0;
2352 120 : if (read(fd, buf + nread, toread) != toread)
2353 : {
2354 0 : if (errno)
2355 0 : ereport(LOG,
2356 : (errcode_for_file_access(),
2357 : errmsg("could not read file \"%s\": %m",
2358 : PGSS_TEXT_FILE)));
2359 0 : free(buf);
2360 0 : CloseTransientFile(fd);
2361 0 : return NULL;
2362 : }
2363 120 : nread += toread;
2364 : }
2365 :
2366 122 : if (CloseTransientFile(fd) != 0)
2367 0 : ereport(LOG,
2368 : (errcode_for_file_access(),
2369 : errmsg("could not close file \"%s\": %m", PGSS_TEXT_FILE)));
2370 :
2371 122 : *buffer_size = nread;
2372 122 : return buf;
2373 : }
2374 :
2375 : /*
2376 : * Locate a query text in the file image previously read by qtext_load_file().
2377 : *
2378 : * We validate the given offset/length, and return NULL if bogus. Otherwise,
2379 : * the result points to a null-terminated string within the buffer.
2380 : */
2381 : static char *
2382 97756 : qtext_fetch(Size query_offset, int query_len,
2383 : char *buffer, Size buffer_size)
2384 : {
2385 : /* File read failed? */
2386 97756 : if (buffer == NULL)
2387 0 : return NULL;
2388 : /* Bogus offset/length? */
2389 97756 : if (query_len < 0 ||
2390 97756 : query_offset + query_len >= buffer_size)
2391 0 : return NULL;
2392 : /* As a further sanity check, make sure there's a trailing null */
2393 97756 : if (buffer[query_offset + query_len] != '\0')
2394 0 : return NULL;
2395 : /* Looks OK */
2396 97756 : return buffer + query_offset;
2397 : }
2398 :
2399 : /*
2400 : * Do we need to garbage-collect the external query text file?
2401 : *
2402 : * Caller should hold at least a shared lock on pgss->lock.
2403 : */
2404 : static bool
2405 49918 : need_gc_qtexts(void)
2406 : {
2407 : Size extent;
2408 :
2409 : /* Read shared extent pointer */
2410 : {
2411 49918 : volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
2412 :
2413 49918 : SpinLockAcquire(&s->mutex);
2414 49918 : extent = s->extent;
2415 49918 : SpinLockRelease(&s->mutex);
2416 : }
2417 :
2418 : /*
2419 : * Don't proceed if file does not exceed 512 bytes per possible entry.
2420 : *
2421 : * Here and in the next test, 32-bit machines have overflow hazards if
2422 : * pgss_max and/or mean_query_len are large. Force the multiplications
2423 : * and comparisons to be done in uint64 arithmetic to forestall trouble.
2424 : */
2425 49918 : if ((uint64) extent < (uint64) 512 * pgss_max)
2426 49918 : return false;
2427 :
2428 : /*
2429 : * Don't proceed if file is less than about 50% bloat. Nothing can or
2430 : * should be done in the event of unusually large query texts accounting
2431 : * for file's large size. We go to the trouble of maintaining the mean
2432 : * query length in order to prevent garbage collection from thrashing
2433 : * uselessly.
2434 : */
2435 0 : if ((uint64) extent < (uint64) pgss->mean_query_len * pgss_max * 2)
2436 0 : return false;
2437 :
2438 0 : return true;
2439 : }
2440 :
2441 : /*
2442 : * Garbage-collect orphaned query texts in external file.
2443 : *
2444 : * This won't be called often in the typical case, since it's likely that
2445 : * there won't be too much churn, and besides, a similar compaction process
2446 : * occurs when serializing to disk at shutdown or as part of resetting.
2447 : * Despite this, it seems prudent to plan for the edge case where the file
2448 : * becomes unreasonably large, with no other method of compaction likely to
2449 : * occur in the foreseeable future.
2450 : *
2451 : * The caller must hold an exclusive lock on pgss->lock.
2452 : *
2453 : * At the first sign of trouble we unlink the query text file to get a clean
2454 : * slate (although existing statistics are retained), rather than risk
2455 : * thrashing by allowing the same problem case to recur indefinitely.
2456 : */
static void
gc_qtexts(void)
{
	char	   *qbuffer;
	Size		qbuffer_size;
	FILE	   *qfile = NULL;
	HASH_SEQ_STATUS hash_seq;
	pgssEntry  *entry;
	Size		extent;
	int			nentries;

	/*
	 * When called from pgss_store, some other session might have proceeded
	 * with garbage collection in the no-lock-held interim of lock strength
	 * escalation.  Check once more that this is actually necessary.
	 */
	if (!need_gc_qtexts())
		return;

	/*
	 * Load the old texts file.  If we fail (out of memory, for instance),
	 * invalidate query texts.  Hopefully this is rare.  It might seem better
	 * to leave things alone on an OOM failure, but the problem is that the
	 * file is only going to get bigger; hoping for a future non-OOM result is
	 * risky and can easily lead to complete denial of service.
	 */
	qbuffer = qtext_load_file(&qbuffer_size);
	if (qbuffer == NULL)
		goto gc_fail;

	/*
	 * We overwrite the query texts file in place, so as to reduce the risk of
	 * an out-of-disk-space failure.  Since the file is guaranteed not to get
	 * larger, this should always work on traditional filesystems; though we
	 * could still lose on copy-on-write filesystems.
	 */
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not write file \"%s\": %m",
						PGSS_TEXT_FILE)));
		goto gc_fail;
	}

	extent = 0;
	nentries = 0;

	/* Copy each entry's still-referenced text to the front of the file. */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		int			query_len = entry->query_len;
		char	   *qry = qtext_fetch(entry->query_offset,
									  query_len,
									  qbuffer,
									  qbuffer_size);

		if (qry == NULL)
		{
			/* Trouble ... drop the text (query_len = -1 marks it dropped) */
			entry->query_offset = 0;
			entry->query_len = -1;
			/* entry will not be counted in mean query length computation */
			continue;
		}

		/* query_len + 1 includes the text's NUL terminator */
		if (fwrite(qry, 1, query_len + 1, qfile) != query_len + 1)
		{
			ereport(LOG,
					(errcode_for_file_access(),
					 errmsg("could not write file \"%s\": %m",
							PGSS_TEXT_FILE)));
			/* must terminate the seq scan before bailing out of it */
			hash_seq_term(&hash_seq);
			goto gc_fail;
		}

		entry->query_offset = extent;
		extent += query_len + 1;
		nentries++;
	}

	/*
	 * Truncate away any now-unused space.  If this fails for some odd reason,
	 * we log it, but there's no need to fail.
	 */
	if (ftruncate(fileno(qfile), extent) != 0)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not truncate file \"%s\": %m",
						PGSS_TEXT_FILE)));

	if (FreeFile(qfile))
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not write file \"%s\": %m",
						PGSS_TEXT_FILE)));
		/* clear qfile so gc_fail doesn't try to FreeFile it again */
		qfile = NULL;
		goto gc_fail;
	}

	elog(DEBUG1, "pgss gc of queries file shrunk size from %zu to %zu",
		 pgss->extent, extent);

	/* Reset the shared extent pointer */
	pgss->extent = extent;

	/*
	 * Also update the mean query length, to be sure that need_gc_qtexts()
	 * won't still think we have a problem.
	 */
	if (nentries > 0)
		pgss->mean_query_len = extent / nentries;
	else
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	free(qbuffer);

	/*
	 * OK, count a garbage collection cycle.  (Note: even though we have
	 * exclusive lock on pgss->lock, we must take pgss->mutex for this, since
	 * other processes may examine gc_count while holding only the mutex.
	 * Also, we have to advance the count *after* we've rewritten the file,
	 * else other processes might not realize they read a stale file.)
	 */
	record_gc_qtexts();

	return;

gc_fail:
	/* clean up resources */
	if (qfile)
		FreeFile(qfile);
	free(qbuffer);

	/*
	 * Since the contents of the external file are now uncertain, mark all
	 * hashtable entries as having invalid texts.
	 */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		entry->query_offset = 0;
		entry->query_len = -1;
	}

	/*
	 * Destroy the query text file and create a new, empty one
	 */
	(void) unlink(PGSS_TEXT_FILE);
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not recreate file \"%s\": %m",
						PGSS_TEXT_FILE)));
	else
		FreeFile(qfile);

	/* Reset the shared extent pointer */
	pgss->extent = 0;

	/* Reset mean_query_len to match the new state */
	pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	/*
	 * Bump the GC count even though we failed.
	 *
	 * This is needed to make concurrent readers of file without any lock on
	 * pgss->lock notice existence of new version of file.  Once readers
	 * subsequently observe a change in GC count with pgss->lock held, that
	 * forces a safe reopen of file.  Writers also require that we bump here,
	 * of course.  (As required by locking protocol, readers and writers don't
	 * trust earlier file contents until gc_count is found unchanged after
	 * pgss->lock acquired in shared or exclusive mode respectively.)
	 */
	record_gc_qtexts();
}
2636 :
/*
 * Reset a single hashtable entry "e" (no-op if e is NULL).
 *
 * With minmax_only, only the per-kind min/max timings are zeroed and the
 * entry's minmax_stats_since is stamped; otherwise the entry is removed
 * from the hashtable outright and num_remove is bumped.
 *
 * NB: this macro references variables from the caller's scope
 * (minmax_only, stats_reset, num_remove, pgss_hash); it is intended for
 * use only inside entry_reset().
 */
#define SINGLE_ENTRY_RESET(e) \
if (e) { \
	if (minmax_only) { \
		/* When requested reset only min/max statistics of an entry */ \
		for (int kind = 0; kind < PGSS_NUMKIND; kind++) \
		{ \
			e->counters.max_time[kind] = 0; \
			e->counters.min_time[kind] = 0; \
		} \
		e->minmax_stats_since = stats_reset; \
	} \
	else \
	{ \
		/* Remove the key otherwise */ \
		hash_search(pgss_hash, &e->key, HASH_REMOVE, NULL); \
		num_remove++; \
	} \
}
2655 :
2656 : /*
2657 : * Reset entries corresponding to parameters passed.
2658 : */
static TimestampTz
entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)
{
	HASH_SEQ_STATUS hash_seq;
	pgssEntry  *entry;
	FILE	   *qfile;
	long		num_entries;
	long		num_remove = 0;
	pgssHashKey key;
	TimestampTz stats_reset;

	/* Shared state exists only if the library was preloaded. */
	if (!pgss || !pgss_hash)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));

	/* Exclusive lock: we may remove entries and rewrite the text file. */
	LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
	num_entries = hash_get_num_entries(pgss_hash);

	stats_reset = GetCurrentTimestamp();

	if (userid != 0 && dbid != 0 && queryid != UINT64CONST(0))
	{
		/* If all the parameters are available, use the fast path. */
		memset(&key, 0, sizeof(pgssHashKey));
		key.userid = userid;
		key.dbid = dbid;
		key.queryid = queryid;

		/*
		 * Reset the entry if it exists, starting with the non-top-level
		 * entry.
		 */
		key.toplevel = false;
		entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);

		SINGLE_ENTRY_RESET(entry);

		/* Also reset the top-level entry if it exists. */
		key.toplevel = true;
		entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);

		SINGLE_ENTRY_RESET(entry);
	}
	else if (userid != 0 || dbid != 0 || queryid != UINT64CONST(0))
	{
		/* Reset entries corresponding to valid parameters. */
		hash_seq_init(&hash_seq, pgss_hash);
		while ((entry = hash_seq_search(&hash_seq)) != NULL)
		{
			/* zero means "any value" for that key component */
			if ((!userid || entry->key.userid == userid) &&
				(!dbid || entry->key.dbid == dbid) &&
				(!queryid || entry->key.queryid == queryid))
			{
				SINGLE_ENTRY_RESET(entry);
			}
		}
	}
	else
	{
		/* Reset all entries. */
		hash_seq_init(&hash_seq, pgss_hash);
		while ((entry = hash_seq_search(&hash_seq)) != NULL)
		{
			SINGLE_ENTRY_RESET(entry);
		}
	}

	/* All entries are removed? (If not, keep global stats and query file.) */
	if (num_entries != num_remove)
		goto release_lock;

	/*
	 * Reset global statistics for pg_stat_statements since all entries are
	 * removed.
	 */
	{
		volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;

		SpinLockAcquire(&s->mutex);
		s->stats.dealloc = 0;
		s->stats.stats_reset = stats_reset;
		SpinLockRelease(&s->mutex);
	}

	/*
	 * Write new empty query file, perhaps even creating a new one to recover
	 * if the file was missing.
	 */
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not create file \"%s\": %m",
						PGSS_TEXT_FILE)));
		goto done;
	}

	/* If ftruncate fails, log it, but it's not a fatal problem */
	if (ftruncate(fileno(qfile), 0) != 0)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not truncate file \"%s\": %m",
						PGSS_TEXT_FILE)));

	FreeFile(qfile);

done:
	pgss->extent = 0;
	/* This counts as a query text garbage collection for our purposes */
	record_gc_qtexts();

release_lock:
	LWLockRelease(pgss->lock);

	return stats_reset;
}
2777 :
2778 : /*
2779 : * Generate a normalized version of the query string that will be used to
2780 : * represent all similar queries.
2781 : *
2782 : * Note that the normalized representation may well vary depending on
2783 : * just which "equivalent" query is used to create the hashtable entry.
2784 : * We assume this is OK.
2785 : *
2786 : * If query_loc > 0, then "query" has been advanced by that much compared to
2787 : * the original string start, so we need to translate the provided locations
2788 : * to compensate. (This lets us avoid re-scanning statements before the one
2789 : * of interest, so it's worth doing.)
2790 : *
2791 : * *query_len_p contains the input string length, and is updated with
2792 : * the result string length on exit. The resulting string might be longer
2793 : * or shorter depending on what happens with replacement of constants.
2794 : *
2795 : * Returns a palloc'd string.
2796 : */
2797 : static char *
2798 17940 : generate_normalized_query(JumbleState *jstate, const char *query,
2799 : int query_loc, int *query_len_p)
2800 : {
2801 : char *norm_query;
2802 17940 : int query_len = *query_len_p;
2803 : int i,
2804 : norm_query_buflen, /* Space allowed for norm_query */
2805 : len_to_wrt, /* Length (in bytes) to write */
2806 17940 : quer_loc = 0, /* Source query byte location */
2807 17940 : n_quer_loc = 0, /* Normalized query byte location */
2808 17940 : last_off = 0, /* Offset from start for previous tok */
2809 17940 : last_tok_len = 0; /* Length (in bytes) of that tok */
2810 :
2811 : /*
2812 : * Get constants' lengths (core system only gives us locations). Note
2813 : * this also ensures the items are sorted by location.
2814 : */
2815 17940 : fill_in_constant_lengths(jstate, query, query_loc);
2816 :
2817 : /*
2818 : * Allow for $n symbols to be longer than the constants they replace.
2819 : * Constants must take at least one byte in text form, while a $n symbol
2820 : * certainly isn't more than 11 bytes, even if n reaches INT_MAX. We
2821 : * could refine that limit based on the max value of n for the current
2822 : * query, but it hardly seems worth any extra effort to do so.
2823 : */
2824 17940 : norm_query_buflen = query_len + jstate->clocations_count * 10;
2825 :
2826 : /* Allocate result buffer */
2827 17940 : norm_query = palloc(norm_query_buflen + 1);
2828 :
2829 74944 : for (i = 0; i < jstate->clocations_count; i++)
2830 : {
2831 : int off, /* Offset from start for cur tok */
2832 : tok_len; /* Length (in bytes) of that tok */
2833 :
2834 57004 : off = jstate->clocations[i].location;
2835 : /* Adjust recorded location if we're dealing with partial string */
2836 57004 : off -= query_loc;
2837 :
2838 57004 : tok_len = jstate->clocations[i].length;
2839 :
2840 57004 : if (tok_len < 0)
2841 396 : continue; /* ignore any duplicates */
2842 :
2843 : /* Copy next chunk (what precedes the next constant) */
2844 56608 : len_to_wrt = off - last_off;
2845 56608 : len_to_wrt -= last_tok_len;
2846 :
2847 : Assert(len_to_wrt >= 0);
2848 56608 : memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
2849 56608 : n_quer_loc += len_to_wrt;
2850 :
2851 : /* And insert a param symbol in place of the constant token */
2852 113216 : n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d",
2853 56608 : i + 1 + jstate->highest_extern_param_id);
2854 :
2855 56608 : quer_loc = off + tok_len;
2856 56608 : last_off = off;
2857 56608 : last_tok_len = tok_len;
2858 : }
2859 :
2860 : /*
2861 : * We've copied up until the last ignorable constant. Copy over the
2862 : * remaining bytes of the original query string.
2863 : */
2864 17940 : len_to_wrt = query_len - quer_loc;
2865 :
2866 : Assert(len_to_wrt >= 0);
2867 17940 : memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
2868 17940 : n_quer_loc += len_to_wrt;
2869 :
2870 : Assert(n_quer_loc <= norm_query_buflen);
2871 17940 : norm_query[n_quer_loc] = '\0';
2872 :
2873 17940 : *query_len_p = n_quer_loc;
2874 17940 : return norm_query;
2875 : }
2876 :
2877 : /*
2878 : * Given a valid SQL string and an array of constant-location records,
2879 : * fill in the textual lengths of those constants.
2880 : *
2881 : * The constants may use any allowed constant syntax, such as float literals,
2882 : * bit-strings, single-quoted strings and dollar-quoted strings. This is
2883 : * accomplished by using the public API for the core scanner.
2884 : *
2885 : * It is the caller's job to ensure that the string is a valid SQL statement
2886 : * with constants at the indicated locations. Since in practice the string
2887 : * has already been parsed, and the locations that the caller provides will
2888 : * have originated from within the authoritative parser, this should not be
2889 : * a problem.
2890 : *
2891 : * Duplicate constant pointers are possible, and will have their lengths
2892 : * marked as '-1', so that they are later ignored. (Actually, we assume the
2893 : * lengths were initialized as -1 to start with, and don't change them here.)
2894 : *
2895 : * If query_loc > 0, then "query" has been advanced by that much compared to
2896 : * the original string start, so we need to translate the provided locations
2897 : * to compensate. (This lets us avoid re-scanning statements before the one
2898 : * of interest, so it's worth doing.)
2899 : *
2900 : * N.B. There is an assumption that a '-' character at a Const location begins
2901 : * a negative numeric constant. This precludes there ever being another
2902 : * reason for a constant to start with a '-'.
2903 : */
static void
fill_in_constant_lengths(JumbleState *jstate, const char *query,
						 int query_loc)
{
	LocationLen *locs;
	core_yyscan_t yyscanner;
	core_yy_extra_type yyextra;
	core_YYSTYPE yylval;
	YYLTYPE		yylloc;
	int			last_loc = -1;	/* location of previously-processed constant */
	int			i;

	/*
	 * Sort the records by location so that we can process them in order while
	 * scanning the query text.
	 */
	if (jstate->clocations_count > 1)
		qsort(jstate->clocations, jstate->clocations_count,
			  sizeof(LocationLen), comp_location);
	locs = jstate->clocations;

	/* initialize the flex scanner --- should match raw_parser() */
	yyscanner = scanner_init(query,
							 &yyextra,
							 &ScanKeywords,
							 ScanKeywordTokens);

	/* we don't want to re-emit any escape string warnings */
	yyextra.escape_string_warning = false;

	/* Search for each constant, in sequence */
	for (i = 0; i < jstate->clocations_count; i++)
	{
		int			loc = locs[i].location;
		int			tok;

		/* Adjust recorded location if we're dealing with partial string */
		loc -= query_loc;

		Assert(loc >= 0);

		/* Since locs[] is sorted, an equal-or-earlier location is a repeat */
		if (loc <= last_loc)
			continue;			/* Duplicate constant, ignore */

		/*
		 * Lex tokens until we find the desired constant.  The scanner keeps
		 * its position across outer-loop iterations, so each constant is
		 * reached by continuing from where the previous search stopped.
		 */
		for (;;)
		{
			tok = core_yylex(&yylval, &yylloc, yyscanner);

			/* We should not hit end-of-string, but if we do, behave sanely */
			if (tok == 0)
				break;			/* out of inner for-loop */

			/*
			 * We should find the token position exactly, but if we somehow
			 * run past it, work with that.
			 */
			if (yylloc >= loc)
			{
				if (query[loc] == '-')
				{
					/*
					 * It's a negative value - this is the one and only case
					 * where we replace more than a single token.
					 *
					 * Do not compensate for the core system's special-case
					 * adjustment of location to that of the leading '-'
					 * operator in the event of a negative constant. It is
					 * also useful for our purposes to start from the minus
					 * symbol. In this way, queries like "select * from foo
					 * where bar = 1" and "select * from foo where bar = -2"
					 * will have identical normalized query strings.
					 */
					tok = core_yylex(&yylval, &yylloc, yyscanner);
					if (tok == 0)
						break;	/* out of inner for-loop */
				}

				/*
				 * We now rely on the assumption that flex has placed a zero
				 * byte after the text of the current token in scanbuf.
				 */
				locs[i].length = strlen(yyextra.scanbuf + loc);
				break;			/* out of inner for-loop */
			}
		}

		/* If we hit end-of-string, give up, leaving remaining lengths -1 */
		if (tok == 0)
			break;

		last_loc = loc;
	}

	scanner_finish(yyscanner);
}
3000 :
3001 : /*
3002 : * comp_location: comparator for qsorting LocationLen structs by location
3003 : */
3004 : static int
3005 69136 : comp_location(const void *a, const void *b)
3006 : {
3007 69136 : int l = ((const LocationLen *) a)->location;
3008 69136 : int r = ((const LocationLen *) b)->location;
3009 :
3010 69136 : if (l < r)
3011 46666 : return -1;
3012 22470 : else if (l > r)
3013 22060 : return +1;
3014 : else
3015 410 : return 0;
3016 : }
|