Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * pg_stat_statements.c
4 : * Track statement planning and execution times as well as resource
5 : * usage across a whole database cluster.
6 : *
7 : * Execution costs are totaled for each distinct source query, and kept in
8 : * a shared hashtable. (We track only as many distinct queries as will fit
9 : * in the designated amount of shared memory.)
10 : *
11 : * Starting in Postgres 9.2, this module normalized query entries. As of
12 : * Postgres 14, the normalization is done by the core if compute_query_id is
13 : * enabled, or optionally by third-party modules.
14 : *
15 : * To facilitate presenting entries to users, we create "representative" query
16 : * strings in which constants are replaced with parameter symbols ($n), to
17 : * make it clearer what a normalized entry can represent. To save on shared
18 : * memory, and to avoid having to truncate oversized query strings, we store
19 : * these strings in a temporary external query-texts file. Offsets into this
20 : * file are kept in shared memory.
21 : *
22 : * Note about locking issues: to create or delete an entry in the shared
23 : * hashtable, one must hold pgss->lock exclusively. Modifying any field
24 : * in an entry except the counters requires the same. To look up an entry,
25 : * one must hold the lock shared. To read or update the counters within
26 : * an entry, one must hold the lock shared or exclusive (so the entry doesn't
27 : * disappear!) and also take the entry's mutex spinlock.
28 : * The shared state variable pgss->extent (the next free spot in the external
29 : * query-text file) should be accessed only while holding either the
30 : * pgss->mutex spinlock, or exclusive lock on pgss->lock. We use the mutex to
31 : * allow reserving file space while holding only shared lock on pgss->lock.
32 : * Rewriting the entire external query-text file, eg for garbage collection,
33 : * requires holding pgss->lock exclusively; this allows individual entries
34 : * in the file to be read or written while holding only shared lock.
35 : *
36 : *
37 : * Copyright (c) 2008-2024, PostgreSQL Global Development Group
38 : *
39 : * IDENTIFICATION
40 : * contrib/pg_stat_statements/pg_stat_statements.c
41 : *
42 : *-------------------------------------------------------------------------
43 : */
44 : #include "postgres.h"
45 :
46 : #include <math.h>
47 : #include <sys/stat.h>
48 : #include <unistd.h>
49 :
50 : #include "access/parallel.h"
51 : #include "catalog/pg_authid.h"
52 : #include "common/int.h"
53 : #include "executor/instrument.h"
54 : #include "funcapi.h"
55 : #include "jit/jit.h"
56 : #include "mb/pg_wchar.h"
57 : #include "miscadmin.h"
58 : #include "nodes/queryjumble.h"
59 : #include "optimizer/planner.h"
60 : #include "parser/analyze.h"
61 : #include "parser/scanner.h"
62 : #include "pgstat.h"
63 : #include "storage/fd.h"
64 : #include "storage/ipc.h"
65 : #include "storage/lwlock.h"
66 : #include "storage/shmem.h"
67 : #include "storage/spin.h"
68 : #include "tcop/utility.h"
69 : #include "utils/acl.h"
70 : #include "utils/builtins.h"
71 : #include "utils/memutils.h"
72 : #include "utils/timestamp.h"
73 :
74 16 : PG_MODULE_MAGIC;
75 :
/* Location of permanent stats file (valid when database is shut down) */
#define PGSS_DUMP_FILE	PGSTAT_STAT_PERMANENT_DIRECTORY "/pg_stat_statements.stat"

/*
 * Location of external query text file.  Unlike PGSS_DUMP_FILE, this one
 * exists only while the server is running (it is unlinked at startup and
 * shutdown; see pgss_shmem_startup/pgss_shmem_shutdown).
 */
#define PGSS_TEXT_FILE	PG_STAT_TMP_DIR "/pgss_query_texts.stat"

/* Magic number identifying the stats file format */
static const uint32 PGSS_FILE_HEADER = 0x20220408;

/* PostgreSQL major version number, changes in which invalidate all entries */
static const uint32 PGSS_PG_MAJOR_VERSION = PG_VERSION_NUM / 100;

/* XXX: Should USAGE_EXEC reflect execution time and/or buffer usage? */
#define USAGE_EXEC(duration)	(1.0)
#define USAGE_INIT				(1.0)	/* including initial planning */
#define ASSUMED_MEDIAN_INIT		(10.0)	/* initial assumed median usage */
#define ASSUMED_LENGTH_INIT		1024	/* initial assumed mean query length */
#define USAGE_DECREASE_FACTOR	(0.99)	/* decreased every entry_dealloc */
#define STICKY_DECREASE_FACTOR	(0.50)	/* factor for sticky entries */
#define USAGE_DEALLOC_PERCENT	5	/* free this % of entries at once */
/* A "sticky" entry has been allocated but never yet planned or executed */
#define IS_STICKY(c)	((c.calls[PGSS_PLAN] + c.calls[PGSS_EXEC]) == 0)
99 :
100 : /*
101 : * Extension version number, for supporting older extension versions' objects
102 : */
/*
 * Extension version number, for supporting older extension versions' objects.
 * Passed as api_version to pg_stat_statements_internal() to select the
 * result-set shape expected by each SQL-level function version.
 */
typedef enum pgssVersion
{
	PGSS_V1_0 = 0,
	PGSS_V1_1,
	PGSS_V1_2,
	PGSS_V1_3,
	PGSS_V1_8,
	PGSS_V1_9,
	PGSS_V1_10,
	PGSS_V1_11,
	PGSS_V1_12,
} pgssVersion;
115 :
typedef enum pgssStoreKind
{
	/* Used by pgss_store() when creating a normalized-text-only entry */
	PGSS_INVALID = -1,

	/*
	 * PGSS_PLAN and PGSS_EXEC must be respectively 0 and 1 as they're used to
	 * reference the underlying values in the arrays in the Counters struct,
	 * and this order is required in pg_stat_statements_internal().
	 */
	PGSS_PLAN = 0,
	PGSS_EXEC,
} pgssStoreKind;

/* Number of array slots needed for per-kind counters (plan + exec) */
#define PGSS_NUMKIND	(PGSS_EXEC + 1)
130 :
131 : /*
132 : * Hashtable key that defines the identity of a hashtable entry. We separate
133 : * queries by user and by database even if they are otherwise identical.
134 : *
135 : * If you add a new key to this struct, make sure to teach pgss_store() to
136 : * zero the padding bytes. Otherwise, things will break, because pgss_hash is
137 : * created using HASH_BLOBS, and thus tag_hash is used to hash this.
138 :
139 : */
140 : typedef struct pgssHashKey
141 : {
142 : Oid userid; /* user OID */
143 : Oid dbid; /* database OID */
144 : uint64 queryid; /* query identifier */
145 : bool toplevel; /* query executed at top level */
146 : } pgssHashKey;
147 :
148 : /*
149 : * The actual stats counters kept within pgssEntry.
150 : */
/*
 * The actual stats counters kept within pgssEntry.
 *
 * Per the locking rules in the file header, these fields are read and
 * updated while holding the entry's mutex spinlock (plus a shared or
 * exclusive hold on pgss->lock to keep the entry alive).
 */
typedef struct Counters
{
	int64		calls[PGSS_NUMKIND];	/* # of times planned/executed */
	double		total_time[PGSS_NUMKIND];	/* total planning/execution time,
											 * in msec */
	double		min_time[PGSS_NUMKIND]; /* minimum planning/execution time in
										 * msec since min/max reset */
	double		max_time[PGSS_NUMKIND]; /* maximum planning/execution time in
										 * msec since min/max reset */
	double		mean_time[PGSS_NUMKIND];	/* mean planning/execution time in
											 * msec */
	double		sum_var_time[PGSS_NUMKIND]; /* sum of variances in
											 * planning/execution time in msec */
	int64		rows;			/* total # of retrieved or affected rows */
	int64		shared_blks_hit;	/* # of shared buffer hits */
	int64		shared_blks_read;	/* # of shared disk blocks read */
	int64		shared_blks_dirtied;	/* # of shared disk blocks dirtied */
	int64		shared_blks_written;	/* # of shared disk blocks written */
	int64		local_blks_hit; /* # of local buffer hits */
	int64		local_blks_read;	/* # of local disk blocks read */
	int64		local_blks_dirtied; /* # of local disk blocks dirtied */
	int64		local_blks_written; /* # of local disk blocks written */
	int64		temp_blks_read; /* # of temp blocks read */
	int64		temp_blks_written;	/* # of temp blocks written */
	double		shared_blk_read_time;	/* time spent reading shared blocks,
										 * in msec */
	double		shared_blk_write_time;	/* time spent writing shared blocks,
										 * in msec */
	double		local_blk_read_time;	/* time spent reading local blocks, in
										 * msec */
	double		local_blk_write_time;	/* time spent writing local blocks, in
										 * msec */
	double		temp_blk_read_time; /* time spent reading temp blocks, in msec */
	double		temp_blk_write_time;	/* time spent writing temp blocks, in
										 * msec */
	double		usage;			/* usage factor */
	int64		wal_records;	/* # of WAL records generated */
	int64		wal_fpi;		/* # of WAL full page images generated */
	uint64		wal_bytes;		/* total amount of WAL generated in bytes */
	int64		jit_functions;	/* total number of JIT functions emitted */
	double		jit_generation_time;	/* total time to generate jit code */
	int64		jit_inlining_count; /* number of times inlining time has been
									 * > 0 */
	double		jit_deform_time;	/* total time to deform tuples in jit code */
	int64		jit_deform_count;	/* number of times deform time has been >
									 * 0 */

	double		jit_inlining_time;	/* total time to inline jit code */
	int64		jit_optimization_count; /* number of times optimization time
										 * has been > 0 */
	double		jit_optimization_time;	/* total time to optimize jit code */
	int64		jit_emission_count; /* number of times emission time has been
									 * > 0 */
	double		jit_emission_time;	/* total time to emit jit code */
	int64		parallel_workers_to_launch; /* # of parallel workers planned
											 * to be launched */
	int64		parallel_workers_launched;	/* # of parallel workers actually
											 * launched */
} Counters;
210 :
211 : /*
212 : * Global statistics for pg_stat_statements
213 : */
/*
 * Global statistics for pg_stat_statements, exposed via
 * pg_stat_statements_info().  Protected by pgss->mutex (see pgssSharedState).
 */
typedef struct pgssGlobalStats
{
	int64		dealloc;		/* # of times entries were deallocated */
	TimestampTz stats_reset;	/* timestamp with all stats reset */
} pgssGlobalStats;
219 :
220 : /*
221 : * Statistics per statement
222 : *
223 : * Note: in event of a failure in garbage collection of the query text file,
224 : * we reset query_offset to zero and query_len to -1. This will be seen as
225 : * an invalid state by qtext_fetch().
226 : */
/*
 * Statistics per statement
 *
 * Note: in event of a failure in garbage collection of the query text file,
 * we reset query_offset to zero and query_len to -1.  This will be seen as
 * an invalid state by qtext_fetch().
 */
typedef struct pgssEntry
{
	pgssHashKey key;			/* hash key of entry - MUST BE FIRST */
	Counters	counters;		/* the statistics for this query */
	Size		query_offset;	/* query text offset in external file */
	int			query_len;		/* # of valid bytes in query string, or -1 */
	int			encoding;		/* query text encoding */
	TimestampTz stats_since;	/* timestamp of entry allocation */
	TimestampTz minmax_stats_since; /* timestamp of last min/max values reset */
	slock_t		mutex;			/* protects the counters only */
} pgssEntry;
238 :
239 : /*
240 : * Global shared state
241 : */
/*
 * Global shared state.  See the locking discussion in the file header for
 * the rules governing lock vs. mutex usage.
 */
typedef struct pgssSharedState
{
	LWLock	   *lock;			/* protects hashtable search/modification */
	double		cur_median_usage;	/* current median usage in hashtable */
	Size		mean_query_len; /* current mean entry text length */
	slock_t		mutex;			/* protects following fields only: */
	Size		extent;			/* current extent of query file */
	int			n_writers;		/* number of active writers to query file */
	int			gc_count;		/* query file garbage collection cycle count */
	pgssGlobalStats stats;		/* global statistics for pgss */
} pgssSharedState;
253 :
/*---- Local variables ----*/

/* Current nesting depth of planner/ExecutorRun/ProcessUtility calls */
static int	nesting_level = 0;

/*
 * Saved hook values in case of unload.  Each pgss_* hook chains to the
 * previous hook (if any) before or after doing its own work.
 */
static shmem_request_hook_type prev_shmem_request_hook = NULL;
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
static post_parse_analyze_hook_type prev_post_parse_analyze_hook = NULL;
static planner_hook_type prev_planner_hook = NULL;
static ExecutorStart_hook_type prev_ExecutorStart = NULL;
static ExecutorRun_hook_type prev_ExecutorRun = NULL;
static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
static ProcessUtility_hook_type prev_ProcessUtility = NULL;

/* Links to shared memory state (set up in pgss_shmem_startup) */
static pgssSharedState *pgss = NULL;
static HTAB *pgss_hash = NULL;
273 :
/*---- GUC variables ----*/

typedef enum
{
	PGSS_TRACK_NONE,			/* track no statements */
	PGSS_TRACK_TOP,				/* only top level statements */
	PGSS_TRACK_ALL,				/* all statements, including nested ones */
} PGSSTrackLevel;

/* Mapping of pg_stat_statements.track values, for DefineCustomEnumVariable */
static const struct config_enum_entry track_options[] =
{
	{"none", PGSS_TRACK_NONE, false},
	{"top", PGSS_TRACK_TOP, false},
	{"all", PGSS_TRACK_ALL, false},
	{NULL, 0, false}
};

static int	pgss_max = 5000;	/* max # statements to track */
static int	pgss_track = PGSS_TRACK_TOP;	/* tracking level */
static bool pgss_track_utility = true;	/* whether to track utility commands */
static bool pgss_track_planning = false;	/* whether to track planning
											 * duration */
static bool pgss_save = true;	/* whether to save stats across shutdown */
297 :
298 :
/*
 * True if we should track the statement at the given nesting level.
 * Parallel workers are always excluded: the leader accumulates their
 * statistics.
 */
#define pgss_enabled(level) \
	(!IsParallelWorker() && \
	(pgss_track == PGSS_TRACK_ALL || \
	(pgss_track == PGSS_TRACK_TOP && (level) == 0)))

/* Bump the query-text-file GC cycle counter, under the shared-state mutex */
#define record_gc_qtexts() \
	do { \
		SpinLockAcquire(&pgss->mutex); \
		pgss->gc_count++; \
		SpinLockRelease(&pgss->mutex); \
	} while(0)
310 :
311 : /*---- Function declarations ----*/
312 :
313 12 : PG_FUNCTION_INFO_V1(pg_stat_statements_reset);
314 12 : PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_7);
315 36 : PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_11);
316 0 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_2);
317 12 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_3);
318 12 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_8);
319 12 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_9);
320 12 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_10);
321 12 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_11);
322 44 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_12);
323 0 : PG_FUNCTION_INFO_V1(pg_stat_statements);
324 14 : PG_FUNCTION_INFO_V1(pg_stat_statements_info);
325 :
326 : static void pgss_shmem_request(void);
327 : static void pgss_shmem_startup(void);
328 : static void pgss_shmem_shutdown(int code, Datum arg);
329 : static void pgss_post_parse_analyze(ParseState *pstate, Query *query,
330 : JumbleState *jstate);
331 : static PlannedStmt *pgss_planner(Query *parse,
332 : const char *query_string,
333 : int cursorOptions,
334 : ParamListInfo boundParams);
335 : static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
336 : static void pgss_ExecutorRun(QueryDesc *queryDesc,
337 : ScanDirection direction,
338 : uint64 count, bool execute_once);
339 : static void pgss_ExecutorFinish(QueryDesc *queryDesc);
340 : static void pgss_ExecutorEnd(QueryDesc *queryDesc);
341 : static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
342 : bool readOnlyTree,
343 : ProcessUtilityContext context, ParamListInfo params,
344 : QueryEnvironment *queryEnv,
345 : DestReceiver *dest, QueryCompletion *qc);
346 : static void pgss_store(const char *query, uint64 queryId,
347 : int query_location, int query_len,
348 : pgssStoreKind kind,
349 : double total_time, uint64 rows,
350 : const BufferUsage *bufusage,
351 : const WalUsage *walusage,
352 : const struct JitInstrumentation *jitusage,
353 : JumbleState *jstate,
354 : int parallel_workers_to_launch,
355 : int parallel_workers_launched);
356 : static void pg_stat_statements_internal(FunctionCallInfo fcinfo,
357 : pgssVersion api_version,
358 : bool showtext);
359 : static Size pgss_memsize(void);
360 : static pgssEntry *entry_alloc(pgssHashKey *key, Size query_offset, int query_len,
361 : int encoding, bool sticky);
362 : static void entry_dealloc(void);
363 : static bool qtext_store(const char *query, int query_len,
364 : Size *query_offset, int *gc_count);
365 : static char *qtext_load_file(Size *buffer_size);
366 : static char *qtext_fetch(Size query_offset, int query_len,
367 : char *buffer, Size buffer_size);
368 : static bool need_gc_qtexts(void);
369 : static void gc_qtexts(void);
370 : static TimestampTz entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only);
371 : static char *generate_normalized_query(JumbleState *jstate, const char *query,
372 : int query_loc, int *query_len_p);
373 : static void fill_in_constant_lengths(JumbleState *jstate, const char *query,
374 : int query_loc);
375 : static int comp_location(const void *a, const void *b);
376 :
377 :
378 : /*
379 : * Module load callback
380 : */
void
_PG_init(void)
{
	/*
	 * In order to create our shared memory area, we have to be loaded via
	 * shared_preload_libraries.  If not, fall out without hooking into any of
	 * the main system.  (We don't throw error here because it seems useful to
	 * allow the pg_stat_statements functions to be created even when the
	 * module isn't active.  The functions must protect themselves against
	 * being called then, however.)
	 */
	if (!process_shared_preload_libraries_in_progress)
		return;

	/*
	 * Inform the postmaster that we want to enable query_id calculation if
	 * compute_query_id is set to auto.
	 */
	EnableQueryId();

	/*
	 * Define (or redefine) custom GUC variables.
	 */
	DefineCustomIntVariable("pg_stat_statements.max",
							"Sets the maximum number of statements tracked by pg_stat_statements.",
							NULL,
							&pgss_max,
							5000,
							100,
							INT_MAX / 2,
							PGC_POSTMASTER,
							0,
							NULL,
							NULL,
							NULL);

	DefineCustomEnumVariable("pg_stat_statements.track",
							 "Selects which statements are tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track,
							 PGSS_TRACK_TOP,
							 track_options,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.track_utility",
							 "Selects whether utility commands are tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track_utility,
							 true,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.track_planning",
							 "Selects whether planning duration is tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track_planning,
							 false,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.save",
							 "Save pg_stat_statements statistics across server shutdowns.",
							 NULL,
							 &pgss_save,
							 true,
							 PGC_SIGHUP,
							 0,
							 NULL,
							 NULL,
							 NULL);

	MarkGUCPrefixReserved("pg_stat_statements");

	/*
	 * Install hooks.  In each case we save the previous hook value so that
	 * our hook functions can chain to it.
	 */
	prev_shmem_request_hook = shmem_request_hook;
	shmem_request_hook = pgss_shmem_request;
	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = pgss_shmem_startup;
	prev_post_parse_analyze_hook = post_parse_analyze_hook;
	post_parse_analyze_hook = pgss_post_parse_analyze;
	prev_planner_hook = planner_hook;
	planner_hook = pgss_planner;
	prev_ExecutorStart = ExecutorStart_hook;
	ExecutorStart_hook = pgss_ExecutorStart;
	prev_ExecutorRun = ExecutorRun_hook;
	ExecutorRun_hook = pgss_ExecutorRun;
	prev_ExecutorFinish = ExecutorFinish_hook;
	ExecutorFinish_hook = pgss_ExecutorFinish;
	prev_ExecutorEnd = ExecutorEnd_hook;
	ExecutorEnd_hook = pgss_ExecutorEnd;
	prev_ProcessUtility = ProcessUtility_hook;
	ProcessUtility_hook = pgss_ProcessUtility;
}
486 :
487 : /*
488 : * shmem_request hook: request additional shared resources. We'll allocate or
489 : * attach to the shared resources in pgss_shmem_startup().
490 : */
491 : static void
492 14 : pgss_shmem_request(void)
493 : {
494 14 : if (prev_shmem_request_hook)
495 0 : prev_shmem_request_hook();
496 :
497 14 : RequestAddinShmemSpace(pgss_memsize());
498 14 : RequestNamedLWLockTranche("pg_stat_statements", 1);
499 14 : }
500 :
501 : /*
502 : * shmem_startup hook: allocate or attach to shared memory,
503 : * then load any pre-existing statistics from file.
504 : * Also create and load the query-texts file, which is expected to exist
505 : * (even if empty) while the module is enabled.
506 : */
static void
pgss_shmem_startup(void)
{
	bool		found;
	HASHCTL		info;
	FILE	   *file = NULL;
	FILE	   *qfile = NULL;
	uint32		header;
	int32		num;
	int32		pgver;
	int32		i;
	int			buffer_size;
	char	   *buffer = NULL;

	if (prev_shmem_startup_hook)
		prev_shmem_startup_hook();

	/* reset in case this is a restart within the postmaster */
	pgss = NULL;
	pgss_hash = NULL;

	/*
	 * Create or attach to the shared memory state, including hash table
	 */
	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

	pgss = ShmemInitStruct("pg_stat_statements",
						   sizeof(pgssSharedState),
						   &found);

	if (!found)
	{
		/* First time through ... */
		pgss->lock = &(GetNamedLWLockTranche("pg_stat_statements"))->lock;
		pgss->cur_median_usage = ASSUMED_MEDIAN_INIT;
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;
		SpinLockInit(&pgss->mutex);
		pgss->extent = 0;
		pgss->n_writers = 0;
		pgss->gc_count = 0;
		pgss->stats.dealloc = 0;
		pgss->stats.stats_reset = GetCurrentTimestamp();
	}

	info.keysize = sizeof(pgssHashKey);
	info.entrysize = sizeof(pgssEntry);
	pgss_hash = ShmemInitHash("pg_stat_statements hash",
							  pgss_max, pgss_max,
							  &info,
							  HASH_ELEM | HASH_BLOBS);

	LWLockRelease(AddinShmemInitLock);

	/*
	 * If we're in the postmaster (or a standalone backend...), set up a shmem
	 * exit hook to dump the statistics to disk.
	 */
	if (!IsUnderPostmaster)
		on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);

	/*
	 * Done if some other process already completed our initialization.
	 */
	if (found)
		return;

	/*
	 * Note: we don't bother with locks here, because there should be no other
	 * processes running when this code is reached.
	 */

	/* Unlink query text file possibly left over from crash */
	unlink(PGSS_TEXT_FILE);

	/* Allocate new query text temp file */
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
		goto write_error;

	/*
	 * If we were told not to load old statistics, we're done.  (Note we do
	 * not try to unlink any old dump file in this case.  This seems a bit
	 * questionable but it's the historical behavior.)
	 */
	if (!pgss_save)
	{
		FreeFile(qfile);
		return;
	}

	/*
	 * Attempt to load old statistics from the dump file.
	 */
	file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
	if (file == NULL)
	{
		/* ENOENT just means no previous dump exists; anything else is bad */
		if (errno != ENOENT)
			goto read_error;
		/* No existing persisted stats file, so we're done */
		FreeFile(qfile);
		return;
	}

	buffer_size = 2048;
	buffer = (char *) palloc(buffer_size);

	/* Read and validate the file header written by pgss_shmem_shutdown */
	if (fread(&header, sizeof(uint32), 1, file) != 1 ||
		fread(&pgver, sizeof(uint32), 1, file) != 1 ||
		fread(&num, sizeof(int32), 1, file) != 1)
		goto read_error;

	if (header != PGSS_FILE_HEADER ||
		pgver != PGSS_PG_MAJOR_VERSION)
		goto data_error;

	for (i = 0; i < num; i++)
	{
		pgssEntry	temp;
		pgssEntry  *entry;
		Size		query_offset;

		if (fread(&temp, sizeof(pgssEntry), 1, file) != 1)
			goto read_error;

		/* Encoding is the only field we can easily sanity-check */
		if (!PG_VALID_BE_ENCODING(temp.encoding))
			goto data_error;

		/* Resize buffer as needed (doubling, but at least query_len + 1) */
		if (temp.query_len >= buffer_size)
		{
			buffer_size = Max(buffer_size * 2, temp.query_len + 1);
			buffer = repalloc(buffer, buffer_size);
		}

		if (fread(buffer, 1, temp.query_len + 1, file) != temp.query_len + 1)
			goto read_error;

		/* Should have a trailing null, but let's make sure */
		buffer[temp.query_len] = '\0';

		/* Skip loading "sticky" entries */
		if (IS_STICKY(temp.counters))
			continue;

		/* Store the query text */
		query_offset = pgss->extent;
		if (fwrite(buffer, 1, temp.query_len + 1, qfile) != temp.query_len + 1)
			goto write_error;
		pgss->extent += temp.query_len + 1;

		/* make the hashtable entry (discards old entries if too many) */
		entry = entry_alloc(&temp.key, query_offset, temp.query_len,
							temp.encoding,
							false);

		/* copy in the actual stats */
		entry->counters = temp.counters;
		entry->stats_since = temp.stats_since;
		entry->minmax_stats_since = temp.minmax_stats_since;
	}

	/* Read global statistics for pg_stat_statements */
	if (fread(&pgss->stats, sizeof(pgssGlobalStats), 1, file) != 1)
		goto read_error;

	pfree(buffer);
	FreeFile(file);
	FreeFile(qfile);

	/*
	 * Remove the persisted stats file so it's not included in
	 * backups/replication standbys, etc.  A new file will be written on next
	 * shutdown.
	 *
	 * Note: it's okay if the PGSS_TEXT_FILE is included in a basebackup,
	 * because we remove that file on startup; it acts inversely to
	 * PGSS_DUMP_FILE, in that it is only supposed to be around when the
	 * server is running, whereas PGSS_DUMP_FILE is only supposed to be around
	 * when the server is not running.  Leaving the file creates no danger of
	 * a newly restored database having a spurious record of execution costs,
	 * which is what we're really concerned about here.
	 */
	unlink(PGSS_DUMP_FILE);

	return;

read_error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not read file \"%s\": %m",
					PGSS_DUMP_FILE)));
	goto fail;
data_error:
	ereport(LOG,
			(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
			 errmsg("ignoring invalid data in file \"%s\"",
					PGSS_DUMP_FILE)));
	goto fail;
write_error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_TEXT_FILE)));
fail:
	if (buffer)
		pfree(buffer);
	if (file)
		FreeFile(file);
	if (qfile)
		FreeFile(qfile);
	/* If possible, throw away the bogus file; ignore any error */
	unlink(PGSS_DUMP_FILE);

	/*
	 * Don't unlink PGSS_TEXT_FILE here; it should always be around while the
	 * server is running with pg_stat_statements enabled
	 */
}
726 :
727 : /*
728 : * shmem_shutdown hook: Dump statistics into file.
729 : *
730 : * Note: we don't bother with acquiring lock, because there should be no
731 : * other processes running when this is called.
732 : */
static void
pgss_shmem_shutdown(int code, Datum arg)
{
	FILE	   *file;
	char	   *qbuffer = NULL;
	Size		qbuffer_size = 0;
	HASH_SEQ_STATUS hash_seq;
	int32		num_entries;
	pgssEntry  *entry;

	/* Don't try to dump during a crash. */
	if (code)
		return;

	/* Safety check ... shouldn't get here unless shmem is set up. */
	if (!pgss || !pgss_hash)
		return;

	/* Don't dump if told not to. */
	if (!pgss_save)
		return;

	/* Write to a .tmp file first, then rename into place atomically below */
	file = AllocateFile(PGSS_DUMP_FILE ".tmp", PG_BINARY_W);
	if (file == NULL)
		goto error;

	/* Header: magic, major version, then entry count */
	if (fwrite(&PGSS_FILE_HEADER, sizeof(uint32), 1, file) != 1)
		goto error;
	if (fwrite(&PGSS_PG_MAJOR_VERSION, sizeof(uint32), 1, file) != 1)
		goto error;
	num_entries = hash_get_num_entries(pgss_hash);
	if (fwrite(&num_entries, sizeof(int32), 1, file) != 1)
		goto error;

	qbuffer = qtext_load_file(&qbuffer_size);
	if (qbuffer == NULL)
		goto error;

	/*
	 * When serializing to disk, we store query texts immediately after their
	 * entry data.  Any orphaned query texts are thereby excluded.
	 */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		int			len = entry->query_len;
		char	   *qstr = qtext_fetch(entry->query_offset, len,
									   qbuffer, qbuffer_size);

		if (qstr == NULL)
			continue;			/* Ignore any entries with bogus texts */

		if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 ||
			fwrite(qstr, 1, len + 1, file) != len + 1)
		{
			/* note: we assume hash_seq_term won't change errno */
			hash_seq_term(&hash_seq);
			goto error;
		}
	}

	/* Dump global statistics for pg_stat_statements */
	if (fwrite(&pgss->stats, sizeof(pgssGlobalStats), 1, file) != 1)
		goto error;

	free(qbuffer);
	qbuffer = NULL;

	/* FreeFile flushes; a failure here means the dump is incomplete */
	if (FreeFile(file))
	{
		file = NULL;
		goto error;
	}

	/*
	 * Rename file into place, so we atomically replace any old one.
	 */
	(void) durable_rename(PGSS_DUMP_FILE ".tmp", PGSS_DUMP_FILE, LOG);

	/* Unlink query-texts file; it's not needed while shutdown */
	unlink(PGSS_TEXT_FILE);

	return;

error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_DUMP_FILE ".tmp")));
	free(qbuffer);
	if (file)
		FreeFile(file);
	unlink(PGSS_DUMP_FILE ".tmp");
	unlink(PGSS_TEXT_FILE);
}
828 :
829 : /*
830 : * Post-parse-analysis hook: mark query with a queryId
831 : */
static void
pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate)
{
	/* Chain to any previous post_parse_analyze hook first */
	if (prev_post_parse_analyze_hook)
		prev_post_parse_analyze_hook(pstate, query, jstate);

	/* Safety check... */
	if (!pgss || !pgss_hash || !pgss_enabled(nesting_level))
		return;

	/*
	 * If it's EXECUTE, clear the queryId so that stats will accumulate for
	 * the underlying PREPARE.  But don't do this if we're not tracking
	 * utility statements, to avoid messing up another extension that might be
	 * tracking them.
	 */
	if (query->utilityStmt)
	{
		if (pgss_track_utility && IsA(query->utilityStmt, ExecuteStmt))
		{
			query->queryId = UINT64CONST(0);
			return;
		}
	}

	/*
	 * If query jumbling were able to identify any ignorable constants, we
	 * immediately create a hash table entry for the query, so that we can
	 * record the normalized form of the query string.  If there were no such
	 * constants, the normalized string would be the same as the query text
	 * anyway, so there's no need for an early entry.
	 */
	if (jstate && jstate->clocations_count > 0)
		pgss_store(pstate->p_sourcetext,
				   query->queryId,
				   query->stmt_location,
				   query->stmt_len,
				   PGSS_INVALID,	/* creates a "sticky" normalized entry */
				   0,
				   0,
				   NULL,
				   NULL,
				   NULL,
				   jstate,
				   0,
				   0);
}
879 :
880 : /*
881 : * Planner hook: forward to regular planner, but measure planning time
882 : * if needed.
883 : */
static PlannedStmt *
pgss_planner(Query *parse,
			 const char *query_string,
			 int cursorOptions,
			 ParamListInfo boundParams)
{
	PlannedStmt *result;

	/*
	 * We can't process the query if no query_string is provided, as
	 * pgss_store needs it.  We also ignore query without queryid, as it would
	 * be treated as a utility statement, which may not be the case.
	 */
	if (pgss_enabled(nesting_level)
		&& pgss_track_planning && query_string
		&& parse->queryId != UINT64CONST(0))
	{
		instr_time	start;
		instr_time	duration;
		BufferUsage bufusage_start,
					bufusage;
		WalUsage	walusage_start,
					walusage;

		/* We need to track buffer usage as the planner can access them. */
		bufusage_start = pgBufferUsage;

		/*
		 * Similarly the planner could write some WAL records in some cases
		 * (e.g. setting a hint bit with those being WAL-logged)
		 */
		walusage_start = pgWalUsage;
		INSTR_TIME_SET_CURRENT(start);

		/* Bump nesting so functions run during planning aren't top-level. */
		nesting_level++;
		PG_TRY();
		{
			if (prev_planner_hook)
				result = prev_planner_hook(parse, query_string, cursorOptions,
										   boundParams);
			else
				result = standard_planner(parse, query_string, cursorOptions,
										  boundParams);
		}
		PG_FINALLY();
		{
			/* Restore the nesting level even if the planner errored out. */
			nesting_level--;
		}
		PG_END_TRY();

		INSTR_TIME_SET_CURRENT(duration);
		INSTR_TIME_SUBTRACT(duration, start);

		/* calc differences of buffer counters. */
		memset(&bufusage, 0, sizeof(BufferUsage));
		BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);

		/* calc differences of WAL counters. */
		memset(&walusage, 0, sizeof(WalUsage));
		WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);

		/* Record the planning event under the PGSS_PLAN counter slot. */
		pgss_store(query_string,
				   parse->queryId,
				   parse->stmt_location,
				   parse->stmt_len,
				   PGSS_PLAN,
				   INSTR_TIME_GET_MILLISEC(duration),
				   0,
				   &bufusage,
				   &walusage,
				   NULL,
				   NULL,
				   0,
				   0);
	}
	else
	{
		/*
		 * Even though we're not tracking plan time for this statement, we
		 * must still increment the nesting level, to ensure that functions
		 * evaluated during planning are not seen as top-level calls.
		 */
		nesting_level++;
		PG_TRY();
		{
			if (prev_planner_hook)
				result = prev_planner_hook(parse, query_string, cursorOptions,
										   boundParams);
			else
				result = standard_planner(parse, query_string, cursorOptions,
										  boundParams);
		}
		PG_FINALLY();
		{
			nesting_level--;
		}
		PG_END_TRY();
	}

	return result;
}
985 :
986 : /*
987 : * ExecutorStart hook: start up tracking if needed
988 : */
989 : static void
990 109094 : pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
991 : {
992 109094 : if (prev_ExecutorStart)
993 0 : prev_ExecutorStart(queryDesc, eflags);
994 : else
995 109094 : standard_ExecutorStart(queryDesc, eflags);
996 :
997 : /*
998 : * If query has queryId zero, don't track it. This prevents double
999 : * counting of optimizable statements that are directly contained in
1000 : * utility statements.
1001 : */
1002 108476 : if (pgss_enabled(nesting_level) && queryDesc->plannedstmt->queryId != UINT64CONST(0))
1003 : {
1004 : /*
1005 : * Set up to track total elapsed time in ExecutorRun. Make sure the
1006 : * space is allocated in the per-query context so it will go away at
1007 : * ExecutorEnd.
1008 : */
1009 71974 : if (queryDesc->totaltime == NULL)
1010 : {
1011 : MemoryContext oldcxt;
1012 :
1013 71974 : oldcxt = MemoryContextSwitchTo(queryDesc->estate->es_query_cxt);
1014 71974 : queryDesc->totaltime = InstrAlloc(1, INSTRUMENT_ALL, false);
1015 71974 : MemoryContextSwitchTo(oldcxt);
1016 : }
1017 : }
1018 108476 : }
1019 :
1020 : /*
1021 : * ExecutorRun hook: all we need do is track nesting depth
1022 : */
static void
pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count,
				 bool execute_once)
{
	/* Bump nesting so statements executed inside aren't seen as top-level. */
	nesting_level++;
	PG_TRY();
	{
		if (prev_ExecutorRun)
			prev_ExecutorRun(queryDesc, direction, count, execute_once);
		else
			standard_ExecutorRun(queryDesc, direction, count, execute_once);
	}
	PG_FINALLY();
	{
		/* Restore the nesting level even on error exit. */
		nesting_level--;
	}
	PG_END_TRY();
}
1041 :
1042 : /*
1043 : * ExecutorFinish hook: all we need do is track nesting depth
1044 : */
static void
pgss_ExecutorFinish(QueryDesc *queryDesc)
{
	/* Bump nesting so AFTER-trigger work etc. isn't seen as top-level. */
	nesting_level++;
	PG_TRY();
	{
		if (prev_ExecutorFinish)
			prev_ExecutorFinish(queryDesc);
		else
			standard_ExecutorFinish(queryDesc);
	}
	PG_FINALLY();
	{
		/* Restore the nesting level even on error exit. */
		nesting_level--;
	}
	PG_END_TRY();
}
1062 :
1063 : /*
1064 : * ExecutorEnd hook: store results if needed
1065 : */
static void
pgss_ExecutorEnd(QueryDesc *queryDesc)
{
	uint64		queryId = queryDesc->plannedstmt->queryId;

	/*
	 * Record execution stats before handing off to ExecutorEnd proper;
	 * queryDesc->totaltime was set up by pgss_ExecutorStart only when we
	 * decided to track this statement.
	 */
	if (queryId != UINT64CONST(0) && queryDesc->totaltime &&
		pgss_enabled(nesting_level))
	{
		/*
		 * Make sure stats accumulation is done.  (Note: it's okay if several
		 * levels of hook all do this.)
		 */
		InstrEndLoop(queryDesc->totaltime);

		pgss_store(queryDesc->sourceText,
				   queryId,
				   queryDesc->plannedstmt->stmt_location,
				   queryDesc->plannedstmt->stmt_len,
				   PGSS_EXEC,
				   queryDesc->totaltime->total * 1000.0,	/* convert to msec */
				   queryDesc->estate->es_total_processed,
				   &queryDesc->totaltime->bufusage,
				   &queryDesc->totaltime->walusage,
				   queryDesc->estate->es_jit ? &queryDesc->estate->es_jit->instr : NULL,
				   NULL,
				   queryDesc->estate->es_parallel_workers_to_launch,
				   queryDesc->estate->es_parallel_workers_launched);
	}

	if (prev_ExecutorEnd)
		prev_ExecutorEnd(queryDesc);
	else
		standard_ExecutorEnd(queryDesc);
}
1100 :
1101 : /*
1102 : * ProcessUtility hook
1103 : */
static void
pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
					bool readOnlyTree,
					ProcessUtilityContext context,
					ParamListInfo params, QueryEnvironment *queryEnv,
					DestReceiver *dest, QueryCompletion *qc)
{
	Node	   *parsetree = pstmt->utilityStmt;
	/* Copy out everything we'll need after execution: pstmt may be freed. */
	uint64		saved_queryId = pstmt->queryId;
	int			saved_stmt_location = pstmt->stmt_location;
	int			saved_stmt_len = pstmt->stmt_len;
	bool		enabled = pgss_track_utility && pgss_enabled(nesting_level);

	/*
	 * Force utility statements to get queryId zero.  We do this even in cases
	 * where the statement contains an optimizable statement for which a
	 * queryId could be derived (such as EXPLAIN or DECLARE CURSOR).  For such
	 * cases, runtime control will first go through ProcessUtility and then
	 * the executor, and we don't want the executor hooks to do anything,
	 * since we are already measuring the statement's costs at the utility
	 * level.
	 *
	 * Note that this is only done if pg_stat_statements is enabled and
	 * configured to track utility statements, in the unlikely possibility
	 * that user configured another extension to handle utility statements
	 * only.
	 */
	if (enabled)
		pstmt->queryId = UINT64CONST(0);

	/*
	 * If it's an EXECUTE statement, we don't track it and don't increment the
	 * nesting level.  This allows the cycles to be charged to the underlying
	 * PREPARE instead (by the Executor hooks), which is much more useful.
	 *
	 * We also don't track execution of PREPARE.  If we did, we would get one
	 * hash table entry for the PREPARE (with hash calculated from the query
	 * string), and then a different one with the same query string (but hash
	 * calculated from the query tree) would be used to accumulate costs of
	 * ensuing EXECUTEs.  This would be confusing.  Since PREPARE doesn't
	 * actually run the planner (only parse+rewrite), its costs are generally
	 * pretty negligible and it seems okay to just ignore it.
	 */
	if (enabled &&
		!IsA(parsetree, ExecuteStmt) &&
		!IsA(parsetree, PrepareStmt))
	{
		instr_time	start;
		instr_time	duration;
		uint64		rows;
		BufferUsage bufusage_start,
					bufusage;
		WalUsage	walusage_start,
					walusage;

		/* Snapshot resource counters so we can report deltas afterwards. */
		bufusage_start = pgBufferUsage;
		walusage_start = pgWalUsage;
		INSTR_TIME_SET_CURRENT(start);

		nesting_level++;
		PG_TRY();
		{
			if (prev_ProcessUtility)
				prev_ProcessUtility(pstmt, queryString, readOnlyTree,
									context, params, queryEnv,
									dest, qc);
			else
				standard_ProcessUtility(pstmt, queryString, readOnlyTree,
										context, params, queryEnv,
										dest, qc);
		}
		PG_FINALLY();
		{
			nesting_level--;
		}
		PG_END_TRY();

		/*
		 * CAUTION: do not access the *pstmt data structure again below here.
		 * If it was a ROLLBACK or similar, that data structure may have been
		 * freed.  We must copy everything we still need into local variables,
		 * which we did above.
		 *
		 * For the same reason, we can't risk restoring pstmt->queryId to its
		 * former value, which'd otherwise be a good idea.
		 */

		INSTR_TIME_SET_CURRENT(duration);
		INSTR_TIME_SUBTRACT(duration, start);

		/*
		 * Track the total number of rows retrieved or affected by the utility
		 * statements of COPY, FETCH, CREATE TABLE AS, CREATE MATERIALIZED
		 * VIEW, REFRESH MATERIALIZED VIEW and SELECT INTO.
		 */
		rows = (qc && (qc->commandTag == CMDTAG_COPY ||
					   qc->commandTag == CMDTAG_FETCH ||
					   qc->commandTag == CMDTAG_SELECT ||
					   qc->commandTag == CMDTAG_REFRESH_MATERIALIZED_VIEW)) ?
			qc->nprocessed : 0;

		/* calc differences of buffer counters. */
		memset(&bufusage, 0, sizeof(BufferUsage));
		BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);

		/* calc differences of WAL counters. */
		memset(&walusage, 0, sizeof(WalUsage));
		WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);

		pgss_store(queryString,
				   saved_queryId,
				   saved_stmt_location,
				   saved_stmt_len,
				   PGSS_EXEC,
				   INSTR_TIME_GET_MILLISEC(duration),
				   rows,
				   &bufusage,
				   &walusage,
				   NULL,
				   NULL,
				   0,
				   0);
	}
	else
	{
		/*
		 * Even though we're not tracking execution time for this statement,
		 * we must still increment the nesting level, to ensure that functions
		 * evaluated within it are not seen as top-level calls.  But don't do
		 * so for EXECUTE; that way, when control reaches pgss_planner or
		 * pgss_ExecutorStart, we will treat the costs as top-level if
		 * appropriate.  Likewise, don't bump for PREPARE, so that parse
		 * analysis will treat the statement as top-level if appropriate.
		 *
		 * To be absolutely certain we don't mess up the nesting level,
		 * evaluate the bump_level condition just once.
		 */
		bool		bump_level =
			!IsA(parsetree, ExecuteStmt) &&
			!IsA(parsetree, PrepareStmt);

		if (bump_level)
			nesting_level++;
		PG_TRY();
		{
			if (prev_ProcessUtility)
				prev_ProcessUtility(pstmt, queryString, readOnlyTree,
									context, params, queryEnv,
									dest, qc);
			else
				standard_ProcessUtility(pstmt, queryString, readOnlyTree,
										context, params, queryEnv,
										dest, qc);
		}
		PG_FINALLY();
		{
			if (bump_level)
				nesting_level--;
		}
		PG_END_TRY();
	}
}
1266 :
1267 : /*
1268 : * Store some statistics for a statement.
1269 : *
1270 : * If jstate is not NULL then we're trying to create an entry for which
1271 : * we have no statistics as yet; we just want to record the normalized
1272 : * query string. total_time, rows, bufusage and walusage are ignored in this
1273 : * case.
1274 : *
1275 : * If kind is PGSS_PLAN or PGSS_EXEC, its value is used as the array position
1276 : * for the arrays in the Counters field.
1277 : */
static void
pgss_store(const char *query, uint64 queryId,
		   int query_location, int query_len,
		   pgssStoreKind kind,
		   double total_time, uint64 rows,
		   const BufferUsage *bufusage,
		   const WalUsage *walusage,
		   const struct JitInstrumentation *jitusage,
		   JumbleState *jstate,
		   int parallel_workers_to_launch,
		   int parallel_workers_launched)
{
	pgssHashKey key;
	pgssEntry  *entry;
	char	   *norm_query = NULL;
	int			encoding = GetDatabaseEncoding();

	Assert(query != NULL);

	/* Safety check... */
	if (!pgss || !pgss_hash)
		return;

	/*
	 * Nothing to do if compute_query_id isn't enabled and no other module
	 * computed a query identifier.
	 */
	if (queryId == UINT64CONST(0))
		return;

	/*
	 * Confine our attention to the relevant part of the string, if the query
	 * is a portion of a multi-statement source string, and update query
	 * location and length if needed.
	 */
	query = CleanQuerytext(query, &query_location, &query_len);

	/* Set up key for hashtable search */

	/* clear padding */
	memset(&key, 0, sizeof(pgssHashKey));

	key.userid = GetUserId();
	key.dbid = MyDatabaseId;
	key.queryid = queryId;
	key.toplevel = (nesting_level == 0);

	/* Lookup the hash table entry with shared lock. */
	LWLockAcquire(pgss->lock, LW_SHARED);

	entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);

	/* Create new entry, if not present */
	if (!entry)
	{
		Size		query_offset;
		int			gc_count;
		bool		stored;
		bool		do_gc;

		/*
		 * Create a new, normalized query string if caller asked.  We don't
		 * need to hold the lock while doing this work.  (Note: in any case,
		 * it's possible that someone else creates a duplicate hashtable entry
		 * in the interval where we don't hold the lock below.  That case is
		 * handled by entry_alloc.)
		 */
		if (jstate)
		{
			LWLockRelease(pgss->lock);
			norm_query = generate_normalized_query(jstate, query,
												   query_location,
												   &query_len);
			LWLockAcquire(pgss->lock, LW_SHARED);
		}

		/* Append new query text to file with only shared lock held */
		stored = qtext_store(norm_query ? norm_query : query, query_len,
							 &query_offset, &gc_count);

		/*
		 * Determine whether we need to garbage collect external query texts
		 * while the shared lock is still held.  This micro-optimization
		 * avoids taking the time to decide this while holding exclusive lock.
		 */
		do_gc = need_gc_qtexts();

		/* Need exclusive lock to make a new hashtable entry - promote */
		LWLockRelease(pgss->lock);
		LWLockAcquire(pgss->lock, LW_EXCLUSIVE);

		/*
		 * A garbage collection may have occurred while we weren't holding the
		 * lock.  In the unlikely event that this happens, the query text we
		 * stored above will have been garbage collected, so write it again.
		 * This should be infrequent enough that doing it while holding
		 * exclusive lock isn't a performance problem.
		 */
		if (!stored || pgss->gc_count != gc_count)
			stored = qtext_store(norm_query ? norm_query : query, query_len,
								 &query_offset, NULL);

		/* If we failed to write to the text file, give up */
		if (!stored)
			goto done;

		/* OK to create a new hashtable entry */
		entry = entry_alloc(&key, query_offset, query_len, encoding,
							jstate != NULL);

		/* If needed, perform garbage collection while exclusive lock held */
		if (do_gc)
			gc_qtexts();
	}

	/* Increment the counts, except when jstate is not NULL */
	if (!jstate)
	{
		Assert(kind == PGSS_PLAN || kind == PGSS_EXEC);

		/*
		 * Grab the spinlock while updating the counters (see comment about
		 * locking rules at the head of the file)
		 */
		SpinLockAcquire(&entry->mutex);

		/* "Unstick" entry if it was previously sticky */
		if (IS_STICKY(entry->counters))
			entry->counters.usage = USAGE_INIT;

		entry->counters.calls[kind] += 1;
		entry->counters.total_time[kind] += total_time;

		if (entry->counters.calls[kind] == 1)
		{
			/* First call: min, max and mean are all just this sample. */
			entry->counters.min_time[kind] = total_time;
			entry->counters.max_time[kind] = total_time;
			entry->counters.mean_time[kind] = total_time;
		}
		else
		{
			/*
			 * Welford's method for accurately computing variance. See
			 * <http://www.johndcook.com/blog/standard_deviation/>
			 */
			double		old_mean = entry->counters.mean_time[kind];

			entry->counters.mean_time[kind] +=
				(total_time - old_mean) / entry->counters.calls[kind];
			entry->counters.sum_var_time[kind] +=
				(total_time - old_mean) * (total_time - entry->counters.mean_time[kind]);

			/*
			 * Calculate min and max time. min = 0 and max = 0 means that the
			 * min/max statistics were reset
			 */
			if (entry->counters.min_time[kind] == 0
				&& entry->counters.max_time[kind] == 0)
			{
				entry->counters.min_time[kind] = total_time;
				entry->counters.max_time[kind] = total_time;
			}
			else
			{
				if (entry->counters.min_time[kind] > total_time)
					entry->counters.min_time[kind] = total_time;
				if (entry->counters.max_time[kind] < total_time)
					entry->counters.max_time[kind] = total_time;
			}
		}
		entry->counters.rows += rows;
		entry->counters.shared_blks_hit += bufusage->shared_blks_hit;
		entry->counters.shared_blks_read += bufusage->shared_blks_read;
		entry->counters.shared_blks_dirtied += bufusage->shared_blks_dirtied;
		entry->counters.shared_blks_written += bufusage->shared_blks_written;
		entry->counters.local_blks_hit += bufusage->local_blks_hit;
		entry->counters.local_blks_read += bufusage->local_blks_read;
		entry->counters.local_blks_dirtied += bufusage->local_blks_dirtied;
		entry->counters.local_blks_written += bufusage->local_blks_written;
		entry->counters.temp_blks_read += bufusage->temp_blks_read;
		entry->counters.temp_blks_written += bufusage->temp_blks_written;
		entry->counters.shared_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->shared_blk_read_time);
		entry->counters.shared_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->shared_blk_write_time);
		entry->counters.local_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->local_blk_read_time);
		entry->counters.local_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->local_blk_write_time);
		entry->counters.temp_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->temp_blk_read_time);
		entry->counters.temp_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->temp_blk_write_time);
		entry->counters.usage += USAGE_EXEC(total_time);
		entry->counters.wal_records += walusage->wal_records;
		entry->counters.wal_fpi += walusage->wal_fpi;
		entry->counters.wal_bytes += walusage->wal_bytes;
		if (jitusage)
		{
			entry->counters.jit_functions += jitusage->created_functions;
			entry->counters.jit_generation_time += INSTR_TIME_GET_MILLISEC(jitusage->generation_counter);

			/* Count each JIT phase only if it actually spent measurable time. */
			if (INSTR_TIME_GET_MILLISEC(jitusage->deform_counter))
				entry->counters.jit_deform_count++;
			entry->counters.jit_deform_time += INSTR_TIME_GET_MILLISEC(jitusage->deform_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->inlining_counter))
				entry->counters.jit_inlining_count++;
			entry->counters.jit_inlining_time += INSTR_TIME_GET_MILLISEC(jitusage->inlining_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->optimization_counter))
				entry->counters.jit_optimization_count++;
			entry->counters.jit_optimization_time += INSTR_TIME_GET_MILLISEC(jitusage->optimization_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->emission_counter))
				entry->counters.jit_emission_count++;
			entry->counters.jit_emission_time += INSTR_TIME_GET_MILLISEC(jitusage->emission_counter);
		}

		/* parallel worker counters */
		entry->counters.parallel_workers_to_launch += parallel_workers_to_launch;
		entry->counters.parallel_workers_launched += parallel_workers_launched;

		SpinLockRelease(&entry->mutex);
	}

done:
	LWLockRelease(pgss->lock);

	/* We postpone this clean-up until we're out of the lock */
	if (norm_query)
		pfree(norm_query);
}
1505 :
1506 : /*
1507 : * Reset statement statistics corresponding to userid, dbid, and queryid.
1508 : */
1509 : Datum
1510 2 : pg_stat_statements_reset_1_7(PG_FUNCTION_ARGS)
1511 : {
1512 : Oid userid;
1513 : Oid dbid;
1514 : uint64 queryid;
1515 :
1516 2 : userid = PG_GETARG_OID(0);
1517 2 : dbid = PG_GETARG_OID(1);
1518 2 : queryid = (uint64) PG_GETARG_INT64(2);
1519 :
1520 2 : entry_reset(userid, dbid, queryid, false);
1521 :
1522 2 : PG_RETURN_VOID();
1523 : }
1524 :
1525 : Datum
1526 156 : pg_stat_statements_reset_1_11(PG_FUNCTION_ARGS)
1527 : {
1528 : Oid userid;
1529 : Oid dbid;
1530 : uint64 queryid;
1531 : bool minmax_only;
1532 :
1533 156 : userid = PG_GETARG_OID(0);
1534 156 : dbid = PG_GETARG_OID(1);
1535 156 : queryid = (uint64) PG_GETARG_INT64(2);
1536 156 : minmax_only = PG_GETARG_BOOL(3);
1537 :
1538 156 : PG_RETURN_TIMESTAMPTZ(entry_reset(userid, dbid, queryid, minmax_only));
1539 : }
1540 :
1541 : /*
1542 : * Reset statement statistics.
1543 : */
1544 : Datum
1545 2 : pg_stat_statements_reset(PG_FUNCTION_ARGS)
1546 : {
1547 2 : entry_reset(0, 0, 0, false);
1548 :
1549 2 : PG_RETURN_VOID();
1550 : }
1551 :
1552 : /* Number of output arguments (columns) for various API versions */
1553 : #define PG_STAT_STATEMENTS_COLS_V1_0 14
1554 : #define PG_STAT_STATEMENTS_COLS_V1_1 18
1555 : #define PG_STAT_STATEMENTS_COLS_V1_2 19
1556 : #define PG_STAT_STATEMENTS_COLS_V1_3 23
1557 : #define PG_STAT_STATEMENTS_COLS_V1_8 32
1558 : #define PG_STAT_STATEMENTS_COLS_V1_9 33
1559 : #define PG_STAT_STATEMENTS_COLS_V1_10 43
1560 : #define PG_STAT_STATEMENTS_COLS_V1_11 49
1561 : #define PG_STAT_STATEMENTS_COLS_V1_12 51
1562 : #define PG_STAT_STATEMENTS_COLS 51 /* maximum of above */
1563 :
1564 : /*
1565 : * Retrieve statement statistics.
1566 : *
1567 : * The SQL API of this function has changed multiple times, and will likely
1568 : * do so again in future. To support the case where a newer version of this
1569 : * loadable module is being used with an old SQL declaration of the function,
1570 : * we continue to support the older API versions. For 1.2 and later, the
1571 : * expected API version is identified by embedding it in the C name of the
1572 : * function. Unfortunately we weren't bright enough to do that for 1.1.
1573 : */
1574 : Datum
1575 176 : pg_stat_statements_1_12(PG_FUNCTION_ARGS)
1576 : {
1577 176 : bool showtext = PG_GETARG_BOOL(0);
1578 :
1579 176 : pg_stat_statements_internal(fcinfo, PGSS_V1_12, showtext);
1580 :
1581 176 : return (Datum) 0;
1582 : }
1583 :
1584 : Datum
1585 2 : pg_stat_statements_1_11(PG_FUNCTION_ARGS)
1586 : {
1587 2 : bool showtext = PG_GETARG_BOOL(0);
1588 :
1589 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_11, showtext);
1590 :
1591 2 : return (Datum) 0;
1592 : }
1593 :
1594 : Datum
1595 2 : pg_stat_statements_1_10(PG_FUNCTION_ARGS)
1596 : {
1597 2 : bool showtext = PG_GETARG_BOOL(0);
1598 :
1599 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_10, showtext);
1600 :
1601 2 : return (Datum) 0;
1602 : }
1603 :
1604 : Datum
1605 2 : pg_stat_statements_1_9(PG_FUNCTION_ARGS)
1606 : {
1607 2 : bool showtext = PG_GETARG_BOOL(0);
1608 :
1609 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_9, showtext);
1610 :
1611 2 : return (Datum) 0;
1612 : }
1613 :
1614 : Datum
1615 2 : pg_stat_statements_1_8(PG_FUNCTION_ARGS)
1616 : {
1617 2 : bool showtext = PG_GETARG_BOOL(0);
1618 :
1619 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_8, showtext);
1620 :
1621 2 : return (Datum) 0;
1622 : }
1623 :
1624 : Datum
1625 2 : pg_stat_statements_1_3(PG_FUNCTION_ARGS)
1626 : {
1627 2 : bool showtext = PG_GETARG_BOOL(0);
1628 :
1629 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_3, showtext);
1630 :
1631 2 : return (Datum) 0;
1632 : }
1633 :
1634 : Datum
1635 0 : pg_stat_statements_1_2(PG_FUNCTION_ARGS)
1636 : {
1637 0 : bool showtext = PG_GETARG_BOOL(0);
1638 :
1639 0 : pg_stat_statements_internal(fcinfo, PGSS_V1_2, showtext);
1640 :
1641 0 : return (Datum) 0;
1642 : }
1643 :
1644 : /*
1645 : * Legacy entry point for pg_stat_statements() API versions 1.0 and 1.1.
1646 : * This can be removed someday, perhaps.
1647 : */
1648 : Datum
1649 0 : pg_stat_statements(PG_FUNCTION_ARGS)
1650 : {
1651 : /* If it's really API 1.1, we'll figure that out below */
1652 0 : pg_stat_statements_internal(fcinfo, PGSS_V1_0, true);
1653 :
1654 0 : return (Datum) 0;
1655 : }
1656 :
1657 : /* Common code for all versions of pg_stat_statements() */
1658 : static void
1659 186 : pg_stat_statements_internal(FunctionCallInfo fcinfo,
1660 : pgssVersion api_version,
1661 : bool showtext)
1662 : {
1663 186 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1664 186 : Oid userid = GetUserId();
1665 186 : bool is_allowed_role = false;
1666 186 : char *qbuffer = NULL;
1667 186 : Size qbuffer_size = 0;
1668 186 : Size extent = 0;
1669 186 : int gc_count = 0;
1670 : HASH_SEQ_STATUS hash_seq;
1671 : pgssEntry *entry;
1672 :
1673 : /*
1674 : * Superusers or roles with the privileges of pg_read_all_stats members
1675 : * are allowed
1676 : */
1677 186 : is_allowed_role = has_privs_of_role(userid, ROLE_PG_READ_ALL_STATS);
1678 :
1679 : /* hash table must exist already */
1680 186 : if (!pgss || !pgss_hash)
1681 0 : ereport(ERROR,
1682 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1683 : errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
1684 :
1685 186 : InitMaterializedSRF(fcinfo, 0);
1686 :
1687 : /*
1688 : * Check we have the expected number of output arguments. Aside from
1689 : * being a good safety check, we need a kluge here to detect API version
1690 : * 1.1, which was wedged into the code in an ill-considered way.
1691 : */
1692 186 : switch (rsinfo->setDesc->natts)
1693 : {
1694 0 : case PG_STAT_STATEMENTS_COLS_V1_0:
1695 0 : if (api_version != PGSS_V1_0)
1696 0 : elog(ERROR, "incorrect number of output arguments");
1697 0 : break;
1698 0 : case PG_STAT_STATEMENTS_COLS_V1_1:
1699 : /* pg_stat_statements() should have told us 1.0 */
1700 0 : if (api_version != PGSS_V1_0)
1701 0 : elog(ERROR, "incorrect number of output arguments");
1702 0 : api_version = PGSS_V1_1;
1703 0 : break;
1704 0 : case PG_STAT_STATEMENTS_COLS_V1_2:
1705 0 : if (api_version != PGSS_V1_2)
1706 0 : elog(ERROR, "incorrect number of output arguments");
1707 0 : break;
1708 2 : case PG_STAT_STATEMENTS_COLS_V1_3:
1709 2 : if (api_version != PGSS_V1_3)
1710 0 : elog(ERROR, "incorrect number of output arguments");
1711 2 : break;
1712 2 : case PG_STAT_STATEMENTS_COLS_V1_8:
1713 2 : if (api_version != PGSS_V1_8)
1714 0 : elog(ERROR, "incorrect number of output arguments");
1715 2 : break;
1716 2 : case PG_STAT_STATEMENTS_COLS_V1_9:
1717 2 : if (api_version != PGSS_V1_9)
1718 0 : elog(ERROR, "incorrect number of output arguments");
1719 2 : break;
1720 2 : case PG_STAT_STATEMENTS_COLS_V1_10:
1721 2 : if (api_version != PGSS_V1_10)
1722 0 : elog(ERROR, "incorrect number of output arguments");
1723 2 : break;
1724 2 : case PG_STAT_STATEMENTS_COLS_V1_11:
1725 2 : if (api_version != PGSS_V1_11)
1726 0 : elog(ERROR, "incorrect number of output arguments");
1727 2 : break;
1728 176 : case PG_STAT_STATEMENTS_COLS_V1_12:
1729 176 : if (api_version != PGSS_V1_12)
1730 0 : elog(ERROR, "incorrect number of output arguments");
1731 176 : break;
1732 0 : default:
1733 0 : elog(ERROR, "incorrect number of output arguments");
1734 : }
1735 :
1736 : /*
1737 : * We'd like to load the query text file (if needed) while not holding any
1738 : * lock on pgss->lock. In the worst case we'll have to do this again
1739 : * after we have the lock, but it's unlikely enough to make this a win
1740 : * despite occasional duplicated work. We need to reload if anybody
1741 : * writes to the file (either a retail qtext_store(), or a garbage
1742 : * collection) between this point and where we've gotten shared lock. If
1743 : * a qtext_store is actually in progress when we look, we might as well
1744 : * skip the speculative load entirely.
1745 : */
1746 186 : if (showtext)
1747 : {
1748 : int n_writers;
1749 :
1750 : /* Take the mutex so we can examine variables */
1751 186 : SpinLockAcquire(&pgss->mutex);
1752 186 : extent = pgss->extent;
1753 186 : n_writers = pgss->n_writers;
1754 186 : gc_count = pgss->gc_count;
1755 186 : SpinLockRelease(&pgss->mutex);
1756 :
1757 : /* No point in loading file now if there are active writers */
1758 186 : if (n_writers == 0)
1759 186 : qbuffer = qtext_load_file(&qbuffer_size);
1760 : }
1761 :
1762 : /*
1763 : * Get shared lock, load or reload the query text file if we must, and
1764 : * iterate over the hashtable entries.
1765 : *
1766 : * With a large hash table, we might be holding the lock rather longer
1767 : * than one could wish. However, this only blocks creation of new hash
1768 : * table entries, and the larger the hash table the less likely that is to
1769 : * be needed. So we can hope this is okay. Perhaps someday we'll decide
1770 : * we need to partition the hash table to limit the time spent holding any
1771 : * one lock.
1772 : */
1773 186 : LWLockAcquire(pgss->lock, LW_SHARED);
1774 :
1775 186 : if (showtext)
1776 : {
1777 : /*
1778 : * Here it is safe to examine extent and gc_count without taking the
1779 : * mutex. Note that although other processes might change
1780 : * pgss->extent just after we look at it, the strings they then write
1781 : * into the file cannot yet be referenced in the hashtable, so we
1782 : * don't care whether we see them or not.
1783 : *
1784 : * If qtext_load_file fails, we just press on; we'll return NULL for
1785 : * every query text.
1786 : */
1787 186 : if (qbuffer == NULL ||
1788 186 : pgss->extent != extent ||
1789 186 : pgss->gc_count != gc_count)
1790 : {
1791 0 : free(qbuffer);
1792 0 : qbuffer = qtext_load_file(&qbuffer_size);
1793 : }
1794 : }
1795 :
1796 186 : hash_seq_init(&hash_seq, pgss_hash);
1797 51382 : while ((entry = hash_seq_search(&hash_seq)) != NULL)
1798 : {
1799 : Datum values[PG_STAT_STATEMENTS_COLS];
1800 : bool nulls[PG_STAT_STATEMENTS_COLS];
1801 51196 : int i = 0;
1802 : Counters tmp;
1803 : double stddev;
1804 51196 : int64 queryid = entry->key.queryid;
1805 : TimestampTz stats_since;
1806 : TimestampTz minmax_stats_since;
1807 :
1808 51196 : memset(values, 0, sizeof(values));
1809 51196 : memset(nulls, 0, sizeof(nulls));
1810 :
1811 51196 : values[i++] = ObjectIdGetDatum(entry->key.userid);
1812 51196 : values[i++] = ObjectIdGetDatum(entry->key.dbid);
1813 51196 : if (api_version >= PGSS_V1_9)
1814 51170 : values[i++] = BoolGetDatum(entry->key.toplevel);
1815 :
1816 51196 : if (is_allowed_role || entry->key.userid == userid)
1817 : {
1818 51188 : if (api_version >= PGSS_V1_2)
1819 51188 : values[i++] = Int64GetDatumFast(queryid);
1820 :
1821 51188 : if (showtext)
1822 : {
1823 51188 : char *qstr = qtext_fetch(entry->query_offset,
1824 : entry->query_len,
1825 : qbuffer,
1826 : qbuffer_size);
1827 :
1828 51188 : if (qstr)
1829 : {
1830 : char *enc;
1831 :
1832 51188 : enc = pg_any_to_server(qstr,
1833 : entry->query_len,
1834 : entry->encoding);
1835 :
1836 51188 : values[i++] = CStringGetTextDatum(enc);
1837 :
1838 51188 : if (enc != qstr)
1839 0 : pfree(enc);
1840 : }
1841 : else
1842 : {
1843 : /* Just return a null if we fail to find the text */
1844 0 : nulls[i++] = true;
1845 : }
1846 : }
1847 : else
1848 : {
1849 : /* Query text not requested */
1850 0 : nulls[i++] = true;
1851 : }
1852 : }
1853 : else
1854 : {
1855 : /* Don't show queryid */
1856 8 : if (api_version >= PGSS_V1_2)
1857 8 : nulls[i++] = true;
1858 :
1859 : /*
1860 : * Don't show query text, but hint as to the reason for not doing
1861 : * so if it was requested
1862 : */
1863 8 : if (showtext)
1864 8 : values[i++] = CStringGetTextDatum("<insufficient privilege>");
1865 : else
1866 0 : nulls[i++] = true;
1867 : }
1868 :
1869 : /* copy counters to a local variable to keep locking time short */
1870 51196 : SpinLockAcquire(&entry->mutex);
1871 51196 : tmp = entry->counters;
1872 51196 : SpinLockRelease(&entry->mutex);
1873 :
1874 : /*
1875 : * The spinlock is not required when reading these two as they are
1876 : * always updated when holding pgss->lock exclusively.
1877 : */
1878 51196 : stats_since = entry->stats_since;
1879 51196 : minmax_stats_since = entry->minmax_stats_since;
1880 :
1881 : /* Skip entry if unexecuted (ie, it's a pending "sticky" entry) */
1882 51196 : if (IS_STICKY(tmp))
1883 78 : continue;
1884 :
1885 : /* Note that we rely on PGSS_PLAN being 0 and PGSS_EXEC being 1. */
1886 153354 : for (int kind = 0; kind < PGSS_NUMKIND; kind++)
1887 : {
1888 102236 : if (kind == PGSS_EXEC || api_version >= PGSS_V1_8)
1889 : {
1890 102228 : values[i++] = Int64GetDatumFast(tmp.calls[kind]);
1891 102228 : values[i++] = Float8GetDatumFast(tmp.total_time[kind]);
1892 : }
1893 :
1894 102236 : if ((kind == PGSS_EXEC && api_version >= PGSS_V1_3) ||
1895 : api_version >= PGSS_V1_8)
1896 : {
1897 102228 : values[i++] = Float8GetDatumFast(tmp.min_time[kind]);
1898 102228 : values[i++] = Float8GetDatumFast(tmp.max_time[kind]);
1899 102228 : values[i++] = Float8GetDatumFast(tmp.mean_time[kind]);
1900 :
1901 : /*
1902 : * Note we are calculating the population variance here, not
1903 : * the sample variance, as we have data for the whole
1904 : * population, so Bessel's correction is not used, and we
1905 : * don't divide by tmp.calls - 1.
1906 : */
1907 102228 : if (tmp.calls[kind] > 1)
1908 9084 : stddev = sqrt(tmp.sum_var_time[kind] / tmp.calls[kind]);
1909 : else
1910 93144 : stddev = 0.0;
1911 102228 : values[i++] = Float8GetDatumFast(stddev);
1912 : }
1913 : }
1914 51118 : values[i++] = Int64GetDatumFast(tmp.rows);
1915 51118 : values[i++] = Int64GetDatumFast(tmp.shared_blks_hit);
1916 51118 : values[i++] = Int64GetDatumFast(tmp.shared_blks_read);
1917 51118 : if (api_version >= PGSS_V1_1)
1918 51118 : values[i++] = Int64GetDatumFast(tmp.shared_blks_dirtied);
1919 51118 : values[i++] = Int64GetDatumFast(tmp.shared_blks_written);
1920 51118 : values[i++] = Int64GetDatumFast(tmp.local_blks_hit);
1921 51118 : values[i++] = Int64GetDatumFast(tmp.local_blks_read);
1922 51118 : if (api_version >= PGSS_V1_1)
1923 51118 : values[i++] = Int64GetDatumFast(tmp.local_blks_dirtied);
1924 51118 : values[i++] = Int64GetDatumFast(tmp.local_blks_written);
1925 51118 : values[i++] = Int64GetDatumFast(tmp.temp_blks_read);
1926 51118 : values[i++] = Int64GetDatumFast(tmp.temp_blks_written);
1927 51118 : if (api_version >= PGSS_V1_1)
1928 : {
1929 51118 : values[i++] = Float8GetDatumFast(tmp.shared_blk_read_time);
1930 51118 : values[i++] = Float8GetDatumFast(tmp.shared_blk_write_time);
1931 : }
1932 51118 : if (api_version >= PGSS_V1_11)
1933 : {
1934 51056 : values[i++] = Float8GetDatumFast(tmp.local_blk_read_time);
1935 51056 : values[i++] = Float8GetDatumFast(tmp.local_blk_write_time);
1936 : }
1937 51118 : if (api_version >= PGSS_V1_10)
1938 : {
1939 51078 : values[i++] = Float8GetDatumFast(tmp.temp_blk_read_time);
1940 51078 : values[i++] = Float8GetDatumFast(tmp.temp_blk_write_time);
1941 : }
1942 51118 : if (api_version >= PGSS_V1_8)
1943 : {
1944 : char buf[256];
1945 : Datum wal_bytes;
1946 :
1947 51110 : values[i++] = Int64GetDatumFast(tmp.wal_records);
1948 51110 : values[i++] = Int64GetDatumFast(tmp.wal_fpi);
1949 :
1950 51110 : snprintf(buf, sizeof buf, UINT64_FORMAT, tmp.wal_bytes);
1951 :
1952 : /* Convert to numeric. */
1953 51110 : wal_bytes = DirectFunctionCall3(numeric_in,
1954 : CStringGetDatum(buf),
1955 : ObjectIdGetDatum(0),
1956 : Int32GetDatum(-1));
1957 51110 : values[i++] = wal_bytes;
1958 : }
1959 51118 : if (api_version >= PGSS_V1_10)
1960 : {
1961 51078 : values[i++] = Int64GetDatumFast(tmp.jit_functions);
1962 51078 : values[i++] = Float8GetDatumFast(tmp.jit_generation_time);
1963 51078 : values[i++] = Int64GetDatumFast(tmp.jit_inlining_count);
1964 51078 : values[i++] = Float8GetDatumFast(tmp.jit_inlining_time);
1965 51078 : values[i++] = Int64GetDatumFast(tmp.jit_optimization_count);
1966 51078 : values[i++] = Float8GetDatumFast(tmp.jit_optimization_time);
1967 51078 : values[i++] = Int64GetDatumFast(tmp.jit_emission_count);
1968 51078 : values[i++] = Float8GetDatumFast(tmp.jit_emission_time);
1969 : }
1970 51118 : if (api_version >= PGSS_V1_11)
1971 : {
1972 51056 : values[i++] = Int64GetDatumFast(tmp.jit_deform_count);
1973 51056 : values[i++] = Float8GetDatumFast(tmp.jit_deform_time);
1974 : }
1975 51118 : if (api_version >= PGSS_V1_12)
1976 : {
1977 51030 : values[i++] = Int64GetDatumFast(tmp.parallel_workers_to_launch);
1978 51030 : values[i++] = Int64GetDatumFast(tmp.parallel_workers_launched);
1979 : }
1980 51118 : if (api_version >= PGSS_V1_11)
1981 : {
1982 51056 : values[i++] = TimestampTzGetDatum(stats_since);
1983 51056 : values[i++] = TimestampTzGetDatum(minmax_stats_since);
1984 : }
1985 :
1986 : Assert(i == (api_version == PGSS_V1_0 ? PG_STAT_STATEMENTS_COLS_V1_0 :
1987 : api_version == PGSS_V1_1 ? PG_STAT_STATEMENTS_COLS_V1_1 :
1988 : api_version == PGSS_V1_2 ? PG_STAT_STATEMENTS_COLS_V1_2 :
1989 : api_version == PGSS_V1_3 ? PG_STAT_STATEMENTS_COLS_V1_3 :
1990 : api_version == PGSS_V1_8 ? PG_STAT_STATEMENTS_COLS_V1_8 :
1991 : api_version == PGSS_V1_9 ? PG_STAT_STATEMENTS_COLS_V1_9 :
1992 : api_version == PGSS_V1_10 ? PG_STAT_STATEMENTS_COLS_V1_10 :
1993 : api_version == PGSS_V1_11 ? PG_STAT_STATEMENTS_COLS_V1_11 :
1994 : api_version == PGSS_V1_12 ? PG_STAT_STATEMENTS_COLS_V1_12 :
1995 : -1 /* fail if you forget to update this assert */ ));
1996 :
1997 51118 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
1998 : }
1999 :
2000 186 : LWLockRelease(pgss->lock);
2001 :
2002 186 : free(qbuffer);
2003 186 : }
2004 :
2005 : /* Number of output arguments (columns) for pg_stat_statements_info */
2006 : #define PG_STAT_STATEMENTS_INFO_COLS 2
2007 :
2008 : /*
2009 : * Return statistics of pg_stat_statements.
2010 : */
2011 : Datum
2012 4 : pg_stat_statements_info(PG_FUNCTION_ARGS)
2013 : {
2014 : pgssGlobalStats stats;
2015 : TupleDesc tupdesc;
2016 4 : Datum values[PG_STAT_STATEMENTS_INFO_COLS] = {0};
2017 4 : bool nulls[PG_STAT_STATEMENTS_INFO_COLS] = {0};
2018 :
2019 4 : if (!pgss || !pgss_hash)
2020 0 : ereport(ERROR,
2021 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2022 : errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
2023 :
2024 : /* Build a tuple descriptor for our result type */
2025 4 : if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
2026 0 : elog(ERROR, "return type must be a row type");
2027 :
2028 : /* Read global statistics for pg_stat_statements */
2029 4 : SpinLockAcquire(&pgss->mutex);
2030 4 : stats = pgss->stats;
2031 4 : SpinLockRelease(&pgss->mutex);
2032 :
2033 4 : values[0] = Int64GetDatum(stats.dealloc);
2034 4 : values[1] = TimestampTzGetDatum(stats.stats_reset);
2035 :
2036 4 : PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
2037 : }
2038 :
2039 : /*
2040 : * Estimate shared memory space needed.
2041 : */
2042 : static Size
2043 14 : pgss_memsize(void)
2044 : {
2045 : Size size;
2046 :
2047 14 : size = MAXALIGN(sizeof(pgssSharedState));
2048 14 : size = add_size(size, hash_estimate_size(pgss_max, sizeof(pgssEntry)));
2049 :
2050 14 : return size;
2051 : }
2052 :
2053 : /*
2054 : * Allocate a new hashtable entry.
2055 : * caller must hold an exclusive lock on pgss->lock
2056 : *
2057 : * "query" need not be null-terminated; we rely on query_len instead
2058 : *
2059 : * If "sticky" is true, make the new entry artificially sticky so that it will
2060 : * probably still be there when the query finishes execution. We do this by
2061 : * giving it a median usage value rather than the normal value. (Strictly
2062 : * speaking, query strings are normalized on a best effort basis, though it
2063 : * would be difficult to demonstrate this even under artificial conditions.)
2064 : *
2065 : * Note: despite needing exclusive lock, it's not an error for the target
2066 : * entry to already exist. This is because pgss_store releases and
2067 : * reacquires lock after failing to find a match; so someone else could
2068 : * have made the entry while we waited to get exclusive lock.
2069 : */
2070 : static pgssEntry *
2071 103824 : entry_alloc(pgssHashKey *key, Size query_offset, int query_len, int encoding,
2072 : bool sticky)
2073 : {
2074 : pgssEntry *entry;
2075 : bool found;
2076 :
2077 : /* Make space if needed */
2078 103824 : while (hash_get_num_entries(pgss_hash) >= pgss_max)
2079 0 : entry_dealloc();
2080 :
2081 : /* Find or create an entry with desired hash code */
2082 103824 : entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found);
2083 :
2084 103824 : if (!found)
2085 : {
2086 : /* New entry, initialize it */
2087 :
2088 : /* reset the statistics */
2089 103824 : memset(&entry->counters, 0, sizeof(Counters));
2090 : /* set the appropriate initial usage count */
2091 103824 : entry->counters.usage = sticky ? pgss->cur_median_usage : USAGE_INIT;
2092 : /* re-initialize the mutex each time ... we assume no one using it */
2093 103824 : SpinLockInit(&entry->mutex);
2094 : /* ... and don't forget the query text metadata */
2095 : Assert(query_len >= 0);
2096 103824 : entry->query_offset = query_offset;
2097 103824 : entry->query_len = query_len;
2098 103824 : entry->encoding = encoding;
2099 103824 : entry->stats_since = GetCurrentTimestamp();
2100 103824 : entry->minmax_stats_since = entry->stats_since;
2101 : }
2102 :
2103 103824 : return entry;
2104 : }
2105 :
2106 : /*
2107 : * qsort comparator for sorting into increasing usage order
2108 : */
2109 : static int
2110 0 : entry_cmp(const void *lhs, const void *rhs)
2111 : {
2112 0 : double l_usage = (*(pgssEntry *const *) lhs)->counters.usage;
2113 0 : double r_usage = (*(pgssEntry *const *) rhs)->counters.usage;
2114 :
2115 0 : if (l_usage < r_usage)
2116 0 : return -1;
2117 0 : else if (l_usage > r_usage)
2118 0 : return +1;
2119 : else
2120 0 : return 0;
2121 : }
2122 :
/*
 * Deallocate least-used entries.
 *
 * Caller must hold an exclusive lock on pgss->lock.
 */
static void
entry_dealloc(void)
{
	HASH_SEQ_STATUS hash_seq;
	pgssEntry **entries;		/* scratch array of all entries, for sorting */
	pgssEntry  *entry;
	int			nvictims;
	int			i;
	Size		tottextlen;		/* total bytes of valid query texts */
	int			nvalidtexts;	/* number of entries with a valid text */

	/*
	 * Sort entries by usage and deallocate USAGE_DEALLOC_PERCENT of them.
	 * While we're scanning the table, apply the decay factor to the usage
	 * values, and update the mean query length.
	 *
	 * Note that the mean query length is almost immediately obsolete, since
	 * we compute it before not after discarding the least-used entries.
	 * Hopefully, that doesn't affect the mean too much; it doesn't seem worth
	 * making two passes to get a more current result.  Likewise, the new
	 * cur_median_usage includes the entries we're about to zap.
	 */

	entries = palloc(hash_get_num_entries(pgss_hash) * sizeof(pgssEntry *));

	i = 0;
	tottextlen = 0;
	nvalidtexts = 0;

	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		entries[i++] = entry;
		/* "Sticky" entries get a different usage decay rate. */
		if (IS_STICKY(entry->counters))
			entry->counters.usage *= STICKY_DECREASE_FACTOR;
		else
			entry->counters.usage *= USAGE_DECREASE_FACTOR;
		/* In the mean length computation, ignore dropped texts. */
		if (entry->query_len >= 0)
		{
			tottextlen += entry->query_len + 1; /* +1 for trailing null */
			nvalidtexts++;
		}
	}

	/* Sort into increasing order by usage */
	qsort(entries, i, sizeof(pgssEntry *), entry_cmp);

	/* Record the (approximate) median usage */
	if (i > 0)
		pgss->cur_median_usage = entries[i / 2]->counters.usage;
	/* Record the mean query length */
	if (nvalidtexts > 0)
		pgss->mean_query_len = tottextlen / nvalidtexts;
	else
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	/* Now zap an appropriate fraction of lowest-usage entries */
	nvictims = Max(10, i * USAGE_DEALLOC_PERCENT / 100);
	nvictims = Min(nvictims, i);

	for (i = 0; i < nvictims; i++)
	{
		hash_search(pgss_hash, &entries[i]->key, HASH_REMOVE, NULL);
	}

	pfree(entries);

	/*
	 * Increment the number of times entries are deallocated.  The mutex is
	 * required even though we hold pgss->lock exclusively, since other
	 * processes read pgss->stats while holding only the mutex (see
	 * pg_stat_statements_info).
	 */
	SpinLockAcquire(&pgss->mutex);
	pgss->stats.dealloc += 1;
	SpinLockRelease(&pgss->mutex);
}
2202 :
/*
 * Given a query string (not necessarily null-terminated), allocate a new
 * entry in the external query text file and store the string there.
 *
 * If successful, returns true, and stores the new entry's offset in the file
 * into *query_offset.  Also, if gc_count isn't NULL, *gc_count is set to the
 * number of garbage collections that have occurred so far.
 *
 * On failure, returns false.
 *
 * At least a shared lock on pgss->lock must be held by the caller, so as
 * to prevent a concurrent garbage collection.  Share-lock-holding callers
 * should pass a gc_count pointer to obtain the number of garbage collections,
 * so that they can recheck the count after obtaining exclusive lock to
 * detect whether a garbage collection occurred (and removed this entry).
 */
static bool
qtext_store(const char *query, int query_len,
			Size *query_offset, int *gc_count)
{
	Size		off;
	int			fd;

	/*
	 * We use a spinlock to protect extent/n_writers/gc_count, so that
	 * multiple processes may execute this function concurrently.
	 */
	SpinLockAcquire(&pgss->mutex);
	off = pgss->extent;
	pgss->extent += query_len + 1;	/* reserve room for string plus null */
	pgss->n_writers++;
	if (gc_count)
		*gc_count = pgss->gc_count;
	SpinLockRelease(&pgss->mutex);

	*query_offset = off;

	/*
	 * Don't allow the file to grow larger than what qtext_load_file can
	 * (theoretically) handle.  This has been seen to be reachable on 32-bit
	 * platforms.
	 */
	if (unlikely(query_len >= MaxAllocHugeSize - off))
	{
		errno = EFBIG;			/* not quite right, but it'll do */
		fd = -1;				/* so the error path knows not to close it */
		goto error;
	}

	/* Now write the data into the successfully-reserved part of the file */
	fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDWR | O_CREAT | PG_BINARY);
	if (fd < 0)
		goto error;

	if (pg_pwrite(fd, query, query_len, off) != query_len)
		goto error;
	/* the stored text is always null-terminated in the file */
	if (pg_pwrite(fd, "\0", 1, off + query_len) != 1)
		goto error;

	CloseTransientFile(fd);

	/* Mark our write complete */
	SpinLockAcquire(&pgss->mutex);
	pgss->n_writers--;
	SpinLockRelease(&pgss->mutex);

	return true;

error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_TEXT_FILE)));

	if (fd >= 0)
		CloseTransientFile(fd);

	/* Mark our write complete */
	SpinLockAcquire(&pgss->mutex);
	pgss->n_writers--;
	SpinLockRelease(&pgss->mutex);

	return false;
}
2287 :
/*
 * Read the external query text file into a malloc'd buffer.
 *
 * Returns NULL (without throwing an error) if unable to read, eg
 * file not there or insufficient memory.
 *
 * On success, the buffer size is also returned into *buffer_size.
 *
 * This can be called without any lock on pgss->lock, but in that case
 * the caller is responsible for verifying that the result is sane.
 */
static char *
qtext_load_file(Size *buffer_size)
{
	char	   *buf;
	int			fd;
	struct stat stat;
	Size		nread;

	fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDONLY | PG_BINARY);
	if (fd < 0)
	{
		/* A missing file is unsurprising; any other failure is worth a log */
		if (errno != ENOENT)
			ereport(LOG,
					(errcode_for_file_access(),
					 errmsg("could not read file \"%s\": %m",
							PGSS_TEXT_FILE)));
		return NULL;
	}

	/* Get file length */
	if (fstat(fd, &stat))
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not stat file \"%s\": %m",
						PGSS_TEXT_FILE)));
		CloseTransientFile(fd);
		return NULL;
	}

	/* Allocate buffer; beware that off_t might be wider than size_t */
	if (stat.st_size <= MaxAllocHugeSize)
		buf = (char *) malloc(stat.st_size);
	else
		buf = NULL;
	if (buf == NULL)
	{
		ereport(LOG,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Could not allocate enough memory to read file \"%s\".",
						   PGSS_TEXT_FILE)));
		CloseTransientFile(fd);
		return NULL;
	}

	/*
	 * OK, slurp in the file.  Windows fails if we try to read more than
	 * INT_MAX bytes at once, and other platforms might not like that either,
	 * so read a very large file in 1GB segments.
	 */
	nread = 0;
	while (nread < stat.st_size)
	{
		int			toread = Min(1024 * 1024 * 1024, stat.st_size - nread);

		/*
		 * If we get a short read and errno doesn't get set, the reason is
		 * probably that garbage collection truncated the file since we did
		 * the fstat(), so we don't log a complaint --- but we don't return
		 * the data, either, since it's most likely corrupt due to concurrent
		 * writes from garbage collection.
		 */
		errno = 0;
		if (read(fd, buf + nread, toread) != toread)
		{
			if (errno)
				ereport(LOG,
						(errcode_for_file_access(),
						 errmsg("could not read file \"%s\": %m",
								PGSS_TEXT_FILE)));
			free(buf);
			CloseTransientFile(fd);
			return NULL;
		}
		nread += toread;
	}

	if (CloseTransientFile(fd) != 0)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not close file \"%s\": %m", PGSS_TEXT_FILE)));

	*buffer_size = nread;
	return buf;
}
2385 :
2386 : /*
2387 : * Locate a query text in the file image previously read by qtext_load_file().
2388 : *
2389 : * We validate the given offset/length, and return NULL if bogus. Otherwise,
2390 : * the result points to a null-terminated string within the buffer.
2391 : */
2392 : static char *
2393 153674 : qtext_fetch(Size query_offset, int query_len,
2394 : char *buffer, Size buffer_size)
2395 : {
2396 : /* File read failed? */
2397 153674 : if (buffer == NULL)
2398 0 : return NULL;
2399 : /* Bogus offset/length? */
2400 153674 : if (query_len < 0 ||
2401 153674 : query_offset + query_len >= buffer_size)
2402 0 : return NULL;
2403 : /* As a further sanity check, make sure there's a trailing null */
2404 153674 : if (buffer[query_offset + query_len] != '\0')
2405 0 : return NULL;
2406 : /* Looks OK */
2407 153674 : return buffer + query_offset;
2408 : }
2409 :
/*
 * Do we need to garbage-collect the external query text file?
 *
 * Caller should hold at least a shared lock on pgss->lock.
 */
static bool
need_gc_qtexts(void)
{
	Size		extent;

	/* Read shared extent pointer (mutex guards it against writers) */
	SpinLockAcquire(&pgss->mutex);
	extent = pgss->extent;
	SpinLockRelease(&pgss->mutex);

	/*
	 * Don't proceed if file does not exceed 512 bytes per possible entry.
	 *
	 * Here and in the next test, 32-bit machines have overflow hazards if
	 * pgss_max and/or mean_query_len are large.  Force the multiplications
	 * and comparisons to be done in uint64 arithmetic to forestall trouble.
	 */
	if ((uint64) extent < (uint64) 512 * pgss_max)
		return false;

	/*
	 * Don't proceed if file is less than about 50% bloat.  Nothing can or
	 * should be done in the event of unusually large query texts accounting
	 * for file's large size.  We go to the trouble of maintaining the mean
	 * query length in order to prevent garbage collection from thrashing
	 * uselessly.
	 */
	if ((uint64) extent < (uint64) pgss->mean_query_len * pgss_max * 2)
		return false;

	return true;
}
2447 :
/*
 * Garbage-collect orphaned query texts in external file.
 *
 * This won't be called often in the typical case, since it's likely that
 * there won't be too much churn, and besides, a similar compaction process
 * occurs when serializing to disk at shutdown or as part of resetting.
 * Despite this, it seems prudent to plan for the edge case where the file
 * becomes unreasonably large, with no other method of compaction likely to
 * occur in the foreseeable future.
 *
 * The caller must hold an exclusive lock on pgss->lock.
 *
 * At the first sign of trouble we unlink the query text file to get a clean
 * slate (although existing statistics are retained), rather than risk
 * thrashing by allowing the same problem case to recur indefinitely.
 */
static void
gc_qtexts(void)
{
	char	   *qbuffer;
	Size		qbuffer_size;
	FILE	   *qfile = NULL;
	HASH_SEQ_STATUS hash_seq;
	pgssEntry  *entry;
	Size		extent;			/* write position while compacting */
	int			nentries;		/* entries whose text survived compaction */

	/*
	 * When called from pgss_store, some other session might have proceeded
	 * with garbage collection in the no-lock-held interim of lock strength
	 * escalation.  Check once more that this is actually necessary.
	 */
	if (!need_gc_qtexts())
		return;

	/*
	 * Load the old texts file.  If we fail (out of memory, for instance),
	 * invalidate query texts.  Hopefully this is rare.  It might seem better
	 * to leave things alone on an OOM failure, but the problem is that the
	 * file is only going to get bigger; hoping for a future non-OOM result is
	 * risky and can easily lead to complete denial of service.
	 */
	qbuffer = qtext_load_file(&qbuffer_size);
	if (qbuffer == NULL)
		goto gc_fail;

	/*
	 * We overwrite the query texts file in place, so as to reduce the risk of
	 * an out-of-disk-space failure.  Since the file is guaranteed not to get
	 * larger, this should always work on traditional filesystems; though we
	 * could still lose on copy-on-write filesystems.
	 */
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not write file \"%s\": %m",
						PGSS_TEXT_FILE)));
		goto gc_fail;
	}

	extent = 0;
	nentries = 0;

	/* Copy each hashtable entry's text to the front of the file, compacting */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		int			query_len = entry->query_len;
		char	   *qry = qtext_fetch(entry->query_offset,
									  query_len,
									  qbuffer,
									  qbuffer_size);

		if (qry == NULL)
		{
			/* Trouble ... drop the text */
			entry->query_offset = 0;
			entry->query_len = -1;
			/* entry will not be counted in mean query length computation */
			continue;
		}

		if (fwrite(qry, 1, query_len + 1, qfile) != query_len + 1)
		{
			ereport(LOG,
					(errcode_for_file_access(),
					 errmsg("could not write file \"%s\": %m",
							PGSS_TEXT_FILE)));
			/* must end the in-progress scan before bailing out */
			hash_seq_term(&hash_seq);
			goto gc_fail;
		}

		entry->query_offset = extent;
		extent += query_len + 1;
		nentries++;
	}

	/*
	 * Truncate away any now-unused space.  If this fails for some odd reason,
	 * we log it, but there's no need to fail.
	 */
	if (ftruncate(fileno(qfile), extent) != 0)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not truncate file \"%s\": %m",
						PGSS_TEXT_FILE)));

	if (FreeFile(qfile))
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not write file \"%s\": %m",
						PGSS_TEXT_FILE)));
		qfile = NULL;			/* already closed; don't close again below */
		goto gc_fail;
	}

	elog(DEBUG1, "pgss gc of queries file shrunk size from %zu to %zu",
		 pgss->extent, extent);

	/* Reset the shared extent pointer */
	pgss->extent = extent;

	/*
	 * Also update the mean query length, to be sure that need_gc_qtexts()
	 * won't still think we have a problem.
	 */
	if (nentries > 0)
		pgss->mean_query_len = extent / nentries;
	else
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	free(qbuffer);

	/*
	 * OK, count a garbage collection cycle.  (Note: even though we have
	 * exclusive lock on pgss->lock, we must take pgss->mutex for this, since
	 * other processes may examine gc_count while holding only the mutex.
	 * Also, we have to advance the count *after* we've rewritten the file,
	 * else other processes might not realize they read a stale file.)
	 */
	record_gc_qtexts();

	return;

gc_fail:
	/* clean up resources */
	if (qfile)
		FreeFile(qfile);
	free(qbuffer);

	/*
	 * Since the contents of the external file are now uncertain, mark all
	 * hashtable entries as having invalid texts.
	 */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		entry->query_offset = 0;
		entry->query_len = -1;
	}

	/*
	 * Destroy the query text file and create a new, empty one
	 */
	(void) unlink(PGSS_TEXT_FILE);
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not recreate file \"%s\": %m",
						PGSS_TEXT_FILE)));
	else
		FreeFile(qfile);

	/* Reset the shared extent pointer */
	pgss->extent = 0;

	/* Reset mean_query_len to match the new state */
	pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	/*
	 * Bump the GC count even though we failed.
	 *
	 * This is needed to make concurrent readers of file without any lock on
	 * pgss->lock notice existence of new version of file.  Once readers
	 * subsequently observe a change in GC count with pgss->lock held, that
	 * forces a safe reopen of file.  Writers also require that we bump here,
	 * of course.  (As required by locking protocol, readers and writers don't
	 * trust earlier file contents until gc_count is found unchanged after
	 * pgss->lock acquired in shared or exclusive mode respectively.)
	 */
	record_gc_qtexts();
}
2643 :
/*
 * Reset one hashtable entry, or (with minmax_only) just its min/max stats.
 *
 * Relies on variables in the caller's scope: "minmax_only" selects the
 * partial reset, "stats_reset" supplies the reset timestamp, and
 * "num_remove" counts fully-removed entries.  Caller must hold an
 * exclusive lock on pgss->lock.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement, making it safe in unbraced if/else contexts (CERT PRE10-C);
 * the argument is parenthesized against surprising expansions.
 */
#define SINGLE_ENTRY_RESET(e) \
	do { \
		if (e) \
		{ \
			if (minmax_only) \
			{ \
				/* When requested reset only min/max statistics of an entry */ \
				for (int kind = 0; kind < PGSS_NUMKIND; kind++) \
				{ \
					(e)->counters.max_time[kind] = 0; \
					(e)->counters.min_time[kind] = 0; \
				} \
				(e)->minmax_stats_since = stats_reset; \
			} \
			else \
			{ \
				/* Remove the key otherwise */ \
				hash_search(pgss_hash, &(e)->key, HASH_REMOVE, NULL); \
				num_remove++; \
			} \
		} \
	} while (0)
2662 :
2663 : /*
2664 : * Reset entries corresponding to parameters passed.
2665 : */
2666 : static TimestampTz
2667 160 : entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)
2668 : {
2669 : HASH_SEQ_STATUS hash_seq;
2670 : pgssEntry *entry;
2671 : FILE *qfile;
2672 : long num_entries;
2673 160 : long num_remove = 0;
2674 : pgssHashKey key;
2675 : TimestampTz stats_reset;
2676 :
2677 160 : if (!pgss || !pgss_hash)
2678 0 : ereport(ERROR,
2679 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2680 : errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
2681 :
2682 160 : LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
2683 160 : num_entries = hash_get_num_entries(pgss_hash);
2684 :
2685 160 : stats_reset = GetCurrentTimestamp();
2686 :
2687 160 : if (userid != 0 && dbid != 0 && queryid != UINT64CONST(0))
2688 : {
2689 : /* If all the parameters are available, use the fast path. */
2690 2 : memset(&key, 0, sizeof(pgssHashKey));
2691 2 : key.userid = userid;
2692 2 : key.dbid = dbid;
2693 2 : key.queryid = queryid;
2694 :
2695 : /*
2696 : * Reset the entry if it exists, starting with the non-top-level
2697 : * entry.
2698 : */
2699 2 : key.toplevel = false;
2700 2 : entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);
2701 :
2702 2 : SINGLE_ENTRY_RESET(entry);
2703 :
2704 : /* Also reset the top-level entry if it exists. */
2705 2 : key.toplevel = true;
2706 2 : entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);
2707 :
2708 2 : SINGLE_ENTRY_RESET(entry);
2709 : }
2710 158 : else if (userid != 0 || dbid != 0 || queryid != UINT64CONST(0))
2711 : {
2712 : /* Reset entries corresponding to valid parameters. */
2713 8 : hash_seq_init(&hash_seq, pgss_hash);
2714 102 : while ((entry = hash_seq_search(&hash_seq)) != NULL)
2715 : {
2716 94 : if ((!userid || entry->key.userid == userid) &&
2717 72 : (!dbid || entry->key.dbid == dbid) &&
2718 68 : (!queryid || entry->key.queryid == queryid))
2719 : {
2720 14 : SINGLE_ENTRY_RESET(entry);
2721 : }
2722 : }
2723 : }
2724 : else
2725 : {
2726 : /* Reset all entries. */
2727 150 : hash_seq_init(&hash_seq, pgss_hash);
2728 1490 : while ((entry = hash_seq_search(&hash_seq)) != NULL)
2729 : {
2730 1384 : SINGLE_ENTRY_RESET(entry);
2731 : }
2732 : }
2733 :
2734 : /* All entries are removed? */
2735 160 : if (num_entries != num_remove)
2736 12 : goto release_lock;
2737 :
2738 : /*
2739 : * Reset global statistics for pg_stat_statements since all entries are
2740 : * removed.
2741 : */
2742 148 : SpinLockAcquire(&pgss->mutex);
2743 148 : pgss->stats.dealloc = 0;
2744 148 : pgss->stats.stats_reset = stats_reset;
2745 148 : SpinLockRelease(&pgss->mutex);
2746 :
2747 : /*
2748 : * Write new empty query file, perhaps even creating a new one to recover
2749 : * if the file was missing.
2750 : */
2751 148 : qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
2752 148 : if (qfile == NULL)
2753 : {
2754 0 : ereport(LOG,
2755 : (errcode_for_file_access(),
2756 : errmsg("could not create file \"%s\": %m",
2757 : PGSS_TEXT_FILE)));
2758 0 : goto done;
2759 : }
2760 :
2761 : /* If ftruncate fails, log it, but it's not a fatal problem */
2762 148 : if (ftruncate(fileno(qfile), 0) != 0)
2763 0 : ereport(LOG,
2764 : (errcode_for_file_access(),
2765 : errmsg("could not truncate file \"%s\": %m",
2766 : PGSS_TEXT_FILE)));
2767 :
2768 148 : FreeFile(qfile);
2769 :
2770 148 : done:
2771 148 : pgss->extent = 0;
2772 : /* This counts as a query text garbage collection for our purposes */
2773 148 : record_gc_qtexts();
2774 :
2775 160 : release_lock:
2776 160 : LWLockRelease(pgss->lock);
2777 :
2778 160 : return stats_reset;
2779 : }
2780 :
2781 : /*
2782 : * Generate a normalized version of the query string that will be used to
2783 : * represent all similar queries.
2784 : *
2785 : * Note that the normalized representation may well vary depending on
2786 : * just which "equivalent" query is used to create the hashtable entry.
2787 : * We assume this is OK.
2788 : *
2789 : * If query_loc > 0, then "query" has been advanced by that much compared to
2790 : * the original string start, so we need to translate the provided locations
2791 : * to compensate. (This lets us avoid re-scanning statements before the one
2792 : * of interest, so it's worth doing.)
2793 : *
2794 : * *query_len_p contains the input string length, and is updated with
2795 : * the result string length on exit. The resulting string might be longer
2796 : * or shorter depending on what happens with replacement of constants.
2797 : *
2798 : * Returns a palloc'd string.
2799 : */
2800 : static char *
2801 20484 : generate_normalized_query(JumbleState *jstate, const char *query,
2802 : int query_loc, int *query_len_p)
2803 : {
2804 : char *norm_query;
2805 20484 : int query_len = *query_len_p;
2806 : int i,
2807 : norm_query_buflen, /* Space allowed for norm_query */
2808 : len_to_wrt, /* Length (in bytes) to write */
2809 20484 : quer_loc = 0, /* Source query byte location */
2810 20484 : n_quer_loc = 0, /* Normalized query byte location */
2811 20484 : last_off = 0, /* Offset from start for previous tok */
2812 20484 : last_tok_len = 0; /* Length (in bytes) of that tok */
2813 :
2814 : /*
2815 : * Get constants' lengths (core system only gives us locations). Note
2816 : * this also ensures the items are sorted by location.
2817 : */
2818 20484 : fill_in_constant_lengths(jstate, query, query_loc);
2819 :
2820 : /*
2821 : * Allow for $n symbols to be longer than the constants they replace.
2822 : * Constants must take at least one byte in text form, while a $n symbol
2823 : * certainly isn't more than 11 bytes, even if n reaches INT_MAX. We
2824 : * could refine that limit based on the max value of n for the current
2825 : * query, but it hardly seems worth any extra effort to do so.
2826 : */
2827 20484 : norm_query_buflen = query_len + jstate->clocations_count * 10;
2828 :
2829 : /* Allocate result buffer */
2830 20484 : norm_query = palloc(norm_query_buflen + 1);
2831 :
2832 84186 : for (i = 0; i < jstate->clocations_count; i++)
2833 : {
2834 : int off, /* Offset from start for cur tok */
2835 : tok_len; /* Length (in bytes) of that tok */
2836 :
2837 63702 : off = jstate->clocations[i].location;
2838 : /* Adjust recorded location if we're dealing with partial string */
2839 63702 : off -= query_loc;
2840 :
2841 63702 : tok_len = jstate->clocations[i].length;
2842 :
2843 63702 : if (tok_len < 0)
2844 466 : continue; /* ignore any duplicates */
2845 :
2846 : /* Copy next chunk (what precedes the next constant) */
2847 63236 : len_to_wrt = off - last_off;
2848 63236 : len_to_wrt -= last_tok_len;
2849 :
2850 : Assert(len_to_wrt >= 0);
2851 63236 : memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
2852 63236 : n_quer_loc += len_to_wrt;
2853 :
2854 : /* And insert a param symbol in place of the constant token */
2855 126472 : n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d",
2856 63236 : i + 1 + jstate->highest_extern_param_id);
2857 :
2858 63236 : quer_loc = off + tok_len;
2859 63236 : last_off = off;
2860 63236 : last_tok_len = tok_len;
2861 : }
2862 :
2863 : /*
2864 : * We've copied up until the last ignorable constant. Copy over the
2865 : * remaining bytes of the original query string.
2866 : */
2867 20484 : len_to_wrt = query_len - quer_loc;
2868 :
2869 : Assert(len_to_wrt >= 0);
2870 20484 : memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
2871 20484 : n_quer_loc += len_to_wrt;
2872 :
2873 : Assert(n_quer_loc <= norm_query_buflen);
2874 20484 : norm_query[n_quer_loc] = '\0';
2875 :
2876 20484 : *query_len_p = n_quer_loc;
2877 20484 : return norm_query;
2878 : }
2879 :
2880 : /*
2881 : * Given a valid SQL string and an array of constant-location records,
2882 : * fill in the textual lengths of those constants.
2883 : *
2884 : * The constants may use any allowed constant syntax, such as float literals,
2885 : * bit-strings, single-quoted strings and dollar-quoted strings. This is
2886 : * accomplished by using the public API for the core scanner.
2887 : *
2888 : * It is the caller's job to ensure that the string is a valid SQL statement
2889 : * with constants at the indicated locations. Since in practice the string
2890 : * has already been parsed, and the locations that the caller provides will
2891 : * have originated from within the authoritative parser, this should not be
2892 : * a problem.
2893 : *
2894 : * Duplicate constant pointers are possible, and will have their lengths
2895 : * marked as '-1', so that they are later ignored. (Actually, we assume the
2896 : * lengths were initialized as -1 to start with, and don't change them here.)
2897 : *
2898 : * If query_loc > 0, then "query" has been advanced by that much compared to
2899 : * the original string start, so we need to translate the provided locations
2900 : * to compensate. (This lets us avoid re-scanning statements before the one
2901 : * of interest, so it's worth doing.)
2902 : *
2903 : * N.B. There is an assumption that a '-' character at a Const location begins
2904 : * a negative numeric constant. This precludes there ever being another
2905 : * reason for a constant to start with a '-'.
2906 : */
static void
fill_in_constant_lengths(JumbleState *jstate, const char *query,
						 int query_loc)
{
	LocationLen *locs;
	core_yyscan_t yyscanner;
	core_yy_extra_type yyextra;
	core_YYSTYPE yylval;
	YYLTYPE		yylloc;
	int			last_loc = -1;	/* location of previous constant processed */
	int			i;

	/*
	 * Sort the records by location so that we can process them in order while
	 * scanning the query text.
	 */
	if (jstate->clocations_count > 1)
		qsort(jstate->clocations, jstate->clocations_count,
			  sizeof(LocationLen), comp_location);
	locs = jstate->clocations;

	/* initialize the flex scanner --- should match raw_parser() */
	yyscanner = scanner_init(query,
							 &yyextra,
							 &ScanKeywords,
							 ScanKeywordTokens);

	/* we don't want to re-emit any escape string warnings */
	yyextra.escape_string_warning = false;

	/* Search for each constant, in sequence */
	for (i = 0; i < jstate->clocations_count; i++)
	{
		int			loc = locs[i].location;
		int			tok;

		/* Adjust recorded location if we're dealing with partial string */
		loc -= query_loc;

		Assert(loc >= 0);

		/*
		 * Sorted order means a location <= the previous one is the same
		 * constant seen again; its length stays -1 so callers skip it.
		 */
		if (loc <= last_loc)
			continue;			/* Duplicate constant, ignore */

		/*
		 * Lex tokens until we find the desired constant.  The scanner keeps
		 * its position across iterations of the outer loop, so each constant
		 * is reached by continuing from the previous one.
		 */
		for (;;)
		{
			tok = core_yylex(&yylval, &yylloc, yyscanner);

			/* We should not hit end-of-string, but if we do, behave sanely */
			if (tok == 0)
				break;			/* out of inner for-loop */

			/*
			 * We should find the token position exactly, but if we somehow
			 * run past it, work with that.
			 */
			if (yylloc >= loc)
			{
				if (query[loc] == '-')
				{
					/*
					 * It's a negative value - this is the one and only case
					 * where we replace more than a single token.
					 *
					 * Do not compensate for the core system's special-case
					 * adjustment of location to that of the leading '-'
					 * operator in the event of a negative constant.  It is
					 * also useful for our purposes to start from the minus
					 * symbol.  In this way, queries like "select * from foo
					 * where bar = 1" and "select * from foo where bar = -2"
					 * will have identical normalized query strings.
					 */
					tok = core_yylex(&yylval, &yylloc, yyscanner);
					if (tok == 0)
						break;	/* out of inner for-loop */
				}

				/*
				 * We now rely on the assumption that flex has placed a zero
				 * byte after the text of the current token in scanbuf.
				 */
				locs[i].length = strlen(yyextra.scanbuf + loc);
				break;			/* out of inner for-loop */
			}
		}

		/* If we hit end-of-string, give up, leaving remaining lengths -1 */
		if (tok == 0)
			break;

		last_loc = loc;
	}

	scanner_finish(yyscanner);
}
3003 :
3004 : /*
3005 : * comp_location: comparator for qsorting LocationLen structs by location
3006 : */
3007 : static int
3008 74696 : comp_location(const void *a, const void *b)
3009 : {
3010 74696 : int l = ((const LocationLen *) a)->location;
3011 74696 : int r = ((const LocationLen *) b)->location;
3012 :
3013 74696 : return pg_cmp_s32(l, r);
3014 : }
|