Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * pg_stat_statements.c
4 : * Track statement planning and execution times as well as resource
5 : * usage across a whole database cluster.
6 : *
7 : * Execution costs are totaled for each distinct source query, and kept in
8 : * a shared hashtable. (We track only as many distinct queries as will fit
9 : * in the designated amount of shared memory.)
10 : *
11 : * Starting in Postgres 9.2, this module normalized query entries. As of
12 : * Postgres 14, the normalization is done by the core if compute_query_id is
13 : * enabled, or optionally by third-party modules.
14 : *
15 : * To facilitate presenting entries to users, we create "representative" query
16 : * strings in which constants are replaced with parameter symbols ($n), to
17 : * make it clearer what a normalized entry can represent. To save on shared
18 : * memory, and to avoid having to truncate oversized query strings, we store
19 : * these strings in a temporary external query-texts file. Offsets into this
20 : * file are kept in shared memory.
21 : *
22 : * Note about locking issues: to create or delete an entry in the shared
23 : * hashtable, one must hold pgss->lock exclusively. Modifying any field
24 : * in an entry except the counters requires the same. To look up an entry,
25 : * one must hold the lock shared. To read or update the counters within
26 : * an entry, one must hold the lock shared or exclusive (so the entry doesn't
27 : * disappear!) and also take the entry's mutex spinlock.
28 : * The shared state variable pgss->extent (the next free spot in the external
29 : * query-text file) should be accessed only while holding either the
30 : * pgss->mutex spinlock, or exclusive lock on pgss->lock. We use the mutex to
31 : * allow reserving file space while holding only shared lock on pgss->lock.
32 : * Rewriting the entire external query-text file, eg for garbage collection,
33 : * requires holding pgss->lock exclusively; this allows individual entries
34 : * in the file to be read or written while holding only shared lock.
35 : *
36 : *
37 : * Copyright (c) 2008-2025, PostgreSQL Global Development Group
38 : *
39 : * IDENTIFICATION
40 : * contrib/pg_stat_statements/pg_stat_statements.c
41 : *
42 : *-------------------------------------------------------------------------
43 : */
44 : #include "postgres.h"
45 :
46 : #include <math.h>
47 : #include <sys/stat.h>
48 : #include <unistd.h>
49 :
50 : #include "access/parallel.h"
51 : #include "catalog/pg_authid.h"
52 : #include "common/int.h"
53 : #include "executor/instrument.h"
54 : #include "funcapi.h"
55 : #include "jit/jit.h"
56 : #include "mb/pg_wchar.h"
57 : #include "miscadmin.h"
58 : #include "nodes/queryjumble.h"
59 : #include "optimizer/planner.h"
60 : #include "parser/analyze.h"
61 : #include "parser/scanner.h"
62 : #include "pgstat.h"
63 : #include "storage/fd.h"
64 : #include "storage/ipc.h"
65 : #include "storage/lwlock.h"
66 : #include "storage/shmem.h"
67 : #include "storage/spin.h"
68 : #include "tcop/utility.h"
69 : #include "utils/acl.h"
70 : #include "utils/builtins.h"
71 : #include "utils/memutils.h"
72 : #include "utils/timestamp.h"
73 :
74 16 : PG_MODULE_MAGIC_EXT(
75 : .name = "pg_stat_statements",
76 : .version = PG_VERSION
77 : );
78 :
/* Location of permanent stats file (valid when database is shut down) */
#define PGSS_DUMP_FILE	PGSTAT_STAT_PERMANENT_DIRECTORY "/pg_stat_statements.stat"

/*
 * Location of external query text file.  This file only exists while the
 * server is running: a stale copy is unlinked in pgss_shmem_startup() and
 * the file is removed again at clean shutdown in pgss_shmem_shutdown().
 */
#define PGSS_TEXT_FILE	PG_STAT_TMP_DIR "/pgss_query_texts.stat"

/* Magic number identifying the stats file format */
static const uint32 PGSS_FILE_HEADER = 0x20220408;

/* PostgreSQL major version number, changes in which invalidate all entries */
static const uint32 PGSS_PG_MAJOR_VERSION = PG_VERSION_NUM / 100;
92 :
/*
 * Tuning constants for the entry "usage" aging scheme that decides which
 * hashtable entries get evicted first in entry_dealloc().
 */
/* XXX: Should USAGE_EXEC reflect execution time and/or buffer usage? */
#define USAGE_EXEC(duration)	(1.0)
#define USAGE_INIT				(1.0)	/* including initial planning */
#define ASSUMED_MEDIAN_INIT		(10.0)	/* initial assumed median usage */
#define ASSUMED_LENGTH_INIT		1024	/* initial assumed mean query length */
#define USAGE_DECREASE_FACTOR	(0.99)	/* decreased every entry_dealloc */
#define STICKY_DECREASE_FACTOR	(0.50)	/* factor for sticky entries */
#define USAGE_DEALLOC_PERCENT	5		/* free this % of entries at once */

/* A "sticky" entry is one that has never been planned nor executed */
#define IS_STICKY(c)	((c.calls[PGSS_PLAN] + c.calls[PGSS_EXEC]) == 0)
102 :
103 : /*
104 : * Extension version number, for supporting older extension versions' objects
105 : */
/*
 * Extension version number, for supporting older extension versions' objects.
 * Passed as api_version to pg_stat_statements_internal() to select
 * version-specific behavior.
 */
typedef enum pgssVersion
{
	PGSS_V1_0 = 0,
	PGSS_V1_1,
	PGSS_V1_2,
	PGSS_V1_3,
	PGSS_V1_8,
	PGSS_V1_9,
	PGSS_V1_10,
	PGSS_V1_11,
	PGSS_V1_12,
} pgssVersion;
118 :
/*
 * Kind of statistics being stored by pgss_store(): planning counters,
 * execution counters, or neither (PGSS_INVALID, used when creating an
 * early entry just to record a normalized query text).
 */
typedef enum pgssStoreKind
{
	PGSS_INVALID = -1,

	/*
	 * PGSS_PLAN and PGSS_EXEC must be respectively 0 and 1 as they're used to
	 * reference the underlying values in the arrays in the Counters struct,
	 * and this order is required in pg_stat_statements_internal().
	 */
	PGSS_PLAN = 0,
	PGSS_EXEC,
} pgssStoreKind;

/* Number of array slots needed for per-kind counters (PGSS_INVALID excluded) */
#define PGSS_NUMKIND (PGSS_EXEC + 1)
133 :
134 : /*
135 : * Hashtable key that defines the identity of a hashtable entry. We separate
136 : * queries by user and by database even if they are otherwise identical.
137 : *
138 : * If you add a new key to this struct, make sure to teach pgss_store() to
139 : * zero the padding bytes. Otherwise, things will break, because pgss_hash is
140 : * created using HASH_BLOBS, and thus tag_hash is used to hash this.
141 :
142 : */
typedef struct pgssHashKey
{
	Oid			userid;			/* user OID */
	Oid			dbid;			/* database OID */
	uint64		queryid;		/* query identifier */
	bool		toplevel;		/* query executed at top level */
	/* NB: trailing padding must be zeroed by pgss_store(); the key is hashed
	 * as a byte blob (HASH_BLOBS / tag_hash) -- see comment above. */
} pgssHashKey;
150 :
/*
 * The actual stats counters kept within pgssEntry.
 *
 * Per the locking notes at the head of this file, these counters are read
 * and updated while holding pgss->lock (shared or exclusive) plus the
 * owning entry's mutex spinlock.  Note that this struct is written to and
 * read from the dump file verbatim (see pgss_shmem_startup/shutdown), so
 * field order and layout matter.
 */
typedef struct Counters
{
	int64		calls[PGSS_NUMKIND];	/* # of times planned/executed */
	double		total_time[PGSS_NUMKIND];	/* total planning/execution time,
											 * in msec */
	double		min_time[PGSS_NUMKIND]; /* minimum planning/execution time in
										 * msec since min/max reset */
	double		max_time[PGSS_NUMKIND]; /* maximum planning/execution time in
										 * msec since min/max reset */
	double		mean_time[PGSS_NUMKIND];	/* mean planning/execution time in
											 * msec */
	double		sum_var_time[PGSS_NUMKIND]; /* sum of variances in
											 * planning/execution time in msec */
	int64		rows;			/* total # of retrieved or affected rows */
	int64		shared_blks_hit;	/* # of shared buffer hits */
	int64		shared_blks_read;	/* # of shared disk blocks read */
	int64		shared_blks_dirtied;	/* # of shared disk blocks dirtied */
	int64		shared_blks_written;	/* # of shared disk blocks written */
	int64		local_blks_hit; /* # of local buffer hits */
	int64		local_blks_read;	/* # of local disk blocks read */
	int64		local_blks_dirtied; /* # of local disk blocks dirtied */
	int64		local_blks_written; /* # of local disk blocks written */
	int64		temp_blks_read; /* # of temp blocks read */
	int64		temp_blks_written;	/* # of temp blocks written */
	double		shared_blk_read_time;	/* time spent reading shared blocks,
										 * in msec */
	double		shared_blk_write_time;	/* time spent writing shared blocks,
										 * in msec */
	double		local_blk_read_time;	/* time spent reading local blocks, in
										 * msec */
	double		local_blk_write_time;	/* time spent writing local blocks, in
										 * msec */
	double		temp_blk_read_time; /* time spent reading temp blocks, in msec */
	double		temp_blk_write_time;	/* time spent writing temp blocks, in
										 * msec */
	double		usage;			/* usage factor */
	int64		wal_records;	/* # of WAL records generated */
	int64		wal_fpi;		/* # of WAL full page images generated */
	uint64		wal_bytes;		/* total amount of WAL generated in bytes */
	int64		wal_buffers_full;	/* # of times the WAL buffers became full */
	int64		jit_functions;	/* total number of JIT functions emitted */
	double		jit_generation_time;	/* total time to generate jit code */
	int64		jit_inlining_count; /* number of times inlining time has been
									 * > 0 */
	double		jit_deform_time;	/* total time to deform tuples in jit code */
	int64		jit_deform_count;	/* number of times deform time has been >
									 * 0 */

	double		jit_inlining_time;	/* total time to inline jit code */
	int64		jit_optimization_count; /* number of times optimization time
										 * has been > 0 */
	double		jit_optimization_time;	/* total time to optimize jit code */
	int64		jit_emission_count; /* number of times emission time has been
									 * > 0 */
	double		jit_emission_time;	/* total time to emit jit code */
	int64		parallel_workers_to_launch; /* # of parallel workers planned
											 * to be launched */
	int64		parallel_workers_launched;	/* # of parallel workers actually
											 * launched */
} Counters;
214 :
/*
 * Global statistics for pg_stat_statements.  Persisted at the tail of the
 * dump file (after all entries); see pgss_shmem_shutdown().
 */
typedef struct pgssGlobalStats
{
	int64		dealloc;		/* # of times entries were deallocated */
	TimestampTz stats_reset;	/* timestamp with all stats reset */
} pgssGlobalStats;
223 :
/*
 * Statistics per statement
 *
 * Note: in event of a failure in garbage collection of the query text file,
 * we reset query_offset to zero and query_len to -1.  This will be seen as
 * an invalid state by qtext_fetch().
 */
typedef struct pgssEntry
{
	pgssHashKey key;			/* hash key of entry - MUST BE FIRST */
	Counters	counters;		/* the statistics for this query */
	Size		query_offset;	/* query text offset in external file */
	int			query_len;		/* # of valid bytes in query string, or -1 */
	int			encoding;		/* query text encoding */
	TimestampTz stats_since;	/* timestamp of entry allocation */
	TimestampTz minmax_stats_since; /* timestamp of last min/max values reset */
	slock_t		mutex;			/* protects the counters only; see locking
								 * notes at head of file */
} pgssEntry;
242 :
/*
 * Global shared state.  Lives in the shared memory segment created in
 * pgss_shmem_startup(); all backends attach to the same instance.
 */
typedef struct pgssSharedState
{
	LWLock	   *lock;			/* protects hashtable search/modification */
	double		cur_median_usage;	/* current median usage in hashtable */
	Size		mean_query_len; /* current mean entry text length */
	slock_t		mutex;			/* protects following fields only: */
	Size		extent;			/* current extent of query file */
	int			n_writers;		/* number of active writers to query file */
	int			gc_count;		/* query file garbage collection cycle count */
	pgssGlobalStats stats;		/* global statistics for pgss */
} pgssSharedState;
257 :
/*---- Local variables ----*/

/* Current nesting depth of planner/ExecutorRun/ProcessUtility calls */
static int	nesting_level = 0;

/*
 * Saved hook values, captured in _PG_init() when our own hooks are
 * installed, so that each of our hooks can chain to its predecessor.
 */
static shmem_request_hook_type prev_shmem_request_hook = NULL;
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
static post_parse_analyze_hook_type prev_post_parse_analyze_hook = NULL;
static planner_hook_type prev_planner_hook = NULL;
static ExecutorStart_hook_type prev_ExecutorStart = NULL;
static ExecutorRun_hook_type prev_ExecutorRun = NULL;
static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
static ProcessUtility_hook_type prev_ProcessUtility = NULL;

/* Links to shared memory state; NULL until pgss_shmem_startup() has run */
static pgssSharedState *pgss = NULL;
static HTAB *pgss_hash = NULL;
277 :
278 : /*---- GUC variables ----*/
279 :
/* Possible values of the pg_stat_statements.track GUC */
typedef enum
{
	PGSS_TRACK_NONE,			/* track no statements */
	PGSS_TRACK_TOP,				/* only top level statements */
	PGSS_TRACK_ALL,				/* all statements, including nested ones */
} PGSSTrackLevel;

/* Mapping of GUC string values to PGSSTrackLevel, for DefineCustomEnumVariable */
static const struct config_enum_entry track_options[] =
{
	{"none", PGSS_TRACK_NONE, false},
	{"top", PGSS_TRACK_TOP, false},
	{"all", PGSS_TRACK_ALL, false},
	{NULL, 0, false}
};
294 :
/* GUC-backed settings; registered in _PG_init() */
static int	pgss_max = 5000;	/* max # statements to track */
static int	pgss_track = PGSS_TRACK_TOP;	/* tracking level */
static bool pgss_track_utility = true;	/* whether to track utility commands */
static bool pgss_track_planning = false;	/* whether to track planning
											 * duration */
static bool pgss_save = true;	/* whether to save stats across shutdown */
301 :
/*
 * Should statements at the given nesting level be tracked?  Parallel
 * workers are always excluded; otherwise this depends on the
 * pg_stat_statements.track setting.
 */
#define pgss_enabled(level) \
	(!IsParallelWorker() && \
	 (pgss_track == PGSS_TRACK_ALL || \
	  (pgss_track == PGSS_TRACK_TOP && (level) == 0)))

/*
 * Advance the query-text-file garbage collection cycle counter under the
 * spinlock.  gc_count is also exposed via qtext_store()'s output parameter;
 * presumably so callers can detect a concurrent GC cycle -- verify at the
 * call sites of qtext_store().
 */
#define record_gc_qtexts() \
	do { \
		SpinLockAcquire(&pgss->mutex); \
		pgss->gc_count++; \
		SpinLockRelease(&pgss->mutex); \
	} while(0)
313 :
314 : /*---- Function declarations ----*/
315 :
316 14 : PG_FUNCTION_INFO_V1(pg_stat_statements_reset);
317 14 : PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_7);
318 40 : PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_11);
319 0 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_2);
320 14 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_3);
321 14 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_8);
322 14 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_9);
323 14 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_10);
324 14 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_11);
325 48 : PG_FUNCTION_INFO_V1(pg_stat_statements_1_12);
326 0 : PG_FUNCTION_INFO_V1(pg_stat_statements);
327 16 : PG_FUNCTION_INFO_V1(pg_stat_statements_info);
328 :
329 : static void pgss_shmem_request(void);
330 : static void pgss_shmem_startup(void);
331 : static void pgss_shmem_shutdown(int code, Datum arg);
332 : static void pgss_post_parse_analyze(ParseState *pstate, Query *query,
333 : JumbleState *jstate);
334 : static PlannedStmt *pgss_planner(Query *parse,
335 : const char *query_string,
336 : int cursorOptions,
337 : ParamListInfo boundParams);
338 : static bool pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
339 : static void pgss_ExecutorRun(QueryDesc *queryDesc,
340 : ScanDirection direction,
341 : uint64 count);
342 : static void pgss_ExecutorFinish(QueryDesc *queryDesc);
343 : static void pgss_ExecutorEnd(QueryDesc *queryDesc);
344 : static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
345 : bool readOnlyTree,
346 : ProcessUtilityContext context, ParamListInfo params,
347 : QueryEnvironment *queryEnv,
348 : DestReceiver *dest, QueryCompletion *qc);
349 : static void pgss_store(const char *query, uint64 queryId,
350 : int query_location, int query_len,
351 : pgssStoreKind kind,
352 : double total_time, uint64 rows,
353 : const BufferUsage *bufusage,
354 : const WalUsage *walusage,
355 : const struct JitInstrumentation *jitusage,
356 : JumbleState *jstate,
357 : int parallel_workers_to_launch,
358 : int parallel_workers_launched);
359 : static void pg_stat_statements_internal(FunctionCallInfo fcinfo,
360 : pgssVersion api_version,
361 : bool showtext);
362 : static Size pgss_memsize(void);
363 : static pgssEntry *entry_alloc(pgssHashKey *key, Size query_offset, int query_len,
364 : int encoding, bool sticky);
365 : static void entry_dealloc(void);
366 : static bool qtext_store(const char *query, int query_len,
367 : Size *query_offset, int *gc_count);
368 : static char *qtext_load_file(Size *buffer_size);
369 : static char *qtext_fetch(Size query_offset, int query_len,
370 : char *buffer, Size buffer_size);
371 : static bool need_gc_qtexts(void);
372 : static void gc_qtexts(void);
373 : static TimestampTz entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only);
374 : static char *generate_normalized_query(JumbleState *jstate, const char *query,
375 : int query_loc, int *query_len_p);
376 : static void fill_in_constant_lengths(JumbleState *jstate, const char *query,
377 : int query_loc);
378 : static int comp_location(const void *a, const void *b);
379 :
380 :
/*
 * Module load callback
 *
 * Registers the module's GUCs and installs all hooks.  Note the ordering
 * constraints honored below: all DefineCustom*Variable() calls precede
 * MarkGUCPrefixReserved(), and each hook's previous value is saved before
 * the hook pointer is overwritten, so our hooks can chain to predecessors.
 */
void
_PG_init(void)
{
	/*
	 * In order to create our shared memory area, we have to be loaded via
	 * shared_preload_libraries.  If not, fall out without hooking into any of
	 * the main system.  (We don't throw error here because it seems useful to
	 * allow the pg_stat_statements functions to be created even when the
	 * module isn't active.  The functions must protect themselves against
	 * being called then, however.)
	 */
	if (!process_shared_preload_libraries_in_progress)
		return;

	/*
	 * Inform the postmaster that we want to enable query_id calculation if
	 * compute_query_id is set to auto.
	 */
	EnableQueryId();

	/*
	 * Define (or redefine) custom GUC variables.
	 */
	DefineCustomIntVariable("pg_stat_statements.max",
							"Sets the maximum number of statements tracked by pg_stat_statements.",
							NULL,
							&pgss_max,
							5000,
							100,
							INT_MAX / 2,
							PGC_POSTMASTER,
							0,
							NULL,
							NULL,
							NULL);

	DefineCustomEnumVariable("pg_stat_statements.track",
							 "Selects which statements are tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track,
							 PGSS_TRACK_TOP,
							 track_options,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.track_utility",
							 "Selects whether utility commands are tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track_utility,
							 true,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.track_planning",
							 "Selects whether planning duration is tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track_planning,
							 false,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.save",
							 "Save pg_stat_statements statistics across server shutdowns.",
							 NULL,
							 &pgss_save,
							 true,
							 PGC_SIGHUP,
							 0,
							 NULL,
							 NULL,
							 NULL);

	MarkGUCPrefixReserved("pg_stat_statements");

	/*
	 * Install hooks.
	 */
	prev_shmem_request_hook = shmem_request_hook;
	shmem_request_hook = pgss_shmem_request;
	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = pgss_shmem_startup;
	prev_post_parse_analyze_hook = post_parse_analyze_hook;
	post_parse_analyze_hook = pgss_post_parse_analyze;
	prev_planner_hook = planner_hook;
	planner_hook = pgss_planner;
	prev_ExecutorStart = ExecutorStart_hook;
	ExecutorStart_hook = pgss_ExecutorStart;
	prev_ExecutorRun = ExecutorRun_hook;
	ExecutorRun_hook = pgss_ExecutorRun;
	prev_ExecutorFinish = ExecutorFinish_hook;
	ExecutorFinish_hook = pgss_ExecutorFinish;
	prev_ExecutorEnd = ExecutorEnd_hook;
	ExecutorEnd_hook = pgss_ExecutorEnd;
	prev_ProcessUtility = ProcessUtility_hook;
	ProcessUtility_hook = pgss_ProcessUtility;
}
489 :
/*
 * shmem_request hook: request additional shared resources.  We'll allocate or
 * attach to the shared resources in pgss_shmem_startup().
 *
 * Chains to any previously installed shmem_request hook first, then reserves
 * enough shared memory for the pgss state plus hash table and one named
 * LWLock tranche.
 */
static void
pgss_shmem_request(void)
{
	if (prev_shmem_request_hook)
		prev_shmem_request_hook();

	RequestAddinShmemSpace(pgss_memsize());
	RequestNamedLWLockTranche("pg_stat_statements", 1);
}
503 :
/*
 * shmem_startup hook: allocate or attach to shared memory,
 * then load any pre-existing statistics from file.
 * Also create and load the query-texts file, which is expected to exist
 * (even if empty) while the module is enabled.
 *
 * On any read/write/validation failure while loading the dump file, we log
 * and discard the dump (goto labels at the bottom), but deliberately keep
 * the (possibly partially filled) query-texts file, since it must exist
 * while the server runs.
 */
static void
pgss_shmem_startup(void)
{
	bool		found;
	HASHCTL		info;
	FILE	   *file = NULL;
	FILE	   *qfile = NULL;
	uint32		header;
	int32		num;
	int32		pgver;
	int32		i;
	int			buffer_size;
	char	   *buffer = NULL;

	if (prev_shmem_startup_hook)
		prev_shmem_startup_hook();

	/* reset in case this is a restart within the postmaster */
	pgss = NULL;
	pgss_hash = NULL;

	/*
	 * Create or attach to the shared memory state, including hash table
	 */
	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

	pgss = ShmemInitStruct("pg_stat_statements",
						   sizeof(pgssSharedState),
						   &found);

	if (!found)
	{
		/* First time through ... */
		pgss->lock = &(GetNamedLWLockTranche("pg_stat_statements"))->lock;
		pgss->cur_median_usage = ASSUMED_MEDIAN_INIT;
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;
		SpinLockInit(&pgss->mutex);
		pgss->extent = 0;
		pgss->n_writers = 0;
		pgss->gc_count = 0;
		pgss->stats.dealloc = 0;
		pgss->stats.stats_reset = GetCurrentTimestamp();
	}

	info.keysize = sizeof(pgssHashKey);
	info.entrysize = sizeof(pgssEntry);
	pgss_hash = ShmemInitHash("pg_stat_statements hash",
							  pgss_max, pgss_max,
							  &info,
							  HASH_ELEM | HASH_BLOBS);

	LWLockRelease(AddinShmemInitLock);

	/*
	 * If we're in the postmaster (or a standalone backend...), set up a shmem
	 * exit hook to dump the statistics to disk.
	 */
	if (!IsUnderPostmaster)
		on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);

	/*
	 * Done if some other process already completed our initialization.
	 */
	if (found)
		return;

	/*
	 * Note: we don't bother with locks here, because there should be no other
	 * processes running when this code is reached.
	 */

	/* Unlink query text file possibly left over from crash */
	unlink(PGSS_TEXT_FILE);

	/* Allocate new query text temp file */
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
		goto write_error;

	/*
	 * If we were told not to load old statistics, we're done.  (Note we do
	 * not try to unlink any old dump file in this case.  This seems a bit
	 * questionable but it's the historical behavior.)
	 */
	if (!pgss_save)
	{
		FreeFile(qfile);
		return;
	}

	/*
	 * Attempt to load old statistics from the dump file.
	 */
	file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
	if (file == NULL)
	{
		if (errno != ENOENT)
			goto read_error;
		/* No existing persisted stats file, so we're done */
		FreeFile(qfile);
		return;
	}

	buffer_size = 2048;
	buffer = (char *) palloc(buffer_size);

	/* File layout: header magic, PG major version, entry count */
	if (fread(&header, sizeof(uint32), 1, file) != 1 ||
		fread(&pgver, sizeof(uint32), 1, file) != 1 ||
		fread(&num, sizeof(int32), 1, file) != 1)
		goto read_error;

	if (header != PGSS_FILE_HEADER ||
		pgver != PGSS_PG_MAJOR_VERSION)
		goto data_error;

	for (i = 0; i < num; i++)
	{
		pgssEntry	temp;
		pgssEntry  *entry;
		Size		query_offset;

		if (fread(&temp, sizeof(pgssEntry), 1, file) != 1)
			goto read_error;

		/* Encoding is the only field we can easily sanity-check */
		if (!PG_VALID_BE_ENCODING(temp.encoding))
			goto data_error;

		/* Resize buffer as needed */
		if (temp.query_len >= buffer_size)
		{
			buffer_size = Max(buffer_size * 2, temp.query_len + 1);
			buffer = repalloc(buffer, buffer_size);
		}

		/* Query text is stored with its trailing null byte (query_len + 1) */
		if (fread(buffer, 1, temp.query_len + 1, file) != temp.query_len + 1)
			goto read_error;

		/* Should have a trailing null, but let's make sure */
		buffer[temp.query_len] = '\0';

		/* Skip loading "sticky" entries */
		if (IS_STICKY(temp.counters))
			continue;

		/* Store the query text */
		query_offset = pgss->extent;
		if (fwrite(buffer, 1, temp.query_len + 1, qfile) != temp.query_len + 1)
			goto write_error;
		pgss->extent += temp.query_len + 1;

		/* make the hashtable entry (discards old entries if too many) */
		entry = entry_alloc(&temp.key, query_offset, temp.query_len,
							temp.encoding,
							false);

		/* copy in the actual stats */
		entry->counters = temp.counters;
		entry->stats_since = temp.stats_since;
		entry->minmax_stats_since = temp.minmax_stats_since;
	}

	/* Read global statistics for pg_stat_statements */
	if (fread(&pgss->stats, sizeof(pgssGlobalStats), 1, file) != 1)
		goto read_error;

	pfree(buffer);
	FreeFile(file);
	FreeFile(qfile);

	/*
	 * Remove the persisted stats file so it's not included in
	 * backups/replication standbys, etc.  A new file will be written on next
	 * shutdown.
	 *
	 * Note: it's okay if the PGSS_TEXT_FILE is included in a basebackup,
	 * because we remove that file on startup; it acts inversely to
	 * PGSS_DUMP_FILE, in that it is only supposed to be around when the
	 * server is running, whereas PGSS_DUMP_FILE is only supposed to be around
	 * when the server is not running.  Leaving the file creates no danger of
	 * a newly restored database having a spurious record of execution costs,
	 * which is what we're really concerned about here.
	 */
	unlink(PGSS_DUMP_FILE);

	return;

read_error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not read file \"%s\": %m",
					PGSS_DUMP_FILE)));
	goto fail;
data_error:
	ereport(LOG,
			(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
			 errmsg("ignoring invalid data in file \"%s\"",
					PGSS_DUMP_FILE)));
	goto fail;
write_error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_TEXT_FILE)));
fail:
	if (buffer)
		pfree(buffer);
	if (file)
		FreeFile(file);
	if (qfile)
		FreeFile(qfile);
	/* If possible, throw away the bogus file; ignore any error */
	unlink(PGSS_DUMP_FILE);

	/*
	 * Don't unlink PGSS_TEXT_FILE here; it should always be around while the
	 * server is running with pg_stat_statements enabled
	 */
}
729 :
/*
 * shmem_shutdown hook: Dump statistics into file.
 *
 * Note: we don't bother with acquiring lock, because there should be no
 * other processes running when this is called.
 *
 * Writes the same layout pgss_shmem_startup() reads back: header magic,
 * PG major version, entry count, then each entry struct immediately
 * followed by its null-terminated query text, and finally the global
 * stats.  The file is written to a ".tmp" name and atomically renamed
 * into place.
 */
static void
pgss_shmem_shutdown(int code, Datum arg)
{
	FILE	   *file;
	char	   *qbuffer = NULL;
	Size		qbuffer_size = 0;
	HASH_SEQ_STATUS hash_seq;
	int32		num_entries;
	pgssEntry  *entry;

	/* Don't try to dump during a crash. */
	if (code)
		return;

	/* Safety check ... shouldn't get here unless shmem is set up. */
	if (!pgss || !pgss_hash)
		return;

	/* Don't dump if told not to. */
	if (!pgss_save)
		return;

	file = AllocateFile(PGSS_DUMP_FILE ".tmp", PG_BINARY_W);
	if (file == NULL)
		goto error;

	if (fwrite(&PGSS_FILE_HEADER, sizeof(uint32), 1, file) != 1)
		goto error;
	if (fwrite(&PGSS_PG_MAJOR_VERSION, sizeof(uint32), 1, file) != 1)
		goto error;
	num_entries = hash_get_num_entries(pgss_hash);
	if (fwrite(&num_entries, sizeof(int32), 1, file) != 1)
		goto error;

	/* Load the whole query-texts file into memory for random access */
	qbuffer = qtext_load_file(&qbuffer_size);
	if (qbuffer == NULL)
		goto error;

	/*
	 * When serializing to disk, we store query texts immediately after their
	 * entry data.  Any orphaned query texts are thereby excluded.
	 */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		int			len = entry->query_len;
		char	   *qstr = qtext_fetch(entry->query_offset, len,
									   qbuffer, qbuffer_size);

		if (qstr == NULL)
			continue;			/* Ignore any entries with bogus texts */

		if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 ||
			fwrite(qstr, 1, len + 1, file) != len + 1)
		{
			/* note: we assume hash_seq_term won't change errno */
			hash_seq_term(&hash_seq);
			goto error;
		}
	}

	/* Dump global statistics for pg_stat_statements */
	if (fwrite(&pgss->stats, sizeof(pgssGlobalStats), 1, file) != 1)
		goto error;

	free(qbuffer);
	qbuffer = NULL;

	/* FreeFile() flushes; a failure here means the dump is incomplete */
	if (FreeFile(file))
	{
		file = NULL;
		goto error;
	}

	/*
	 * Rename file into place, so we atomically replace any old one.
	 */
	(void) durable_rename(PGSS_DUMP_FILE ".tmp", PGSS_DUMP_FILE, LOG);

	/* Unlink query-texts file; it's not needed while shutdown */
	unlink(PGSS_TEXT_FILE);

	return;

error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_DUMP_FILE ".tmp")));
	free(qbuffer);
	if (file)
		FreeFile(file);
	unlink(PGSS_DUMP_FILE ".tmp");
	unlink(PGSS_TEXT_FILE);
}
831 :
832 : /*
833 : * Post-parse-analysis hook: mark query with a queryId
834 : */
835 : static void
836 154728 : pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate)
837 : {
838 154728 : if (prev_post_parse_analyze_hook)
839 0 : prev_post_parse_analyze_hook(pstate, query, jstate);
840 :
841 : /* Safety check... */
842 154728 : if (!pgss || !pgss_hash || !pgss_enabled(nesting_level))
843 27080 : return;
844 :
845 : /*
846 : * If it's EXECUTE, clear the queryId so that stats will accumulate for
847 : * the underlying PREPARE. But don't do this if we're not tracking
848 : * utility statements, to avoid messing up another extension that might be
849 : * tracking them.
850 : */
851 127648 : if (query->utilityStmt)
852 : {
853 57650 : if (pgss_track_utility && IsA(query->utilityStmt, ExecuteStmt))
854 : {
855 7166 : query->queryId = UINT64CONST(0);
856 7166 : return;
857 : }
858 : }
859 :
860 : /*
861 : * If query jumbling were able to identify any ignorable constants, we
862 : * immediately create a hash table entry for the query, so that we can
863 : * record the normalized form of the query string. If there were no such
864 : * constants, the normalized string would be the same as the query text
865 : * anyway, so there's no need for an early entry.
866 : */
867 120482 : if (jstate && jstate->clocations_count > 0)
868 68864 : pgss_store(pstate->p_sourcetext,
869 : query->queryId,
870 : query->stmt_location,
871 : query->stmt_len,
872 : PGSS_INVALID,
873 : 0,
874 : 0,
875 : NULL,
876 : NULL,
877 : NULL,
878 : jstate,
879 : 0,
880 : 0);
881 : }
882 :
883 : /*
884 : * Planner hook: forward to regular planner, but measure planning time
885 : * if needed.
886 : */
static PlannedStmt *
pgss_planner(Query *parse,
			 const char *query_string,
			 int cursorOptions,
			 ParamListInfo boundParams)
{
	PlannedStmt *result;

	/*
	 * We can't process the query if no query_string is provided, as
	 * pgss_store needs it.  We also ignore query without queryid, as it would
	 * be treated as a utility statement, which may not be the case.
	 */
	if (pgss_enabled(nesting_level)
		&& pgss_track_planning && query_string
		&& parse->queryId != UINT64CONST(0))
	{
		instr_time	start;
		instr_time	duration;
		BufferUsage bufusage_start,
					bufusage;
		WalUsage	walusage_start,
					walusage;

		/* We need to track buffer usage as the planner can access them. */
		bufusage_start = pgBufferUsage;

		/*
		 * Similarly the planner could write some WAL records in some cases
		 * (e.g. setting a hint bit with those being WAL-logged)
		 */
		walusage_start = pgWalUsage;
		INSTR_TIME_SET_CURRENT(start);

		/*
		 * Bump the nesting level around the planner call so that anything
		 * evaluated during planning is not treated as top-level; PG_FINALLY
		 * guarantees the decrement even if the planner errors out.
		 */
		nesting_level++;
		PG_TRY();
		{
			if (prev_planner_hook)
				result = prev_planner_hook(parse, query_string, cursorOptions,
										   boundParams);
			else
				result = standard_planner(parse, query_string, cursorOptions,
										  boundParams);
		}
		PG_FINALLY();
		{
			nesting_level--;
		}
		PG_END_TRY();

		INSTR_TIME_SET_CURRENT(duration);
		INSTR_TIME_SUBTRACT(duration, start);

		/* calc differences of buffer counters. */
		memset(&bufusage, 0, sizeof(BufferUsage));
		BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);

		/* calc differences of WAL counters. */
		memset(&walusage, 0, sizeof(WalUsage));
		WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);

		/* Accumulate planning-time stats under the PGSS_PLAN counters. */
		pgss_store(query_string,
				   parse->queryId,
				   parse->stmt_location,
				   parse->stmt_len,
				   PGSS_PLAN,
				   INSTR_TIME_GET_MILLISEC(duration),
				   0,
				   &bufusage,
				   &walusage,
				   NULL,
				   NULL,
				   0,
				   0);
	}
	else
	{
		/*
		 * Even though we're not tracking plan time for this statement, we
		 * must still increment the nesting level, to ensure that functions
		 * evaluated during planning are not seen as top-level calls.
		 */
		nesting_level++;
		PG_TRY();
		{
			if (prev_planner_hook)
				result = prev_planner_hook(parse, query_string, cursorOptions,
										   boundParams);
			else
				result = standard_planner(parse, query_string, cursorOptions,
										  boundParams);
		}
		PG_FINALLY();
		{
			nesting_level--;
		}
		PG_END_TRY();
	}

	return result;
}
988 :
989 : /*
990 : * ExecutorStart hook: start up tracking if needed
991 : */
static bool
pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	bool		plan_valid;

	/* Let the previous hook, or the standard implementation, do the work. */
	if (prev_ExecutorStart)
		plan_valid = prev_ExecutorStart(queryDesc, eflags);
	else
		plan_valid = standard_ExecutorStart(queryDesc, eflags);

	/* The plan may have become invalid during standard_ExecutorStart() */
	if (!plan_valid)
		return false;

	/*
	 * If query has queryId zero, don't track it.  This prevents double
	 * counting of optimizable statements that are directly contained in
	 * utility statements.
	 */
	if (pgss_enabled(nesting_level) && queryDesc->plannedstmt->queryId != UINT64CONST(0))
	{
		/*
		 * Set up to track total elapsed time in ExecutorRun.  Make sure the
		 * space is allocated in the per-query context so it will go away at
		 * ExecutorEnd.
		 */
		if (queryDesc->totaltime == NULL)
		{
			MemoryContext oldcxt;

			oldcxt = MemoryContextSwitchTo(queryDesc->estate->es_query_cxt);
			queryDesc->totaltime = InstrAlloc(1, INSTRUMENT_ALL, false);
			MemoryContextSwitchTo(oldcxt);
		}
	}

	return true;
}
1030 :
1031 : /*
1032 : * ExecutorRun hook: all we need do is track nesting depth
1033 : */
static void
pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count)
{
	/*
	 * Bump the nesting level for the duration of execution, so that queries
	 * issued from within (e.g. by called functions) are not counted as
	 * top-level; PG_FINALLY restores the level even on error.
	 */
	nesting_level++;
	PG_TRY();
	{
		if (prev_ExecutorRun)
			prev_ExecutorRun(queryDesc, direction, count);
		else
			standard_ExecutorRun(queryDesc, direction, count);
	}
	PG_FINALLY();
	{
		nesting_level--;
	}
	PG_END_TRY();
}
1051 :
1052 : /*
1053 : * ExecutorFinish hook: all we need do is track nesting depth
1054 : */
static void
pgss_ExecutorFinish(QueryDesc *queryDesc)
{
	/*
	 * Same nesting-level bookkeeping as pgss_ExecutorRun: triggers and other
	 * work done at ExecutorFinish time must not look like top-level queries.
	 */
	nesting_level++;
	PG_TRY();
	{
		if (prev_ExecutorFinish)
			prev_ExecutorFinish(queryDesc);
		else
			standard_ExecutorFinish(queryDesc);
	}
	PG_FINALLY();
	{
		nesting_level--;
	}
	PG_END_TRY();
}
1072 :
1073 : /*
1074 : * ExecutorEnd hook: store results if needed
1075 : */
static void
pgss_ExecutorEnd(QueryDesc *queryDesc)
{
	uint64		queryId = queryDesc->plannedstmt->queryId;

	/*
	 * Store stats only if the query has a nonzero id, instrumentation was
	 * set up in pgss_ExecutorStart, and tracking is enabled at this level.
	 */
	if (queryId != UINT64CONST(0) && queryDesc->totaltime &&
		pgss_enabled(nesting_level))
	{
		/*
		 * Make sure stats accumulation is done.  (Note: it's okay if several
		 * levels of hook all do this.)
		 */
		InstrEndLoop(queryDesc->totaltime);

		pgss_store(queryDesc->sourceText,
				   queryId,
				   queryDesc->plannedstmt->stmt_location,
				   queryDesc->plannedstmt->stmt_len,
				   PGSS_EXEC,
				   queryDesc->totaltime->total * 1000.0,	/* convert to msec */
				   queryDesc->estate->es_total_processed,
				   &queryDesc->totaltime->bufusage,
				   &queryDesc->totaltime->walusage,
				   queryDesc->estate->es_jit ? &queryDesc->estate->es_jit->instr : NULL,
				   NULL,
				   queryDesc->estate->es_parallel_workers_to_launch,
				   queryDesc->estate->es_parallel_workers_launched);
	}

	/* Chain to the previous hook or the standard implementation. */
	if (prev_ExecutorEnd)
		prev_ExecutorEnd(queryDesc);
	else
		standard_ExecutorEnd(queryDesc);
}
1110 :
1111 : /*
1112 : * ProcessUtility hook
1113 : */
static void
pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
					bool readOnlyTree,
					ProcessUtilityContext context,
					ParamListInfo params, QueryEnvironment *queryEnv,
					DestReceiver *dest, QueryCompletion *qc)
{
	Node	   *parsetree = pstmt->utilityStmt;
	/* Copy fields we may need after execution; *pstmt may be freed by then. */
	uint64		saved_queryId = pstmt->queryId;
	int			saved_stmt_location = pstmt->stmt_location;
	int			saved_stmt_len = pstmt->stmt_len;
	bool		enabled = pgss_track_utility && pgss_enabled(nesting_level);

	/*
	 * Force utility statements to get queryId zero.  We do this even in cases
	 * where the statement contains an optimizable statement for which a
	 * queryId could be derived (such as EXPLAIN or DECLARE CURSOR).  For such
	 * cases, runtime control will first go through ProcessUtility and then
	 * the executor, and we don't want the executor hooks to do anything,
	 * since we are already measuring the statement's costs at the utility
	 * level.
	 *
	 * Note that this is only done if pg_stat_statements is enabled and
	 * configured to track utility statements, in the unlikely possibility
	 * that user configured another extension to handle utility statements
	 * only.
	 */
	if (enabled)
		pstmt->queryId = UINT64CONST(0);

	/*
	 * If it's an EXECUTE statement, we don't track it and don't increment the
	 * nesting level.  This allows the cycles to be charged to the underlying
	 * PREPARE instead (by the Executor hooks), which is much more useful.
	 *
	 * We also don't track execution of PREPARE.  If we did, we would get one
	 * hash table entry for the PREPARE (with hash calculated from the query
	 * string), and then a different one with the same query string (but hash
	 * calculated from the query tree) would be used to accumulate costs of
	 * ensuing EXECUTEs.  This would be confusing.  Since PREPARE doesn't
	 * actually run the planner (only parse+rewrite), its costs are generally
	 * pretty negligible and it seems okay to just ignore it.
	 */
	if (enabled &&
		!IsA(parsetree, ExecuteStmt) &&
		!IsA(parsetree, PrepareStmt))
	{
		instr_time	start;
		instr_time	duration;
		uint64		rows;
		BufferUsage bufusage_start,
					bufusage;
		WalUsage	walusage_start,
					walusage;

		/* Snapshot resource counters before executing the utility command. */
		bufusage_start = pgBufferUsage;
		walusage_start = pgWalUsage;
		INSTR_TIME_SET_CURRENT(start);

		nesting_level++;
		PG_TRY();
		{
			if (prev_ProcessUtility)
				prev_ProcessUtility(pstmt, queryString, readOnlyTree,
									context, params, queryEnv,
									dest, qc);
			else
				standard_ProcessUtility(pstmt, queryString, readOnlyTree,
										context, params, queryEnv,
										dest, qc);
		}
		PG_FINALLY();
		{
			nesting_level--;
		}
		PG_END_TRY();

		/*
		 * CAUTION: do not access the *pstmt data structure again below here.
		 * If it was a ROLLBACK or similar, that data structure may have been
		 * freed.  We must copy everything we still need into local variables,
		 * which we did above.
		 *
		 * For the same reason, we can't risk restoring pstmt->queryId to its
		 * former value, which'd otherwise be a good idea.
		 */

		INSTR_TIME_SET_CURRENT(duration);
		INSTR_TIME_SUBTRACT(duration, start);

		/*
		 * Track the total number of rows retrieved or affected by the utility
		 * statements of COPY, FETCH, CREATE TABLE AS, CREATE MATERIALIZED
		 * VIEW, REFRESH MATERIALIZED VIEW and SELECT INTO.
		 */
		rows = (qc && (qc->commandTag == CMDTAG_COPY ||
					   qc->commandTag == CMDTAG_FETCH ||
					   qc->commandTag == CMDTAG_SELECT ||
					   qc->commandTag == CMDTAG_REFRESH_MATERIALIZED_VIEW)) ?
			qc->nprocessed : 0;

		/* calc differences of buffer counters. */
		memset(&bufusage, 0, sizeof(BufferUsage));
		BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);

		/* calc differences of WAL counters. */
		memset(&walusage, 0, sizeof(WalUsage));
		WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);

		/* Record utility-statement costs under PGSS_EXEC counters. */
		pgss_store(queryString,
				   saved_queryId,
				   saved_stmt_location,
				   saved_stmt_len,
				   PGSS_EXEC,
				   INSTR_TIME_GET_MILLISEC(duration),
				   rows,
				   &bufusage,
				   &walusage,
				   NULL,
				   NULL,
				   0,
				   0);
	}
	else
	{
		/*
		 * Even though we're not tracking execution time for this statement,
		 * we must still increment the nesting level, to ensure that functions
		 * evaluated within it are not seen as top-level calls.  But don't do
		 * so for EXECUTE; that way, when control reaches pgss_planner or
		 * pgss_ExecutorStart, we will treat the costs as top-level if
		 * appropriate.  Likewise, don't bump for PREPARE, so that parse
		 * analysis will treat the statement as top-level if appropriate.
		 *
		 * To be absolutely certain we don't mess up the nesting level,
		 * evaluate the bump_level condition just once.
		 */
		bool		bump_level =
			!IsA(parsetree, ExecuteStmt) &&
			!IsA(parsetree, PrepareStmt);

		if (bump_level)
			nesting_level++;
		PG_TRY();
		{
			if (prev_ProcessUtility)
				prev_ProcessUtility(pstmt, queryString, readOnlyTree,
									context, params, queryEnv,
									dest, qc);
			else
				standard_ProcessUtility(pstmt, queryString, readOnlyTree,
										context, params, queryEnv,
										dest, qc);
		}
		PG_FINALLY();
		{
			if (bump_level)
				nesting_level--;
		}
		PG_END_TRY();
	}
}
1276 :
1277 : /*
1278 : * Store some statistics for a statement.
1279 : *
1280 : * If jstate is not NULL then we're trying to create an entry for which
1281 : * we have no statistics as yet; we just want to record the normalized
1282 : * query string. total_time, rows, bufusage and walusage are ignored in this
1283 : * case.
1284 : *
1285 : * If kind is PGSS_PLAN or PGSS_EXEC, its value is used as the array position
1286 : * for the arrays in the Counters field.
1287 : */
static void
pgss_store(const char *query, uint64 queryId,
		   int query_location, int query_len,
		   pgssStoreKind kind,
		   double total_time, uint64 rows,
		   const BufferUsage *bufusage,
		   const WalUsage *walusage,
		   const struct JitInstrumentation *jitusage,
		   JumbleState *jstate,
		   int parallel_workers_to_launch,
		   int parallel_workers_launched)
{
	pgssHashKey key;
	pgssEntry  *entry;
	char	   *norm_query = NULL;
	int			encoding = GetDatabaseEncoding();

	Assert(query != NULL);

	/* Safety check... */
	if (!pgss || !pgss_hash)
		return;

	/*
	 * Nothing to do if compute_query_id isn't enabled and no other module
	 * computed a query identifier.
	 */
	if (queryId == UINT64CONST(0))
		return;

	/*
	 * Confine our attention to the relevant part of the string, if the query
	 * is a portion of a multi-statement source string, and update query
	 * location and length if needed.
	 */
	query = CleanQuerytext(query, &query_location, &query_len);

	/* Set up key for hashtable search */

	/* clear padding */
	memset(&key, 0, sizeof(pgssHashKey));

	key.userid = GetUserId();
	key.dbid = MyDatabaseId;
	key.queryid = queryId;
	key.toplevel = (nesting_level == 0);

	/* Lookup the hash table entry with shared lock. */
	LWLockAcquire(pgss->lock, LW_SHARED);

	entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);

	/* Create new entry, if not present */
	if (!entry)
	{
		Size		query_offset;
		int			gc_count;
		bool		stored;
		bool		do_gc;

		/*
		 * Create a new, normalized query string if caller asked.  We don't
		 * need to hold the lock while doing this work.  (Note: in any case,
		 * it's possible that someone else creates a duplicate hashtable entry
		 * in the interval where we don't hold the lock below.  That case is
		 * handled by entry_alloc.)
		 */
		if (jstate)
		{
			LWLockRelease(pgss->lock);
			norm_query = generate_normalized_query(jstate, query,
												   query_location,
												   &query_len);
			LWLockAcquire(pgss->lock, LW_SHARED);
		}

		/* Append new query text to file with only shared lock held */
		stored = qtext_store(norm_query ? norm_query : query, query_len,
							 &query_offset, &gc_count);

		/*
		 * Determine whether we need to garbage collect external query texts
		 * while the shared lock is still held.  This micro-optimization
		 * avoids taking the time to decide this while holding exclusive lock.
		 */
		do_gc = need_gc_qtexts();

		/* Need exclusive lock to make a new hashtable entry - promote */
		LWLockRelease(pgss->lock);
		LWLockAcquire(pgss->lock, LW_EXCLUSIVE);

		/*
		 * A garbage collection may have occurred while we weren't holding the
		 * lock.  In the unlikely event that this happens, the query text we
		 * stored above will have been garbage collected, so write it again.
		 * This should be infrequent enough that doing it while holding
		 * exclusive lock isn't a performance problem.
		 */
		if (!stored || pgss->gc_count != gc_count)
			stored = qtext_store(norm_query ? norm_query : query, query_len,
								 &query_offset, NULL);

		/* If we failed to write to the text file, give up */
		if (!stored)
			goto done;

		/* OK to create a new hashtable entry */
		entry = entry_alloc(&key, query_offset, query_len, encoding,
							jstate != NULL);

		/* If needed, perform garbage collection while exclusive lock held */
		if (do_gc)
			gc_qtexts();
	}

	/* Increment the counts, except when jstate is not NULL */
	if (!jstate)
	{
		Assert(kind == PGSS_PLAN || kind == PGSS_EXEC);

		/*
		 * Grab the spinlock while updating the counters (see comment about
		 * locking rules at the head of the file)
		 */
		SpinLockAcquire(&entry->mutex);

		/* "Unstick" entry if it was previously sticky */
		if (IS_STICKY(entry->counters))
			entry->counters.usage = USAGE_INIT;

		entry->counters.calls[kind] += 1;
		entry->counters.total_time[kind] += total_time;

		/* First real call of this kind seeds min/max/mean directly. */
		if (entry->counters.calls[kind] == 1)
		{
			entry->counters.min_time[kind] = total_time;
			entry->counters.max_time[kind] = total_time;
			entry->counters.mean_time[kind] = total_time;
		}
		else
		{
			/*
			 * Welford's method for accurately computing variance. See
			 * <http://www.johndcook.com/blog/standard_deviation/>
			 */
			double		old_mean = entry->counters.mean_time[kind];

			entry->counters.mean_time[kind] +=
				(total_time - old_mean) / entry->counters.calls[kind];
			entry->counters.sum_var_time[kind] +=
				(total_time - old_mean) * (total_time - entry->counters.mean_time[kind]);

			/*
			 * Calculate min and max time. min = 0 and max = 0 means that the
			 * min/max statistics were reset
			 */
			if (entry->counters.min_time[kind] == 0
				&& entry->counters.max_time[kind] == 0)
			{
				entry->counters.min_time[kind] = total_time;
				entry->counters.max_time[kind] = total_time;
			}
			else
			{
				if (entry->counters.min_time[kind] > total_time)
					entry->counters.min_time[kind] = total_time;
				if (entry->counters.max_time[kind] < total_time)
					entry->counters.max_time[kind] = total_time;
			}
		}
		entry->counters.rows += rows;
		entry->counters.shared_blks_hit += bufusage->shared_blks_hit;
		entry->counters.shared_blks_read += bufusage->shared_blks_read;
		entry->counters.shared_blks_dirtied += bufusage->shared_blks_dirtied;
		entry->counters.shared_blks_written += bufusage->shared_blks_written;
		entry->counters.local_blks_hit += bufusage->local_blks_hit;
		entry->counters.local_blks_read += bufusage->local_blks_read;
		entry->counters.local_blks_dirtied += bufusage->local_blks_dirtied;
		entry->counters.local_blks_written += bufusage->local_blks_written;
		entry->counters.temp_blks_read += bufusage->temp_blks_read;
		entry->counters.temp_blks_written += bufusage->temp_blks_written;
		entry->counters.shared_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->shared_blk_read_time);
		entry->counters.shared_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->shared_blk_write_time);
		entry->counters.local_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->local_blk_read_time);
		entry->counters.local_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->local_blk_write_time);
		entry->counters.temp_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->temp_blk_read_time);
		entry->counters.temp_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->temp_blk_write_time);
		entry->counters.usage += USAGE_EXEC(total_time);
		entry->counters.wal_records += walusage->wal_records;
		entry->counters.wal_fpi += walusage->wal_fpi;
		entry->counters.wal_bytes += walusage->wal_bytes;
		entry->counters.wal_buffers_full += walusage->wal_buffers_full;
		/* JIT stats are only provided by the executor-end code path. */
		if (jitusage)
		{
			entry->counters.jit_functions += jitusage->created_functions;
			entry->counters.jit_generation_time += INSTR_TIME_GET_MILLISEC(jitusage->generation_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->deform_counter))
				entry->counters.jit_deform_count++;
			entry->counters.jit_deform_time += INSTR_TIME_GET_MILLISEC(jitusage->deform_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->inlining_counter))
				entry->counters.jit_inlining_count++;
			entry->counters.jit_inlining_time += INSTR_TIME_GET_MILLISEC(jitusage->inlining_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->optimization_counter))
				entry->counters.jit_optimization_count++;
			entry->counters.jit_optimization_time += INSTR_TIME_GET_MILLISEC(jitusage->optimization_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->emission_counter))
				entry->counters.jit_emission_count++;
			entry->counters.jit_emission_time += INSTR_TIME_GET_MILLISEC(jitusage->emission_counter);
		}

		/* parallel worker counters */
		entry->counters.parallel_workers_to_launch += parallel_workers_to_launch;
		entry->counters.parallel_workers_launched += parallel_workers_launched;

		SpinLockRelease(&entry->mutex);
	}

done:
	LWLockRelease(pgss->lock);

	/* We postpone this clean-up until we're out of the lock */
	if (norm_query)
		pfree(norm_query);
}
1516 :
1517 : /*
1518 : * Reset statement statistics corresponding to userid, dbid, and queryid.
1519 : */
1520 : Datum
1521 2 : pg_stat_statements_reset_1_7(PG_FUNCTION_ARGS)
1522 : {
1523 : Oid userid;
1524 : Oid dbid;
1525 : uint64 queryid;
1526 :
1527 2 : userid = PG_GETARG_OID(0);
1528 2 : dbid = PG_GETARG_OID(1);
1529 2 : queryid = (uint64) PG_GETARG_INT64(2);
1530 :
1531 2 : entry_reset(userid, dbid, queryid, false);
1532 :
1533 2 : PG_RETURN_VOID();
1534 : }
1535 :
1536 : Datum
1537 188 : pg_stat_statements_reset_1_11(PG_FUNCTION_ARGS)
1538 : {
1539 : Oid userid;
1540 : Oid dbid;
1541 : uint64 queryid;
1542 : bool minmax_only;
1543 :
1544 188 : userid = PG_GETARG_OID(0);
1545 188 : dbid = PG_GETARG_OID(1);
1546 188 : queryid = (uint64) PG_GETARG_INT64(2);
1547 188 : minmax_only = PG_GETARG_BOOL(3);
1548 :
1549 188 : PG_RETURN_TIMESTAMPTZ(entry_reset(userid, dbid, queryid, minmax_only));
1550 : }
1551 :
1552 : /*
1553 : * Reset statement statistics.
1554 : */
Datum
pg_stat_statements_reset(PG_FUNCTION_ARGS)
{
	/* Zero arguments match all entries; false = full reset, not minmax-only. */
	entry_reset(0, 0, 0, false);

	PG_RETURN_VOID();
}
1562 :
/*
 * Number of output arguments (columns) for various API versions.  These are
 * used in pg_stat_statements_internal() to sanity-check the SQL declaration
 * against the C-level API version.
 */
#define PG_STAT_STATEMENTS_COLS_V1_0	14
#define PG_STAT_STATEMENTS_COLS_V1_1	18
#define PG_STAT_STATEMENTS_COLS_V1_2	19
#define PG_STAT_STATEMENTS_COLS_V1_3	23
#define PG_STAT_STATEMENTS_COLS_V1_8	32
#define PG_STAT_STATEMENTS_COLS_V1_9	33
#define PG_STAT_STATEMENTS_COLS_V1_10	43
#define PG_STAT_STATEMENTS_COLS_V1_11	49
#define PG_STAT_STATEMENTS_COLS_V1_12	52
#define PG_STAT_STATEMENTS_COLS			52	/* maximum of above */
1574 :
1575 : /*
1576 : * Retrieve statement statistics.
1577 : *
1578 : * The SQL API of this function has changed multiple times, and will likely
1579 : * do so again in future. To support the case where a newer version of this
1580 : * loadable module is being used with an old SQL declaration of the function,
1581 : * we continue to support the older API versions. For 1.2 and later, the
1582 : * expected API version is identified by embedding it in the C name of the
1583 : * function. Unfortunately we weren't bright enough to do that for 1.1.
1584 : */
1585 : Datum
1586 210 : pg_stat_statements_1_12(PG_FUNCTION_ARGS)
1587 : {
1588 210 : bool showtext = PG_GETARG_BOOL(0);
1589 :
1590 210 : pg_stat_statements_internal(fcinfo, PGSS_V1_12, showtext);
1591 :
1592 210 : return (Datum) 0;
1593 : }
1594 :
1595 : Datum
1596 2 : pg_stat_statements_1_11(PG_FUNCTION_ARGS)
1597 : {
1598 2 : bool showtext = PG_GETARG_BOOL(0);
1599 :
1600 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_11, showtext);
1601 :
1602 2 : return (Datum) 0;
1603 : }
1604 :
1605 : Datum
1606 2 : pg_stat_statements_1_10(PG_FUNCTION_ARGS)
1607 : {
1608 2 : bool showtext = PG_GETARG_BOOL(0);
1609 :
1610 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_10, showtext);
1611 :
1612 2 : return (Datum) 0;
1613 : }
1614 :
1615 : Datum
1616 2 : pg_stat_statements_1_9(PG_FUNCTION_ARGS)
1617 : {
1618 2 : bool showtext = PG_GETARG_BOOL(0);
1619 :
1620 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_9, showtext);
1621 :
1622 2 : return (Datum) 0;
1623 : }
1624 :
1625 : Datum
1626 2 : pg_stat_statements_1_8(PG_FUNCTION_ARGS)
1627 : {
1628 2 : bool showtext = PG_GETARG_BOOL(0);
1629 :
1630 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_8, showtext);
1631 :
1632 2 : return (Datum) 0;
1633 : }
1634 :
1635 : Datum
1636 2 : pg_stat_statements_1_3(PG_FUNCTION_ARGS)
1637 : {
1638 2 : bool showtext = PG_GETARG_BOOL(0);
1639 :
1640 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_3, showtext);
1641 :
1642 2 : return (Datum) 0;
1643 : }
1644 :
1645 : Datum
1646 0 : pg_stat_statements_1_2(PG_FUNCTION_ARGS)
1647 : {
1648 0 : bool showtext = PG_GETARG_BOOL(0);
1649 :
1650 0 : pg_stat_statements_internal(fcinfo, PGSS_V1_2, showtext);
1651 :
1652 0 : return (Datum) 0;
1653 : }
1654 :
1655 : /*
1656 : * Legacy entry point for pg_stat_statements() API versions 1.0 and 1.1.
1657 : * This can be removed someday, perhaps.
1658 : */
Datum
pg_stat_statements(PG_FUNCTION_ARGS)
{
	/* If it's really API 1.1, we'll figure that out below */
	/* showtext is forced true; versions 1.0/1.1 had no such parameter. */
	pg_stat_statements_internal(fcinfo, PGSS_V1_0, true);

	return (Datum) 0;
}
1667 :
1668 : /* Common code for all versions of pg_stat_statements() */
1669 : static void
1670 220 : pg_stat_statements_internal(FunctionCallInfo fcinfo,
1671 : pgssVersion api_version,
1672 : bool showtext)
1673 : {
1674 220 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1675 220 : Oid userid = GetUserId();
1676 220 : bool is_allowed_role = false;
1677 220 : char *qbuffer = NULL;
1678 220 : Size qbuffer_size = 0;
1679 220 : Size extent = 0;
1680 220 : int gc_count = 0;
1681 : HASH_SEQ_STATUS hash_seq;
1682 : pgssEntry *entry;
1683 :
1684 : /*
1685 : * Superusers or roles with the privileges of pg_read_all_stats members
1686 : * are allowed
1687 : */
1688 220 : is_allowed_role = has_privs_of_role(userid, ROLE_PG_READ_ALL_STATS);
1689 :
1690 : /* hash table must exist already */
1691 220 : if (!pgss || !pgss_hash)
1692 0 : ereport(ERROR,
1693 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1694 : errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
1695 :
1696 220 : InitMaterializedSRF(fcinfo, 0);
1697 :
1698 : /*
1699 : * Check we have the expected number of output arguments. Aside from
1700 : * being a good safety check, we need a kluge here to detect API version
1701 : * 1.1, which was wedged into the code in an ill-considered way.
1702 : */
1703 220 : switch (rsinfo->setDesc->natts)
1704 : {
1705 0 : case PG_STAT_STATEMENTS_COLS_V1_0:
1706 0 : if (api_version != PGSS_V1_0)
1707 0 : elog(ERROR, "incorrect number of output arguments");
1708 0 : break;
1709 0 : case PG_STAT_STATEMENTS_COLS_V1_1:
1710 : /* pg_stat_statements() should have told us 1.0 */
1711 0 : if (api_version != PGSS_V1_0)
1712 0 : elog(ERROR, "incorrect number of output arguments");
1713 0 : api_version = PGSS_V1_1;
1714 0 : break;
1715 0 : case PG_STAT_STATEMENTS_COLS_V1_2:
1716 0 : if (api_version != PGSS_V1_2)
1717 0 : elog(ERROR, "incorrect number of output arguments");
1718 0 : break;
1719 2 : case PG_STAT_STATEMENTS_COLS_V1_3:
1720 2 : if (api_version != PGSS_V1_3)
1721 0 : elog(ERROR, "incorrect number of output arguments");
1722 2 : break;
1723 2 : case PG_STAT_STATEMENTS_COLS_V1_8:
1724 2 : if (api_version != PGSS_V1_8)
1725 0 : elog(ERROR, "incorrect number of output arguments");
1726 2 : break;
1727 2 : case PG_STAT_STATEMENTS_COLS_V1_9:
1728 2 : if (api_version != PGSS_V1_9)
1729 0 : elog(ERROR, "incorrect number of output arguments");
1730 2 : break;
1731 2 : case PG_STAT_STATEMENTS_COLS_V1_10:
1732 2 : if (api_version != PGSS_V1_10)
1733 0 : elog(ERROR, "incorrect number of output arguments");
1734 2 : break;
1735 2 : case PG_STAT_STATEMENTS_COLS_V1_11:
1736 2 : if (api_version != PGSS_V1_11)
1737 0 : elog(ERROR, "incorrect number of output arguments");
1738 2 : break;
1739 210 : case PG_STAT_STATEMENTS_COLS_V1_12:
1740 210 : if (api_version != PGSS_V1_12)
1741 0 : elog(ERROR, "incorrect number of output arguments");
1742 210 : break;
1743 0 : default:
1744 0 : elog(ERROR, "incorrect number of output arguments");
1745 : }
1746 :
1747 : /*
1748 : * We'd like to load the query text file (if needed) while not holding any
1749 : * lock on pgss->lock. In the worst case we'll have to do this again
1750 : * after we have the lock, but it's unlikely enough to make this a win
1751 : * despite occasional duplicated work. We need to reload if anybody
1752 : * writes to the file (either a retail qtext_store(), or a garbage
1753 : * collection) between this point and where we've gotten shared lock. If
1754 : * a qtext_store is actually in progress when we look, we might as well
1755 : * skip the speculative load entirely.
1756 : */
1757 220 : if (showtext)
1758 : {
1759 : int n_writers;
1760 :
1761 : /* Take the mutex so we can examine variables */
1762 220 : SpinLockAcquire(&pgss->mutex);
1763 220 : extent = pgss->extent;
1764 220 : n_writers = pgss->n_writers;
1765 220 : gc_count = pgss->gc_count;
1766 220 : SpinLockRelease(&pgss->mutex);
1767 :
1768 : /* No point in loading file now if there are active writers */
1769 220 : if (n_writers == 0)
1770 220 : qbuffer = qtext_load_file(&qbuffer_size);
1771 : }
1772 :
1773 : /*
1774 : * Get shared lock, load or reload the query text file if we must, and
1775 : * iterate over the hashtable entries.
1776 : *
1777 : * With a large hash table, we might be holding the lock rather longer
1778 : * than one could wish. However, this only blocks creation of new hash
1779 : * table entries, and the larger the hash table the less likely that is to
1780 : * be needed. So we can hope this is okay. Perhaps someday we'll decide
1781 : * we need to partition the hash table to limit the time spent holding any
1782 : * one lock.
1783 : */
1784 220 : LWLockAcquire(pgss->lock, LW_SHARED);
1785 :
1786 220 : if (showtext)
1787 : {
1788 : /*
1789 : * Here it is safe to examine extent and gc_count without taking the
1790 : * mutex. Note that although other processes might change
1791 : * pgss->extent just after we look at it, the strings they then write
1792 : * into the file cannot yet be referenced in the hashtable, so we
1793 : * don't care whether we see them or not.
1794 : *
1795 : * If qtext_load_file fails, we just press on; we'll return NULL for
1796 : * every query text.
1797 : */
1798 220 : if (qbuffer == NULL ||
1799 220 : pgss->extent != extent ||
1800 220 : pgss->gc_count != gc_count)
1801 : {
1802 0 : free(qbuffer);
1803 0 : qbuffer = qtext_load_file(&qbuffer_size);
1804 : }
1805 : }
1806 :
1807 220 : hash_seq_init(&hash_seq, pgss_hash);
1808 52338 : while ((entry = hash_seq_search(&hash_seq)) != NULL)
1809 : {
1810 : Datum values[PG_STAT_STATEMENTS_COLS];
1811 : bool nulls[PG_STAT_STATEMENTS_COLS];
1812 52118 : int i = 0;
1813 : Counters tmp;
1814 : double stddev;
1815 52118 : int64 queryid = entry->key.queryid;
1816 : TimestampTz stats_since;
1817 : TimestampTz minmax_stats_since;
1818 :
1819 52118 : memset(values, 0, sizeof(values));
1820 52118 : memset(nulls, 0, sizeof(nulls));
1821 :
1822 52118 : values[i++] = ObjectIdGetDatum(entry->key.userid);
1823 52118 : values[i++] = ObjectIdGetDatum(entry->key.dbid);
1824 52118 : if (api_version >= PGSS_V1_9)
1825 52094 : values[i++] = BoolGetDatum(entry->key.toplevel);
1826 :
1827 52118 : if (is_allowed_role || entry->key.userid == userid)
1828 : {
1829 52110 : if (api_version >= PGSS_V1_2)
1830 52110 : values[i++] = Int64GetDatumFast(queryid);
1831 :
1832 52110 : if (showtext)
1833 : {
1834 52110 : char *qstr = qtext_fetch(entry->query_offset,
1835 : entry->query_len,
1836 : qbuffer,
1837 : qbuffer_size);
1838 :
1839 52110 : if (qstr)
1840 : {
1841 : char *enc;
1842 :
1843 52110 : enc = pg_any_to_server(qstr,
1844 : entry->query_len,
1845 : entry->encoding);
1846 :
1847 52110 : values[i++] = CStringGetTextDatum(enc);
1848 :
1849 52110 : if (enc != qstr)
1850 0 : pfree(enc);
1851 : }
1852 : else
1853 : {
1854 : /* Just return a null if we fail to find the text */
1855 0 : nulls[i++] = true;
1856 : }
1857 : }
1858 : else
1859 : {
1860 : /* Query text not requested */
1861 0 : nulls[i++] = true;
1862 : }
1863 : }
1864 : else
1865 : {
1866 : /* Don't show queryid */
1867 8 : if (api_version >= PGSS_V1_2)
1868 8 : nulls[i++] = true;
1869 :
1870 : /*
1871 : * Don't show query text, but hint as to the reason for not doing
1872 : * so if it was requested
1873 : */
1874 8 : if (showtext)
1875 8 : values[i++] = CStringGetTextDatum("<insufficient privilege>");
1876 : else
1877 0 : nulls[i++] = true;
1878 : }
1879 :
1880 : /* copy counters to a local variable to keep locking time short */
1881 52118 : SpinLockAcquire(&entry->mutex);
1882 52118 : tmp = entry->counters;
1883 52118 : SpinLockRelease(&entry->mutex);
1884 :
1885 : /*
1886 : * The spinlock is not required when reading these two as they are
1887 : * always updated when holding pgss->lock exclusively.
1888 : */
1889 52118 : stats_since = entry->stats_since;
1890 52118 : minmax_stats_since = entry->minmax_stats_since;
1891 :
1892 : /* Skip entry if unexecuted (ie, it's a pending "sticky" entry) */
1893 52118 : if (IS_STICKY(tmp))
1894 72 : continue;
1895 :
1896 : /* Note that we rely on PGSS_PLAN being 0 and PGSS_EXEC being 1. */
1897 156138 : for (int kind = 0; kind < PGSS_NUMKIND; kind++)
1898 : {
1899 104092 : if (kind == PGSS_EXEC || api_version >= PGSS_V1_8)
1900 : {
1901 104084 : values[i++] = Int64GetDatumFast(tmp.calls[kind]);
1902 104084 : values[i++] = Float8GetDatumFast(tmp.total_time[kind]);
1903 : }
1904 :
1905 104092 : if ((kind == PGSS_EXEC && api_version >= PGSS_V1_3) ||
1906 : api_version >= PGSS_V1_8)
1907 : {
1908 104084 : values[i++] = Float8GetDatumFast(tmp.min_time[kind]);
1909 104084 : values[i++] = Float8GetDatumFast(tmp.max_time[kind]);
1910 104084 : values[i++] = Float8GetDatumFast(tmp.mean_time[kind]);
1911 :
1912 : /*
1913 : * Note we are calculating the population variance here, not
1914 : * the sample variance, as we have data for the whole
1915 : * population, so Bessel's correction is not used, and we
1916 : * don't divide by tmp.calls - 1.
1917 : */
1918 104084 : if (tmp.calls[kind] > 1)
1919 9816 : stddev = sqrt(tmp.sum_var_time[kind] / tmp.calls[kind]);
1920 : else
1921 94268 : stddev = 0.0;
1922 104084 : values[i++] = Float8GetDatumFast(stddev);
1923 : }
1924 : }
1925 52046 : values[i++] = Int64GetDatumFast(tmp.rows);
1926 52046 : values[i++] = Int64GetDatumFast(tmp.shared_blks_hit);
1927 52046 : values[i++] = Int64GetDatumFast(tmp.shared_blks_read);
1928 52046 : if (api_version >= PGSS_V1_1)
1929 52046 : values[i++] = Int64GetDatumFast(tmp.shared_blks_dirtied);
1930 52046 : values[i++] = Int64GetDatumFast(tmp.shared_blks_written);
1931 52046 : values[i++] = Int64GetDatumFast(tmp.local_blks_hit);
1932 52046 : values[i++] = Int64GetDatumFast(tmp.local_blks_read);
1933 52046 : if (api_version >= PGSS_V1_1)
1934 52046 : values[i++] = Int64GetDatumFast(tmp.local_blks_dirtied);
1935 52046 : values[i++] = Int64GetDatumFast(tmp.local_blks_written);
1936 52046 : values[i++] = Int64GetDatumFast(tmp.temp_blks_read);
1937 52046 : values[i++] = Int64GetDatumFast(tmp.temp_blks_written);
1938 52046 : if (api_version >= PGSS_V1_1)
1939 : {
1940 52046 : values[i++] = Float8GetDatumFast(tmp.shared_blk_read_time);
1941 52046 : values[i++] = Float8GetDatumFast(tmp.shared_blk_write_time);
1942 : }
1943 52046 : if (api_version >= PGSS_V1_11)
1944 : {
1945 51990 : values[i++] = Float8GetDatumFast(tmp.local_blk_read_time);
1946 51990 : values[i++] = Float8GetDatumFast(tmp.local_blk_write_time);
1947 : }
1948 52046 : if (api_version >= PGSS_V1_10)
1949 : {
1950 52008 : values[i++] = Float8GetDatumFast(tmp.temp_blk_read_time);
1951 52008 : values[i++] = Float8GetDatumFast(tmp.temp_blk_write_time);
1952 : }
1953 52046 : if (api_version >= PGSS_V1_8)
1954 : {
1955 : char buf[256];
1956 : Datum wal_bytes;
1957 :
1958 52038 : values[i++] = Int64GetDatumFast(tmp.wal_records);
1959 52038 : values[i++] = Int64GetDatumFast(tmp.wal_fpi);
1960 :
1961 52038 : snprintf(buf, sizeof buf, UINT64_FORMAT, tmp.wal_bytes);
1962 :
1963 : /* Convert to numeric. */
1964 52038 : wal_bytes = DirectFunctionCall3(numeric_in,
1965 : CStringGetDatum(buf),
1966 : ObjectIdGetDatum(0),
1967 : Int32GetDatum(-1));
1968 52038 : values[i++] = wal_bytes;
1969 : }
1970 52046 : if (api_version >= PGSS_V1_12)
1971 : {
1972 51970 : values[i++] = Int64GetDatumFast(tmp.wal_buffers_full);
1973 : }
1974 52046 : if (api_version >= PGSS_V1_10)
1975 : {
1976 52008 : values[i++] = Int64GetDatumFast(tmp.jit_functions);
1977 52008 : values[i++] = Float8GetDatumFast(tmp.jit_generation_time);
1978 52008 : values[i++] = Int64GetDatumFast(tmp.jit_inlining_count);
1979 52008 : values[i++] = Float8GetDatumFast(tmp.jit_inlining_time);
1980 52008 : values[i++] = Int64GetDatumFast(tmp.jit_optimization_count);
1981 52008 : values[i++] = Float8GetDatumFast(tmp.jit_optimization_time);
1982 52008 : values[i++] = Int64GetDatumFast(tmp.jit_emission_count);
1983 52008 : values[i++] = Float8GetDatumFast(tmp.jit_emission_time);
1984 : }
1985 52046 : if (api_version >= PGSS_V1_11)
1986 : {
1987 51990 : values[i++] = Int64GetDatumFast(tmp.jit_deform_count);
1988 51990 : values[i++] = Float8GetDatumFast(tmp.jit_deform_time);
1989 : }
1990 52046 : if (api_version >= PGSS_V1_12)
1991 : {
1992 51970 : values[i++] = Int64GetDatumFast(tmp.parallel_workers_to_launch);
1993 51970 : values[i++] = Int64GetDatumFast(tmp.parallel_workers_launched);
1994 : }
1995 52046 : if (api_version >= PGSS_V1_11)
1996 : {
1997 51990 : values[i++] = TimestampTzGetDatum(stats_since);
1998 51990 : values[i++] = TimestampTzGetDatum(minmax_stats_since);
1999 : }
2000 :
2001 : Assert(i == (api_version == PGSS_V1_0 ? PG_STAT_STATEMENTS_COLS_V1_0 :
2002 : api_version == PGSS_V1_1 ? PG_STAT_STATEMENTS_COLS_V1_1 :
2003 : api_version == PGSS_V1_2 ? PG_STAT_STATEMENTS_COLS_V1_2 :
2004 : api_version == PGSS_V1_3 ? PG_STAT_STATEMENTS_COLS_V1_3 :
2005 : api_version == PGSS_V1_8 ? PG_STAT_STATEMENTS_COLS_V1_8 :
2006 : api_version == PGSS_V1_9 ? PG_STAT_STATEMENTS_COLS_V1_9 :
2007 : api_version == PGSS_V1_10 ? PG_STAT_STATEMENTS_COLS_V1_10 :
2008 : api_version == PGSS_V1_11 ? PG_STAT_STATEMENTS_COLS_V1_11 :
2009 : api_version == PGSS_V1_12 ? PG_STAT_STATEMENTS_COLS_V1_12 :
2010 : -1 /* fail if you forget to update this assert */ ));
2011 :
2012 52046 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
2013 : }
2014 :
2015 220 : LWLockRelease(pgss->lock);
2016 :
2017 220 : free(qbuffer);
2018 220 : }
2019 :
2020 : /* Number of output arguments (columns) for pg_stat_statements_info */
2021 : #define PG_STAT_STATEMENTS_INFO_COLS 2
2022 :
2023 : /*
2024 : * Return statistics of pg_stat_statements.
2025 : */
2026 : Datum
2027 4 : pg_stat_statements_info(PG_FUNCTION_ARGS)
2028 : {
2029 : pgssGlobalStats stats;
2030 : TupleDesc tupdesc;
2031 4 : Datum values[PG_STAT_STATEMENTS_INFO_COLS] = {0};
2032 4 : bool nulls[PG_STAT_STATEMENTS_INFO_COLS] = {0};
2033 :
2034 4 : if (!pgss || !pgss_hash)
2035 0 : ereport(ERROR,
2036 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2037 : errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
2038 :
2039 : /* Build a tuple descriptor for our result type */
2040 4 : if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
2041 0 : elog(ERROR, "return type must be a row type");
2042 :
2043 : /* Read global statistics for pg_stat_statements */
2044 4 : SpinLockAcquire(&pgss->mutex);
2045 4 : stats = pgss->stats;
2046 4 : SpinLockRelease(&pgss->mutex);
2047 :
2048 4 : values[0] = Int64GetDatum(stats.dealloc);
2049 4 : values[1] = TimestampTzGetDatum(stats.stats_reset);
2050 :
2051 4 : PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
2052 : }
2053 :
2054 : /*
2055 : * Estimate shared memory space needed.
2056 : */
2057 : static Size
2058 14 : pgss_memsize(void)
2059 : {
2060 : Size size;
2061 :
2062 14 : size = MAXALIGN(sizeof(pgssSharedState));
2063 14 : size = add_size(size, hash_estimate_size(pgss_max, sizeof(pgssEntry)));
2064 :
2065 14 : return size;
2066 : }
2067 :
2068 : /*
2069 : * Allocate a new hashtable entry.
2070 : * caller must hold an exclusive lock on pgss->lock
2071 : *
2072 : * "query" need not be null-terminated; we rely on query_len instead
2073 : *
2074 : * If "sticky" is true, make the new entry artificially sticky so that it will
2075 : * probably still be there when the query finishes execution. We do this by
2076 : * giving it a median usage value rather than the normal value. (Strictly
2077 : * speaking, query strings are normalized on a best effort basis, though it
2078 : * would be difficult to demonstrate this even under artificial conditions.)
2079 : *
2080 : * Note: despite needing exclusive lock, it's not an error for the target
2081 : * entry to already exist. This is because pgss_store releases and
2082 : * reacquires lock after failing to find a match; so someone else could
2083 : * have made the entry while we waited to get exclusive lock.
2084 : */
2085 : static pgssEntry *
2086 105668 : entry_alloc(pgssHashKey *key, Size query_offset, int query_len, int encoding,
2087 : bool sticky)
2088 : {
2089 : pgssEntry *entry;
2090 : bool found;
2091 :
2092 : /* Make space if needed */
2093 105668 : while (hash_get_num_entries(pgss_hash) >= pgss_max)
2094 0 : entry_dealloc();
2095 :
2096 : /* Find or create an entry with desired hash code */
2097 105668 : entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found);
2098 :
2099 105668 : if (!found)
2100 : {
2101 : /* New entry, initialize it */
2102 :
2103 : /* reset the statistics */
2104 105668 : memset(&entry->counters, 0, sizeof(Counters));
2105 : /* set the appropriate initial usage count */
2106 105668 : entry->counters.usage = sticky ? pgss->cur_median_usage : USAGE_INIT;
2107 : /* re-initialize the mutex each time ... we assume no one using it */
2108 105668 : SpinLockInit(&entry->mutex);
2109 : /* ... and don't forget the query text metadata */
2110 : Assert(query_len >= 0);
2111 105668 : entry->query_offset = query_offset;
2112 105668 : entry->query_len = query_len;
2113 105668 : entry->encoding = encoding;
2114 105668 : entry->stats_since = GetCurrentTimestamp();
2115 105668 : entry->minmax_stats_since = entry->stats_since;
2116 : }
2117 :
2118 105668 : return entry;
2119 : }
2120 :
2121 : /*
2122 : * qsort comparator for sorting into increasing usage order
2123 : */
2124 : static int
2125 0 : entry_cmp(const void *lhs, const void *rhs)
2126 : {
2127 0 : double l_usage = (*(pgssEntry *const *) lhs)->counters.usage;
2128 0 : double r_usage = (*(pgssEntry *const *) rhs)->counters.usage;
2129 :
2130 0 : if (l_usage < r_usage)
2131 0 : return -1;
2132 0 : else if (l_usage > r_usage)
2133 0 : return +1;
2134 : else
2135 0 : return 0;
2136 : }
2137 :
2138 : /*
2139 : * Deallocate least-used entries.
2140 : *
2141 : * Caller must hold an exclusive lock on pgss->lock.
2142 : */
static void
entry_dealloc(void)
{
	HASH_SEQ_STATUS hash_seq;
	pgssEntry **entries;		/* array of entry pointers, for sorting */
	pgssEntry  *entry;
	int			nvictims;
	int			i;
	Size		tottextlen;		/* total bytes of valid query texts (+1 each) */
	int			nvalidtexts;	/* number of entries with a valid text */

	/*
	 * Sort entries by usage and deallocate USAGE_DEALLOC_PERCENT of them.
	 * While we're scanning the table, apply the decay factor to the usage
	 * values, and update the mean query length.
	 *
	 * Note that the mean query length is almost immediately obsolete, since
	 * we compute it before not after discarding the least-used entries.
	 * Hopefully, that doesn't affect the mean too much; it doesn't seem worth
	 * making two passes to get a more current result.  Likewise, the new
	 * cur_median_usage includes the entries we're about to zap.
	 */

	/* One pointer slot per current hashtable entry */
	entries = palloc(hash_get_num_entries(pgss_hash) * sizeof(pgssEntry *));

	i = 0;
	tottextlen = 0;
	nvalidtexts = 0;

	/* Single pass: collect pointers, decay usage, accumulate text lengths */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		entries[i++] = entry;
		/* "Sticky" entries get a different usage decay rate. */
		if (IS_STICKY(entry->counters))
			entry->counters.usage *= STICKY_DECREASE_FACTOR;
		else
			entry->counters.usage *= USAGE_DECREASE_FACTOR;
		/* In the mean length computation, ignore dropped texts. */
		if (entry->query_len >= 0)
		{
			/* +1 accounts for the string's null terminator in the file */
			tottextlen += entry->query_len + 1;
			nvalidtexts++;
		}
	}

	/* Sort into increasing order by usage */
	qsort(entries, i, sizeof(pgssEntry *), entry_cmp);

	/* Record the (approximate) median usage */
	if (i > 0)
		pgss->cur_median_usage = entries[i / 2]->counters.usage;
	/* Record the mean query length */
	if (nvalidtexts > 0)
		pgss->mean_query_len = tottextlen / nvalidtexts;
	else
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	/* Now zap an appropriate fraction of lowest-usage entries */
	/* ... always remove at least 10, but never more than exist */
	nvictims = Max(10, i * USAGE_DEALLOC_PERCENT / 100);
	nvictims = Min(nvictims, i);

	for (i = 0; i < nvictims; i++)
	{
		hash_search(pgss_hash, &entries[i]->key, HASH_REMOVE, NULL);
	}

	pfree(entries);

	/* Increment the number of times entries are deallocated */
	SpinLockAcquire(&pgss->mutex);
	pgss->stats.dealloc += 1;
	SpinLockRelease(&pgss->mutex);
}
2217 :
2218 : /*
2219 : * Given a query string (not necessarily null-terminated), allocate a new
2220 : * entry in the external query text file and store the string there.
2221 : *
2222 : * If successful, returns true, and stores the new entry's offset in the file
2223 : * into *query_offset. Also, if gc_count isn't NULL, *gc_count is set to the
2224 : * number of garbage collections that have occurred so far.
2225 : *
2226 : * On failure, returns false.
2227 : *
2228 : * At least a shared lock on pgss->lock must be held by the caller, so as
2229 : * to prevent a concurrent garbage collection. Share-lock-holding callers
2230 : * should pass a gc_count pointer to obtain the number of garbage collections,
2231 : * so that they can recheck the count after obtaining exclusive lock to
2232 : * detect whether a garbage collection occurred (and removed this entry).
2233 : */
static bool
qtext_store(const char *query, int query_len,
			Size *query_offset, int *gc_count)
{
	Size		off;			/* file offset reserved for this text */
	int			fd;

	/*
	 * We use a spinlock to protect extent/n_writers/gc_count, so that
	 * multiple processes may execute this function concurrently.
	 */
	/* Reserve our slice of the file and announce ourselves as a writer */
	SpinLockAcquire(&pgss->mutex);
	off = pgss->extent;
	pgss->extent += query_len + 1;
	pgss->n_writers++;
	if (gc_count)
		*gc_count = pgss->gc_count;
	SpinLockRelease(&pgss->mutex);

	*query_offset = off;

	/*
	 * Don't allow the file to grow larger than what qtext_load_file can
	 * (theoretically) handle.  This has been seen to be reachable on 32-bit
	 * platforms.
	 */
	if (unlikely(query_len >= MaxAllocHugeSize - off))
	{
		errno = EFBIG;			/* not quite right, but it'll do */
		fd = -1;				/* so the error path skips the close */
		goto error;
	}

	/* Now write the data into the successfully-reserved part of the file */
	fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDWR | O_CREAT | PG_BINARY);
	if (fd < 0)
		goto error;

	/* Write the text, then its null terminator, at the reserved offset */
	if (pg_pwrite(fd, query, query_len, off) != query_len)
		goto error;
	if (pg_pwrite(fd, "\0", 1, off + query_len) != 1)
		goto error;

	CloseTransientFile(fd);

	/* Mark our write complete */
	SpinLockAcquire(&pgss->mutex);
	pgss->n_writers--;
	SpinLockRelease(&pgss->mutex);

	return true;

error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_TEXT_FILE)));

	if (fd >= 0)
		CloseTransientFile(fd);

	/* Mark our write complete */
	/* (must decrement n_writers even on failure, else readers stall) */
	SpinLockAcquire(&pgss->mutex);
	pgss->n_writers--;
	SpinLockRelease(&pgss->mutex);

	return false;
}
2302 :
2303 : /*
2304 : * Read the external query text file into a malloc'd buffer.
2305 : *
2306 : * Returns NULL (without throwing an error) if unable to read, eg
2307 : * file not there or insufficient memory.
2308 : *
2309 : * On success, the buffer size is also returned into *buffer_size.
2310 : *
2311 : * This can be called without any lock on pgss->lock, but in that case
2312 : * the caller is responsible for verifying that the result is sane.
2313 : */
static char *
qtext_load_file(Size *buffer_size)
{
	char	   *buf;
	int			fd;
	struct stat stat;			/* note: shadows the stat() function name */
	Size		nread;

	fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDONLY | PG_BINARY);
	if (fd < 0)
	{
		/* A missing file is expected (e.g. before first store); stay quiet */
		if (errno != ENOENT)
			ereport(LOG,
					(errcode_for_file_access(),
					 errmsg("could not read file \"%s\": %m",
							PGSS_TEXT_FILE)));
		return NULL;
	}

	/* Get file length */
	if (fstat(fd, &stat))
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not stat file \"%s\": %m",
						PGSS_TEXT_FILE)));
		CloseTransientFile(fd);
		return NULL;
	}

	/* Allocate buffer; beware that off_t might be wider than size_t */
	if (stat.st_size <= MaxAllocHugeSize)
		buf = (char *) malloc(stat.st_size);
	else
		buf = NULL;
	if (buf == NULL)
	{
		ereport(LOG,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Could not allocate enough memory to read file \"%s\".",
						   PGSS_TEXT_FILE)));
		CloseTransientFile(fd);
		return NULL;
	}

	/*
	 * OK, slurp in the file.  Windows fails if we try to read more than
	 * INT_MAX bytes at once, and other platforms might not like that either,
	 * so read a very large file in 1GB segments.
	 */
	nread = 0;
	while (nread < stat.st_size)
	{
		int			toread = Min(1024 * 1024 * 1024, stat.st_size - nread);

		/*
		 * If we get a short read and errno doesn't get set, the reason is
		 * probably that garbage collection truncated the file since we did
		 * the fstat(), so we don't log a complaint --- but we don't return
		 * the data, either, since it's most likely corrupt due to concurrent
		 * writes from garbage collection.
		 */
		errno = 0;
		if (read(fd, buf + nread, toread) != toread)
		{
			if (errno)
				ereport(LOG,
						(errcode_for_file_access(),
						 errmsg("could not read file \"%s\": %m",
								PGSS_TEXT_FILE)));
			free(buf);
			CloseTransientFile(fd);
			return NULL;
		}
		nread += toread;
	}

	/* Close failure is log-worthy but doesn't invalidate the data read */
	if (CloseTransientFile(fd) != 0)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not close file \"%s\": %m", PGSS_TEXT_FILE)));

	*buffer_size = nread;
	return buf;
}
2400 :
2401 : /*
2402 : * Locate a query text in the file image previously read by qtext_load_file().
2403 : *
2404 : * We validate the given offset/length, and return NULL if bogus. Otherwise,
2405 : * the result points to a null-terminated string within the buffer.
2406 : */
2407 : static char *
2408 156288 : qtext_fetch(Size query_offset, int query_len,
2409 : char *buffer, Size buffer_size)
2410 : {
2411 : /* File read failed? */
2412 156288 : if (buffer == NULL)
2413 0 : return NULL;
2414 : /* Bogus offset/length? */
2415 156288 : if (query_len < 0 ||
2416 156288 : query_offset + query_len >= buffer_size)
2417 0 : return NULL;
2418 : /* As a further sanity check, make sure there's a trailing null */
2419 156288 : if (buffer[query_offset + query_len] != '\0')
2420 0 : return NULL;
2421 : /* Looks OK */
2422 156288 : return buffer + query_offset;
2423 : }
2424 :
2425 : /*
2426 : * Do we need to garbage-collect the external query text file?
2427 : *
2428 : * Caller should hold at least a shared lock on pgss->lock.
2429 : */
2430 : static bool
2431 55252 : need_gc_qtexts(void)
2432 : {
2433 : Size extent;
2434 :
2435 : /* Read shared extent pointer */
2436 55252 : SpinLockAcquire(&pgss->mutex);
2437 55252 : extent = pgss->extent;
2438 55252 : SpinLockRelease(&pgss->mutex);
2439 :
2440 : /*
2441 : * Don't proceed if file does not exceed 512 bytes per possible entry.
2442 : *
2443 : * Here and in the next test, 32-bit machines have overflow hazards if
2444 : * pgss_max and/or mean_query_len are large. Force the multiplications
2445 : * and comparisons to be done in uint64 arithmetic to forestall trouble.
2446 : */
2447 55252 : if ((uint64) extent < (uint64) 512 * pgss_max)
2448 55252 : return false;
2449 :
2450 : /*
2451 : * Don't proceed if file is less than about 50% bloat. Nothing can or
2452 : * should be done in the event of unusually large query texts accounting
2453 : * for file's large size. We go to the trouble of maintaining the mean
2454 : * query length in order to prevent garbage collection from thrashing
2455 : * uselessly.
2456 : */
2457 0 : if ((uint64) extent < (uint64) pgss->mean_query_len * pgss_max * 2)
2458 0 : return false;
2459 :
2460 0 : return true;
2461 : }
2462 :
2463 : /*
2464 : * Garbage-collect orphaned query texts in external file.
2465 : *
2466 : * This won't be called often in the typical case, since it's likely that
2467 : * there won't be too much churn, and besides, a similar compaction process
2468 : * occurs when serializing to disk at shutdown or as part of resetting.
2469 : * Despite this, it seems prudent to plan for the edge case where the file
2470 : * becomes unreasonably large, with no other method of compaction likely to
2471 : * occur in the foreseeable future.
2472 : *
2473 : * The caller must hold an exclusive lock on pgss->lock.
2474 : *
2475 : * At the first sign of trouble we unlink the query text file to get a clean
2476 : * slate (although existing statistics are retained), rather than risk
2477 : * thrashing by allowing the same problem case to recur indefinitely.
2478 : */
static void
gc_qtexts(void)
{
	char	   *qbuffer;		/* in-memory image of the old texts file */
	Size		qbuffer_size;
	FILE	   *qfile = NULL;	/* NULL signals "not open" on the fail path */
	HASH_SEQ_STATUS hash_seq;
	pgssEntry  *entry;
	Size		extent;			/* write position in the compacted file */
	int			nentries;		/* number of texts successfully kept */

	/*
	 * When called from pgss_store, some other session might have proceeded
	 * with garbage collection in the no-lock-held interim of lock strength
	 * escalation.  Check once more that this is actually necessary.
	 */
	if (!need_gc_qtexts())
		return;

	/*
	 * Load the old texts file.  If we fail (out of memory, for instance),
	 * invalidate query texts.  Hopefully this is rare.  It might seem better
	 * to leave things alone on an OOM failure, but the problem is that the
	 * file is only going to get bigger; hoping for a future non-OOM result is
	 * risky and can easily lead to complete denial of service.
	 */
	qbuffer = qtext_load_file(&qbuffer_size);
	if (qbuffer == NULL)
		goto gc_fail;

	/*
	 * We overwrite the query texts file in place, so as to reduce the risk of
	 * an out-of-disk-space failure.  Since the file is guaranteed not to get
	 * larger, this should always work on traditional filesystems; though we
	 * could still lose on copy-on-write filesystems.
	 */
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not write file \"%s\": %m",
						PGSS_TEXT_FILE)));
		goto gc_fail;
	}

	extent = 0;
	nentries = 0;

	/* Copy each live entry's text from the buffer into the rewritten file */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		int			query_len = entry->query_len;
		char	   *qry = qtext_fetch(entry->query_offset,
									  query_len,
									  qbuffer,
									  qbuffer_size);

		if (qry == NULL)
		{
			/* Trouble ... drop the text (query_len = -1 marks it dropped) */
			entry->query_offset = 0;
			entry->query_len = -1;
			/* entry will not be counted in mean query length computation */
			continue;
		}

		/* +1 writes the null terminator along with the text */
		if (fwrite(qry, 1, query_len + 1, qfile) != query_len + 1)
		{
			ereport(LOG,
					(errcode_for_file_access(),
					 errmsg("could not write file \"%s\": %m",
							PGSS_TEXT_FILE)));
			/* must terminate the seq scan before bailing out of it */
			hash_seq_term(&hash_seq);
			goto gc_fail;
		}

		/* Point the entry at its new, compacted location */
		entry->query_offset = extent;
		extent += query_len + 1;
		nentries++;
	}

	/*
	 * Truncate away any now-unused space.  If this fails for some odd reason,
	 * we log it, but there's no need to fail.
	 */
	if (ftruncate(fileno(qfile), extent) != 0)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not truncate file \"%s\": %m",
						PGSS_TEXT_FILE)));

	/* FreeFile also flushes; a failure here means the new file is suspect */
	if (FreeFile(qfile))
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not write file \"%s\": %m",
						PGSS_TEXT_FILE)));
		qfile = NULL;			/* already closed; don't close again below */
		goto gc_fail;
	}

	elog(DEBUG1, "pgss gc of queries file shrunk size from %zu to %zu",
		 pgss->extent, extent);

	/* Reset the shared extent pointer */
	pgss->extent = extent;

	/*
	 * Also update the mean query length, to be sure that need_gc_qtexts()
	 * won't still think we have a problem.
	 */
	if (nentries > 0)
		pgss->mean_query_len = extent / nentries;
	else
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	free(qbuffer);

	/*
	 * OK, count a garbage collection cycle.  (Note: even though we have
	 * exclusive lock on pgss->lock, we must take pgss->mutex for this, since
	 * other processes may examine gc_count while holding only the mutex.
	 * Also, we have to advance the count *after* we've rewritten the file,
	 * else other processes might not realize they read a stale file.)
	 */
	record_gc_qtexts();

	return;

gc_fail:
	/* clean up resources */
	if (qfile)
		FreeFile(qfile);
	free(qbuffer);

	/*
	 * Since the contents of the external file are now uncertain, mark all
	 * hashtable entries as having invalid texts.
	 */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		entry->query_offset = 0;
		entry->query_len = -1;
	}

	/*
	 * Destroy the query text file and create a new, empty one
	 */
	(void) unlink(PGSS_TEXT_FILE);
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not recreate file \"%s\": %m",
						PGSS_TEXT_FILE)));
	else
		FreeFile(qfile);

	/* Reset the shared extent pointer */
	pgss->extent = 0;

	/* Reset mean_query_len to match the new state */
	pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	/*
	 * Bump the GC count even though we failed.
	 *
	 * This is needed to make concurrent readers of file without any lock on
	 * pgss->lock notice existence of new version of file.  Once readers
	 * subsequently observe a change in GC count with pgss->lock held, that
	 * forces a safe reopen of file.  Writers also require that we bump here,
	 * of course.  (As required by locking protocol, readers and writers don't
	 * trust earlier file contents until gc_count is found unchanged after
	 * pgss->lock acquired in shared or exclusive mode respectively.)
	 */
	record_gc_qtexts();
}
2658 :
/*
 * Reset one hashtable entry "e": clear just its per-kind min/max statistics
 * when minmax_only is set, otherwise remove the entry outright.
 *
 * NOTE(review): this macro reads the caller's locals minmax_only and
 * stats_reset and bumps the caller's num_remove counter; it is intended
 * only for use inside entry_reset(), which holds pgss->lock exclusively.
 */
#define SINGLE_ENTRY_RESET(e) \
if (e) { \
	if (minmax_only) { \
		/* When requested reset only min/max statistics of an entry */ \
		for (int kind = 0; kind < PGSS_NUMKIND; kind++) \
		{ \
			e->counters.max_time[kind] = 0; \
			e->counters.min_time[kind] = 0; \
		} \
		e->minmax_stats_since = stats_reset; \
	} \
	else \
	{ \
		/* Remove the key otherwise */ \
		hash_search(pgss_hash, &e->key, HASH_REMOVE, NULL); \
		num_remove++; \
	} \
}
2677 :
/*
 * Reset entries corresponding to parameters passed.
 *
 * Three cases, depending on which of the parameters are non-zero:
 *  - all of userid, dbid and queryid given: fast path — look up directly
 *    the (at most two) matching entries, i.e. the top-level and the
 *    non-top-level variants of that key;
 *  - some but not all given: scan the whole hashtable and reset every
 *    entry matching the supplied parameters;
 *  - none given: reset all entries.
 *
 * When minmax_only is true, only min/max timing statistics are zeroed and
 * the entries are kept; otherwise matching entries are removed (see
 * SINGLE_ENTRY_RESET).  If the reset ends up removing every entry, we also
 * reset the module-wide statistics and truncate the external query-text
 * file.
 *
 * Returns the timestamp recorded as the reset time.
 */
static TimestampTz
entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)
{
	HASH_SEQ_STATUS hash_seq;
	pgssEntry  *entry;
	FILE	   *qfile;
	long		num_entries;
	long		num_remove = 0;
	pgssHashKey key;
	TimestampTz stats_reset;

	if (!pgss || !pgss_hash)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));

	/* Entry removal requires exclusive lock (see locking notes at top) */
	LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
	num_entries = hash_get_num_entries(pgss_hash);

	/* Single timestamp used for every entry touched by this reset */
	stats_reset = GetCurrentTimestamp();

	if (userid != 0 && dbid != 0 && queryid != UINT64CONST(0))
	{
		/* If all the parameters are available, use the fast path. */
		memset(&key, 0, sizeof(pgssHashKey));
		key.userid = userid;
		key.dbid = dbid;
		key.queryid = queryid;

		/*
		 * Reset the entry if it exists, starting with the non-top-level
		 * entry.
		 */
		key.toplevel = false;
		entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);

		SINGLE_ENTRY_RESET(entry);

		/* Also reset the top-level entry if it exists. */
		key.toplevel = true;
		entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);

		SINGLE_ENTRY_RESET(entry);
	}
	else if (userid != 0 || dbid != 0 || queryid != UINT64CONST(0))
	{
		/* Reset entries corresponding to valid parameters. */
		hash_seq_init(&hash_seq, pgss_hash);
		while ((entry = hash_seq_search(&hash_seq)) != NULL)
		{
			/* Zero parameters act as wildcards */
			if ((!userid || entry->key.userid == userid) &&
				(!dbid || entry->key.dbid == dbid) &&
				(!queryid || entry->key.queryid == queryid))
			{
				SINGLE_ENTRY_RESET(entry);
			}
		}
	}
	else
	{
		/* Reset all entries. */
		hash_seq_init(&hash_seq, pgss_hash);
		while ((entry = hash_seq_search(&hash_seq)) != NULL)
		{
			SINGLE_ENTRY_RESET(entry);
		}
	}

	/*
	 * All entries are removed?  If not (including the minmax_only case,
	 * where nothing is removed), skip the global reset and file cleanup.
	 */
	if (num_entries != num_remove)
		goto release_lock;

	/*
	 * Reset global statistics for pg_stat_statements since all entries are
	 * removed.
	 */
	SpinLockAcquire(&pgss->mutex);
	pgss->stats.dealloc = 0;
	pgss->stats.stats_reset = stats_reset;
	SpinLockRelease(&pgss->mutex);

	/*
	 * Write new empty query file, perhaps even creating a new one to recover
	 * if the file was missing.
	 */
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not create file \"%s\": %m",
						PGSS_TEXT_FILE)));
		goto done;
	}

	/* If ftruncate fails, log it, but it's not a fatal problem */
	if (ftruncate(fileno(qfile), 0) != 0)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not truncate file \"%s\": %m",
						PGSS_TEXT_FILE)));

	FreeFile(qfile);

done:
	/* Even if file recreation failed, reset the in-memory extent */
	pgss->extent = 0;
	/* This counts as a query text garbage collection for our purposes */
	record_gc_qtexts();

release_lock:
	LWLockRelease(pgss->lock);

	return stats_reset;
}
2795 :
/*
 * Generate a normalized version of the query string that will be used to
 * represent all similar queries.
 *
 * Each constant recorded in jstate is replaced by a "$n" parameter symbol;
 * a run of squashed constants is rendered as a single "$n /*, ... *​/"
 * placeholder.
 *
 * Note that the normalized representation may well vary depending on
 * just which "equivalent" query is used to create the hashtable entry.
 * We assume this is OK.
 *
 * If query_loc > 0, then "query" has been advanced by that much compared to
 * the original string start, so we need to translate the provided locations
 * to compensate.  (This lets us avoid re-scanning statements before the one
 * of interest, so it's worth doing.)
 *
 * *query_len_p contains the input string length, and is updated with
 * the result string length on exit.  The resulting string might be longer
 * or shorter depending on what happens with replacement of constants.
 *
 * Returns a palloc'd string.
 */
static char *
generate_normalized_query(JumbleState *jstate, const char *query,
						  int query_loc, int *query_len_p)
{
	char	   *norm_query;
	int			query_len = *query_len_p;
	int			i,
				norm_query_buflen,	/* Space allowed for norm_query */
				len_to_wrt,		/* Length (in bytes) to write */
				quer_loc = 0,	/* Source query byte location */
				n_quer_loc = 0, /* Normalized query byte location */
				last_off = 0,	/* Offset from start for previous tok */
				last_tok_len = 0;	/* Length (in bytes) of that tok */
	bool		in_squashed = false;	/* in a run of squashed consts? */
	int			skipped_constants = 0;	/* Position adjustment of later
										 * constants after squashed ones */


	/*
	 * Get constants' lengths (core system only gives us locations).  Note
	 * this also ensures the items are sorted by location.
	 */
	fill_in_constant_lengths(jstate, query, query_loc);

	/*
	 * Allow for $n symbols to be longer than the constants they replace.
	 * Constants must take at least one byte in text form, while a $n symbol
	 * certainly isn't more than 11 bytes, even if n reaches INT_MAX.  We
	 * could refine that limit based on the max value of n for the current
	 * query, but it hardly seems worth any extra effort to do so.
	 *
	 * Note this also gives enough room for the commented-out ", ..." list
	 * syntax used by constant squashing.
	 */
	norm_query_buflen = query_len + jstate->clocations_count * 10;

	/* Allocate result buffer */
	norm_query = palloc(norm_query_buflen + 1);

	for (i = 0; i < jstate->clocations_count; i++)
	{
		int			off,		/* Offset from start for cur tok */
					tok_len;	/* Length (in bytes) of that tok */

		off = jstate->clocations[i].location;

		/* Adjust recorded location if we're dealing with partial string */
		off -= query_loc;

		tok_len = jstate->clocations[i].length;

		if (tok_len < 0)
			continue;			/* ignore any duplicates */

		/*
		 * What to do next depends on whether we're squashing constant lists,
		 * and whether we're already in a run of such constants.
		 */
		if (!jstate->clocations[i].squashed)
		{
			/*
			 * This location corresponds to a constant not to be squashed.
			 * Print what comes before the constant ...
			 */
			len_to_wrt = off - last_off;
			len_to_wrt -= last_tok_len;

			Assert(len_to_wrt >= 0);

			memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
			n_quer_loc += len_to_wrt;

			/*
			 * ... and then a param symbol replacing the constant itself.
			 * Subtracting skipped_constants keeps the $n numbering
			 * consecutive after earlier runs were merged into one symbol.
			 */
			n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d",
								  i + 1 + jstate->highest_extern_param_id - skipped_constants);

			/* In case previous constants were merged away, stop doing that */
			in_squashed = false;
		}
		else if (!in_squashed)
		{
			/*
			 * This location is the start position of a run of constants to be
			 * squashed, so we need to print the representation of starting a
			 * group of stashed constants.
			 *
			 * Print what comes before the constant ...
			 */
			len_to_wrt = off - last_off;
			len_to_wrt -= last_tok_len;
			Assert(len_to_wrt >= 0);
			Assert(i + 1 < jstate->clocations_count);
			Assert(jstate->clocations[i + 1].squashed);
			memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
			n_quer_loc += len_to_wrt;

			/* ... and then start a run of squashed constants */
			n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d /*, ... */",
								  i + 1 + jstate->highest_extern_param_id - skipped_constants);

			/* The next location will match the block below, to end the run */
			in_squashed = true;

			skipped_constants++;
		}
		else
		{
			/*
			 * The second location of a run of squashable elements; this
			 * indicates its end.
			 */
			in_squashed = false;
		}

		/* Otherwise the constant is squashed away -- move forward */
		quer_loc = off + tok_len;
		last_off = off;
		last_tok_len = tok_len;
	}

	/*
	 * We've copied up until the last ignorable constant.  Copy over the
	 * remaining bytes of the original query string.
	 */
	len_to_wrt = query_len - quer_loc;

	Assert(len_to_wrt >= 0);
	memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
	n_quer_loc += len_to_wrt;

	Assert(n_quer_loc <= norm_query_buflen);
	norm_query[n_quer_loc] = '\0';

	*query_len_p = n_quer_loc;
	return norm_query;
}
2951 :
/*
 * Given a valid SQL string and an array of constant-location records,
 * fill in the textual lengths of those constants.
 *
 * The constants may use any allowed constant syntax, such as float literals,
 * bit-strings, single-quoted strings and dollar-quoted strings.  This is
 * accomplished by using the public API for the core scanner.
 *
 * It is the caller's job to ensure that the string is a valid SQL statement
 * with constants at the indicated locations.  Since in practice the string
 * has already been parsed, and the locations that the caller provides will
 * have originated from within the authoritative parser, this should not be
 * a problem.
 *
 * Duplicate constant pointers are possible, and will have their lengths
 * marked as '-1', so that they are later ignored.  (Actually, we assume the
 * lengths were initialized as -1 to start with, and don't change them here.)
 *
 * If query_loc > 0, then "query" has been advanced by that much compared to
 * the original string start, so we need to translate the provided locations
 * to compensate.  (This lets us avoid re-scanning statements before the one
 * of interest, so it's worth doing.)
 *
 * N.B. There is an assumption that a '-' character at a Const location begins
 * a negative numeric constant.  This precludes there ever being another
 * reason for a constant to start with a '-'.
 */
static void
fill_in_constant_lengths(JumbleState *jstate, const char *query,
						 int query_loc)
{
	LocationLen *locs;
	core_yyscan_t yyscanner;
	core_yy_extra_type yyextra;
	core_YYSTYPE yylval;
	YYLTYPE		yylloc;
	int			last_loc = -1;
	int			i;

	/*
	 * Sort the records by location so that we can process them in order while
	 * scanning the query text.
	 */
	if (jstate->clocations_count > 1)
		qsort(jstate->clocations, jstate->clocations_count,
			  sizeof(LocationLen), comp_location);
	locs = jstate->clocations;

	/* initialize the flex scanner --- should match raw_parser() */
	yyscanner = scanner_init(query,
							 &yyextra,
							 &ScanKeywords,
							 ScanKeywordTokens);

	/* we don't want to re-emit any escape string warnings */
	yyextra.escape_string_warning = false;

	/* Search for each constant, in sequence */
	for (i = 0; i < jstate->clocations_count; i++)
	{
		int			loc = locs[i].location;
		int			tok;

		/* Adjust recorded location if we're dealing with partial string */
		loc -= query_loc;

		Assert(loc >= 0);

		/* Sorted order lets us detect duplicates by comparing to last_loc */
		if (loc <= last_loc)
			continue;			/* Duplicate constant, ignore */

		/*
		 * Lex tokens until we find the desired constant.  The scanner keeps
		 * its position across iterations of the outer loop, so each constant
		 * is reached by continuing from the previous one.
		 */
		for (;;)
		{
			tok = core_yylex(&yylval, &yylloc, yyscanner);

			/* We should not hit end-of-string, but if we do, behave sanely */
			if (tok == 0)
				break;			/* out of inner for-loop */

			/*
			 * We should find the token position exactly, but if we somehow
			 * run past it, work with that.
			 */
			if (yylloc >= loc)
			{
				if (query[loc] == '-')
				{
					/*
					 * It's a negative value - this is the one and only case
					 * where we replace more than a single token.
					 *
					 * Do not compensate for the core system's special-case
					 * adjustment of location to that of the leading '-'
					 * operator in the event of a negative constant.  It is
					 * also useful for our purposes to start from the minus
					 * symbol.  In this way, queries like "select * from foo
					 * where bar = 1" and "select * from foo where bar = -2"
					 * will have identical normalized query strings.
					 */
					tok = core_yylex(&yylval, &yylloc, yyscanner);
					if (tok == 0)
						break;	/* out of inner for-loop */
				}

				/*
				 * We now rely on the assumption that flex has placed a zero
				 * byte after the text of the current token in scanbuf.
				 */
				locs[i].length = strlen(yyextra.scanbuf + loc);
				break;			/* out of inner for-loop */
			}
		}

		/* If we hit end-of-string, give up, leaving remaining lengths -1 */
		if (tok == 0)
			break;

		last_loc = loc;
	}

	scanner_finish(yyscanner);
}
3075 :
3076 : /*
3077 : * comp_location: comparator for qsorting LocationLen structs by location
3078 : */
3079 : static int
3080 69616 : comp_location(const void *a, const void *b)
3081 : {
3082 69616 : int l = ((const LocationLen *) a)->location;
3083 69616 : int r = ((const LocationLen *) b)->location;
3084 :
3085 69616 : return pg_cmp_s32(l, r);
3086 : }
|