Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * pg_stat_statements.c
4 : * Track statement planning and execution times as well as resource
5 : * usage across a whole database cluster.
6 : *
7 : * Execution costs are totaled for each distinct source query, and kept in
8 : * a shared hashtable. (We track only as many distinct queries as will fit
9 : * in the designated amount of shared memory.)
10 : *
11 : * Starting in Postgres 9.2, this module normalized query entries. As of
12 : * Postgres 14, the normalization is done by the core if compute_query_id is
13 : * enabled, or optionally by third-party modules.
14 : *
15 : * To facilitate presenting entries to users, we create "representative" query
16 : * strings in which constants are replaced with parameter symbols ($n), to
17 : * make it clearer what a normalized entry can represent. To save on shared
18 : * memory, and to avoid having to truncate oversized query strings, we store
19 : * these strings in a temporary external query-texts file. Offsets into this
20 : * file are kept in shared memory.
21 : *
22 : * Note about locking issues: to create or delete an entry in the shared
23 : * hashtable, one must hold pgss->lock exclusively. Modifying any field
24 : * in an entry except the counters requires the same. To look up an entry,
25 : * one must hold the lock shared. To read or update the counters within
26 : * an entry, one must hold the lock shared or exclusive (so the entry doesn't
27 : * disappear!) and also take the entry's mutex spinlock.
28 : * The shared state variable pgss->extent (the next free spot in the external
29 : * query-text file) should be accessed only while holding either the
30 : * pgss->mutex spinlock, or exclusive lock on pgss->lock. We use the mutex to
31 : * allow reserving file space while holding only shared lock on pgss->lock.
32 : * Rewriting the entire external query-text file, eg for garbage collection,
33 : * requires holding pgss->lock exclusively; this allows individual entries
34 : * in the file to be read or written while holding only shared lock.
35 : *
36 : *
37 : * Copyright (c) 2008-2025, PostgreSQL Global Development Group
38 : *
39 : * IDENTIFICATION
40 : * contrib/pg_stat_statements/pg_stat_statements.c
41 : *
42 : *-------------------------------------------------------------------------
43 : */
44 : #include "postgres.h"
45 :
46 : #include <math.h>
47 : #include <sys/stat.h>
48 : #include <unistd.h>
49 :
50 : #include "access/parallel.h"
51 : #include "catalog/pg_authid.h"
52 : #include "common/int.h"
53 : #include "executor/instrument.h"
54 : #include "funcapi.h"
55 : #include "jit/jit.h"
56 : #include "mb/pg_wchar.h"
57 : #include "miscadmin.h"
58 : #include "nodes/queryjumble.h"
59 : #include "optimizer/planner.h"
60 : #include "parser/analyze.h"
61 : #include "parser/scanner.h"
62 : #include "pgstat.h"
63 : #include "storage/fd.h"
64 : #include "storage/ipc.h"
65 : #include "storage/lwlock.h"
66 : #include "storage/shmem.h"
67 : #include "storage/spin.h"
68 : #include "tcop/utility.h"
69 : #include "utils/acl.h"
70 : #include "utils/builtins.h"
71 : #include "utils/memutils.h"
72 : #include "utils/timestamp.h"
73 :
/* Module magic block; also reports extension name/version to the server */
PG_MODULE_MAGIC_EXT(
					.name = "pg_stat_statements",
					.version = PG_VERSION
);

/* Location of permanent stats file (valid when database is shut down) */
#define PGSS_DUMP_FILE	PGSTAT_STAT_PERMANENT_DIRECTORY "/pg_stat_statements.stat"

/*
 * Location of external query text file.
 */
#define PGSS_TEXT_FILE	PG_STAT_TMP_DIR "/pgss_query_texts.stat"

/* Magic number identifying the stats file format */
static const uint32 PGSS_FILE_HEADER = 0x20250731;

/* PostgreSQL major version number, changes in which invalidate all entries */
static const uint32 PGSS_PG_MAJOR_VERSION = PG_VERSION_NUM / 100;

/* XXX: Should USAGE_EXEC reflect execution time and/or buffer usage? */
#define USAGE_EXEC(duration)	(1.0)
#define USAGE_INIT				(1.0)	/* including initial planning */
#define ASSUMED_MEDIAN_INIT		(10.0)	/* initial assumed median usage */
#define ASSUMED_LENGTH_INIT		1024	/* initial assumed mean query length */
#define USAGE_DECREASE_FACTOR	(0.99)	/* decreased every entry_dealloc */
#define STICKY_DECREASE_FACTOR	(0.50)	/* factor for sticky entries */
#define USAGE_DEALLOC_PERCENT	5		/* free this % of entries at once */
/* A "sticky" entry has been allocated but never yet planned or executed */
#define IS_STICKY(c)	((c.calls[PGSS_PLAN] + c.calls[PGSS_EXEC]) == 0)

/*
 * Extension version number, for supporting older extension versions' objects
 */
typedef enum pgssVersion
{
	PGSS_V1_0 = 0,
	PGSS_V1_1,
	PGSS_V1_2,
	PGSS_V1_3,
	PGSS_V1_8,
	PGSS_V1_9,
	PGSS_V1_10,
	PGSS_V1_11,
	PGSS_V1_12,
	PGSS_V1_13,
} pgssVersion;

/* Which kind of statistics event a pgss_store() call represents */
typedef enum pgssStoreKind
{
	PGSS_INVALID = -1,

	/*
	 * PGSS_PLAN and PGSS_EXEC must be respectively 0 and 1 as they're used to
	 * reference the underlying values in the arrays in the Counters struct,
	 * and this order is required in pg_stat_statements_internal().
	 */
	PGSS_PLAN = 0,
	PGSS_EXEC,
} pgssStoreKind;

/* Number of valid pgssStoreKind values usable as array indexes */
#define PGSS_NUMKIND	(PGSS_EXEC + 1)
134 :
/*
 * Hashtable key that defines the identity of a hashtable entry.  We separate
 * queries by user and by database even if they are otherwise identical.
 *
 * If you add a new key to this struct, make sure to teach pgss_store() to
 * zero the padding bytes.  Otherwise, things will break, because pgss_hash is
 * created using HASH_BLOBS, and thus tag_hash is used to hash this.
 */
typedef struct pgssHashKey
{
	Oid			userid;			/* user OID */
	Oid			dbid;			/* database OID */
	int64		queryid;		/* query identifier */
	bool		toplevel;		/* query executed at top level */
} pgssHashKey;

/*
 * The actual stats counters kept within pgssEntry.
 *
 * Arrays indexed by pgssStoreKind hold separate planning/execution totals.
 */
typedef struct Counters
{
	int64		calls[PGSS_NUMKIND];	/* # of times planned/executed */
	double		total_time[PGSS_NUMKIND];	/* total planning/execution time,
											 * in msec */
	double		min_time[PGSS_NUMKIND]; /* minimum planning/execution time in
										 * msec since min/max reset */
	double		max_time[PGSS_NUMKIND]; /* maximum planning/execution time in
										 * msec since min/max reset */
	double		mean_time[PGSS_NUMKIND];	/* mean planning/execution time in
											 * msec */
	double		sum_var_time[PGSS_NUMKIND]; /* sum of variances in
											 * planning/execution time in msec */
	int64		rows;			/* total # of retrieved or affected rows */
	int64		shared_blks_hit;	/* # of shared buffer hits */
	int64		shared_blks_read;	/* # of shared disk blocks read */
	int64		shared_blks_dirtied;	/* # of shared disk blocks dirtied */
	int64		shared_blks_written;	/* # of shared disk blocks written */
	int64		local_blks_hit; /* # of local buffer hits */
	int64		local_blks_read;	/* # of local disk blocks read */
	int64		local_blks_dirtied; /* # of local disk blocks dirtied */
	int64		local_blks_written; /* # of local disk blocks written */
	int64		temp_blks_read; /* # of temp blocks read */
	int64		temp_blks_written;	/* # of temp blocks written */
	double		shared_blk_read_time;	/* time spent reading shared blocks,
										 * in msec */
	double		shared_blk_write_time;	/* time spent writing shared blocks,
										 * in msec */
	double		local_blk_read_time;	/* time spent reading local blocks, in
										 * msec */
	double		local_blk_write_time;	/* time spent writing local blocks, in
										 * msec */
	double		temp_blk_read_time; /* time spent reading temp blocks, in msec */
	double		temp_blk_write_time;	/* time spent writing temp blocks, in
										 * msec */
	double		usage;			/* usage factor */
	int64		wal_records;	/* # of WAL records generated */
	int64		wal_fpi;		/* # of WAL full page images generated */
	uint64		wal_bytes;		/* total amount of WAL generated in bytes */
	int64		wal_buffers_full;	/* # of times the WAL buffers became full */
	int64		jit_functions;	/* total number of JIT functions emitted */
	double		jit_generation_time;	/* total time to generate jit code */
	int64		jit_inlining_count; /* number of times inlining time has been
									 * > 0 */
	double		jit_deform_time;	/* total time to deform tuples in jit code */
	int64		jit_deform_count;	/* number of times deform time has been >
									 * 0 */

	double		jit_inlining_time;	/* total time to inline jit code */
	int64		jit_optimization_count; /* number of times optimization time
										 * has been > 0 */
	double		jit_optimization_time;	/* total time to optimize jit code */
	int64		jit_emission_count; /* number of times emission time has been
									 * > 0 */
	double		jit_emission_time;	/* total time to emit jit code */
	int64		parallel_workers_to_launch; /* # of parallel workers planned
											 * to be launched */
	int64		parallel_workers_launched;	/* # of parallel workers actually
											 * launched */
	int64		generic_plan_calls; /* number of calls using a generic plan */
	int64		custom_plan_calls;	/* number of calls using a custom plan */
} Counters;

/*
 * Global statistics for pg_stat_statements
 */
typedef struct pgssGlobalStats
{
	int64		dealloc;		/* # of times entries were deallocated */
	TimestampTz stats_reset;	/* timestamp with all stats reset */
} pgssGlobalStats;

/*
 * Statistics per statement
 *
 * Note: in event of a failure in garbage collection of the query text file,
 * we reset query_offset to zero and query_len to -1.  This will be seen as
 * an invalid state by qtext_fetch().
 */
typedef struct pgssEntry
{
	pgssHashKey key;			/* hash key of entry - MUST BE FIRST */
	Counters	counters;		/* the statistics for this query */
	Size		query_offset;	/* query text offset in external file */
	int			query_len;		/* # of valid bytes in query string, or -1 */
	int			encoding;		/* query text encoding */
	TimestampTz stats_since;	/* timestamp of entry allocation */
	TimestampTz minmax_stats_since; /* timestamp of last min/max values reset */
	slock_t		mutex;			/* protects the counters only */
} pgssEntry;

/*
 * Global shared state
 *
 * See the locking discussion in the file header comment for which lock
 * protects which field.
 */
typedef struct pgssSharedState
{
	LWLock	   *lock;			/* protects hashtable search/modification */
	double		cur_median_usage;	/* current median usage in hashtable */
	Size		mean_query_len; /* current mean entry text length */
	slock_t		mutex;			/* protects following fields only: */
	Size		extent;			/* current extent of query file */
	int			n_writers;		/* number of active writers to query file */
	int			gc_count;		/* query file garbage collection cycle count */
	pgssGlobalStats stats;		/* global statistics for pgss */
} pgssSharedState;
259 :
/*---- Local variables ----*/

/* Current nesting depth of planner/ExecutorRun/ProcessUtility calls */
static int	nesting_level = 0;

/* Saved hook values, so we can chain to any previously-installed hooks */
static shmem_request_hook_type prev_shmem_request_hook = NULL;
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
static post_parse_analyze_hook_type prev_post_parse_analyze_hook = NULL;
static planner_hook_type prev_planner_hook = NULL;
static ExecutorStart_hook_type prev_ExecutorStart = NULL;
static ExecutorRun_hook_type prev_ExecutorRun = NULL;
static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
static ProcessUtility_hook_type prev_ProcessUtility = NULL;

/* Links to shared memory state */
static pgssSharedState *pgss = NULL;
static HTAB *pgss_hash = NULL;

/*---- GUC variables ----*/

typedef enum
{
	PGSS_TRACK_NONE,			/* track no statements */
	PGSS_TRACK_TOP,				/* only top level statements */
	PGSS_TRACK_ALL,				/* all statements, including nested ones */
}			PGSSTrackLevel;

/* Mapping of pg_stat_statements.track GUC string values to track levels */
static const struct config_enum_entry track_options[] =
{
	{"none", PGSS_TRACK_NONE, false},
	{"top", PGSS_TRACK_TOP, false},
	{"all", PGSS_TRACK_ALL, false},
	{NULL, 0, false}
};

static int	pgss_max = 5000;	/* max # statements to track */
static int	pgss_track = PGSS_TRACK_TOP;	/* tracking level */
static bool pgss_track_utility = true;	/* whether to track utility commands */
static bool pgss_track_planning = false;	/* whether to track planning
											 * duration */
static bool pgss_save = true;	/* whether to save stats across shutdown */

/* True if statements at this nesting level should be tracked */
#define pgss_enabled(level) \
	(!IsParallelWorker() && \
	(pgss_track == PGSS_TRACK_ALL || \
	(pgss_track == PGSS_TRACK_TOP && (level) == 0)))

/* Bump the garbage-collection cycle counter under the spinlock */
#define record_gc_qtexts() \
	do { \
		SpinLockAcquire(&pgss->mutex); \
		pgss->gc_count++; \
		SpinLockRelease(&pgss->mutex); \
	} while(0)

/*---- Function declarations ----*/

PG_FUNCTION_INFO_V1(pg_stat_statements_reset);
PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_7);
PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_11);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_2);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_3);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_8);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_9);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_10);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_11);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_12);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_13);
PG_FUNCTION_INFO_V1(pg_stat_statements);
PG_FUNCTION_INFO_V1(pg_stat_statements_info);

static void pgss_shmem_request(void);
static void pgss_shmem_startup(void);
static void pgss_shmem_shutdown(int code, Datum arg);
static void pgss_post_parse_analyze(ParseState *pstate, Query *query,
									JumbleState *jstate);
static PlannedStmt *pgss_planner(Query *parse,
								 const char *query_string,
								 int cursorOptions,
								 ParamListInfo boundParams);
static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void pgss_ExecutorRun(QueryDesc *queryDesc,
							 ScanDirection direction,
							 uint64 count);
static void pgss_ExecutorFinish(QueryDesc *queryDesc);
static void pgss_ExecutorEnd(QueryDesc *queryDesc);
static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
								bool readOnlyTree,
								ProcessUtilityContext context, ParamListInfo params,
								QueryEnvironment *queryEnv,
								DestReceiver *dest, QueryCompletion *qc);
static void pgss_store(const char *query, int64 queryId,
					   int query_location, int query_len,
					   pgssStoreKind kind,
					   double total_time, uint64 rows,
					   const BufferUsage *bufusage,
					   const WalUsage *walusage,
					   const struct JitInstrumentation *jitusage,
					   JumbleState *jstate,
					   int parallel_workers_to_launch,
					   int parallel_workers_launched,
					   PlannedStmtOrigin planOrigin);
static void pg_stat_statements_internal(FunctionCallInfo fcinfo,
										pgssVersion api_version,
										bool showtext);
static Size pgss_memsize(void);
static pgssEntry *entry_alloc(pgssHashKey *key, Size query_offset, int query_len,
							  int encoding, bool sticky);
static void entry_dealloc(void);
static bool qtext_store(const char *query, int query_len,
						Size *query_offset, int *gc_count);
static char *qtext_load_file(Size *buffer_size);
static char *qtext_fetch(Size query_offset, int query_len,
						 char *buffer, Size buffer_size);
static bool need_gc_qtexts(void);
static void gc_qtexts(void);
static TimestampTz entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only);
static char *generate_normalized_query(JumbleState *jstate, const char *query,
									   int query_loc, int *query_len_p);
static void fill_in_constant_lengths(JumbleState *jstate, const char *query,
									 int query_loc);
static int	comp_location(const void *a, const void *b);
383 :
384 :
/*
 * Module load callback
 *
 * Registers custom GUCs and installs all hooks.  Effective only when the
 * library is loaded via shared_preload_libraries, since shared memory must
 * be requested at postmaster start.
 */
void
_PG_init(void)
{
	/*
	 * In order to create our shared memory area, we have to be loaded via
	 * shared_preload_libraries.  If not, fall out without hooking into any of
	 * the main system.  (We don't throw error here because it seems useful to
	 * allow the pg_stat_statements functions to be created even when the
	 * module isn't active.  The functions must protect themselves against
	 * being called then, however.)
	 */
	if (!process_shared_preload_libraries_in_progress)
		return;

	/*
	 * Inform the postmaster that we want to enable query_id calculation if
	 * compute_query_id is set to auto.
	 */
	EnableQueryId();

	/*
	 * Define (or redefine) custom GUC variables.
	 */
	DefineCustomIntVariable("pg_stat_statements.max",
							"Sets the maximum number of statements tracked by pg_stat_statements.",
							NULL,
							&pgss_max,
							5000,
							100,
							INT_MAX / 2,
							PGC_POSTMASTER,
							0,
							NULL,
							NULL,
							NULL);

	DefineCustomEnumVariable("pg_stat_statements.track",
							 "Selects which statements are tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track,
							 PGSS_TRACK_TOP,
							 track_options,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.track_utility",
							 "Selects whether utility commands are tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track_utility,
							 true,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.track_planning",
							 "Selects whether planning duration is tracked by pg_stat_statements.",
							 NULL,
							 &pgss_track_planning,
							 false,
							 PGC_SUSET,
							 0,
							 NULL,
							 NULL,
							 NULL);

	DefineCustomBoolVariable("pg_stat_statements.save",
							 "Save pg_stat_statements statistics across server shutdowns.",
							 NULL,
							 &pgss_save,
							 true,
							 PGC_SIGHUP,
							 0,
							 NULL,
							 NULL,
							 NULL);

	/* Reject any other (misspelled) pg_stat_statements.* settings */
	MarkGUCPrefixReserved("pg_stat_statements");

	/*
	 * Install hooks.  Previous hook values are saved so each of our hook
	 * functions can chain to them.
	 */
	prev_shmem_request_hook = shmem_request_hook;
	shmem_request_hook = pgss_shmem_request;
	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = pgss_shmem_startup;
	prev_post_parse_analyze_hook = post_parse_analyze_hook;
	post_parse_analyze_hook = pgss_post_parse_analyze;
	prev_planner_hook = planner_hook;
	planner_hook = pgss_planner;
	prev_ExecutorStart = ExecutorStart_hook;
	ExecutorStart_hook = pgss_ExecutorStart;
	prev_ExecutorRun = ExecutorRun_hook;
	ExecutorRun_hook = pgss_ExecutorRun;
	prev_ExecutorFinish = ExecutorFinish_hook;
	ExecutorFinish_hook = pgss_ExecutorFinish;
	prev_ExecutorEnd = ExecutorEnd_hook;
	ExecutorEnd_hook = pgss_ExecutorEnd;
	prev_ProcessUtility = ProcessUtility_hook;
	ProcessUtility_hook = pgss_ProcessUtility;
}
493 :
494 : /*
495 : * shmem_request hook: request additional shared resources. We'll allocate or
496 : * attach to the shared resources in pgss_shmem_startup().
497 : */
498 : static void
499 14 : pgss_shmem_request(void)
500 : {
501 14 : if (prev_shmem_request_hook)
502 0 : prev_shmem_request_hook();
503 :
504 14 : RequestAddinShmemSpace(pgss_memsize());
505 14 : RequestNamedLWLockTranche("pg_stat_statements", 1);
506 14 : }
507 :
508 : /*
509 : * shmem_startup hook: allocate or attach to shared memory,
510 : * then load any pre-existing statistics from file.
511 : * Also create and load the query-texts file, which is expected to exist
512 : * (even if empty) while the module is enabled.
513 : */
514 : static void
515 14 : pgss_shmem_startup(void)
516 : {
517 : bool found;
518 : HASHCTL info;
519 14 : FILE *file = NULL;
520 14 : FILE *qfile = NULL;
521 : uint32 header;
522 : int32 num;
523 : int32 pgver;
524 : int32 i;
525 : int buffer_size;
526 14 : char *buffer = NULL;
527 :
528 14 : if (prev_shmem_startup_hook)
529 0 : prev_shmem_startup_hook();
530 :
531 : /* reset in case this is a restart within the postmaster */
532 14 : pgss = NULL;
533 14 : pgss_hash = NULL;
534 :
535 : /*
536 : * Create or attach to the shared memory state, including hash table
537 : */
538 14 : LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
539 :
540 14 : pgss = ShmemInitStruct("pg_stat_statements",
541 : sizeof(pgssSharedState),
542 : &found);
543 :
544 14 : if (!found)
545 : {
546 : /* First time through ... */
547 14 : pgss->lock = &(GetNamedLWLockTranche("pg_stat_statements"))->lock;
548 14 : pgss->cur_median_usage = ASSUMED_MEDIAN_INIT;
549 14 : pgss->mean_query_len = ASSUMED_LENGTH_INIT;
550 14 : SpinLockInit(&pgss->mutex);
551 14 : pgss->extent = 0;
552 14 : pgss->n_writers = 0;
553 14 : pgss->gc_count = 0;
554 14 : pgss->stats.dealloc = 0;
555 14 : pgss->stats.stats_reset = GetCurrentTimestamp();
556 : }
557 :
558 14 : info.keysize = sizeof(pgssHashKey);
559 14 : info.entrysize = sizeof(pgssEntry);
560 14 : pgss_hash = ShmemInitHash("pg_stat_statements hash",
561 : pgss_max, pgss_max,
562 : &info,
563 : HASH_ELEM | HASH_BLOBS);
564 :
565 14 : LWLockRelease(AddinShmemInitLock);
566 :
567 : /*
568 : * If we're in the postmaster (or a standalone backend...), set up a shmem
569 : * exit hook to dump the statistics to disk.
570 : */
571 14 : if (!IsUnderPostmaster)
572 14 : on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);
573 :
574 : /*
575 : * Done if some other process already completed our initialization.
576 : */
577 14 : if (found)
578 14 : return;
579 :
580 : /*
581 : * Note: we don't bother with locks here, because there should be no other
582 : * processes running when this code is reached.
583 : */
584 :
585 : /* Unlink query text file possibly left over from crash */
586 14 : unlink(PGSS_TEXT_FILE);
587 :
588 : /* Allocate new query text temp file */
589 14 : qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
590 14 : if (qfile == NULL)
591 0 : goto write_error;
592 :
593 : /*
594 : * If we were told not to load old statistics, we're done. (Note we do
595 : * not try to unlink any old dump file in this case. This seems a bit
596 : * questionable but it's the historical behavior.)
597 : */
598 14 : if (!pgss_save)
599 : {
600 2 : FreeFile(qfile);
601 2 : return;
602 : }
603 :
604 : /*
605 : * Attempt to load old statistics from the dump file.
606 : */
607 12 : file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
608 12 : if (file == NULL)
609 : {
610 8 : if (errno != ENOENT)
611 0 : goto read_error;
612 : /* No existing persisted stats file, so we're done */
613 8 : FreeFile(qfile);
614 8 : return;
615 : }
616 :
617 4 : buffer_size = 2048;
618 4 : buffer = (char *) palloc(buffer_size);
619 :
620 8 : if (fread(&header, sizeof(uint32), 1, file) != 1 ||
621 8 : fread(&pgver, sizeof(uint32), 1, file) != 1 ||
622 4 : fread(&num, sizeof(int32), 1, file) != 1)
623 0 : goto read_error;
624 :
625 4 : if (header != PGSS_FILE_HEADER ||
626 4 : pgver != PGSS_PG_MAJOR_VERSION)
627 0 : goto data_error;
628 :
629 52988 : for (i = 0; i < num; i++)
630 : {
631 : pgssEntry temp;
632 : pgssEntry *entry;
633 : Size query_offset;
634 :
635 52984 : if (fread(&temp, sizeof(pgssEntry), 1, file) != 1)
636 0 : goto read_error;
637 :
638 : /* Encoding is the only field we can easily sanity-check */
639 52984 : if (!PG_VALID_BE_ENCODING(temp.encoding))
640 0 : goto data_error;
641 :
642 : /* Resize buffer as needed */
643 52984 : if (temp.query_len >= buffer_size)
644 : {
645 6 : buffer_size = Max(buffer_size * 2, temp.query_len + 1);
646 6 : buffer = repalloc(buffer, buffer_size);
647 : }
648 :
649 52984 : if (fread(buffer, 1, temp.query_len + 1, file) != temp.query_len + 1)
650 0 : goto read_error;
651 :
652 : /* Should have a trailing null, but let's make sure */
653 52984 : buffer[temp.query_len] = '\0';
654 :
655 : /* Skip loading "sticky" entries */
656 52984 : if (IS_STICKY(temp.counters))
657 1474 : continue;
658 :
659 : /* Store the query text */
660 51510 : query_offset = pgss->extent;
661 51510 : if (fwrite(buffer, 1, temp.query_len + 1, qfile) != temp.query_len + 1)
662 0 : goto write_error;
663 51510 : pgss->extent += temp.query_len + 1;
664 :
665 : /* make the hashtable entry (discards old entries if too many) */
666 51510 : entry = entry_alloc(&temp.key, query_offset, temp.query_len,
667 : temp.encoding,
668 : false);
669 :
670 : /* copy in the actual stats */
671 51510 : entry->counters = temp.counters;
672 51510 : entry->stats_since = temp.stats_since;
673 51510 : entry->minmax_stats_since = temp.minmax_stats_since;
674 : }
675 :
676 : /* Read global statistics for pg_stat_statements */
677 4 : if (fread(&pgss->stats, sizeof(pgssGlobalStats), 1, file) != 1)
678 0 : goto read_error;
679 :
680 4 : pfree(buffer);
681 4 : FreeFile(file);
682 4 : FreeFile(qfile);
683 :
684 : /*
685 : * Remove the persisted stats file so it's not included in
686 : * backups/replication standbys, etc. A new file will be written on next
687 : * shutdown.
688 : *
689 : * Note: it's okay if the PGSS_TEXT_FILE is included in a basebackup,
690 : * because we remove that file on startup; it acts inversely to
691 : * PGSS_DUMP_FILE, in that it is only supposed to be around when the
692 : * server is running, whereas PGSS_DUMP_FILE is only supposed to be around
693 : * when the server is not running. Leaving the file creates no danger of
694 : * a newly restored database having a spurious record of execution costs,
695 : * which is what we're really concerned about here.
696 : */
697 4 : unlink(PGSS_DUMP_FILE);
698 :
699 4 : return;
700 :
701 0 : read_error:
702 0 : ereport(LOG,
703 : (errcode_for_file_access(),
704 : errmsg("could not read file \"%s\": %m",
705 : PGSS_DUMP_FILE)));
706 0 : goto fail;
707 0 : data_error:
708 0 : ereport(LOG,
709 : (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
710 : errmsg("ignoring invalid data in file \"%s\"",
711 : PGSS_DUMP_FILE)));
712 0 : goto fail;
713 0 : write_error:
714 0 : ereport(LOG,
715 : (errcode_for_file_access(),
716 : errmsg("could not write file \"%s\": %m",
717 : PGSS_TEXT_FILE)));
718 0 : fail:
719 0 : if (buffer)
720 0 : pfree(buffer);
721 0 : if (file)
722 0 : FreeFile(file);
723 0 : if (qfile)
724 0 : FreeFile(qfile);
725 : /* If possible, throw away the bogus file; ignore any error */
726 0 : unlink(PGSS_DUMP_FILE);
727 :
728 : /*
729 : * Don't unlink PGSS_TEXT_FILE here; it should always be around while the
730 : * server is running with pg_stat_statements enabled
731 : */
732 : }
733 :
/*
 * shmem_shutdown hook: Dump statistics into file.
 *
 * Note: we don't bother with acquiring lock, because there should be no
 * other processes running when this is called.
 *
 * The dump is written to PGSS_DUMP_FILE ".tmp" and durably renamed into
 * place, so a crash mid-write cannot leave a truncated dump behind.
 */
static void
pgss_shmem_shutdown(int code, Datum arg)
{
	FILE	   *file;
	char	   *qbuffer = NULL;
	Size		qbuffer_size = 0;
	HASH_SEQ_STATUS hash_seq;
	int32		num_entries;
	pgssEntry  *entry;

	/* Don't try to dump during a crash. */
	if (code)
		return;

	/* Safety check ... shouldn't get here unless shmem is set up. */
	if (!pgss || !pgss_hash)
		return;

	/* Don't dump if told not to. */
	if (!pgss_save)
		return;

	file = AllocateFile(PGSS_DUMP_FILE ".tmp", PG_BINARY_W);
	if (file == NULL)
		goto error;

	/* File header: magic, major version, then the entry count */
	if (fwrite(&PGSS_FILE_HEADER, sizeof(uint32), 1, file) != 1)
		goto error;
	if (fwrite(&PGSS_PG_MAJOR_VERSION, sizeof(uint32), 1, file) != 1)
		goto error;
	num_entries = hash_get_num_entries(pgss_hash);
	if (fwrite(&num_entries, sizeof(int32), 1, file) != 1)
		goto error;

	qbuffer = qtext_load_file(&qbuffer_size);
	if (qbuffer == NULL)
		goto error;

	/*
	 * When serializing to disk, we store query texts immediately after their
	 * entry data.  Any orphaned query texts are thereby excluded.
	 */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		int			len = entry->query_len;
		char	   *qstr = qtext_fetch(entry->query_offset, len,
									   qbuffer, qbuffer_size);

		if (qstr == NULL)
			continue;			/* Ignore any entries with bogus texts */

		if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 ||
			fwrite(qstr, 1, len + 1, file) != len + 1)
		{
			/* note: we assume hash_seq_term won't change errno */
			hash_seq_term(&hash_seq);
			goto error;
		}
	}

	/* Dump global statistics for pg_stat_statements */
	if (fwrite(&pgss->stats, sizeof(pgssGlobalStats), 1, file) != 1)
		goto error;

	free(qbuffer);
	qbuffer = NULL;

	/* FreeFile flushes; a failure here means the dump may be incomplete */
	if (FreeFile(file))
	{
		file = NULL;
		goto error;
	}

	/*
	 * Rename file into place, so we atomically replace any old one.
	 */
	(void) durable_rename(PGSS_DUMP_FILE ".tmp", PGSS_DUMP_FILE, LOG);

	/* Unlink query-texts file; it's not needed while shutdown */
	unlink(PGSS_TEXT_FILE);

	return;

error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_DUMP_FILE ".tmp")));
	free(qbuffer);
	if (file)
		FreeFile(file);
	unlink(PGSS_DUMP_FILE ".tmp");
	unlink(PGSS_TEXT_FILE);
}
835 :
836 : /*
837 : * Post-parse-analysis hook: mark query with a queryId
838 : */
839 : static void
840 155668 : pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate)
841 : {
842 155668 : if (prev_post_parse_analyze_hook)
843 0 : prev_post_parse_analyze_hook(pstate, query, jstate);
844 :
845 : /* Safety check... */
846 155668 : if (!pgss || !pgss_hash || !pgss_enabled(nesting_level))
847 24506 : return;
848 :
849 : /*
850 : * If it's EXECUTE, clear the queryId so that stats will accumulate for
851 : * the underlying PREPARE. But don't do this if we're not tracking
852 : * utility statements, to avoid messing up another extension that might be
853 : * tracking them.
854 : */
855 131162 : if (query->utilityStmt)
856 : {
857 58364 : if (pgss_track_utility && IsA(query->utilityStmt, ExecuteStmt))
858 : {
859 6484 : query->queryId = INT64CONST(0);
860 6484 : return;
861 : }
862 : }
863 :
864 : /*
865 : * If query jumbling were able to identify any ignorable constants, we
866 : * immediately create a hash table entry for the query, so that we can
867 : * record the normalized form of the query string. If there were no such
868 : * constants, the normalized string would be the same as the query text
869 : * anyway, so there's no need for an early entry.
870 : */
871 124678 : if (jstate && jstate->clocations_count > 0)
872 72344 : pgss_store(pstate->p_sourcetext,
873 : query->queryId,
874 : query->stmt_location,
875 : query->stmt_len,
876 : PGSS_INVALID,
877 : 0,
878 : 0,
879 : NULL,
880 : NULL,
881 : NULL,
882 : jstate,
883 : 0,
884 : 0,
885 : PLAN_STMT_UNKNOWN);
886 : }
887 :
888 : /*
889 : * Planner hook: forward to regular planner, but measure planning time
890 : * if needed.
891 : */
static PlannedStmt *
pgss_planner(Query *parse,
			 const char *query_string,
			 int cursorOptions,
			 ParamListInfo boundParams)
{
	PlannedStmt *result;

	/*
	 * We can't process the query if no query_string is provided, as
	 * pgss_store needs it. We also ignore query without queryid, as it would
	 * be treated as a utility statement, which may not be the case.
	 */
	if (pgss_enabled(nesting_level)
		&& pgss_track_planning && query_string
		&& parse->queryId != INT64CONST(0))
	{
		instr_time	start;
		instr_time	duration;
		BufferUsage bufusage_start,
					bufusage;
		WalUsage	walusage_start,
					walusage;

		/* We need to track buffer usage as the planner can access them. */
		bufusage_start = pgBufferUsage;

		/*
		 * Similarly the planner could write some WAL records in some cases
		 * (e.g. setting a hint bit with those being WAL-logged)
		 */
		walusage_start = pgWalUsage;
		INSTR_TIME_SET_CURRENT(start);

		/*
		 * Bump the nesting level around the planner call so that anything
		 * evaluated during planning (e.g. SQL functions being inlined) is
		 * not counted as a top-level statement.  PG_FINALLY ensures the
		 * level is restored even if the planner errors out; the error is
		 * re-thrown by PG_END_TRY.
		 */
		nesting_level++;
		PG_TRY();
		{
			if (prev_planner_hook)
				result = prev_planner_hook(parse, query_string, cursorOptions,
										   boundParams);
			else
				result = standard_planner(parse, query_string, cursorOptions,
										  boundParams);
		}
		PG_FINALLY();
		{
			nesting_level--;
		}
		PG_END_TRY();

		INSTR_TIME_SET_CURRENT(duration);
		INSTR_TIME_SUBTRACT(duration, start);

		/* calc differences of buffer counters. */
		memset(&bufusage, 0, sizeof(BufferUsage));
		BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);

		/* calc differences of WAL counters. */
		memset(&walusage, 0, sizeof(WalUsage));
		WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);

		/* Record planning-phase stats (kind PGSS_PLAN, rows = 0). */
		pgss_store(query_string,
				   parse->queryId,
				   parse->stmt_location,
				   parse->stmt_len,
				   PGSS_PLAN,
				   INSTR_TIME_GET_MILLISEC(duration),
				   0,
				   &bufusage,
				   &walusage,
				   NULL,
				   NULL,
				   0,
				   0,
				   result->planOrigin);
	}
	else
	{
		/*
		 * Even though we're not tracking plan time for this statement, we
		 * must still increment the nesting level, to ensure that functions
		 * evaluated during planning are not seen as top-level calls.
		 */
		nesting_level++;
		PG_TRY();
		{
			if (prev_planner_hook)
				result = prev_planner_hook(parse, query_string, cursorOptions,
										   boundParams);
			else
				result = standard_planner(parse, query_string, cursorOptions,
										  boundParams);
		}
		PG_FINALLY();
		{
			nesting_level--;
		}
		PG_END_TRY();
	}

	return result;
}
994 :
995 : /*
996 : * ExecutorStart hook: start up tracking if needed
997 : */
998 : static void
999 115696 : pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
1000 : {
1001 115696 : if (prev_ExecutorStart)
1002 0 : prev_ExecutorStart(queryDesc, eflags);
1003 : else
1004 115696 : standard_ExecutorStart(queryDesc, eflags);
1005 :
1006 : /*
1007 : * If query has queryId zero, don't track it. This prevents double
1008 : * counting of optimizable statements that are directly contained in
1009 : * utility statements.
1010 : */
1011 115162 : if (pgss_enabled(nesting_level) && queryDesc->plannedstmt->queryId != INT64CONST(0))
1012 : {
1013 : /*
1014 : * Set up to track total elapsed time in ExecutorRun. Make sure the
1015 : * space is allocated in the per-query context so it will go away at
1016 : * ExecutorEnd.
1017 : */
1018 76968 : if (queryDesc->totaltime == NULL)
1019 : {
1020 : MemoryContext oldcxt;
1021 :
1022 76968 : oldcxt = MemoryContextSwitchTo(queryDesc->estate->es_query_cxt);
1023 76968 : queryDesc->totaltime = InstrAlloc(1, INSTRUMENT_ALL, false);
1024 76968 : MemoryContextSwitchTo(oldcxt);
1025 : }
1026 : }
1027 115162 : }
1028 :
1029 : /*
1030 : * ExecutorRun hook: all we need do is track nesting depth
1031 : */
static void
pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count)
{
	/* Bump nesting depth so statements executed within aren't "top level" */
	nesting_level++;
	PG_TRY();
	{
		if (prev_ExecutorRun)
			prev_ExecutorRun(queryDesc, direction, count);
		else
			standard_ExecutorRun(queryDesc, direction, count);
	}
	PG_FINALLY();
	{
		/* Restore depth even on error; PG_END_TRY re-throws the error */
		nesting_level--;
	}
	PG_END_TRY();
}
1049 :
1050 : /*
1051 : * ExecutorFinish hook: all we need do is track nesting depth
1052 : */
static void
pgss_ExecutorFinish(QueryDesc *queryDesc)
{
	/* Bump nesting depth so work done here (e.g. triggers) isn't top level */
	nesting_level++;
	PG_TRY();
	{
		if (prev_ExecutorFinish)
			prev_ExecutorFinish(queryDesc);
		else
			standard_ExecutorFinish(queryDesc);
	}
	PG_FINALLY();
	{
		/* Restore depth even on error; PG_END_TRY re-throws the error */
		nesting_level--;
	}
	PG_END_TRY();
}
1070 :
1071 : /*
1072 : * ExecutorEnd hook: store results if needed
1073 : */
1074 : static void
1075 107320 : pgss_ExecutorEnd(QueryDesc *queryDesc)
1076 : {
1077 107320 : int64 queryId = queryDesc->plannedstmt->queryId;
1078 :
1079 107320 : if (queryId != INT64CONST(0) && queryDesc->totaltime &&
1080 73908 : pgss_enabled(nesting_level))
1081 : {
1082 : /*
1083 : * Make sure stats accumulation is done. (Note: it's okay if several
1084 : * levels of hook all do this.)
1085 : */
1086 73908 : InstrEndLoop(queryDesc->totaltime);
1087 :
1088 73758 : pgss_store(queryDesc->sourceText,
1089 : queryId,
1090 73908 : queryDesc->plannedstmt->stmt_location,
1091 73908 : queryDesc->plannedstmt->stmt_len,
1092 : PGSS_EXEC,
1093 73908 : queryDesc->totaltime->total * 1000.0, /* convert to msec */
1094 73908 : queryDesc->estate->es_total_processed,
1095 73908 : &queryDesc->totaltime->bufusage,
1096 73908 : &queryDesc->totaltime->walusage,
1097 150 : queryDesc->estate->es_jit ? &queryDesc->estate->es_jit->instr : NULL,
1098 : NULL,
1099 73908 : queryDesc->estate->es_parallel_workers_to_launch,
1100 73908 : queryDesc->estate->es_parallel_workers_launched,
1101 73908 : queryDesc->plannedstmt->planOrigin);
1102 : }
1103 :
1104 107320 : if (prev_ExecutorEnd)
1105 0 : prev_ExecutorEnd(queryDesc);
1106 : else
1107 107320 : standard_ExecutorEnd(queryDesc);
1108 107320 : }
1109 :
1110 : /*
1111 : * ProcessUtility hook
1112 : */
1113 : static void
1114 69138 : pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
1115 : bool readOnlyTree,
1116 : ProcessUtilityContext context,
1117 : ParamListInfo params, QueryEnvironment *queryEnv,
1118 : DestReceiver *dest, QueryCompletion *qc)
1119 : {
1120 69138 : Node *parsetree = pstmt->utilityStmt;
1121 69138 : int64 saved_queryId = pstmt->queryId;
1122 69138 : int saved_stmt_location = pstmt->stmt_location;
1123 69138 : int saved_stmt_len = pstmt->stmt_len;
1124 69138 : bool enabled = pgss_track_utility && pgss_enabled(nesting_level);
1125 :
1126 : /*
1127 : * Force utility statements to get queryId zero. We do this even in cases
1128 : * where the statement contains an optimizable statement for which a
1129 : * queryId could be derived (such as EXPLAIN or DECLARE CURSOR). For such
1130 : * cases, runtime control will first go through ProcessUtility and then
1131 : * the executor, and we don't want the executor hooks to do anything,
1132 : * since we are already measuring the statement's costs at the utility
1133 : * level.
1134 : *
1135 : * Note that this is only done if pg_stat_statements is enabled and
1136 : * configured to track utility statements, in the unlikely possibility
1137 : * that user configured another extension to handle utility statements
1138 : * only.
1139 : */
1140 69138 : if (enabled)
1141 58156 : pstmt->queryId = INT64CONST(0);
1142 :
1143 : /*
1144 : * If it's an EXECUTE statement, we don't track it and don't increment the
1145 : * nesting level. This allows the cycles to be charged to the underlying
1146 : * PREPARE instead (by the Executor hooks), which is much more useful.
1147 : *
1148 : * We also don't track execution of PREPARE. If we did, we would get one
1149 : * hash table entry for the PREPARE (with hash calculated from the query
1150 : * string), and then a different one with the same query string (but hash
1151 : * calculated from the query tree) would be used to accumulate costs of
1152 : * ensuing EXECUTEs. This would be confusing. Since PREPARE doesn't
1153 : * actually run the planner (only parse+rewrite), its costs are generally
1154 : * pretty negligible and it seems okay to just ignore it.
1155 : */
1156 69138 : if (enabled &&
1157 58156 : !IsA(parsetree, ExecuteStmt) &&
1158 51684 : !IsA(parsetree, PrepareStmt))
1159 46608 : {
1160 : instr_time start;
1161 : instr_time duration;
1162 : uint64 rows;
1163 : BufferUsage bufusage_start,
1164 : bufusage;
1165 : WalUsage walusage_start,
1166 : walusage;
1167 :
1168 51438 : bufusage_start = pgBufferUsage;
1169 51438 : walusage_start = pgWalUsage;
1170 51438 : INSTR_TIME_SET_CURRENT(start);
1171 :
1172 51438 : nesting_level++;
1173 51438 : PG_TRY();
1174 : {
1175 51438 : if (prev_ProcessUtility)
1176 0 : prev_ProcessUtility(pstmt, queryString, readOnlyTree,
1177 : context, params, queryEnv,
1178 : dest, qc);
1179 : else
1180 51438 : standard_ProcessUtility(pstmt, queryString, readOnlyTree,
1181 : context, params, queryEnv,
1182 : dest, qc);
1183 : }
1184 4830 : PG_FINALLY();
1185 : {
1186 51438 : nesting_level--;
1187 : }
1188 51438 : PG_END_TRY();
1189 :
1190 : /*
1191 : * CAUTION: do not access the *pstmt data structure again below here.
1192 : * If it was a ROLLBACK or similar, that data structure may have been
1193 : * freed. We must copy everything we still need into local variables,
1194 : * which we did above.
1195 : *
1196 : * For the same reason, we can't risk restoring pstmt->queryId to its
1197 : * former value, which'd otherwise be a good idea.
1198 : */
1199 :
1200 46608 : INSTR_TIME_SET_CURRENT(duration);
1201 46608 : INSTR_TIME_SUBTRACT(duration, start);
1202 :
1203 : /*
1204 : * Track the total number of rows retrieved or affected by the utility
1205 : * statements of COPY, FETCH, CREATE TABLE AS, CREATE MATERIALIZED
1206 : * VIEW, REFRESH MATERIALIZED VIEW and SELECT INTO.
1207 : */
1208 46602 : rows = (qc && (qc->commandTag == CMDTAG_COPY ||
1209 43302 : qc->commandTag == CMDTAG_FETCH ||
1210 42782 : qc->commandTag == CMDTAG_SELECT ||
1211 42408 : qc->commandTag == CMDTAG_REFRESH_MATERIALIZED_VIEW)) ?
1212 93210 : qc->nprocessed : 0;
1213 :
1214 : /* calc differences of buffer counters. */
1215 46608 : memset(&bufusage, 0, sizeof(BufferUsage));
1216 46608 : BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
1217 :
1218 : /* calc differences of WAL counters. */
1219 46608 : memset(&walusage, 0, sizeof(WalUsage));
1220 46608 : WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);
1221 :
1222 46608 : pgss_store(queryString,
1223 : saved_queryId,
1224 : saved_stmt_location,
1225 : saved_stmt_len,
1226 : PGSS_EXEC,
1227 46608 : INSTR_TIME_GET_MILLISEC(duration),
1228 : rows,
1229 : &bufusage,
1230 : &walusage,
1231 : NULL,
1232 : NULL,
1233 : 0,
1234 : 0,
1235 : pstmt->planOrigin);
1236 : }
1237 : else
1238 : {
1239 : /*
1240 : * Even though we're not tracking execution time for this statement,
1241 : * we must still increment the nesting level, to ensure that functions
1242 : * evaluated within it are not seen as top-level calls. But don't do
1243 : * so for EXECUTE; that way, when control reaches pgss_planner or
1244 : * pgss_ExecutorStart, we will treat the costs as top-level if
1245 : * appropriate. Likewise, don't bump for PREPARE, so that parse
1246 : * analysis will treat the statement as top-level if appropriate.
1247 : *
1248 : * To be absolutely certain we don't mess up the nesting level,
1249 : * evaluate the bump_level condition just once.
1250 : */
1251 17700 : bool bump_level =
1252 28926 : !IsA(parsetree, ExecuteStmt) &&
1253 11226 : !IsA(parsetree, PrepareStmt);
1254 :
1255 17700 : if (bump_level)
1256 10978 : nesting_level++;
1257 17700 : PG_TRY();
1258 : {
1259 17700 : if (prev_ProcessUtility)
1260 0 : prev_ProcessUtility(pstmt, queryString, readOnlyTree,
1261 : context, params, queryEnv,
1262 : dest, qc);
1263 : else
1264 17700 : standard_ProcessUtility(pstmt, queryString, readOnlyTree,
1265 : context, params, queryEnv,
1266 : dest, qc);
1267 : }
1268 260 : PG_FINALLY();
1269 : {
1270 17700 : if (bump_level)
1271 10978 : nesting_level--;
1272 : }
1273 17700 : PG_END_TRY();
1274 : }
1275 64048 : }
1276 :
1277 : /*
1278 : * Store some statistics for a statement.
1279 : *
1280 : * If jstate is not NULL then we're trying to create an entry for which
1281 : * we have no statistics as yet; we just want to record the normalized
1282 : * query string. total_time, rows, bufusage and walusage are ignored in this
1283 : * case.
1284 : *
1285 : * If kind is PGSS_PLAN or PGSS_EXEC, its value is used as the array position
1286 : * for the arrays in the Counters field.
1287 : */
static void
pgss_store(const char *query, int64 queryId,
		   int query_location, int query_len,
		   pgssStoreKind kind,
		   double total_time, uint64 rows,
		   const BufferUsage *bufusage,
		   const WalUsage *walusage,
		   const struct JitInstrumentation *jitusage,
		   JumbleState *jstate,
		   int parallel_workers_to_launch,
		   int parallel_workers_launched,
		   PlannedStmtOrigin planOrigin)
{
	pgssHashKey key;
	pgssEntry  *entry;
	char	   *norm_query = NULL;
	int			encoding = GetDatabaseEncoding();

	Assert(query != NULL);

	/* Safety check: do nothing until shared state is initialized */
	if (!pgss || !pgss_hash)
		return;

	/*
	 * Nothing to do if compute_query_id isn't enabled and no other module
	 * computed a query identifier.
	 */
	if (queryId == INT64CONST(0))
		return;

	/*
	 * Confine our attention to the relevant part of the string, if the query
	 * is a portion of a multi-statement source string, and update query
	 * location and length if needed.
	 */
	query = CleanQuerytext(query, &query_location, &query_len);

	/* Set up key for hashtable search */

	/* clear padding (the struct is hashed as raw bytes) */
	memset(&key, 0, sizeof(pgssHashKey));

	key.userid = GetUserId();
	key.dbid = MyDatabaseId;
	key.queryid = queryId;
	key.toplevel = (nesting_level == 0);

	/* Lookup the hash table entry with shared lock. */
	LWLockAcquire(pgss->lock, LW_SHARED);

	entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);

	/* Create new entry, if not present */
	if (!entry)
	{
		Size		query_offset;
		int			gc_count;
		bool		stored;
		bool		do_gc;

		/*
		 * Create a new, normalized query string if caller asked. We don't
		 * need to hold the lock while doing this work. (Note: in any case,
		 * it's possible that someone else creates a duplicate hashtable entry
		 * in the interval where we don't hold the lock below. That case is
		 * handled by entry_alloc.)
		 */
		if (jstate)
		{
			LWLockRelease(pgss->lock);
			norm_query = generate_normalized_query(jstate, query,
												   query_location,
												   &query_len);
			LWLockAcquire(pgss->lock, LW_SHARED);
		}

		/* Append new query text to file with only shared lock held */
		stored = qtext_store(norm_query ? norm_query : query, query_len,
							 &query_offset, &gc_count);

		/*
		 * Determine whether we need to garbage collect external query texts
		 * while the shared lock is still held. This micro-optimization
		 * avoids taking the time to decide this while holding exclusive lock.
		 */
		do_gc = need_gc_qtexts();

		/* Need exclusive lock to make a new hashtable entry - promote */
		LWLockRelease(pgss->lock);
		LWLockAcquire(pgss->lock, LW_EXCLUSIVE);

		/*
		 * A garbage collection may have occurred while we weren't holding the
		 * lock. In the unlikely event that this happens, the query text we
		 * stored above will have been garbage collected, so write it again.
		 * This should be infrequent enough that doing it while holding
		 * exclusive lock isn't a performance problem.
		 */
		if (!stored || pgss->gc_count != gc_count)
			stored = qtext_store(norm_query ? norm_query : query, query_len,
								 &query_offset, NULL);

		/* If we failed to write to the text file, give up */
		if (!stored)
			goto done;

		/* OK to create a new hashtable entry */
		entry = entry_alloc(&key, query_offset, query_len, encoding,
							jstate != NULL);

		/* If needed, perform garbage collection while exclusive lock held */
		if (do_gc)
			gc_qtexts();
	}

	/* Increment the counts, except when jstate is not NULL */
	if (!jstate)
	{
		Assert(kind == PGSS_PLAN || kind == PGSS_EXEC);

		/*
		 * Grab the spinlock while updating the counters (see comment about
		 * locking rules at the head of the file)
		 */
		SpinLockAcquire(&entry->mutex);

		/* "Unstick" entry if it was previously sticky */
		if (IS_STICKY(entry->counters))
			entry->counters.usage = USAGE_INIT;

		entry->counters.calls[kind] += 1;
		entry->counters.total_time[kind] += total_time;

		/* First real call for this kind: seed min/max/mean directly */
		if (entry->counters.calls[kind] == 1)
		{
			entry->counters.min_time[kind] = total_time;
			entry->counters.max_time[kind] = total_time;
			entry->counters.mean_time[kind] = total_time;
		}
		else
		{
			/*
			 * Welford's method for accurately computing variance. See
			 * <http://www.johndcook.com/blog/standard_deviation/>
			 */
			double		old_mean = entry->counters.mean_time[kind];

			entry->counters.mean_time[kind] +=
				(total_time - old_mean) / entry->counters.calls[kind];
			entry->counters.sum_var_time[kind] +=
				(total_time - old_mean) * (total_time - entry->counters.mean_time[kind]);

			/*
			 * Calculate min and max time. min = 0 and max = 0 means that the
			 * min/max statistics were reset
			 */
			if (entry->counters.min_time[kind] == 0
				&& entry->counters.max_time[kind] == 0)
			{
				entry->counters.min_time[kind] = total_time;
				entry->counters.max_time[kind] = total_time;
			}
			else
			{
				if (entry->counters.min_time[kind] > total_time)
					entry->counters.min_time[kind] = total_time;
				if (entry->counters.max_time[kind] < total_time)
					entry->counters.max_time[kind] = total_time;
			}
		}
		entry->counters.rows += rows;
		entry->counters.shared_blks_hit += bufusage->shared_blks_hit;
		entry->counters.shared_blks_read += bufusage->shared_blks_read;
		entry->counters.shared_blks_dirtied += bufusage->shared_blks_dirtied;
		entry->counters.shared_blks_written += bufusage->shared_blks_written;
		entry->counters.local_blks_hit += bufusage->local_blks_hit;
		entry->counters.local_blks_read += bufusage->local_blks_read;
		entry->counters.local_blks_dirtied += bufusage->local_blks_dirtied;
		entry->counters.local_blks_written += bufusage->local_blks_written;
		entry->counters.temp_blks_read += bufusage->temp_blks_read;
		entry->counters.temp_blks_written += bufusage->temp_blks_written;
		entry->counters.shared_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->shared_blk_read_time);
		entry->counters.shared_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->shared_blk_write_time);
		entry->counters.local_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->local_blk_read_time);
		entry->counters.local_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->local_blk_write_time);
		entry->counters.temp_blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->temp_blk_read_time);
		entry->counters.temp_blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->temp_blk_write_time);
		entry->counters.usage += USAGE_EXEC(total_time);
		entry->counters.wal_records += walusage->wal_records;
		entry->counters.wal_fpi += walusage->wal_fpi;
		entry->counters.wal_bytes += walusage->wal_bytes;
		entry->counters.wal_buffers_full += walusage->wal_buffers_full;
		if (jitusage)
		{
			entry->counters.jit_functions += jitusage->created_functions;
			entry->counters.jit_generation_time += INSTR_TIME_GET_MILLISEC(jitusage->generation_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->deform_counter))
				entry->counters.jit_deform_count++;
			entry->counters.jit_deform_time += INSTR_TIME_GET_MILLISEC(jitusage->deform_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->inlining_counter))
				entry->counters.jit_inlining_count++;
			entry->counters.jit_inlining_time += INSTR_TIME_GET_MILLISEC(jitusage->inlining_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->optimization_counter))
				entry->counters.jit_optimization_count++;
			entry->counters.jit_optimization_time += INSTR_TIME_GET_MILLISEC(jitusage->optimization_counter);

			if (INSTR_TIME_GET_MILLISEC(jitusage->emission_counter))
				entry->counters.jit_emission_count++;
			entry->counters.jit_emission_time += INSTR_TIME_GET_MILLISEC(jitusage->emission_counter);
		}

		/* parallel worker counters */
		entry->counters.parallel_workers_to_launch += parallel_workers_to_launch;
		entry->counters.parallel_workers_launched += parallel_workers_launched;

		/* plan cache counters */
		if (planOrigin == PLAN_STMT_CACHE_GENERIC)
			entry->counters.generic_plan_calls++;
		else if (planOrigin == PLAN_STMT_CACHE_CUSTOM)
			entry->counters.custom_plan_calls++;

		SpinLockRelease(&entry->mutex);
	}

done:
	LWLockRelease(pgss->lock);

	/* We postpone this clean-up until we're out of the lock */
	if (norm_query)
		pfree(norm_query);
}
1523 :
1524 : /*
1525 : * Reset statement statistics corresponding to userid, dbid, and queryid.
1526 : */
1527 : Datum
1528 2 : pg_stat_statements_reset_1_7(PG_FUNCTION_ARGS)
1529 : {
1530 : Oid userid;
1531 : Oid dbid;
1532 : int64 queryid;
1533 :
1534 2 : userid = PG_GETARG_OID(0);
1535 2 : dbid = PG_GETARG_OID(1);
1536 2 : queryid = PG_GETARG_INT64(2);
1537 :
1538 2 : entry_reset(userid, dbid, queryid, false);
1539 :
1540 2 : PG_RETURN_VOID();
1541 : }
1542 :
1543 : Datum
1544 232 : pg_stat_statements_reset_1_11(PG_FUNCTION_ARGS)
1545 : {
1546 : Oid userid;
1547 : Oid dbid;
1548 : int64 queryid;
1549 : bool minmax_only;
1550 :
1551 232 : userid = PG_GETARG_OID(0);
1552 232 : dbid = PG_GETARG_OID(1);
1553 232 : queryid = PG_GETARG_INT64(2);
1554 232 : minmax_only = PG_GETARG_BOOL(3);
1555 :
1556 232 : PG_RETURN_TIMESTAMPTZ(entry_reset(userid, dbid, queryid, minmax_only));
1557 : }
1558 :
1559 : /*
1560 : * Reset statement statistics.
1561 : */
1562 : Datum
1563 2 : pg_stat_statements_reset(PG_FUNCTION_ARGS)
1564 : {
1565 2 : entry_reset(0, 0, 0, false);
1566 :
1567 2 : PG_RETURN_VOID();
1568 : }
1569 :
1570 : /* Number of output arguments (columns) for various API versions */
1571 : #define PG_STAT_STATEMENTS_COLS_V1_0 14
1572 : #define PG_STAT_STATEMENTS_COLS_V1_1 18
1573 : #define PG_STAT_STATEMENTS_COLS_V1_2 19
1574 : #define PG_STAT_STATEMENTS_COLS_V1_3 23
1575 : #define PG_STAT_STATEMENTS_COLS_V1_8 32
1576 : #define PG_STAT_STATEMENTS_COLS_V1_9 33
1577 : #define PG_STAT_STATEMENTS_COLS_V1_10 43
1578 : #define PG_STAT_STATEMENTS_COLS_V1_11 49
1579 : #define PG_STAT_STATEMENTS_COLS_V1_12 52
1580 : #define PG_STAT_STATEMENTS_COLS_V1_13 54
1581 : #define PG_STAT_STATEMENTS_COLS 54 /* maximum of above */
1582 :
1583 : /*
1584 : * Retrieve statement statistics.
1585 : *
1586 : * The SQL API of this function has changed multiple times, and will likely
1587 : * do so again in future. To support the case where a newer version of this
1588 : * loadable module is being used with an old SQL declaration of the function,
1589 : * we continue to support the older API versions. For 1.2 and later, the
1590 : * expected API version is identified by embedding it in the C name of the
1591 : * function. Unfortunately we weren't bright enough to do that for 1.1.
1592 : */
1593 : Datum
1594 250 : pg_stat_statements_1_13(PG_FUNCTION_ARGS)
1595 : {
1596 250 : bool showtext = PG_GETARG_BOOL(0);
1597 :
1598 250 : pg_stat_statements_internal(fcinfo, PGSS_V1_13, showtext);
1599 :
1600 250 : return (Datum) 0;
1601 : }
1602 :
1603 : Datum
1604 2 : pg_stat_statements_1_12(PG_FUNCTION_ARGS)
1605 : {
1606 2 : bool showtext = PG_GETARG_BOOL(0);
1607 :
1608 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_12, showtext);
1609 :
1610 2 : return (Datum) 0;
1611 : }
1612 :
1613 : Datum
1614 2 : pg_stat_statements_1_11(PG_FUNCTION_ARGS)
1615 : {
1616 2 : bool showtext = PG_GETARG_BOOL(0);
1617 :
1618 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_11, showtext);
1619 :
1620 2 : return (Datum) 0;
1621 : }
1622 :
1623 : Datum
1624 2 : pg_stat_statements_1_10(PG_FUNCTION_ARGS)
1625 : {
1626 2 : bool showtext = PG_GETARG_BOOL(0);
1627 :
1628 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_10, showtext);
1629 :
1630 2 : return (Datum) 0;
1631 : }
1632 :
1633 : Datum
1634 2 : pg_stat_statements_1_9(PG_FUNCTION_ARGS)
1635 : {
1636 2 : bool showtext = PG_GETARG_BOOL(0);
1637 :
1638 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_9, showtext);
1639 :
1640 2 : return (Datum) 0;
1641 : }
1642 :
1643 : Datum
1644 2 : pg_stat_statements_1_8(PG_FUNCTION_ARGS)
1645 : {
1646 2 : bool showtext = PG_GETARG_BOOL(0);
1647 :
1648 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_8, showtext);
1649 :
1650 2 : return (Datum) 0;
1651 : }
1652 :
1653 : Datum
1654 2 : pg_stat_statements_1_3(PG_FUNCTION_ARGS)
1655 : {
1656 2 : bool showtext = PG_GETARG_BOOL(0);
1657 :
1658 2 : pg_stat_statements_internal(fcinfo, PGSS_V1_3, showtext);
1659 :
1660 2 : return (Datum) 0;
1661 : }
1662 :
1663 : Datum
1664 0 : pg_stat_statements_1_2(PG_FUNCTION_ARGS)
1665 : {
1666 0 : bool showtext = PG_GETARG_BOOL(0);
1667 :
1668 0 : pg_stat_statements_internal(fcinfo, PGSS_V1_2, showtext);
1669 :
1670 0 : return (Datum) 0;
1671 : }
1672 :
1673 : /*
1674 : * Legacy entry point for pg_stat_statements() API versions 1.0 and 1.1.
1675 : * This can be removed someday, perhaps.
1676 : */
1677 : Datum
1678 0 : pg_stat_statements(PG_FUNCTION_ARGS)
1679 : {
1680 : /* If it's really API 1.1, we'll figure that out below */
1681 0 : pg_stat_statements_internal(fcinfo, PGSS_V1_0, true);
1682 :
1683 0 : return (Datum) 0;
1684 : }
1685 :
1686 : /* Common code for all versions of pg_stat_statements() */
1687 : static void
1688 262 : pg_stat_statements_internal(FunctionCallInfo fcinfo,
1689 : pgssVersion api_version,
1690 : bool showtext)
1691 : {
1692 262 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1693 262 : Oid userid = GetUserId();
1694 262 : bool is_allowed_role = false;
1695 262 : char *qbuffer = NULL;
1696 262 : Size qbuffer_size = 0;
1697 262 : Size extent = 0;
1698 262 : int gc_count = 0;
1699 : HASH_SEQ_STATUS hash_seq;
1700 : pgssEntry *entry;
1701 :
1702 : /*
1703 : * Superusers or roles with the privileges of pg_read_all_stats members
1704 : * are allowed
1705 : */
1706 262 : is_allowed_role = has_privs_of_role(userid, ROLE_PG_READ_ALL_STATS);
1707 :
1708 : /* hash table must exist already */
1709 262 : if (!pgss || !pgss_hash)
1710 0 : ereport(ERROR,
1711 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1712 : errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
1713 :
1714 262 : InitMaterializedSRF(fcinfo, 0);
1715 :
1716 : /*
1717 : * Check we have the expected number of output arguments. Aside from
1718 : * being a good safety check, we need a kluge here to detect API version
1719 : * 1.1, which was wedged into the code in an ill-considered way.
1720 : */
1721 262 : switch (rsinfo->setDesc->natts)
1722 : {
1723 0 : case PG_STAT_STATEMENTS_COLS_V1_0:
1724 0 : if (api_version != PGSS_V1_0)
1725 0 : elog(ERROR, "incorrect number of output arguments");
1726 0 : break;
1727 0 : case PG_STAT_STATEMENTS_COLS_V1_1:
1728 : /* pg_stat_statements() should have told us 1.0 */
1729 0 : if (api_version != PGSS_V1_0)
1730 0 : elog(ERROR, "incorrect number of output arguments");
1731 0 : api_version = PGSS_V1_1;
1732 0 : break;
1733 0 : case PG_STAT_STATEMENTS_COLS_V1_2:
1734 0 : if (api_version != PGSS_V1_2)
1735 0 : elog(ERROR, "incorrect number of output arguments");
1736 0 : break;
1737 2 : case PG_STAT_STATEMENTS_COLS_V1_3:
1738 2 : if (api_version != PGSS_V1_3)
1739 0 : elog(ERROR, "incorrect number of output arguments");
1740 2 : break;
1741 2 : case PG_STAT_STATEMENTS_COLS_V1_8:
1742 2 : if (api_version != PGSS_V1_8)
1743 0 : elog(ERROR, "incorrect number of output arguments");
1744 2 : break;
1745 2 : case PG_STAT_STATEMENTS_COLS_V1_9:
1746 2 : if (api_version != PGSS_V1_9)
1747 0 : elog(ERROR, "incorrect number of output arguments");
1748 2 : break;
1749 2 : case PG_STAT_STATEMENTS_COLS_V1_10:
1750 2 : if (api_version != PGSS_V1_10)
1751 0 : elog(ERROR, "incorrect number of output arguments");
1752 2 : break;
1753 2 : case PG_STAT_STATEMENTS_COLS_V1_11:
1754 2 : if (api_version != PGSS_V1_11)
1755 0 : elog(ERROR, "incorrect number of output arguments");
1756 2 : break;
1757 2 : case PG_STAT_STATEMENTS_COLS_V1_12:
1758 2 : if (api_version != PGSS_V1_12)
1759 0 : elog(ERROR, "incorrect number of output arguments");
1760 2 : break;
1761 250 : case PG_STAT_STATEMENTS_COLS_V1_13:
1762 250 : if (api_version != PGSS_V1_13)
1763 0 : elog(ERROR, "incorrect number of output arguments");
1764 250 : break;
1765 0 : default:
1766 0 : elog(ERROR, "incorrect number of output arguments");
1767 : }
1768 :
1769 : /*
1770 : * We'd like to load the query text file (if needed) while not holding any
1771 : * lock on pgss->lock. In the worst case we'll have to do this again
1772 : * after we have the lock, but it's unlikely enough to make this a win
1773 : * despite occasional duplicated work. We need to reload if anybody
1774 : * writes to the file (either a retail qtext_store(), or a garbage
1775 : * collection) between this point and where we've gotten shared lock. If
1776 : * a qtext_store is actually in progress when we look, we might as well
1777 : * skip the speculative load entirely.
1778 : */
1779 262 : if (showtext)
1780 : {
1781 : int n_writers;
1782 :
1783 : /* Take the mutex so we can examine variables */
1784 262 : SpinLockAcquire(&pgss->mutex);
1785 262 : extent = pgss->extent;
1786 262 : n_writers = pgss->n_writers;
1787 262 : gc_count = pgss->gc_count;
1788 262 : SpinLockRelease(&pgss->mutex);
1789 :
1790 : /* No point in loading file now if there are active writers */
1791 262 : if (n_writers == 0)
1792 262 : qbuffer = qtext_load_file(&qbuffer_size);
1793 : }
1794 :
1795 : /*
1796 : * Get shared lock, load or reload the query text file if we must, and
1797 : * iterate over the hashtable entries.
1798 : *
1799 : * With a large hash table, we might be holding the lock rather longer
1800 : * than one could wish. However, this only blocks creation of new hash
1801 : * table entries, and the larger the hash table the less likely that is to
1802 : * be needed. So we can hope this is okay. Perhaps someday we'll decide
1803 : * we need to partition the hash table to limit the time spent holding any
1804 : * one lock.
1805 : */
1806 262 : LWLockAcquire(pgss->lock, LW_SHARED);
1807 :
1808 262 : if (showtext)
1809 : {
1810 : /*
1811 : * Here it is safe to examine extent and gc_count without taking the
1812 : * mutex. Note that although other processes might change
1813 : * pgss->extent just after we look at it, the strings they then write
1814 : * into the file cannot yet be referenced in the hashtable, so we
1815 : * don't care whether we see them or not.
1816 : *
1817 : * If qtext_load_file fails, we just press on; we'll return NULL for
1818 : * every query text.
1819 : */
1820 262 : if (qbuffer == NULL ||
1821 262 : pgss->extent != extent ||
1822 262 : pgss->gc_count != gc_count)
1823 : {
1824 0 : free(qbuffer);
1825 0 : qbuffer = qtext_load_file(&qbuffer_size);
1826 : }
1827 : }
1828 :
1829 262 : hash_seq_init(&hash_seq, pgss_hash);
1830 53672 : while ((entry = hash_seq_search(&hash_seq)) != NULL)
1831 : {
1832 : Datum values[PG_STAT_STATEMENTS_COLS];
1833 : bool nulls[PG_STAT_STATEMENTS_COLS];
1834 53410 : int i = 0;
1835 : Counters tmp;
1836 : double stddev;
1837 53410 : int64 queryid = entry->key.queryid;
1838 : TimestampTz stats_since;
1839 : TimestampTz minmax_stats_since;
1840 :
1841 53410 : memset(values, 0, sizeof(values));
1842 53410 : memset(nulls, 0, sizeof(nulls));
1843 :
1844 53410 : values[i++] = ObjectIdGetDatum(entry->key.userid);
1845 53410 : values[i++] = ObjectIdGetDatum(entry->key.dbid);
1846 53410 : if (api_version >= PGSS_V1_9)
1847 53386 : values[i++] = BoolGetDatum(entry->key.toplevel);
1848 :
1849 53410 : if (is_allowed_role || entry->key.userid == userid)
1850 : {
1851 53402 : if (api_version >= PGSS_V1_2)
1852 53402 : values[i++] = Int64GetDatumFast(queryid);
1853 :
1854 53402 : if (showtext)
1855 : {
1856 53402 : char *qstr = qtext_fetch(entry->query_offset,
1857 : entry->query_len,
1858 : qbuffer,
1859 : qbuffer_size);
1860 :
1861 53402 : if (qstr)
1862 : {
1863 : char *enc;
1864 :
1865 53402 : enc = pg_any_to_server(qstr,
1866 : entry->query_len,
1867 : entry->encoding);
1868 :
1869 53402 : values[i++] = CStringGetTextDatum(enc);
1870 :
1871 53402 : if (enc != qstr)
1872 0 : pfree(enc);
1873 : }
1874 : else
1875 : {
1876 : /* Just return a null if we fail to find the text */
1877 0 : nulls[i++] = true;
1878 : }
1879 : }
1880 : else
1881 : {
1882 : /* Query text not requested */
1883 0 : nulls[i++] = true;
1884 : }
1885 : }
1886 : else
1887 : {
1888 : /* Don't show queryid */
1889 8 : if (api_version >= PGSS_V1_2)
1890 8 : nulls[i++] = true;
1891 :
1892 : /*
1893 : * Don't show query text, but hint as to the reason for not doing
1894 : * so if it was requested
1895 : */
1896 8 : if (showtext)
1897 8 : values[i++] = CStringGetTextDatum("<insufficient privilege>");
1898 : else
1899 0 : nulls[i++] = true;
1900 : }
1901 :
1902 : /* copy counters to a local variable to keep locking time short */
1903 53410 : SpinLockAcquire(&entry->mutex);
1904 53410 : tmp = entry->counters;
1905 53410 : SpinLockRelease(&entry->mutex);
1906 :
1907 : /*
1908 : * The spinlock is not required when reading these two as they are
1909 : * always updated when holding pgss->lock exclusively.
1910 : */
1911 53410 : stats_since = entry->stats_since;
1912 53410 : minmax_stats_since = entry->minmax_stats_since;
1913 :
1914 : /* Skip entry if unexecuted (ie, it's a pending "sticky" entry) */
1915 53410 : if (IS_STICKY(tmp))
1916 78 : continue;
1917 :
1918 : /* Note that we rely on PGSS_PLAN being 0 and PGSS_EXEC being 1. */
1919 159996 : for (int kind = 0; kind < PGSS_NUMKIND; kind++)
1920 : {
1921 106664 : if (kind == PGSS_EXEC || api_version >= PGSS_V1_8)
1922 : {
1923 106656 : values[i++] = Int64GetDatumFast(tmp.calls[kind]);
1924 106656 : values[i++] = Float8GetDatumFast(tmp.total_time[kind]);
1925 : }
1926 :
1927 106664 : if ((kind == PGSS_EXEC && api_version >= PGSS_V1_3) ||
1928 : api_version >= PGSS_V1_8)
1929 : {
1930 106656 : values[i++] = Float8GetDatumFast(tmp.min_time[kind]);
1931 106656 : values[i++] = Float8GetDatumFast(tmp.max_time[kind]);
1932 106656 : values[i++] = Float8GetDatumFast(tmp.mean_time[kind]);
1933 :
1934 : /*
1935 : * Note we are calculating the population variance here, not
1936 : * the sample variance, as we have data for the whole
1937 : * population, so Bessel's correction is not used, and we
1938 : * don't divide by tmp.calls - 1.
1939 : */
1940 106656 : if (tmp.calls[kind] > 1)
1941 10016 : stddev = sqrt(tmp.sum_var_time[kind] / tmp.calls[kind]);
1942 : else
1943 96640 : stddev = 0.0;
1944 106656 : values[i++] = Float8GetDatumFast(stddev);
1945 : }
1946 : }
1947 53332 : values[i++] = Int64GetDatumFast(tmp.rows);
1948 53332 : values[i++] = Int64GetDatumFast(tmp.shared_blks_hit);
1949 53332 : values[i++] = Int64GetDatumFast(tmp.shared_blks_read);
1950 53332 : if (api_version >= PGSS_V1_1)
1951 53332 : values[i++] = Int64GetDatumFast(tmp.shared_blks_dirtied);
1952 53332 : values[i++] = Int64GetDatumFast(tmp.shared_blks_written);
1953 53332 : values[i++] = Int64GetDatumFast(tmp.local_blks_hit);
1954 53332 : values[i++] = Int64GetDatumFast(tmp.local_blks_read);
1955 53332 : if (api_version >= PGSS_V1_1)
1956 53332 : values[i++] = Int64GetDatumFast(tmp.local_blks_dirtied);
1957 53332 : values[i++] = Int64GetDatumFast(tmp.local_blks_written);
1958 53332 : values[i++] = Int64GetDatumFast(tmp.temp_blks_read);
1959 53332 : values[i++] = Int64GetDatumFast(tmp.temp_blks_written);
1960 53332 : if (api_version >= PGSS_V1_1)
1961 : {
1962 53332 : values[i++] = Float8GetDatumFast(tmp.shared_blk_read_time);
1963 53332 : values[i++] = Float8GetDatumFast(tmp.shared_blk_write_time);
1964 : }
1965 53332 : if (api_version >= PGSS_V1_11)
1966 : {
1967 53276 : values[i++] = Float8GetDatumFast(tmp.local_blk_read_time);
1968 53276 : values[i++] = Float8GetDatumFast(tmp.local_blk_write_time);
1969 : }
1970 53332 : if (api_version >= PGSS_V1_10)
1971 : {
1972 53294 : values[i++] = Float8GetDatumFast(tmp.temp_blk_read_time);
1973 53294 : values[i++] = Float8GetDatumFast(tmp.temp_blk_write_time);
1974 : }
1975 53332 : if (api_version >= PGSS_V1_8)
1976 : {
1977 : char buf[256];
1978 : Datum wal_bytes;
1979 :
1980 53324 : values[i++] = Int64GetDatumFast(tmp.wal_records);
1981 53324 : values[i++] = Int64GetDatumFast(tmp.wal_fpi);
1982 :
1983 53324 : snprintf(buf, sizeof buf, UINT64_FORMAT, tmp.wal_bytes);
1984 :
1985 : /* Convert to numeric. */
1986 53324 : wal_bytes = DirectFunctionCall3(numeric_in,
1987 : CStringGetDatum(buf),
1988 : ObjectIdGetDatum(0),
1989 : Int32GetDatum(-1));
1990 53324 : values[i++] = wal_bytes;
1991 : }
1992 53332 : if (api_version >= PGSS_V1_12)
1993 : {
1994 53256 : values[i++] = Int64GetDatumFast(tmp.wal_buffers_full);
1995 : }
1996 53332 : if (api_version >= PGSS_V1_10)
1997 : {
1998 53294 : values[i++] = Int64GetDatumFast(tmp.jit_functions);
1999 53294 : values[i++] = Float8GetDatumFast(tmp.jit_generation_time);
2000 53294 : values[i++] = Int64GetDatumFast(tmp.jit_inlining_count);
2001 53294 : values[i++] = Float8GetDatumFast(tmp.jit_inlining_time);
2002 53294 : values[i++] = Int64GetDatumFast(tmp.jit_optimization_count);
2003 53294 : values[i++] = Float8GetDatumFast(tmp.jit_optimization_time);
2004 53294 : values[i++] = Int64GetDatumFast(tmp.jit_emission_count);
2005 53294 : values[i++] = Float8GetDatumFast(tmp.jit_emission_time);
2006 : }
2007 53332 : if (api_version >= PGSS_V1_11)
2008 : {
2009 53276 : values[i++] = Int64GetDatumFast(tmp.jit_deform_count);
2010 53276 : values[i++] = Float8GetDatumFast(tmp.jit_deform_time);
2011 : }
2012 53332 : if (api_version >= PGSS_V1_12)
2013 : {
2014 53256 : values[i++] = Int64GetDatumFast(tmp.parallel_workers_to_launch);
2015 53256 : values[i++] = Int64GetDatumFast(tmp.parallel_workers_launched);
2016 : }
2017 53332 : if (api_version >= PGSS_V1_13)
2018 : {
2019 53246 : values[i++] = Int64GetDatumFast(tmp.generic_plan_calls);
2020 53246 : values[i++] = Int64GetDatumFast(tmp.custom_plan_calls);
2021 : }
2022 53332 : if (api_version >= PGSS_V1_11)
2023 : {
2024 53276 : values[i++] = TimestampTzGetDatum(stats_since);
2025 53276 : values[i++] = TimestampTzGetDatum(minmax_stats_since);
2026 : }
2027 :
2028 : Assert(i == (api_version == PGSS_V1_0 ? PG_STAT_STATEMENTS_COLS_V1_0 :
2029 : api_version == PGSS_V1_1 ? PG_STAT_STATEMENTS_COLS_V1_1 :
2030 : api_version == PGSS_V1_2 ? PG_STAT_STATEMENTS_COLS_V1_2 :
2031 : api_version == PGSS_V1_3 ? PG_STAT_STATEMENTS_COLS_V1_3 :
2032 : api_version == PGSS_V1_8 ? PG_STAT_STATEMENTS_COLS_V1_8 :
2033 : api_version == PGSS_V1_9 ? PG_STAT_STATEMENTS_COLS_V1_9 :
2034 : api_version == PGSS_V1_10 ? PG_STAT_STATEMENTS_COLS_V1_10 :
2035 : api_version == PGSS_V1_11 ? PG_STAT_STATEMENTS_COLS_V1_11 :
2036 : api_version == PGSS_V1_12 ? PG_STAT_STATEMENTS_COLS_V1_12 :
2037 : api_version == PGSS_V1_13 ? PG_STAT_STATEMENTS_COLS_V1_13 :
2038 : -1 /* fail if you forget to update this assert */ ));
2039 :
2040 53332 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
2041 : }
2042 :
2043 262 : LWLockRelease(pgss->lock);
2044 :
2045 262 : free(qbuffer);
2046 262 : }
2047 :
2048 : /* Number of output arguments (columns) for pg_stat_statements_info */
2049 : #define PG_STAT_STATEMENTS_INFO_COLS 2
2050 :
2051 : /*
2052 : * Return statistics of pg_stat_statements.
2053 : */
2054 : Datum
2055 4 : pg_stat_statements_info(PG_FUNCTION_ARGS)
2056 : {
2057 : pgssGlobalStats stats;
2058 : TupleDesc tupdesc;
2059 4 : Datum values[PG_STAT_STATEMENTS_INFO_COLS] = {0};
2060 4 : bool nulls[PG_STAT_STATEMENTS_INFO_COLS] = {0};
2061 :
2062 4 : if (!pgss || !pgss_hash)
2063 0 : ereport(ERROR,
2064 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2065 : errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
2066 :
2067 : /* Build a tuple descriptor for our result type */
2068 4 : if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
2069 0 : elog(ERROR, "return type must be a row type");
2070 :
2071 : /* Read global statistics for pg_stat_statements */
2072 4 : SpinLockAcquire(&pgss->mutex);
2073 4 : stats = pgss->stats;
2074 4 : SpinLockRelease(&pgss->mutex);
2075 :
2076 4 : values[0] = Int64GetDatum(stats.dealloc);
2077 4 : values[1] = TimestampTzGetDatum(stats.stats_reset);
2078 :
2079 4 : PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
2080 : }
2081 :
2082 : /*
2083 : * Estimate shared memory space needed.
2084 : */
2085 : static Size
2086 14 : pgss_memsize(void)
2087 : {
2088 : Size size;
2089 :
2090 14 : size = MAXALIGN(sizeof(pgssSharedState));
2091 14 : size = add_size(size, hash_estimate_size(pgss_max, sizeof(pgssEntry)));
2092 :
2093 14 : return size;
2094 : }
2095 :
2096 : /*
2097 : * Allocate a new hashtable entry.
2098 : * caller must hold an exclusive lock on pgss->lock
2099 : *
2100 : * "query" need not be null-terminated; we rely on query_len instead
2101 : *
2102 : * If "sticky" is true, make the new entry artificially sticky so that it will
2103 : * probably still be there when the query finishes execution. We do this by
2104 : * giving it a median usage value rather than the normal value. (Strictly
2105 : * speaking, query strings are normalized on a best effort basis, though it
2106 : * would be difficult to demonstrate this even under artificial conditions.)
2107 : *
2108 : * Note: despite needing exclusive lock, it's not an error for the target
2109 : * entry to already exist. This is because pgss_store releases and
2110 : * reacquires lock after failing to find a match; so someone else could
2111 : * have made the entry while we waited to get exclusive lock.
2112 : */
2113 : static pgssEntry *
2114 108266 : entry_alloc(pgssHashKey *key, Size query_offset, int query_len, int encoding,
2115 : bool sticky)
2116 : {
2117 : pgssEntry *entry;
2118 : bool found;
2119 :
2120 : /* Make space if needed */
2121 108266 : while (hash_get_num_entries(pgss_hash) >= pgss_max)
2122 0 : entry_dealloc();
2123 :
2124 : /* Find or create an entry with desired hash code */
2125 108266 : entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found);
2126 :
2127 108266 : if (!found)
2128 : {
2129 : /* New entry, initialize it */
2130 :
2131 : /* reset the statistics */
2132 108266 : memset(&entry->counters, 0, sizeof(Counters));
2133 : /* set the appropriate initial usage count */
2134 108266 : entry->counters.usage = sticky ? pgss->cur_median_usage : USAGE_INIT;
2135 : /* re-initialize the mutex each time ... we assume no one using it */
2136 108266 : SpinLockInit(&entry->mutex);
2137 : /* ... and don't forget the query text metadata */
2138 : Assert(query_len >= 0);
2139 108266 : entry->query_offset = query_offset;
2140 108266 : entry->query_len = query_len;
2141 108266 : entry->encoding = encoding;
2142 108266 : entry->stats_since = GetCurrentTimestamp();
2143 108266 : entry->minmax_stats_since = entry->stats_since;
2144 : }
2145 :
2146 108266 : return entry;
2147 : }
2148 :
2149 : /*
2150 : * qsort comparator for sorting into increasing usage order
2151 : */
2152 : static int
2153 0 : entry_cmp(const void *lhs, const void *rhs)
2154 : {
2155 0 : double l_usage = (*(pgssEntry *const *) lhs)->counters.usage;
2156 0 : double r_usage = (*(pgssEntry *const *) rhs)->counters.usage;
2157 :
2158 0 : if (l_usage < r_usage)
2159 0 : return -1;
2160 0 : else if (l_usage > r_usage)
2161 0 : return +1;
2162 : else
2163 0 : return 0;
2164 : }
2165 :
/*
 * Deallocate least-used entries.
 *
 * Caller must hold an exclusive lock on pgss->lock.
 */
static void
entry_dealloc(void)
{
	HASH_SEQ_STATUS hash_seq;
	pgssEntry **entries;		/* array of pointers to every hash entry */
	pgssEntry  *entry;
	int			nvictims;		/* how many entries we will remove */
	int			i;
	Size		tottextlen;		/* total length of valid query texts */
	int			nvalidtexts;	/* count of entries with a valid text */

	/*
	 * Sort entries by usage and deallocate USAGE_DEALLOC_PERCENT of them.
	 * While we're scanning the table, apply the decay factor to the usage
	 * values, and update the mean query length.
	 *
	 * Note that the mean query length is almost immediately obsolete, since
	 * we compute it before not after discarding the least-used entries.
	 * Hopefully, that doesn't affect the mean too much; it doesn't seem worth
	 * making two passes to get a more current result.  Likewise, the new
	 * cur_median_usage includes the entries we're about to zap.
	 */

	entries = palloc(hash_get_num_entries(pgss_hash) * sizeof(pgssEntry *));

	i = 0;
	tottextlen = 0;
	nvalidtexts = 0;

	/* Single pass: collect pointers, decay usages, sum text lengths. */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		entries[i++] = entry;
		/* "Sticky" entries get a different usage decay rate. */
		if (IS_STICKY(entry->counters))
			entry->counters.usage *= STICKY_DECREASE_FACTOR;
		else
			entry->counters.usage *= USAGE_DECREASE_FACTOR;
		/* In the mean length computation, ignore dropped texts. */
		if (entry->query_len >= 0)
		{
			/* +1 accounts for the terminating null stored in the file */
			tottextlen += entry->query_len + 1;
			nvalidtexts++;
		}
	}

	/* Sort into increasing order by usage */
	qsort(entries, i, sizeof(pgssEntry *), entry_cmp);

	/* Record the (approximate) median usage */
	if (i > 0)
		pgss->cur_median_usage = entries[i / 2]->counters.usage;
	/* Record the mean query length */
	if (nvalidtexts > 0)
		pgss->mean_query_len = tottextlen / nvalidtexts;
	else
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	/* Now zap an appropriate fraction of lowest-usage entries */
	/* Always remove at least 10, but never more than we actually have. */
	nvictims = Max(10, i * USAGE_DEALLOC_PERCENT / 100);
	nvictims = Min(nvictims, i);

	for (i = 0; i < nvictims; i++)
	{
		hash_search(pgss_hash, &entries[i]->key, HASH_REMOVE, NULL);
	}

	pfree(entries);

	/* Increment the number of times entries are deallocated */
	SpinLockAcquire(&pgss->mutex);
	pgss->stats.dealloc += 1;
	SpinLockRelease(&pgss->mutex);
}
2245 :
/*
 * Given a query string (not necessarily null-terminated), allocate a new
 * entry in the external query text file and store the string there.
 *
 * If successful, returns true, and stores the new entry's offset in the file
 * into *query_offset.  Also, if gc_count isn't NULL, *gc_count is set to the
 * number of garbage collections that have occurred so far.
 *
 * On failure, returns false.
 *
 * At least a shared lock on pgss->lock must be held by the caller, so as
 * to prevent a concurrent garbage collection.  Share-lock-holding callers
 * should pass a gc_count pointer to obtain the number of garbage collections,
 * so that they can recheck the count after obtaining exclusive lock to
 * detect whether a garbage collection occurred (and removed this entry).
 */
static bool
qtext_store(const char *query, int query_len,
			Size *query_offset, int *gc_count)
{
	Size		off;			/* file offset reserved for this text */
	int			fd;

	/*
	 * We use a spinlock to protect extent/n_writers/gc_count, so that
	 * multiple processes may execute this function concurrently.
	 */
	SpinLockAcquire(&pgss->mutex);
	off = pgss->extent;
	/* Reserve space for the text plus its terminating null byte. */
	pgss->extent += query_len + 1;
	/* Advertise that a write is in progress (see n_writers readers). */
	pgss->n_writers++;
	if (gc_count)
		*gc_count = pgss->gc_count;
	SpinLockRelease(&pgss->mutex);

	*query_offset = off;

	/*
	 * Don't allow the file to grow larger than what qtext_load_file can
	 * (theoretically) handle.  This has been seen to be reachable on 32-bit
	 * platforms.
	 */
	if (unlikely(query_len >= MaxAllocHugeSize - off))
	{
		errno = EFBIG;			/* not quite right, but it'll do */
		fd = -1;
		goto error;
	}

	/* Now write the data into the successfully-reserved part of the file */
	fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDWR | O_CREAT | PG_BINARY);
	if (fd < 0)
		goto error;

	if (pg_pwrite(fd, query, query_len, off) != query_len)
		goto error;
	/* Append the terminating null byte just past the text. */
	if (pg_pwrite(fd, "\0", 1, off + query_len) != 1)
		goto error;

	CloseTransientFile(fd);

	/* Mark our write complete */
	SpinLockAcquire(&pgss->mutex);
	pgss->n_writers--;
	SpinLockRelease(&pgss->mutex);

	return true;

error:
	/* Report the failure, then clean up whatever we did manage to do. */
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not write file \"%s\": %m",
					PGSS_TEXT_FILE)));

	if (fd >= 0)
		CloseTransientFile(fd);

	/* Mark our write complete */
	SpinLockAcquire(&pgss->mutex);
	pgss->n_writers--;
	SpinLockRelease(&pgss->mutex);

	return false;
}
2330 :
/*
 * Read the external query text file into a malloc'd buffer.
 *
 * Returns NULL (without throwing an error) if unable to read, eg
 * file not there or insufficient memory.
 *
 * On success, the buffer size is also returned into *buffer_size.
 *
 * This can be called without any lock on pgss->lock, but in that case
 * the caller is responsible for verifying that the result is sane.
 */
static char *
qtext_load_file(Size *buffer_size)
{
	char	   *buf;
	int			fd;
	struct stat stat;
	Size		nread;

	fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDONLY | PG_BINARY);
	if (fd < 0)
	{
		/* A missing file is normal (nothing stored yet); stay quiet then. */
		if (errno != ENOENT)
			ereport(LOG,
					(errcode_for_file_access(),
					 errmsg("could not read file \"%s\": %m",
							PGSS_TEXT_FILE)));
		return NULL;
	}

	/* Get file length */
	if (fstat(fd, &stat))
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not stat file \"%s\": %m",
						PGSS_TEXT_FILE)));
		CloseTransientFile(fd);
		return NULL;
	}

	/* Allocate buffer; beware that off_t might be wider than size_t */
	if (stat.st_size <= MaxAllocHugeSize)
		buf = (char *) malloc(stat.st_size);
	else
		buf = NULL;
	if (buf == NULL)
	{
		ereport(LOG,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Could not allocate enough memory to read file \"%s\".",
						   PGSS_TEXT_FILE)));
		CloseTransientFile(fd);
		return NULL;
	}

	/*
	 * OK, slurp in the file.  Windows fails if we try to read more than
	 * INT_MAX bytes at once, and other platforms might not like that either,
	 * so read a very large file in 1GB segments.
	 */
	nread = 0;
	while (nread < stat.st_size)
	{
		int			toread = Min(1024 * 1024 * 1024, stat.st_size - nread);

		/*
		 * If we get a short read and errno doesn't get set, the reason is
		 * probably that garbage collection truncated the file since we did
		 * the fstat(), so we don't log a complaint --- but we don't return
		 * the data, either, since it's most likely corrupt due to concurrent
		 * writes from garbage collection.
		 */
		errno = 0;
		if (read(fd, buf + nread, toread) != toread)
		{
			if (errno)
				ereport(LOG,
						(errcode_for_file_access(),
						 errmsg("could not read file \"%s\": %m",
								PGSS_TEXT_FILE)));
			free(buf);
			CloseTransientFile(fd);
			return NULL;
		}
		nread += toread;
	}

	/* Close failure is worth logging but doesn't invalidate the data read. */
	if (CloseTransientFile(fd) != 0)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not close file \"%s\": %m", PGSS_TEXT_FILE)));

	*buffer_size = nread;
	return buf;
}
2428 :
2429 : /*
2430 : * Locate a query text in the file image previously read by qtext_load_file().
2431 : *
2432 : * We validate the given offset/length, and return NULL if bogus. Otherwise,
2433 : * the result points to a null-terminated string within the buffer.
2434 : */
2435 : static char *
2436 159910 : qtext_fetch(Size query_offset, int query_len,
2437 : char *buffer, Size buffer_size)
2438 : {
2439 : /* File read failed? */
2440 159910 : if (buffer == NULL)
2441 0 : return NULL;
2442 : /* Bogus offset/length? */
2443 159910 : if (query_len < 0 ||
2444 159910 : query_offset + query_len >= buffer_size)
2445 0 : return NULL;
2446 : /* As a further sanity check, make sure there's a trailing null */
2447 159910 : if (buffer[query_offset + query_len] != '\0')
2448 0 : return NULL;
2449 : /* Looks OK */
2450 159910 : return buffer + query_offset;
2451 : }
2452 :
/*
 * Do we need to garbage-collect the external query text file?
 *
 * Caller should hold at least a shared lock on pgss->lock.
 */
static bool
need_gc_qtexts(void)
{
	Size		extent;			/* current end of the query text file */

	/* Read shared extent pointer */
	SpinLockAcquire(&pgss->mutex);
	extent = pgss->extent;
	SpinLockRelease(&pgss->mutex);

	/*
	 * Don't proceed if file does not exceed 512 bytes per possible entry.
	 *
	 * Here and in the next test, 32-bit machines have overflow hazards if
	 * pgss_max and/or mean_query_len are large.  Force the multiplications
	 * and comparisons to be done in uint64 arithmetic to forestall trouble.
	 */
	if ((uint64) extent < (uint64) 512 * pgss_max)
		return false;

	/*
	 * Don't proceed if file is less than about 50% bloat.  Nothing can or
	 * should be done in the event of unusually large query texts accounting
	 * for file's large size.  We go to the trouble of maintaining the mean
	 * query length in order to prevent garbage collection from thrashing
	 * uselessly.
	 */
	if ((uint64) extent < (uint64) pgss->mean_query_len * pgss_max * 2)
		return false;

	return true;
}
2490 :
/*
 * Garbage-collect orphaned query texts in external file.
 *
 * This won't be called often in the typical case, since it's likely that
 * there won't be too much churn, and besides, a similar compaction process
 * occurs when serializing to disk at shutdown or as part of resetting.
 * Despite this, it seems prudent to plan for the edge case where the file
 * becomes unreasonably large, with no other method of compaction likely to
 * occur in the foreseeable future.
 *
 * The caller must hold an exclusive lock on pgss->lock.
 *
 * At the first sign of trouble we unlink the query text file to get a clean
 * slate (although existing statistics are retained), rather than risk
 * thrashing by allowing the same problem case to recur indefinitely.
 */
static void
gc_qtexts(void)
{
	char	   *qbuffer;		/* in-memory image of the old file */
	Size		qbuffer_size;
	FILE	   *qfile = NULL;
	HASH_SEQ_STATUS hash_seq;
	pgssEntry  *entry;
	Size		extent;			/* running offset in the rewritten file */
	int			nentries;		/* entries whose texts were kept */

	/*
	 * When called from pgss_store, some other session might have proceeded
	 * with garbage collection in the no-lock-held interim of lock strength
	 * escalation.  Check once more that this is actually necessary.
	 */
	if (!need_gc_qtexts())
		return;

	/*
	 * Load the old texts file.  If we fail (out of memory, for instance),
	 * invalidate query texts.  Hopefully this is rare.  It might seem better
	 * to leave things alone on an OOM failure, but the problem is that the
	 * file is only going to get bigger; hoping for a future non-OOM result is
	 * risky and can easily lead to complete denial of service.
	 */
	qbuffer = qtext_load_file(&qbuffer_size);
	if (qbuffer == NULL)
		goto gc_fail;

	/*
	 * We overwrite the query texts file in place, so as to reduce the risk of
	 * an out-of-disk-space failure.  Since the file is guaranteed not to get
	 * larger, this should always work on traditional filesystems; though we
	 * could still lose on copy-on-write filesystems.
	 */
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not write file \"%s\": %m",
						PGSS_TEXT_FILE)));
		goto gc_fail;
	}

	extent = 0;
	nentries = 0;

	/* Copy each still-referenced text to the front of the file. */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		int			query_len = entry->query_len;
		char	   *qry = qtext_fetch(entry->query_offset,
									  query_len,
									  qbuffer,
									  qbuffer_size);

		if (qry == NULL)
		{
			/* Trouble ... drop the text */
			entry->query_offset = 0;
			entry->query_len = -1;
			/* entry will not be counted in mean query length computation */
			continue;
		}

		/* query_len + 1 includes the terminating null byte. */
		if (fwrite(qry, 1, query_len + 1, qfile) != query_len + 1)
		{
			ereport(LOG,
					(errcode_for_file_access(),
					 errmsg("could not write file \"%s\": %m",
							PGSS_TEXT_FILE)));
			/* Must terminate the seq scan before bailing out. */
			hash_seq_term(&hash_seq);
			goto gc_fail;
		}

		entry->query_offset = extent;
		extent += query_len + 1;
		nentries++;
	}

	/*
	 * Truncate away any now-unused space.  If this fails for some odd reason,
	 * we log it, but there's no need to fail.
	 */
	if (ftruncate(fileno(qfile), extent) != 0)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not truncate file \"%s\": %m",
						PGSS_TEXT_FILE)));

	/* FreeFile flushes; a failure here means the new contents are suspect. */
	if (FreeFile(qfile))
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not write file \"%s\": %m",
						PGSS_TEXT_FILE)));
		qfile = NULL;
		goto gc_fail;
	}

	elog(DEBUG1, "pgss gc of queries file shrunk size from %zu to %zu",
		 pgss->extent, extent);

	/* Reset the shared extent pointer */
	pgss->extent = extent;

	/*
	 * Also update the mean query length, to be sure that need_gc_qtexts()
	 * won't still think we have a problem.
	 */
	if (nentries > 0)
		pgss->mean_query_len = extent / nentries;
	else
		pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	free(qbuffer);

	/*
	 * OK, count a garbage collection cycle.  (Note: even though we have
	 * exclusive lock on pgss->lock, we must take pgss->mutex for this, since
	 * other processes may examine gc_count while holding only the mutex.
	 * Also, we have to advance the count *after* we've rewritten the file,
	 * else other processes might not realize they read a stale file.)
	 */
	record_gc_qtexts();

	return;

gc_fail:
	/* clean up resources */
	if (qfile)
		FreeFile(qfile);
	free(qbuffer);

	/*
	 * Since the contents of the external file are now uncertain, mark all
	 * hashtable entries as having invalid texts.
	 */
	hash_seq_init(&hash_seq, pgss_hash);
	while ((entry = hash_seq_search(&hash_seq)) != NULL)
	{
		entry->query_offset = 0;
		entry->query_len = -1;
	}

	/*
	 * Destroy the query text file and create a new, empty one
	 */
	(void) unlink(PGSS_TEXT_FILE);
	qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
	if (qfile == NULL)
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not recreate file \"%s\": %m",
						PGSS_TEXT_FILE)));
	else
		FreeFile(qfile);

	/* Reset the shared extent pointer */
	pgss->extent = 0;

	/* Reset mean_query_len to match the new state */
	pgss->mean_query_len = ASSUMED_LENGTH_INIT;

	/*
	 * Bump the GC count even though we failed.
	 *
	 * This is needed to make concurrent readers of file without any lock on
	 * pgss->lock notice existence of new version of file.  Once readers
	 * subsequently observe a change in GC count with pgss->lock held, that
	 * forces a safe reopen of file.  Writers also require that we bump here,
	 * of course.  (As required by locking protocol, readers and writers don't
	 * trust earlier file contents until gc_count is found unchanged after
	 * pgss->lock acquired in shared or exclusive mode respectively.)
	 */
	record_gc_qtexts();
}
2686 :
/*
 * Reset one hashtable entry (if non-NULL) on behalf of entry_reset().
 *
 * This macro references variables from the caller's scope: "minmax_only"
 * chooses between clearing just the min/max timing statistics versus
 * removing the whole entry, "stats_reset" supplies the timestamp recorded
 * in minmax_stats_since, and "num_remove" counts removed entries.  The
 * caller holds pgss->lock exclusively, as required for HASH_REMOVE.
 */
#define SINGLE_ENTRY_RESET(e) \
if (e) { \
	if (minmax_only) { \
		/* When requested reset only min/max statistics of an entry */ \
		for (int kind = 0; kind < PGSS_NUMKIND; kind++) \
		{ \
			e->counters.max_time[kind] = 0; \
			e->counters.min_time[kind] = 0; \
		} \
		e->minmax_stats_since = stats_reset; \
	} \
	else \
	{ \
		/* Remove the key otherwise */ \
		hash_search(pgss_hash, &e->key, HASH_REMOVE, NULL); \
		num_remove++; \
	} \
}
2705 :
2706 : /*
2707 : * Reset entries corresponding to parameters passed.
2708 : */
2709 : static TimestampTz
2710 236 : entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
2711 : {
2712 : HASH_SEQ_STATUS hash_seq;
2713 : pgssEntry *entry;
2714 : FILE *qfile;
2715 : int64 num_entries;
2716 236 : int64 num_remove = 0;
2717 : pgssHashKey key;
2718 : TimestampTz stats_reset;
2719 :
2720 236 : if (!pgss || !pgss_hash)
2721 0 : ereport(ERROR,
2722 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2723 : errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
2724 :
2725 236 : LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
2726 236 : num_entries = hash_get_num_entries(pgss_hash);
2727 :
2728 236 : stats_reset = GetCurrentTimestamp();
2729 :
2730 236 : if (userid != 0 && dbid != 0 && queryid != INT64CONST(0))
2731 : {
2732 : /* If all the parameters are available, use the fast path. */
2733 2 : memset(&key, 0, sizeof(pgssHashKey));
2734 2 : key.userid = userid;
2735 2 : key.dbid = dbid;
2736 2 : key.queryid = queryid;
2737 :
2738 : /*
2739 : * Reset the entry if it exists, starting with the non-top-level
2740 : * entry.
2741 : */
2742 2 : key.toplevel = false;
2743 2 : entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);
2744 :
2745 2 : SINGLE_ENTRY_RESET(entry);
2746 :
2747 : /* Also reset the top-level entry if it exists. */
2748 2 : key.toplevel = true;
2749 2 : entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);
2750 :
2751 2 : SINGLE_ENTRY_RESET(entry);
2752 : }
2753 234 : else if (userid != 0 || dbid != 0 || queryid != INT64CONST(0))
2754 : {
2755 : /* Reset entries corresponding to valid parameters. */
2756 8 : hash_seq_init(&hash_seq, pgss_hash);
2757 102 : while ((entry = hash_seq_search(&hash_seq)) != NULL)
2758 : {
2759 94 : if ((!userid || entry->key.userid == userid) &&
2760 72 : (!dbid || entry->key.dbid == dbid) &&
2761 68 : (!queryid || entry->key.queryid == queryid))
2762 : {
2763 14 : SINGLE_ENTRY_RESET(entry);
2764 : }
2765 : }
2766 : }
2767 : else
2768 : {
2769 : /* Reset all entries. */
2770 226 : hash_seq_init(&hash_seq, pgss_hash);
2771 2212 : while ((entry = hash_seq_search(&hash_seq)) != NULL)
2772 : {
2773 1804 : SINGLE_ENTRY_RESET(entry);
2774 : }
2775 : }
2776 :
2777 : /* All entries are removed? */
2778 236 : if (num_entries != num_remove)
2779 12 : goto release_lock;
2780 :
2781 : /*
2782 : * Reset global statistics for pg_stat_statements since all entries are
2783 : * removed.
2784 : */
2785 224 : SpinLockAcquire(&pgss->mutex);
2786 224 : pgss->stats.dealloc = 0;
2787 224 : pgss->stats.stats_reset = stats_reset;
2788 224 : SpinLockRelease(&pgss->mutex);
2789 :
2790 : /*
2791 : * Write new empty query file, perhaps even creating a new one to recover
2792 : * if the file was missing.
2793 : */
2794 224 : qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
2795 224 : if (qfile == NULL)
2796 : {
2797 0 : ereport(LOG,
2798 : (errcode_for_file_access(),
2799 : errmsg("could not create file \"%s\": %m",
2800 : PGSS_TEXT_FILE)));
2801 0 : goto done;
2802 : }
2803 :
2804 : /* If ftruncate fails, log it, but it's not a fatal problem */
2805 224 : if (ftruncate(fileno(qfile), 0) != 0)
2806 0 : ereport(LOG,
2807 : (errcode_for_file_access(),
2808 : errmsg("could not truncate file \"%s\": %m",
2809 : PGSS_TEXT_FILE)));
2810 :
2811 224 : FreeFile(qfile);
2812 :
2813 224 : done:
2814 224 : pgss->extent = 0;
2815 : /* This counts as a query text garbage collection for our purposes */
2816 224 : record_gc_qtexts();
2817 :
2818 236 : release_lock:
2819 236 : LWLockRelease(pgss->lock);
2820 :
2821 236 : return stats_reset;
2822 : }
2823 :
2824 : /*
2825 : * Generate a normalized version of the query string that will be used to
2826 : * represent all similar queries.
2827 : *
2828 : * Note that the normalized representation may well vary depending on
2829 : * just which "equivalent" query is used to create the hashtable entry.
2830 : * We assume this is OK.
2831 : *
2832 : * If query_loc > 0, then "query" has been advanced by that much compared to
2833 : * the original string start, so we need to translate the provided locations
2834 : * to compensate. (This lets us avoid re-scanning statements before the one
2835 : * of interest, so it's worth doing.)
2836 : *
2837 : * *query_len_p contains the input string length, and is updated with
2838 : * the result string length on exit. The resulting string might be longer
2839 : * or shorter depending on what happens with replacement of constants.
2840 : *
2841 : * Returns a palloc'd string.
2842 : */
2843 : static char *
2844 21218 : generate_normalized_query(JumbleState *jstate, const char *query,
2845 : int query_loc, int *query_len_p)
2846 : {
2847 : char *norm_query;
2848 21218 : int query_len = *query_len_p;
2849 : int norm_query_buflen, /* Space allowed for norm_query */
2850 : len_to_wrt, /* Length (in bytes) to write */
2851 21218 : quer_loc = 0, /* Source query byte location */
2852 21218 : n_quer_loc = 0, /* Normalized query byte location */
2853 21218 : last_off = 0, /* Offset from start for previous tok */
2854 21218 : last_tok_len = 0; /* Length (in bytes) of that tok */
2855 21218 : int num_constants_replaced = 0;
2856 :
2857 : /*
2858 : * Get constants' lengths (core system only gives us locations). Note
2859 : * this also ensures the items are sorted by location.
2860 : */
2861 21218 : fill_in_constant_lengths(jstate, query, query_loc);
2862 :
2863 : /*
2864 : * Allow for $n symbols to be longer than the constants they replace.
2865 : * Constants must take at least one byte in text form, while a $n symbol
2866 : * certainly isn't more than 11 bytes, even if n reaches INT_MAX. We
2867 : * could refine that limit based on the max value of n for the current
2868 : * query, but it hardly seems worth any extra effort to do so.
2869 : */
2870 21218 : norm_query_buflen = query_len + jstate->clocations_count * 10;
2871 :
2872 : /* Allocate result buffer */
2873 21218 : norm_query = palloc(norm_query_buflen + 1);
2874 :
2875 83702 : for (int i = 0; i < jstate->clocations_count; i++)
2876 : {
2877 : int off, /* Offset from start for cur tok */
2878 : tok_len; /* Length (in bytes) of that tok */
2879 :
2880 : /*
2881 : * If we have an external param at this location, but no lists are
2882 : * being squashed across the query, then we skip here; this will make
2883 : * us print the characters found in the original query that represent
2884 : * the parameter in the next iteration (or after the loop is done),
2885 : * which is a bit odd but seems to work okay in most cases.
2886 : */
2887 62484 : if (jstate->clocations[i].extern_param && !jstate->has_squashed_lists)
2888 298 : continue;
2889 :
2890 62186 : off = jstate->clocations[i].location;
2891 :
2892 : /* Adjust recorded location if we're dealing with partial string */
2893 62186 : off -= query_loc;
2894 :
2895 62186 : tok_len = jstate->clocations[i].length;
2896 :
2897 62186 : if (tok_len < 0)
2898 478 : continue; /* ignore any duplicates */
2899 :
2900 : /* Copy next chunk (what precedes the next constant) */
2901 61708 : len_to_wrt = off - last_off;
2902 61708 : len_to_wrt -= last_tok_len;
2903 : Assert(len_to_wrt >= 0);
2904 61708 : memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
2905 61708 : n_quer_loc += len_to_wrt;
2906 :
2907 : /*
2908 : * And insert a param symbol in place of the constant token; and, if
2909 : * we have a squashable list, insert a placeholder comment starting
2910 : * from the list's second value.
2911 : */
2912 61708 : n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d%s",
2913 61708 : num_constants_replaced + 1 + jstate->highest_extern_param_id,
2914 61708 : jstate->clocations[i].squashed ? " /*, ... */" : "");
2915 61708 : num_constants_replaced++;
2916 :
2917 : /* move forward */
2918 61708 : quer_loc = off + tok_len;
2919 61708 : last_off = off;
2920 61708 : last_tok_len = tok_len;
2921 : }
2922 :
2923 : /*
2924 : * We've copied up until the last ignorable constant. Copy over the
2925 : * remaining bytes of the original query string.
2926 : */
2927 21218 : len_to_wrt = query_len - quer_loc;
2928 :
2929 : Assert(len_to_wrt >= 0);
2930 21218 : memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
2931 21218 : n_quer_loc += len_to_wrt;
2932 :
2933 : Assert(n_quer_loc <= norm_query_buflen);
2934 21218 : norm_query[n_quer_loc] = '\0';
2935 :
2936 21218 : *query_len_p = n_quer_loc;
2937 21218 : return norm_query;
2938 : }
2939 :
2940 : /*
2941 : * Given a valid SQL string and an array of constant-location records,
2942 : * fill in the textual lengths of those constants.
2943 : *
2944 : * The constants may use any allowed constant syntax, such as float literals,
2945 : * bit-strings, single-quoted strings and dollar-quoted strings. This is
2946 : * accomplished by using the public API for the core scanner.
2947 : *
2948 : * It is the caller's job to ensure that the string is a valid SQL statement
2949 : * with constants at the indicated locations. Since in practice the string
2950 : * has already been parsed, and the locations that the caller provides will
2951 : * have originated from within the authoritative parser, this should not be
2952 : * a problem.
2953 : *
2954 : * Duplicate constant pointers are possible, and will have their lengths
2955 : * marked as '-1', so that they are later ignored. (Actually, we assume the
2956 : * lengths were initialized as -1 to start with, and don't change them here.)
2957 : *
2958 : * If query_loc > 0, then "query" has been advanced by that much compared to
2959 : * the original string start, so we need to translate the provided locations
2960 : * to compensate. (This lets us avoid re-scanning statements before the one
2961 : * of interest, so it's worth doing.)
2962 : *
2963 : * N.B. There is an assumption that a '-' character at a Const location begins
2964 : * a negative numeric constant. This precludes there ever being another
2965 : * reason for a constant to start with a '-'.
2966 : */
2967 : static void
2968 21218 : fill_in_constant_lengths(JumbleState *jstate, const char *query,
2969 : int query_loc)
2970 : {
2971 : LocationLen *locs;
2972 : core_yyscan_t yyscanner;
2973 : core_yy_extra_type yyextra;
2974 : core_YYSTYPE yylval;
2975 : YYLTYPE yylloc;
2976 21218 : int last_loc = -1;
2977 : int i;
2978 :
2979 : /*
2980 : * Sort the records by location so that we can process them in order while
2981 : * scanning the query text.
2982 : */
2983 21218 : if (jstate->clocations_count > 1)
2984 13426 : qsort(jstate->clocations, jstate->clocations_count,
2985 : sizeof(LocationLen), comp_location);
2986 21218 : locs = jstate->clocations;
2987 :
2988 : /* initialize the flex scanner --- should match raw_parser() */
2989 21218 : yyscanner = scanner_init(query,
2990 : &yyextra,
2991 : &ScanKeywords,
2992 : ScanKeywordTokens);
2993 :
2994 : /* we don't want to re-emit any escape string warnings */
2995 21218 : yyextra.escape_string_warning = false;
2996 :
2997 : /* Search for each constant, in sequence */
2998 83702 : for (i = 0; i < jstate->clocations_count; i++)
2999 : {
3000 62484 : int loc = locs[i].location;
3001 : int tok;
3002 :
3003 : /* Adjust recorded location if we're dealing with partial string */
3004 62484 : loc -= query_loc;
3005 :
3006 : Assert(loc >= 0);
3007 :
3008 62484 : if (locs[i].squashed)
3009 1254 : continue; /* squashable list, ignore */
3010 :
3011 61230 : if (loc <= last_loc)
3012 482 : continue; /* Duplicate constant, ignore */
3013 :
3014 : /* Lex tokens until we find the desired constant */
3015 : for (;;)
3016 : {
3017 475124 : tok = core_yylex(&yylval, &yylloc, yyscanner);
3018 :
3019 : /* We should not hit end-of-string, but if we do, behave sanely */
3020 475124 : if (tok == 0)
3021 0 : break; /* out of inner for-loop */
3022 :
3023 : /*
3024 : * We should find the token position exactly, but if we somehow
3025 : * run past it, work with that.
3026 : */
3027 475124 : if (yylloc >= loc)
3028 : {
3029 60748 : if (query[loc] == '-')
3030 : {
3031 : /*
3032 : * It's a negative value - this is the one and only case
3033 : * where we replace more than a single token.
3034 : *
3035 : * Do not compensate for the core system's special-case
3036 : * adjustment of location to that of the leading '-'
3037 : * operator in the event of a negative constant. It is
3038 : * also useful for our purposes to start from the minus
3039 : * symbol. In this way, queries like "select * from foo
3040 : * where bar = 1" and "select * from foo where bar = -2"
3041 : * will have identical normalized query strings.
3042 : */
3043 736 : tok = core_yylex(&yylval, &yylloc, yyscanner);
3044 736 : if (tok == 0)
3045 0 : break; /* out of inner for-loop */
3046 : }
3047 :
3048 : /*
3049 : * We now rely on the assumption that flex has placed a zero
3050 : * byte after the text of the current token in scanbuf.
3051 : */
3052 60748 : locs[i].length = strlen(yyextra.scanbuf + loc);
3053 60748 : break; /* out of inner for-loop */
3054 : }
3055 : }
3056 :
3057 : /* If we hit end-of-string, give up, leaving remaining lengths -1 */
3058 60748 : if (tok == 0)
3059 0 : break;
3060 :
3061 60748 : last_loc = loc;
3062 : }
3063 :
3064 21218 : scanner_finish(yyscanner);
3065 21218 : }
3066 :
3067 : /*
3068 : * comp_location: comparator for qsorting LocationLen structs by location
3069 : */
3070 : static int
3071 69510 : comp_location(const void *a, const void *b)
3072 : {
3073 69510 : int l = ((const LocationLen *) a)->location;
3074 69510 : int r = ((const LocationLen *) b)->location;
3075 :
3076 69510 : return pg_cmp_s32(l, r);
3077 : }
|