Line data Source code
1 : /* -------------------------------------------------------------------------
2 : *
3 : * pgstat_io.c
4 : * Implementation of IO statistics.
5 : *
6 : * This file contains the implementation of IO statistics. It is kept separate
7 : * from pgstat.c to enforce the line between the statistics access / storage
8 : * implementation and the details about individual types of statistics.
9 : *
10 : * Copyright (c) 2021-2023, PostgreSQL Global Development Group
11 : *
12 : * IDENTIFICATION
13 : * src/backend/utils/activity/pgstat_io.c
14 : * -------------------------------------------------------------------------
15 : */
16 :
17 : #include "postgres.h"
18 :
19 : #include "executor/instrument.h"
20 : #include "storage/bufmgr.h"
21 : #include "utils/pgstat_internal.h"
22 :
23 :
24 : typedef struct PgStat_PendingIO
25 : {
26 : PgStat_Counter counts[IOOBJECT_NUM_TYPES][IOCONTEXT_NUM_TYPES][IOOP_NUM_TYPES];
27 : instr_time pending_times[IOOBJECT_NUM_TYPES][IOCONTEXT_NUM_TYPES][IOOP_NUM_TYPES];
28 : } PgStat_PendingIO;
29 :
30 :
31 : static PgStat_PendingIO PendingIOStats;
32 : bool have_iostats = false;
33 :
34 :
35 : /*
36 : * Check that stats have not been counted for any combination of IOObject,
37 : * IOContext, and IOOp that is not tracked for the passed-in BackendType. If
38 : * stats are tracked for this combination and IO times are non-zero, counts
39 : * should be non-zero.
40 : *
41 : * The passed-in PgStat_BktypeIO must contain stats from the BackendType
42 : * specified by the second parameter. Caller is responsible for locking the
43 : * passed-in PgStat_BktypeIO, if needed.
44 : */
45 : bool
46 0 : pgstat_bktype_io_stats_valid(PgStat_BktypeIO *backend_io,
47 : BackendType bktype)
48 : {
49 0 : for (int io_object = 0; io_object < IOOBJECT_NUM_TYPES; io_object++)
50 : {
51 0 : for (int io_context = 0; io_context < IOCONTEXT_NUM_TYPES; io_context++)
52 : {
53 0 : for (int io_op = 0; io_op < IOOP_NUM_TYPES; io_op++)
54 : {
55 : /* we do track it */
56 0 : if (pgstat_tracks_io_op(bktype, io_object, io_context, io_op))
57 : {
58 : /* ensure that if IO times are non-zero, counts are > 0 */
59 0 : if (backend_io->times[io_object][io_context][io_op] != 0 &&
60 0 : backend_io->counts[io_object][io_context][io_op] <= 0)
61 0 : return false;
62 :
63 0 : continue;
64 : }
65 :
66 : /* we don't track it, so the count must be zero */
67 0 : if (backend_io->counts[io_object][io_context][io_op] != 0)
68 0 : return false;
69 : }
70 : }
71 : }
72 :
73 0 : return true;
74 : }
75 :
76 : void
77 134319570 : pgstat_count_io_op(IOObject io_object, IOContext io_context, IOOp io_op)
78 : {
79 134319570 : pgstat_count_io_op_n(io_object, io_context, io_op, 1);
80 134319570 : }
81 :
82 : void
83 138962764 : pgstat_count_io_op_n(IOObject io_object, IOContext io_context, IOOp io_op, uint32 cnt)
84 : {
85 : Assert((unsigned int) io_object < IOOBJECT_NUM_TYPES);
86 : Assert((unsigned int) io_context < IOCONTEXT_NUM_TYPES);
87 : Assert((unsigned int) io_op < IOOP_NUM_TYPES);
88 : Assert(pgstat_tracks_io_op(MyBackendType, io_object, io_context, io_op));
89 :
90 138962764 : PendingIOStats.counts[io_object][io_context][io_op] += cnt;
91 :
92 138962764 : have_iostats = true;
93 138962764 : }
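
/*
 * Editor's illustrative sketch (not part of pgstat_io.c, no coverage data):
 * how a caller counts an IO operation that needs no timing, e.g. recording a
 * shared-buffer hit on a permanent relation. The wrapper function name is
 * hypothetical.
 */
static void
example_count_buffer_hit(void)
{
	/* one hit on a permanent relation in the normal IO context */
	pgstat_count_io_op(IOOBJECT_RELATION, IOCONTEXT_NORMAL, IOOP_HIT);
}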
94 :
95 : instr_time
96 4643220 : pgstat_prepare_io_time(void)
97 : {
98 : instr_time io_start;
99 :
100 4643220 : if (track_io_timing)
101 4 : INSTR_TIME_SET_CURRENT(io_start);
102 : else
103 4643216 : INSTR_TIME_SET_ZERO(io_start);
104 :
105 4643220 : return io_start;
106 : }
107 :
108 : /*
109 : * Like pgstat_count_io_op_n() except it also accumulates time.
110 : */
111 : void
112 4643194 : pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op,
113 : instr_time start_time, uint32 cnt)
114 : {
115 4643194 : if (track_io_timing)
116 : {
117 : instr_time io_time;
118 :
119 4 : INSTR_TIME_SET_CURRENT(io_time);
120 4 : INSTR_TIME_SUBTRACT(io_time, start_time);
121 :
122 4 : if (io_op == IOOP_WRITE)
123 : {
124 2 : pgstat_count_buffer_write_time(INSTR_TIME_GET_MICROSEC(io_time));
125 2 : if (io_object == IOOBJECT_RELATION)
126 2 : INSTR_TIME_ADD(pgBufferUsage.blk_write_time, io_time);
127 : }
128 2 : else if (io_op == IOOP_READ)
129 : {
130 2 : pgstat_count_buffer_read_time(INSTR_TIME_GET_MICROSEC(io_time));
131 2 : if (io_object == IOOBJECT_RELATION)
132 2 : INSTR_TIME_ADD(pgBufferUsage.blk_read_time, io_time);
133 : }
134 :
135 4 : INSTR_TIME_ADD(PendingIOStats.pending_times[io_object][io_context][io_op],
136 : io_time);
137 : }
138 :
139 4643194 : pgstat_count_io_op_n(io_object, io_context, io_op, cnt);
140 4643194 : }
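
/*
 * Editor's illustrative sketch (not part of pgstat_io.c, no coverage data):
 * the intended pairing of pgstat_prepare_io_time() and
 * pgstat_count_io_op_time() around a physical IO, modeled loosely on the
 * buffer-manager callers. my_read_block() is a hypothetical stand-in for the
 * actual read.
 */
extern void my_read_block(void);	/* hypothetical IO routine */

static void
example_timed_read(void)
{
	instr_time	io_start = pgstat_prepare_io_time();

	my_read_block();

	/* one block read from a permanent relation in the normal IO context */
	pgstat_count_io_op_time(IOOBJECT_RELATION, IOCONTEXT_NORMAL, IOOP_READ,
							io_start, 1);
}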
141 :
142 : PgStat_IO *
143 112 : pgstat_fetch_stat_io(void)
144 : {
145 112 : pgstat_snapshot_fixed(PGSTAT_KIND_IO);
146 :
147 112 : return &pgStatLocal.snapshot.io;
148 : }
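
/*
 * Editor's illustrative sketch (not part of pgstat_io.c, no coverage data):
 * reading one cell out of the snapshot returned above, roughly what a
 * consumer such as the pg_stat_io view code would do. The wrapper function
 * name is hypothetical.
 */
static PgStat_Counter
example_relation_reads(BackendType bktype)
{
	PgStat_IO  *io_stats = pgstat_fetch_stat_io();

	/* reads of permanent relations in the normal IO context, for one backend type */
	return io_stats->stats[bktype].counts[IOOBJECT_RELATION][IOCONTEXT_NORMAL][IOOP_READ];
}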
149 :
150 : /*
151 : * Flush out locally pending IO statistics
152 : *
153 : * If no stats have been recorded, this function returns false.
154 : *
155 : * If nowait is true and the lock could not be acquired, this function
156 : * returns true. Otherwise, it returns false.
157 : */
158 : bool
159 220306 : pgstat_flush_io(bool nowait)
160 : {
161 : LWLock *bktype_lock;
162 : PgStat_BktypeIO *bktype_shstats;
163 :
164 220306 : if (!have_iostats)
165 51598 : return false;
166 :
167 168708 : bktype_lock = &pgStatLocal.shmem->io.locks[MyBackendType];
168 168708 : bktype_shstats =
169 168708 : &pgStatLocal.shmem->io.stats.stats[MyBackendType];
170 :
171 168708 : if (!nowait)
172 148772 : LWLockAcquire(bktype_lock, LW_EXCLUSIVE);
173 19936 : else if (!LWLockConditionalAcquire(bktype_lock, LW_EXCLUSIVE))
174 0 : return true;
175 :
176 506124 : for (int io_object = 0; io_object < IOOBJECT_NUM_TYPES; io_object++)
177 : {
178 1687080 : for (int io_context = 0; io_context < IOCONTEXT_NUM_TYPES; io_context++)
179 : {
180 12146976 : for (int io_op = 0; io_op < IOOP_NUM_TYPES; io_op++)
181 : {
182 : instr_time time;
183 :
184 10797312 : bktype_shstats->counts[io_object][io_context][io_op] +=
185 10797312 : PendingIOStats.counts[io_object][io_context][io_op];
186 :
187 10797312 : time = PendingIOStats.pending_times[io_object][io_context][io_op];
188 :
189 10797312 : bktype_shstats->times[io_object][io_context][io_op] +=
190 10797312 : INSTR_TIME_GET_MICROSEC(time);
191 : }
192 : }
193 : }
194 :
195 : Assert(pgstat_bktype_io_stats_valid(bktype_shstats, MyBackendType));
196 :
197 168708 : LWLockRelease(bktype_lock);
198 :
199 168708 : memset(&PendingIOStats, 0, sizeof(PendingIOStats));
200 :
201 168708 : have_iostats = false;
202 :
203 168708 : return false;
204 : }
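
/*
 * Editor's illustrative sketch (not part of pgstat_io.c, no coverage data):
 * the return-value contract described above. A caller that must not block
 * passes nowait = true and treats a true result as "pending IO stats remain,
 * retry later"; a caller that can block passes nowait = false. The wrapper
 * is hypothetical.
 */
static bool
example_flush_io_nonblocking(void)
{
	/* true means the lock was busy and pending data is still unflushed */
	return pgstat_flush_io(true);
}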
205 :
206 : const char *
207 8064 : pgstat_get_io_context_name(IOContext io_context)
208 : {
209 8064 : switch (io_context)
210 : {
211 2016 : case IOCONTEXT_BULKREAD:
212 2016 : return "bulkread";
213 2016 : case IOCONTEXT_BULKWRITE:
214 2016 : return "bulkwrite";
215 2016 : case IOCONTEXT_NORMAL:
216 2016 : return "normal";
217 2016 : case IOCONTEXT_VACUUM:
218 2016 : return "vacuum";
219 : }
220 :
221 0 : elog(ERROR, "unrecognized IOContext value: %d", io_context);
222 : pg_unreachable();
223 : }
224 :
225 : const char *
226 2016 : pgstat_get_io_object_name(IOObject io_object)
227 : {
228 2016 : switch (io_object)
229 : {
230 1008 : case IOOBJECT_RELATION:
231 1008 : return "relation";
232 1008 : case IOOBJECT_TEMP_RELATION:
233 1008 : return "temp relation";
234 : }
235 :
236 0 : elog(ERROR, "unrecognized IOObject value: %d", io_object);
237 : pg_unreachable();
238 : }
239 :
240 : void
241 888 : pgstat_io_reset_all_cb(TimestampTz ts)
242 : {
243 13320 : for (int i = 0; i < BACKEND_NUM_TYPES; i++)
244 : {
245 12432 : LWLock *bktype_lock = &pgStatLocal.shmem->io.locks[i];
246 12432 : PgStat_BktypeIO *bktype_shstats = &pgStatLocal.shmem->io.stats.stats[i];
247 :
248 12432 : LWLockAcquire(bktype_lock, LW_EXCLUSIVE);
249 :
250 : /*
251 : * Use the lock in the first BackendType's PgStat_BktypeIO to protect
252 : * the reset timestamp as well.
253 : */
254 12432 : if (i == 0)
255 888 : pgStatLocal.shmem->io.stats.stat_reset_timestamp = ts;
256 :
257 12432 : memset(bktype_shstats, 0, sizeof(*bktype_shstats));
258 12432 : LWLockRelease(bktype_lock);
259 : }
260 888 : }
261 :
262 : void
263 2078 : pgstat_io_snapshot_cb(void)
264 : {
265 31170 : for (int i = 0; i < BACKEND_NUM_TYPES; i++)
266 : {
267 29092 : LWLock *bktype_lock = &pgStatLocal.shmem->io.locks[i];
268 29092 : PgStat_BktypeIO *bktype_shstats = &pgStatLocal.shmem->io.stats.stats[i];
269 29092 : PgStat_BktypeIO *bktype_snap = &pgStatLocal.snapshot.io.stats[i];
270 :
271 29092 : LWLockAcquire(bktype_lock, LW_SHARED);
272 :
273 : /*
274 : * Use the lock in the first BackendType's PgStat_BktypeIO to protect
275 : * the reset timestamp as well.
276 : */
277 29092 : if (i == 0)
278 2078 : pgStatLocal.snapshot.io.stat_reset_timestamp =
279 2078 : pgStatLocal.shmem->io.stats.stat_reset_timestamp;
280 :
281 : /* use struct assignment for better type safety */
282 29092 : *bktype_snap = *bktype_shstats;
283 29092 : LWLockRelease(bktype_lock);
284 : }
285 2078 : }
286 :
287 : /*
288 : * IO statistics are not collected for all BackendTypes.
289 : *
290 : * The following BackendTypes do not participate in the cumulative stats
291 : * subsystem or do not perform IO that we currently track:
292 : * - Syslogger because it is not connected to shared memory
293 : * - Archiver because most relevant archiving IO is delegated to a
294 : * specialized command or module
295 : * - WAL Receiver and WAL Writer IO is not tracked in pg_stat_io for now
296 : *
297 : * Function returns true if BackendType participates in the cumulative stats
298 : * subsystem for IO and false if it does not.
299 : *
300 : * When adding a new BackendType, also consider adding relevant restrictions to
301 : * pgstat_tracks_io_object() and pgstat_tracks_io_op().
302 : */
303 : bool
304 36512 : pgstat_tracks_io_bktype(BackendType bktype)
305 : {
306 : /*
307 : * List every type so that new backend types trigger a compiler warning about
308 : * needing to adjust this switch.
309 : */
310 36512 : switch (bktype)
311 : {
312 560 : case B_INVALID:
313 : case B_ARCHIVER:
314 : case B_LOGGER:
315 : case B_WAL_RECEIVER:
316 : case B_WAL_WRITER:
317 560 : return false;
318 :
319 35952 : case B_AUTOVAC_LAUNCHER:
320 : case B_AUTOVAC_WORKER:
321 : case B_BACKEND:
322 : case B_BG_WORKER:
323 : case B_BG_WRITER:
324 : case B_CHECKPOINTER:
325 : case B_STANDALONE_BACKEND:
326 : case B_STARTUP:
327 : case B_WAL_SENDER:
328 35952 : return true;
329 : }
330 :
331 0 : return false;
332 : }
333 :
334 : /*
335 : * Some BackendTypes do not perform IO on certain IOObjects or in certain
336 : * IOContexts. Some IOObjects are never operated on in some IOContexts. Check
337 : * that the given BackendType is expected to do IO in the given IOContext and
338 : * on the given IOObject and that the given IOObject is expected to be operated
339 : * on in the given IOContext.
340 : */
341 : bool
342 34944 : pgstat_tracks_io_object(BackendType bktype, IOObject io_object,
343 : IOContext io_context)
344 : {
345 : bool no_temp_rel;
346 :
347 : /*
348 : * Some BackendTypes should never track IO statistics.
349 : */
350 34944 : if (!pgstat_tracks_io_bktype(bktype))
351 0 : return false;
352 :
353 : /*
354 : * Currently, IO on temporary relations can only occur in the
355 : * IOCONTEXT_NORMAL IOContext.
356 : */
357 34944 : if (io_context != IOCONTEXT_NORMAL &&
358 : io_object == IOOBJECT_TEMP_RELATION)
359 3024 : return false;
360 :
361 : /*
362 : * In core Postgres, only regular backends and WAL Sender processes
363 : * executing queries will use local buffers and operate on temporary
364 : * relations. Parallel workers will not use local buffers (see
365 : * InitLocalBuffers()); however, extensions leveraging background workers
366 : * have no such limitation, so track IO on IOOBJECT_TEMP_RELATION for
367 : * BackendType B_BG_WORKER.
368 : */
369 29568 : no_temp_rel = bktype == B_AUTOVAC_LAUNCHER || bktype == B_BG_WRITER ||
370 26656 : bktype == B_CHECKPOINTER || bktype == B_AUTOVAC_WORKER ||
371 61488 : bktype == B_STANDALONE_BACKEND || bktype == B_STARTUP;
372 :
373 31920 : if (no_temp_rel && io_context == IOCONTEXT_NORMAL &&
374 : io_object == IOOBJECT_TEMP_RELATION)
375 672 : return false;
376 :
377 : /*
378 : * Some BackendTypes do not currently perform any IO in certain
379 : * IOContexts, and, while it may not be inherently incorrect for them to
380 : * do so, excluding those rows from the view makes the view easier to use.
381 : */
382 31248 : if ((bktype == B_CHECKPOINTER || bktype == B_BG_WRITER) &&
383 2464 : (io_context == IOCONTEXT_BULKREAD ||
384 2240 : io_context == IOCONTEXT_BULKWRITE ||
385 : io_context == IOCONTEXT_VACUUM))
386 672 : return false;
387 :
388 30576 : if (bktype == B_AUTOVAC_LAUNCHER && io_context == IOCONTEXT_VACUUM)
389 112 : return false;
390 :
391 30464 : if ((bktype == B_AUTOVAC_WORKER || bktype == B_AUTOVAC_LAUNCHER) &&
392 : io_context == IOCONTEXT_BULKWRITE)
393 224 : return false;
394 :
395 30240 : return true;
396 : }
397 :
398 : /*
399 : * Some BackendTypes will never do certain IOOps and some IOOps should not
400 : * occur in certain IOContexts or on certain IOObjects. Check that the given
401 : * IOOp is valid for the given BackendType in the given IOContext and on the
402 : * given IOObject. Note that there are currently no cases of an IOOp being
403 : * invalid for a particular BackendType only within a certain IOContext and/or
404 : * only on a certain IOObject.
405 : */
406 : bool
407 26880 : pgstat_tracks_io_op(BackendType bktype, IOObject io_object,
408 : IOContext io_context, IOOp io_op)
409 : {
410 : bool strategy_io_context;
411 :
412 : /* if (io_context, io_object) will never collect stats, we're done */
413 26880 : if (!pgstat_tracks_io_object(bktype, io_object, io_context))
414 0 : return false;
415 :
416 : /*
417 : * Some BackendTypes will not do certain IOOps.
418 : */
419 26880 : if ((bktype == B_BG_WRITER || bktype == B_CHECKPOINTER) &&
420 1568 : (io_op == IOOP_READ || io_op == IOOP_EVICT || io_op == IOOP_HIT))
421 672 : return false;
422 :
423 26208 : if ((bktype == B_AUTOVAC_LAUNCHER || bktype == B_BG_WRITER ||
424 2912 : bktype == B_CHECKPOINTER) && io_op == IOOP_EXTEND)
425 448 : return false;
426 :
427 : /*
428 : * Temporary tables are not logged and thus do not require fsync'ing.
429 : * Writeback is not requested for temporary tables.
430 : */
431 25760 : if (io_object == IOOBJECT_TEMP_RELATION &&
432 2352 : (io_op == IOOP_FSYNC || io_op == IOOP_WRITEBACK))
433 672 : return false;
434 :
435 : /*
436 : * Some IOOps are not valid in certain IOContexts and some IOOps are only
437 : * valid in certain contexts.
438 : */
439 25088 : if (io_context == IOCONTEXT_BULKREAD && io_op == IOOP_EXTEND)
440 672 : return false;
441 :
442 18928 : strategy_io_context = io_context == IOCONTEXT_BULKREAD ||
443 43344 : io_context == IOCONTEXT_BULKWRITE || io_context == IOCONTEXT_VACUUM;
444 :
445 : /*
446 : * IOOP_REUSE is only relevant when a BufferAccessStrategy is in use.
447 : */
448 24416 : if (!strategy_io_context && io_op == IOOP_REUSE)
449 1344 : return false;
450 :
451 : /*
452 : * IOOP_FSYNC IOOps done by a backend using a BufferAccessStrategy are
453 : * counted in the IOCONTEXT_NORMAL IOContext. See comment in
454 : * register_dirty_segment() for more details.
455 : */
456 23072 : if (strategy_io_context && io_op == IOOP_FSYNC)
457 2016 : return false;
458 :
459 :
460 21056 : return true;
461 : }
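
/*
 * Editor's illustrative sketch (not part of pgstat_io.c, no coverage data):
 * how the pgstat_tracks_io_*() predicates above are typically consulted to
 * skip combinations that are never tracked, e.g. when emitting rows of
 * counts. The function name and the emit_row callback are hypothetical.
 */
static void
example_emit_tracked_counts(BackendType bktype, PgStat_BktypeIO *bktype_stats,
							void (*emit_row) (int io_object, int io_context,
											  int io_op, PgStat_Counter count))
{
	for (int io_object = 0; io_object < IOOBJECT_NUM_TYPES; io_object++)
		for (int io_context = 0; io_context < IOCONTEXT_NUM_TYPES; io_context++)
			for (int io_op = 0; io_op < IOOP_NUM_TYPES; io_op++)
			{
				if (!pgstat_tracks_io_op(bktype, io_object, io_context, io_op))
					continue;

				emit_row(io_object, io_context, io_op,
						 bktype_stats->counts[io_object][io_context][io_op]);
			}
}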