Line   Hit count   Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * s_lock.c
4 : * Implementation of spinlocks.
5 : *
6 : * When waiting for a contended spinlock we loop tightly for a while, then
7 : * delay using pg_usleep() and try again. Preferably, "a while" should be a
8 : * small multiple of the maximum time we expect a spinlock to be held. 100
9 : * iterations seems about right as an initial guess. However, on a
10 : * uniprocessor the loop is a waste of cycles, while in a multi-CPU scenario
11 : * it's usually better to spin a bit longer than to call the kernel, so we try
12 : * to adapt the spin loop count depending on whether we seem to be in a
13 : * uniprocessor or multiprocessor.
14 : *
15 : * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd
16 : * be wrong; there are platforms where that can result in a "stuck
17 : * spinlock" failure. This has been seen particularly on Alphas; it seems
18 : * that the first TAS after returning from kernel space will always fail
19 : * on that hardware.
20 : *
21 : * Once we do decide to block, we use randomly increasing pg_usleep()
22 : * delays. The first delay is 1 msec, then the delay randomly increases to
23 : * about one second, after which we reset to 1 msec and start again. The
24 : * idea here is that in the presence of heavy contention we need to
25 : * increase the delay, else the spinlock holder may never get to run and
26 : * release the lock. (Consider the situation where the spinlock holder has been
27 : * nice'd down in priority by the scheduler --- it will not get scheduled
28 : * until all would-be acquirers are sleeping, so if we always use a 1-msec
29 : * sleep, there is a real possibility of starvation.) But we can't just
30 : * clamp the delay to an upper bound, else it would take a long time to
31 : * make a reasonable number of tries.
32 : *
33 : * We time out and declare error after NUM_DELAYS delays (thus, exactly
34 : * that many tries). With the given settings, this will usually take 2 or
35 : * so minutes. It seems better to fix the total number of tries (and thus
36 : * the probability of unintended failure) than to fix the total time
37 : * spent.
38 : *
39 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
40 : * Portions Copyright (c) 1994, Regents of the University of California
41 : *
42 : *
43 : * IDENTIFICATION
44 : * src/backend/storage/lmgr/s_lock.c
45 : *
46 : *-------------------------------------------------------------------------
47 : */
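/*
 * Editorial illustration (a sketch, not part of this file's API): most callers
 * never reach s_lock() directly; they go through the SpinLockAcquire() /
 * SpinLockRelease() macros in storage/spin.h.  Code that rolls its own wait
 * loop can reuse the adaptive delay logic in this file via SpinDelayStatus,
 * roughly like this (the waited-for condition is hypothetical):
 *
 *     SpinDelayStatus delayStatus;
 *
 *     init_local_spin_delay(&delayStatus);
 *     while (!condition_we_are_waiting_for())
 *         perform_spin_delay(&delayStatus);
 *     finish_spin_delay(&delayStatus);
 */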
48 : #include "postgres.h"
49 :
50 : #include <time.h>
51 : #include <unistd.h>
52 :
53 : #include "common/pg_prng.h"
54 : #include "port/atomics.h"
55 : #include "storage/s_lock.h"
56 : #include "utils/wait_event.h"
57 :
58 : #define MIN_SPINS_PER_DELAY 10
59 : #define MAX_SPINS_PER_DELAY 1000
60 : #define NUM_DELAYS 1000
61 : #define MIN_DELAY_USEC 1000L
62 : #define MAX_DELAY_USEC 1000000L
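/*
 * Editorial note on the constants above: before each sleep we spin between
 * MIN_SPINS_PER_DELAY and MAX_SPINS_PER_DELAY times (adaptively; see
 * finish_spin_delay below), each sleep lasts between 1 msec and 1 sec, and
 * after NUM_DELAYS sleeps we give up and report a stuck spinlock.  As a rough
 * back-of-the-envelope check on the "2 or so minutes" figure in the header:
 * the random growth factor averages about 1.5X per sleep, so the delay climbs
 * from 1 msec to the 1-sec cap in somewhere around 15-20 sleeps before
 * wrapping, and 1000 such sleeps add up to on the order of a couple of
 * minutes.
 */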
63 :
64 : #ifdef S_LOCK_TEST
65 : /*
66 : * These are needed by pgstat_report_wait_start in the standalone compile of
67 : * s_lock_test.
68 : */
69 : static uint32 local_my_wait_event_info;
70 : uint32 *my_wait_event_info = &local_my_wait_event_info;
71 : #endif
72 :
73 : static int spins_per_delay = DEFAULT_SPINS_PER_DELAY;
74 :
75 :
76 : /*
77 : * s_lock_stuck() - complain about a stuck spinlock
78 : */
79 : static void
80 0 : s_lock_stuck(const char *file, int line, const char *func)
81 : {
82 0 : if (!func)
83 0 : func = "(unknown)";
84 : #if defined(S_LOCK_TEST)
85 : fprintf(stderr,
86 : "\nStuck spinlock detected at %s, %s:%d.\n",
87 : func, file, line);
88 : exit(1);
89 : #else
90 0 : elog(PANIC, "stuck spinlock detected at %s, %s:%d",
91 : func, file, line);
92 : #endif
93 : }
94 :
95 : /*
96 : * s_lock(lock) - platform-independent portion of waiting for a spinlock.
97 : */
98 : int
99 14426 : s_lock(volatile slock_t *lock, const char *file, int line, const char *func)
100 : {
101 : SpinDelayStatus delayStatus;
102 :
103 14426 : init_spin_delay(&delayStatus, file, line, func);
104 :
105 110762 : while (TAS_SPIN(lock))
106 : {
107 96336 : perform_spin_delay(&delayStatus);
108 : }
109 :
110 14426 : finish_spin_delay(&delayStatus);
111 :
112 14426 : return delayStatus.delays;
113 : }
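/*
 * Editorial sketch: s_lock() is only the contended slow path.  The S_LOCK()
 * macro (see s_lock.h for the authoritative definition) first tries a bare
 * TAS() and calls s_lock() only when that fails, roughly:
 *
 *     #define S_LOCK(lock) \
 *         (TAS(lock) ? s_lock((lock), __FILE__, __LINE__, __func__) : 0)
 *
 * so s_lock() above is never entered for an uncontended acquisition (compare
 * the direct call in the test program at the bottom of this file).
 */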
114 :
115 : #ifdef USE_DEFAULT_S_UNLOCK
116 : void
117 : s_unlock(volatile slock_t *lock)
118 : {
119 : *lock = 0;
120 : }
121 : #endif
122 :
123 : /*
124 : * Wait while spinning on a contended spinlock.
125 : */
126 : void
127 144936 : perform_spin_delay(SpinDelayStatus *status)
128 : {
129 : /* CPU-specific delay each time through the loop */
130 144936 : SPIN_DELAY();
131 :
132 : /* Block the process every spins_per_delay tries */
133 144936 : if (++(status->spins) >= spins_per_delay)
134 : {
135 142 : if (++(status->delays) > NUM_DELAYS)
136 0 : s_lock_stuck(status->file, status->line, status->func);
137 :
138 142 : if (status->cur_delay == 0) /* first time to delay? */
139 78 : status->cur_delay = MIN_DELAY_USEC;
140 :
141 : /*
142 : * Once we start sleeping, the overhead of reporting a wait event is
143 : * justified. Actively spinning easily stands out in profilers, but
144 : * sleeping with an exponential backoff is harder to spot...
145 : *
146 : * We might want to report something more granular at some point, but
147 : * this is better than nothing.
148 : */
149 142 : pgstat_report_wait_start(WAIT_EVENT_SPIN_DELAY);
150 142 : pg_usleep(status->cur_delay);
151 142 : pgstat_report_wait_end();
152 :
153 : #if defined(S_LOCK_TEST)
154 : fprintf(stdout, "*");
155 : fflush(stdout);
156 : #endif
157 :
158 : /* increase delay by a random fraction between 1X and 2X */
159 284 : status->cur_delay += (int) (status->cur_delay *
160 142 : pg_prng_double(&pg_global_prng_state) + 0.5);
161 : /* wrap back to minimum delay when max is exceeded */
162 142 : if (status->cur_delay > MAX_DELAY_USEC)
163 0 : status->cur_delay = MIN_DELAY_USEC;
164 :
165 142 : status->spins = 0;
166 : }
167 144936 : }
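/*
 * Editorial illustration (not compiled): a standalone sketch of the backoff
 * schedule implemented above, using rand() in place of pg_prng_double() and
 * printf() in place of actually sleeping, so the ramp from MIN_DELAY_USEC
 * toward MAX_DELAY_USEC can be eyeballed outside the backend.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	long		cur_delay = 1000L;	/* MIN_DELAY_USEC */
	long		total_usec = 0;
	int			i;

	for (i = 0; i < 1000; i++)	/* NUM_DELAYS */
	{
		total_usec += cur_delay;
		printf("sleep %4d: %7ld usec (cumulative %ld usec)\n",
			   i + 1, cur_delay, total_usec);

		/* increase delay by a random fraction between 1X and 2X */
		cur_delay += (long) (cur_delay * ((double) rand() / RAND_MAX) + 0.5);

		/* wrap back to minimum delay when max is exceeded */
		if (cur_delay > 1000000L)	/* MAX_DELAY_USEC */
			cur_delay = 1000L;
	}
	return 0;
}
#endif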
168 :
169 : /*
170 : * After acquiring a spinlock, update estimates about how long to loop.
171 : *
172 : * If we were able to acquire the lock without delaying, it's a good
173 : * indication we are in a multiprocessor. If we had to delay, it's a sign
174 : * (but not a sure thing) that we are in a uniprocessor. Hence, we
175 : * decrement spins_per_delay slowly when we had to delay, and increase it
176 : * rapidly when we didn't. It's expected that spins_per_delay will
177 : * converge to the minimum value on a uniprocessor and to the maximum
178 : * value on a multiprocessor.
179 : *
180 : * Note: spins_per_delay is local within our current process. We want to
181 : * average these observations across multiple backends, since it's
182 : * relatively rare for this function to even get entered, and so a single
183 : * backend might not live long enough to converge on a good value. That
184 : * is handled by the two routines below.
185 : */
186 : void
187 47764130 : finish_spin_delay(SpinDelayStatus *status)
188 : {
189 47764130 : if (status->cur_delay == 0)
190 : {
191 : /* we never had to delay */
192 47764052 : if (spins_per_delay < MAX_SPINS_PER_DELAY)
193 94378 : spins_per_delay = Min(spins_per_delay + 100, MAX_SPINS_PER_DELAY);
194 : }
195 : else
196 : {
197 78 : if (spins_per_delay > MIN_SPINS_PER_DELAY)
198 78 : spins_per_delay = Max(spins_per_delay - 1, MIN_SPINS_PER_DELAY);
199 : }
200 47764130 : }
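/*
 * Editorial worked example of the adjustment above: starting from the default
 * of 100 (DEFAULT_SPINS_PER_DELAY in s_lock.h), nine consecutive acquisitions
 * that never had to sleep ramp spins_per_delay 100 -> 200 -> ... -> 1000,
 * while each acquisition that did require sleeping nudges it down by just 1,
 * so only sustained contention on a single CPU drifts it back toward 10.
 */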
201 :
202 : /*
203 : * Set local copy of spins_per_delay during backend startup.
204 : *
205 : * NB: this has to be pretty fast as it is called while holding a spinlock
206 : */
207 : void
208 30416 : set_spins_per_delay(int shared_spins_per_delay)
209 : {
210 30416 : spins_per_delay = shared_spins_per_delay;
211 30416 : }
212 :
213 : /*
214 : * Update shared estimate of spins_per_delay during backend exit.
215 : *
216 : * NB: this has to be pretty fast as it is called while holding a spinlock
217 : */
218 : int
219 30412 : update_spins_per_delay(int shared_spins_per_delay)
220 : {
221 : /*
222 : * We use an exponential moving average with a relatively slow adaptation
223 : * rate, so that noise in any one backend's result won't affect the shared
224 : * value too much. As long as both inputs are within the allowed range,
225 : * the result must be too, so we need not worry about clamping the result.
226 : *
227 : * We deliberately truncate rather than round; this is so that single
228 : * adjustments inside a backend can affect the shared estimate (see the
229 : * asymmetric adjustment rules above).
230 : */
231 30412 : return (shared_spins_per_delay * 15 + spins_per_delay) / 16;
232 : }
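/*
 * Editorial worked example of the moving average above: with a shared value
 * of 100 and a local value of 1000, the result is (100*15 + 1000)/16 = 156
 * (truncated).  Truncation also lets a one-unit local decrease show through:
 * shared 101 with local 100 gives 1615/16 = 100, whereas rounding would have
 * left the shared estimate stuck at 101.
 */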
233 :
234 :
235 : /*****************************************************************************/
236 : #if defined(S_LOCK_TEST)
237 :
238 : /*
239 : * test program for verifying a port's spinlock support.
240 : */
241 :
242 : struct test_lock_struct
243 : {
244 : char pad1;
245 : slock_t lock;
246 : char pad2;
247 : };
248 :
249 : volatile struct test_lock_struct test_lock;
250 :
251 : int
252 : main()
253 : {
254 : pg_prng_seed(&pg_global_prng_state, (uint64) time(NULL));
255 :
256 : test_lock.pad1 = test_lock.pad2 = 0x44;
257 :
258 : S_INIT_LOCK(&test_lock.lock);
259 :
260 : if (test_lock.pad1 != 0x44 || test_lock.pad2 != 0x44)
261 : {
262 : printf("S_LOCK_TEST: failed, declared datatype is wrong size\n");
263 : return 1;
264 : }
265 :
266 : if (!S_LOCK_FREE(&test_lock.lock))
267 : {
268 : printf("S_LOCK_TEST: failed, lock not initialized\n");
269 : return 1;
270 : }
271 :
272 : S_LOCK(&test_lock.lock);
273 :
274 : if (test_lock.pad1 != 0x44 || test_lock.pad2 != 0x44)
275 : {
276 : printf("S_LOCK_TEST: failed, declared datatype is wrong size\n");
277 : return 1;
278 : }
279 :
280 : if (S_LOCK_FREE(&test_lock.lock))
281 : {
282 : printf("S_LOCK_TEST: failed, lock not locked\n");
283 : return 1;
284 : }
285 :
286 : S_UNLOCK(&test_lock.lock);
287 :
288 : if (test_lock.pad1 != 0x44 || test_lock.pad2 != 0x44)
289 : {
290 : printf("S_LOCK_TEST: failed, declared datatype is wrong size\n");
291 : return 1;
292 : }
293 :
294 : if (!S_LOCK_FREE(&test_lock.lock))
295 : {
296 : printf("S_LOCK_TEST: failed, lock not unlocked\n");
297 : return 1;
298 : }
299 :
300 : S_LOCK(&test_lock.lock);
301 :
302 : if (test_lock.pad1 != 0x44 || test_lock.pad2 != 0x44)
303 : {
304 : printf("S_LOCK_TEST: failed, declared datatype is wrong size\n");
305 : return 1;
306 : }
307 :
308 : if (S_LOCK_FREE(&test_lock.lock))
309 : {
310 : printf("S_LOCK_TEST: failed, lock not re-locked\n");
311 : return 1;
312 : }
313 :
314 : printf("S_LOCK_TEST: this will print %d stars and then\n", NUM_DELAYS);
315 : printf(" exit with a 'stuck spinlock' message\n");
316 : printf(" if S_LOCK() and TAS() are working.\n");
317 : fflush(stdout);
318 :
319 : s_lock(&test_lock.lock, __FILE__, __LINE__, __func__);
320 :
321 : printf("S_LOCK_TEST: failed, lock not locked\n");
322 : return 1;
323 : }
324 :
325 : #endif /* S_LOCK_TEST */