From bc9a36bad14b014340244bfc35a20df6809a5568 Mon Sep 17 00:00:00 2001
From: Miroslav Lichvar <mlichvar@redhat.com>
Date: Thu, 27 Feb 2020 15:35:31 +0100
Subject: [PATCH] Fix rwlock to be thread-safe
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is a backport of the following commits

commit 4cf275ba8aa1caf47ed763b51c37fa561005cb8d
Author: Ondřej Surý <ondrej@isc.org>
Date:   Wed Feb 12 09:17:55 2020 +0100

    Replace non-loop usage of atomic_compare_exchange_weak with strong variant

commit b43f5e023885dac9f1ffdace54720150768a333b
Author: Ondřej Surý <ondrej@isc.org>
Date:   Sat Feb 1 10:48:20 2020 +0100

    Convert all atomic operations in isc_rwlock to release-acquire memory ordering

commit 49462cf9747261cbc39d5fa4c691b64ac5472af4
Author: Ondřej Surý <ondrej@sury.org>
Date:   Tue May 14 00:19:11 2019 +0700

    Make isc_rwlock.c thread-safe

commit 9d5df99a9d9d13c9487969b6fa3818a8b83b4ee2
Author: Ondřej Surý <ondrej@sury.org>
Date:   Thu Aug 23 15:30:06 2018 +0200

    Directly use return value of atomic_compare_exchange_strong_explicit insteaf of comparing expected value

commit b5709e5531d9d45f9fc3db129c11ad474477d7b6
Author: Ondřej Surý <ondrej@sury.org>
Date:   Fri Aug 17 19:21:12 2018 +0200

    Explicitly load atomic values in lib/isc/rwlock.c
---
 lib/isc/rwlock.c | 275 ++++++++++++++++++-----------------------------
 1 file changed, 107 insertions(+), 168 deletions(-)

diff --git a/lib/isc/rwlock.c b/lib/isc/rwlock.c
index 9533c0f828..5591eff719 100644
--- a/lib/isc/rwlock.c
+++ b/lib/isc/rwlock.c
@@ -46,6 +46,26 @@
 #if defined(ISC_RWLOCK_USEATOMIC)
 static isc_result_t
 isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);
+
+#ifndef ISC_RWLOCK_USESTDATOMIC
+#error non-stdatomic support removed
+#endif
+
+#define atomic_load_acquire(o) \
+	atomic_load_explicit((o), memory_order_acquire)
+#define atomic_store_release(o, v) \
+	atomic_store_explicit((o), (v), memory_order_release)
+#define atomic_fetch_add_release(o, v) \
+	atomic_fetch_add_explicit((o), (v), memory_order_release)
+#define atomic_fetch_sub_release(o, v) \
+	atomic_fetch_sub_explicit((o), (v), memory_order_release)
+#define atomic_compare_exchange_weak_acq_rel(o, e, d) \
+	atomic_compare_exchange_weak_explicit((o), (e), (d), \
+					      memory_order_acq_rel, \
+					      memory_order_acquire)
+#define atomic_compare_exchange_strong_acq_rel(o, e, d) \
+	atomic_compare_exchange_strong_explicit( \
+		(o), (e), (d), memory_order_acq_rel, memory_order_acquire)
 #endif
 
 #ifdef ISC_RWLOCK_TRACE
@@ -108,13 +128,13 @@ isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
 	 */
 	rwl->magic = 0;
 
-	rwl->spins = 0;
 #if defined(ISC_RWLOCK_USEATOMIC)
-	rwl->write_requests = 0;
-	rwl->write_completions = 0;
-	rwl->cnt_and_flag = 0;
+	atomic_init(&rwl->spins, 0);
+	atomic_init(&rwl->write_requests, 0);
+	atomic_init(&rwl->write_completions, 0);
+	atomic_init(&rwl->cnt_and_flag, 0);
 	rwl->readers_waiting = 0;
-	rwl->write_granted = 0;
+	atomic_init(&rwl->write_granted, 0);
 	if (read_quota != 0) {
 		UNEXPECTED_ERROR(__FILE__, __LINE__,
 				 "read quota is not supported");
@@ -123,6 +143,7 @@ isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
 		write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
 	rwl->write_quota = write_quota;
 #else
+	rwl->spins = 0;
 	rwl->type = isc_rwlocktype_read;
 	rwl->original = isc_rwlocktype_none;
 	rwl->active = 0;
@@ -178,16 +199,9 @@ void
 isc_rwlock_destroy(isc_rwlock_t *rwl) {
 	REQUIRE(VALID_RWLOCK(rwl));
 
-#if defined(ISC_RWLOCK_USEATOMIC)
-	REQUIRE(rwl->write_requests == rwl->write_completions &&
-		rwl->cnt_and_flag == 0 && rwl->readers_waiting == 0);
-#else
-	LOCK(&rwl->lock);
-	REQUIRE(rwl->active == 0 &&
-		rwl->readers_waiting == 0 &&
-		rwl->writers_waiting == 0);
-	UNLOCK(&rwl->lock);
-#endif
+	REQUIRE(atomic_load_acquire(&rwl->write_requests) ==
+		atomic_load_acquire(&rwl->write_completions) &&
+		atomic_load_acquire(&rwl->cnt_and_flag) == 0 && rwl->readers_waiting == 0);
 
 	rwl->magic = 0;
 	(void)isc_condition_destroy(&rwl->readable);
@@ -274,10 +288,13 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 #endif
 
 	if (type == isc_rwlocktype_read) {
-		if (rwl->write_requests != rwl->write_completions) {
+		if (atomic_load_acquire(&rwl->write_requests) !=
+		    atomic_load_acquire(&rwl->write_completions))
+		{
 			/* there is a waiting or active writer */
 			LOCK(&rwl->lock);
-			if (rwl->write_requests != rwl->write_completions) {
+			if (atomic_load_acquire(&rwl->write_requests) !=
+			    atomic_load_acquire(&rwl->write_completions)) {
 				rwl->readers_waiting++;
 				WAIT(&rwl->readable, &rwl->lock);
 				rwl->readers_waiting--;
@@ -285,23 +302,24 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 			UNLOCK(&rwl->lock);
 		}
 
-#if defined(ISC_RWLOCK_USESTDATOMIC)
-		cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
-						    READER_INCR,
-						    memory_order_relaxed);
-#else
-		cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
-#endif
+		cntflag = atomic_fetch_add_release(&rwl->cnt_and_flag,
+						   READER_INCR);
 		POST(cntflag);
 		while (1) {
-			if ((rwl->cnt_and_flag & WRITER_ACTIVE) == 0)
+			if ((atomic_load_acquire(&rwl->cnt_and_flag)
+			     & WRITER_ACTIVE) == 0)
+			{
 				break;
+			}
 
 			/* A writer is still working */
 			LOCK(&rwl->lock);
 			rwl->readers_waiting++;
-			if ((rwl->cnt_and_flag & WRITER_ACTIVE) != 0)
+			if ((atomic_load_acquire(&rwl->cnt_and_flag)
+			     & WRITER_ACTIVE) != 0)
+			{
 				WAIT(&rwl->readable, &rwl->lock);
+			}
 			rwl->readers_waiting--;
 			UNLOCK(&rwl->lock);
 
@@ -336,20 +354,19 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		 * quota, reset the condition (race among readers doesn't
 		 * matter).
 		 */
-		rwl->write_granted = 0;
+		atomic_store_release(&rwl->write_granted, 0);
 	} else {
 		int32_t prev_writer;
 
 		/* enter the waiting queue, and wait for our turn */
-#if defined(ISC_RWLOCK_USESTDATOMIC)
-		prev_writer = atomic_fetch_add_explicit(&rwl->write_requests, 1,
-							memory_order_relaxed);
-#else
-		prev_writer = isc_atomic_xadd(&rwl->write_requests, 1);
-#endif
-		while (rwl->write_completions != prev_writer) {
+		prev_writer = atomic_fetch_add_release(&rwl->write_requests, 1);
+		while (atomic_load_acquire(&rwl->write_completions)
+		       != prev_writer)
+		{
 			LOCK(&rwl->lock);
-			if (rwl->write_completions != prev_writer) {
+			if (atomic_load_acquire(&rwl->write_completions)
+			    != prev_writer)
+			{
 				WAIT(&rwl->writeable, &rwl->lock);
 				UNLOCK(&rwl->lock);
 				continue;
@@ -359,29 +376,24 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		}
 
 		while (1) {
-#if defined(ISC_RWLOCK_USESTDATOMIC)
 			int_fast32_t cntflag2 = 0;
-			atomic_compare_exchange_strong_explicit
-				(&rwl->cnt_and_flag, &cntflag2, WRITER_ACTIVE,
-				 memory_order_relaxed, memory_order_relaxed);
-#else
-			int32_t cntflag2;
-			cntflag2 = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
-						      WRITER_ACTIVE);
-#endif
-
-			if (cntflag2 == 0)
+			if (atomic_compare_exchange_weak_acq_rel(
+				    &rwl->cnt_and_flag, &cntflag2, WRITER_ACTIVE))
+			{
 				break;
+			}
 
 			/* Another active reader or writer is working. */
 			LOCK(&rwl->lock);
-			if (rwl->cnt_and_flag != 0)
+			if (atomic_load_acquire(&rwl->cnt_and_flag) != 0) {
 				WAIT(&rwl->writeable, &rwl->lock);
+			}
 			UNLOCK(&rwl->lock);
 		}
 
-		INSIST((rwl->cnt_and_flag & WRITER_ACTIVE) != 0);
-		rwl->write_granted++;
+		INSIST((atomic_load_acquire(&rwl->cnt_and_flag)
+			& WRITER_ACTIVE));
+		atomic_fetch_add_release(&rwl->write_granted, 1);
 	}
 
 #ifdef ISC_RWLOCK_TRACE
@@ -395,12 +407,10 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 isc_result_t
 isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 	int32_t cnt = 0;
-	int32_t max_cnt = rwl->spins * 2 + 10;
+	int32_t spins = atomic_load_acquire(&rwl->spins) * 2 + 10;
+	int32_t max_cnt = ISC_MAX(spins, RWLOCK_MAX_ADAPTIVE_COUNT);
 	isc_result_t result = ISC_R_SUCCESS;
 
-	if (max_cnt > RWLOCK_MAX_ADAPTIVE_COUNT)
-		max_cnt = RWLOCK_MAX_ADAPTIVE_COUNT;
-
 	do {
 		if (cnt++ >= max_cnt) {
 			result = isc__rwlock_lock(rwl, type);
@@ -411,7 +421,7 @@ isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 #endif
 	} while (isc_rwlock_trylock(rwl, type) != ISC_R_SUCCESS);
 
-	rwl->spins += (cnt - rwl->spins) / 8;
+	atomic_fetch_add_release(&rwl->spins, (cnt - spins) / 8);
 
 	return (result);
 }
@@ -429,36 +439,28 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 
 	if (type == isc_rwlocktype_read) {
 		/* If a writer is waiting or working, we fail. */
-		if (rwl->write_requests != rwl->write_completions)
+		if (atomic_load_acquire(&rwl->write_requests) !=
+		    atomic_load_acquire(&rwl->write_completions))
 			return (ISC_R_LOCKBUSY);
 
 		/* Otherwise, be ready for reading. */
-#if defined(ISC_RWLOCK_USESTDATOMIC)
-		cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
-						    READER_INCR,
-						    memory_order_relaxed);
-#else
-		cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
-#endif
+		cntflag = atomic_fetch_add_release(&rwl->cnt_and_flag,
+						   READER_INCR);
 		if ((cntflag & WRITER_ACTIVE) != 0) {
 			/*
 			 * A writer is working. We lose, and cancel the read
 			 * request.
 			 */
-#if defined(ISC_RWLOCK_USESTDATOMIC)
-			cntflag = atomic_fetch_sub_explicit
-				(&rwl->cnt_and_flag, READER_INCR,
-				 memory_order_relaxed);
-#else
-			cntflag = isc_atomic_xadd(&rwl->cnt_and_flag,
-						  -READER_INCR);
-#endif
+			cntflag = atomic_fetch_sub_release(
+				&rwl->cnt_and_flag, READER_INCR);
 			/*
 			 * If no other readers are waiting and we've suspended
 			 * new writers in this short period, wake them up.
 			 */
 			if (cntflag == READER_INCR &&
-			    rwl->write_completions != rwl->write_requests) {
+			    atomic_load_acquire(&rwl->write_completions) !=
+			    atomic_load_acquire(&rwl->write_requests))
+			{
 				LOCK(&rwl->lock);
 				BROADCAST(&rwl->writeable);
 				UNLOCK(&rwl->lock);
@@ -468,31 +470,19 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		}
 	} else {
 		/* Try locking without entering the waiting queue. */
-#if defined(ISC_RWLOCK_USESTDATOMIC)
 		int_fast32_t zero = 0;
-		if (!atomic_compare_exchange_strong_explicit
-		    (&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
-		     memory_order_relaxed, memory_order_relaxed))
+		if (!atomic_compare_exchange_strong_acq_rel(
+			    &rwl->cnt_and_flag, &zero, WRITER_ACTIVE))
+		{
 			return (ISC_R_LOCKBUSY);
-#else
-		cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
-					     WRITER_ACTIVE);
-		if (cntflag != 0)
-			return (ISC_R_LOCKBUSY);
-#endif
+		}
 
 		/*
 		 * XXXJT: jump into the queue, possibly breaking the writer
 		 * order.
 		 */
-#if defined(ISC_RWLOCK_USESTDATOMIC)
-		atomic_fetch_sub_explicit(&rwl->write_completions, 1,
-					  memory_order_relaxed);
-#else
-		(void)isc_atomic_xadd(&rwl->write_completions, -1);
-#endif
-
-		rwl->write_granted++;
+		atomic_fetch_sub_release(&rwl->write_completions, 1);
+		atomic_fetch_add_release(&rwl->write_granted, 1);
 	}
 
 #ifdef ISC_RWLOCK_TRACE
@@ -507,14 +497,12 @@ isc_result_t
 isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
 	REQUIRE(VALID_RWLOCK(rwl));
 
-#if defined(ISC_RWLOCK_USESTDATOMIC)
 	{
 		int_fast32_t reader_incr = READER_INCR;
 
 		/* Try to acquire write access. */
-		atomic_compare_exchange_strong_explicit
-			(&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE,
-			 memory_order_relaxed, memory_order_relaxed);
+		atomic_compare_exchange_strong_acq_rel(
+			&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE);
 		/*
 		 * There must have been no writer, and there must have
 		 * been at least one reader.
@@ -527,36 +515,11 @@ isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
 			 * We are the only reader and have been upgraded.
 			 * Now jump into the head of the writer waiting queue.
 			 */
-			atomic_fetch_sub_explicit(&rwl->write_completions, 1,
-						  memory_order_relaxed);
+			atomic_fetch_sub_release(&rwl->write_completions, 1);
 		} else
 			return (ISC_R_LOCKBUSY);
 
 	}
-#else
-	{
-		int32_t prevcnt;
-
-		/* Try to acquire write access. */
-		prevcnt = isc_atomic_cmpxchg(&rwl->cnt_and_flag,
-					     READER_INCR, WRITER_ACTIVE);
-		/*
-		 * There must have been no writer, and there must have
-		 * been at least one reader.
-		 */
-		INSIST((prevcnt & WRITER_ACTIVE) == 0 &&
-		       (prevcnt & ~WRITER_ACTIVE) != 0);
-
-		if (prevcnt == READER_INCR) {
-			/*
-			 * We are the only reader and have been upgraded.
-			 * Now jump into the head of the writer waiting queue.
-			 */
-			(void)isc_atomic_xadd(&rwl->write_completions, -1);
-		} else
-			return (ISC_R_LOCKBUSY);
-	}
-#endif
 
 	return (ISC_R_SUCCESS);
 }
@@ -567,33 +530,15 @@ isc_rwlock_downgrade(isc_rwlock_t *rwl) {
 
 	REQUIRE(VALID_RWLOCK(rwl));
 
-#if defined(ISC_RWLOCK_USESTDATOMIC)
-	{
-		/* Become an active reader. */
-		prev_readers = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
-							 READER_INCR,
-							 memory_order_relaxed);
-		/* We must have been a writer. */
-		INSIST((prev_readers & WRITER_ACTIVE) != 0);
-
-		/* Complete write */
-		atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
-					  memory_order_relaxed);
-		atomic_fetch_add_explicit(&rwl->write_completions, 1,
-					  memory_order_relaxed);
-	}
-#else
-	{
-		/* Become an active reader. */
-		prev_readers = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
-		/* We must have been a writer. */
-		INSIST((prev_readers & WRITER_ACTIVE) != 0);
-
-		/* Complete write */
-		(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
-		(void)isc_atomic_xadd(&rwl->write_completions, 1);
-	}
-#endif
+	/* Become an active reader. */
+	prev_readers = atomic_fetch_add_release(&rwl->cnt_and_flag,
+						READER_INCR);
+	/* We must have been a writer. */
+	INSIST((prev_readers & WRITER_ACTIVE) != 0);
+
+	/* Complete write */
+	atomic_fetch_sub_release(&rwl->cnt_and_flag, WRITER_ACTIVE);
+	atomic_fetch_add_release(&rwl->write_completions, 1);
 
 	/* Resume other readers */
 	LOCK(&rwl->lock);
@@ -614,20 +559,16 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 #endif
 
 	if (type == isc_rwlocktype_read) {
-#if defined(ISC_RWLOCK_USESTDATOMIC)
-		prev_cnt = atomic_fetch_sub_explicit(&rwl->cnt_and_flag,
-						     READER_INCR,
-						     memory_order_relaxed);
-#else
-		prev_cnt = isc_atomic_xadd(&rwl->cnt_and_flag, -READER_INCR);
-#endif
+		prev_cnt = atomic_fetch_sub_release(&rwl->cnt_and_flag,
+						    READER_INCR);
 		/*
 		 * If we're the last reader and any writers are waiting, wake
 		 * them up. We need to wake up all of them to ensure the
 		 * FIFO order.
 		 */
 		if (prev_cnt == READER_INCR &&
-		    rwl->write_completions != rwl->write_requests) {
+		    atomic_load_acquire(&rwl->write_completions) !=
+		    atomic_load_acquire(&rwl->write_requests)) {
 			LOCK(&rwl->lock);
 			BROADCAST(&rwl->writeable);
 			UNLOCK(&rwl->lock);
@@ -639,19 +580,16 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		 * Reset the flag, and (implicitly) tell other writers
 		 * we are done.
 		 */
-#if defined(ISC_RWLOCK_USESTDATOMIC)
-		atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
-					  memory_order_relaxed);
-		atomic_fetch_add_explicit(&rwl->write_completions, 1,
-					  memory_order_relaxed);
-#else
-		(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
-		(void)isc_atomic_xadd(&rwl->write_completions, 1);
-#endif
-
-		if (rwl->write_granted >= rwl->write_quota ||
-		    rwl->write_requests == rwl->write_completions ||
-		    (rwl->cnt_and_flag & ~WRITER_ACTIVE) != 0) {
+		atomic_fetch_sub_release(&rwl->cnt_and_flag, WRITER_ACTIVE);
+		atomic_fetch_add_release(&rwl->write_completions, 1);
+
+		if ((atomic_load_acquire(&rwl->write_granted) >=
+		     rwl->write_quota) ||
+		    (atomic_load_acquire(&rwl->write_requests) ==
+		     atomic_load_acquire(&rwl->write_completions)) ||
+		    (atomic_load_acquire(&rwl->cnt_and_flag)
+		     & ~WRITER_ACTIVE))
+		{
 			/*
 			 * We have passed the write quota, no writer is
 			 * waiting, or some readers are almost ready, pending
@@ -668,7 +606,8 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 			UNLOCK(&rwl->lock);
 		}
 
-		if (rwl->write_requests != rwl->write_completions &&
+		if ((atomic_load_acquire(&rwl->write_requests) !=
+		     atomic_load_acquire(&rwl->write_completions)) &&
 		    wakeup_writers) {
 			LOCK(&rwl->lock);
 			BROADCAST(&rwl->writeable);
-- 
2.21.0
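
Illustrative note (not part of the patch above): a minimal, self-contained C11 sketch
of the release-acquire compare-and-swap pattern that the backported
atomic_compare_exchange_strong_acq_rel() macro expands to. The toy_lock and
toy_try_write names are invented for this example and do not appear in rwlock.c.

/*
 * Sketch of the acquire/release CAS used to claim the writer flag:
 * memory_order_acq_rel on success, memory_order_acquire on failure,
 * matching the ordering pair defined by the macros in the patch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_WRITER_ACTIVE 0x1

typedef struct {
	atomic_int_fast32_t cnt_and_flag;
} toy_lock;

/* Try to become the writer: succeeds only if no reader or writer is active. */
static bool
toy_try_write(toy_lock *l) {
	int_fast32_t zero = 0;

	return atomic_compare_exchange_strong_explicit(
		&l->cnt_and_flag, &zero, TOY_WRITER_ACTIVE,
		memory_order_acq_rel, memory_order_acquire);
}

int
main(void) {
	toy_lock l;

	atomic_init(&l.cnt_and_flag, 0);
	printf("first try:  %d\n", toy_try_write(&l));  /* 1: flag acquired */
	printf("second try: %d\n", toy_try_write(&l));  /* 0: already held */

	/* Drop the flag with release ordering, as atomic_fetch_sub_release() would. */
	atomic_fetch_sub_explicit(&l.cnt_and_flag, TOY_WRITER_ACTIVE,
				  memory_order_release);
	return 0;
}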