From f19dec383e24e2aaa40a6bdce2ca0e657ffc6e10 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 27 Sep 2017 09:26:14 -0400
Subject: [PATCH] Ticket 49305 - Need to wrap atomic calls

Bug Description:  Some RHEL 7.5 platforms (ppc 32bit) still do not support
                  all the gcc builtin atomics. This breaks the downstream
                  builds.

Fix Description:  Use wrapper functions for the atomics, using #defines
                  to detect if builtin atomics are supported, otherwise
                  use the generic NSPR atomic functions.

https://pagure.io/389-ds-base/issue/49305

Reviewed by: tbordaz (Thanks!)

(cherry picked from commit af723fd632d355642babeed1dbdb5a308c21fa79)
---
 ldap/servers/slapd/attrsyntax.c                  |   8 +-
 ldap/servers/slapd/back-ldbm/dblayer.c           |  66 +++++-----
 ldap/servers/slapd/entry.c                       |  11 +-
 ldap/servers/slapd/libglobs.c                    | 161 ++++++++++++-----------
 ldap/servers/slapd/log.c                         |   9 +-
 ldap/servers/slapd/mapping_tree.c                |  28 ++--
 ldap/servers/slapd/object.c                      |   8 +-
 ldap/servers/slapd/psearch.c                     |   7 +-
 ldap/servers/slapd/slapi-plugin.h                |  52 ++++++++
 ldap/servers/slapd/slapi_counter.c               | 100 ++++++++++++++
 ldap/servers/slapd/thread_data.c                 |   2 +-
 src/nunc-stans/ns/ns_thrpool.c                   |  17 ++-
 src/nunc-stans/test/test_nuncstans_stress_core.c |  42 +++++-
 13 files changed, 361 insertions(+), 150 deletions(-)

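Note: the slapi_atomic_incr/decr/load/store wrappers that the hunks below call into are added in ldap/servers/slapd/slapi-plugin.h and ldap/servers/slapd/slapi_counter.c, which fall outside the hunks shown in this section. As a rough, hedged sketch of the approach the fix description outlines — a #define detects whether the gcc builtin atomics are usable, otherwise the generic NSPR atomics are used — the selection could look like the following. This is illustrative only, not the literal patch content; the feature-test macro name and the exact NSPR fallback calls are assumptions.

/* Illustrative sketch only -- not the verbatim content of slapi-plugin.h or
 * slapi_counter.c.  HAVE_GCC_BUILTIN_ATOMICS is an assumed feature-test macro;
 * the real patch defines its own detection #defines. */
#include <pratom.h> /* NSPR: PR_AtomicIncrement, PR_AtomicDecrement, PR_AtomicAdd */

/* The real wrappers also take ATOMIC_INT / ATOMIC_LONG so the implementation
 * can pick the operand width; that dispatch is omitted from this sketch. */
#define ATOMIC_INT  0
#define ATOMIC_LONG 1

#ifdef HAVE_GCC_BUILTIN_ATOMICS
/* Builtins honour the caller-supplied memory order. */
#define EXAMPLE_ATOMIC_INCR(ptr, memorder) __atomic_add_fetch((ptr), 1, (memorder))
#define EXAMPLE_ATOMIC_DECR(ptr, memorder) __atomic_sub_fetch((ptr), 1, (memorder))
#define EXAMPLE_ATOMIC_LOAD(ptr, memorder) __atomic_load_n((ptr), (memorder))
#else
/* Generic NSPR fallback: 32-bit operations, memory-order argument ignored. */
#define EXAMPLE_ATOMIC_INCR(ptr, memorder) PR_AtomicIncrement((PRInt32 *)(ptr))
#define EXAMPLE_ATOMIC_DECR(ptr, memorder) PR_AtomicDecrement((PRInt32 *)(ptr))
#define EXAMPLE_ATOMIC_LOAD(ptr, memorder) PR_AtomicAdd((PRInt32 *)(ptr), 0)
#endif

The hunks below then replace each direct __atomic_*_4 / __atomic_*_8 call site with the slapi_atomic_* wrappers, passing ATOMIC_INT or ATOMIC_LONG to describe the operand width.
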
diff --git a/ldap/servers/slapd/attrsyntax.c b/ldap/servers/slapd/attrsyntax.c
index 03f05d9..a0a60c4 100644
--- a/ldap/servers/slapd/attrsyntax.c
+++ b/ldap/servers/slapd/attrsyntax.c
@@ -274,7 +274,7 @@ attr_syntax_get_by_oid_locking_optional(const char *oid, PRBool use_lock, PRUint
}
asi = (struct asyntaxinfo *)PL_HashTableLookup_const(ht, oid);
if (asi) {
- __atomic_add_fetch_8(&(asi->asi_refcnt), 1, __ATOMIC_RELEASE);
+ slapi_atomic_incr(&(asi->asi_refcnt), __ATOMIC_RELEASE, ATOMIC_LONG);
}
if (use_lock) {
AS_UNLOCK_READ(oid2asi_lock);
@@ -371,7 +371,7 @@ attr_syntax_get_by_name_locking_optional(const char *name, PRBool use_lock, PRUi
}
asi = (struct asyntaxinfo *)PL_HashTableLookup_const(ht, name);
if (NULL != asi) {
- __atomic_add_fetch_8(&(asi->asi_refcnt), 1, __ATOMIC_RELEASE);
+ slapi_atomic_incr(&(asi->asi_refcnt), __ATOMIC_RELEASE, ATOMIC_LONG);
}
if (use_lock) {
AS_UNLOCK_READ(name2asi_lock);
@@ -406,7 +406,7 @@ attr_syntax_return_locking_optional(struct asyntaxinfo *asi, PRBool use_lock)
}
if (NULL != asi) {
PRBool delete_it = PR_FALSE;
- if (0 == __atomic_sub_fetch_8(&(asi->asi_refcnt), 1, __ATOMIC_ACQ_REL)) {
+ if (0 == slapi_atomic_decr(&(asi->asi_refcnt), __ATOMIC_ACQ_REL, ATOMIC_LONG)) {
delete_it = asi->asi_marked_for_delete;
}

@@ -540,7 +540,7 @@ attr_syntax_delete_no_lock(struct asyntaxinfo *asi,
PL_HashTableRemove(ht, asi->asi_aliases[i]);
}
}
- if (__atomic_load_8(&(asi->asi_refcnt), __ATOMIC_ACQUIRE) > 0) {
+ if (slapi_atomic_load(&(asi->asi_refcnt), __ATOMIC_ACQUIRE, ATOMIC_LONG) > 0) {
asi->asi_marked_for_delete = PR_TRUE;
} else {
/* This is ok, but the correct thing is to call delete first,
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index d43258d..c4c4959 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -2860,16 +2860,16 @@ int
dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flags)
{
/*
- * We either already have a DB* handle in the attrinfo structure.
- * in which case we simply return it to the caller, OR:
- * we need to make one. We do this as follows:
- * 1a) acquire the mutex that protects the handle list.
- * 1b) check that the DB* is still null.
- * 2) get the filename, and call libdb to open it
- * 3) if successful, store the result in the attrinfo stucture
- * 4) store the DB* in our own list so we can close it later.
- * 5) release the mutex.
- */
+ * We either already have a DB* handle in the attrinfo structure.
+ * in which case we simply return it to the caller, OR:
+ * we need to make one. We do this as follows:
+ * 1a) acquire the mutex that protects the handle list.
+ * 1b) check that the DB* is still null.
+ * 2) get the filename, and call libdb to open it
+ * 3) if successful, store the result in the attrinfo stucture
+ * 4) store the DB* in our own list so we can close it later.
+ * 5) release the mutex.
+ */
ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
int return_value = -1;
DB *pDB = NULL;
@@ -2878,9 +2878,9 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag
*ppDB = NULL;

/* it's like a semaphore -- when count > 0, any file handle that's in
- * the attrinfo will remain valid from here on.
- */
- __atomic_add_fetch_8(&(a->ai_dblayer_count), 1, __ATOMIC_RELEASE);
+ * the attrinfo will remain valid from here on.
+ */
+ slapi_atomic_incr(&(a->ai_dblayer_count), __ATOMIC_RELEASE, ATOMIC_LONG);

if (a->ai_dblayer && ((dblayer_handle *)(a->ai_dblayer))->dblayer_dbp) {
/* This means that the pointer is valid, so we should return it. */
@@ -2888,9 +2888,7 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag
return 0;
}

- /* attrinfo handle is NULL, at least for now -- grab the mutex and try
- * again.
- */
+ /* attrinfo handle is NULL, at least for now -- grab the mutex and try again. */
PR_Lock(inst->inst_handle_list_mutex);
if (a->ai_dblayer && ((dblayer_handle *)(a->ai_dblayer))->dblayer_dbp) {
/* another thread set the handle while we were waiting on the lock */
@@ -2900,8 +2898,8 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag
}

/* attrinfo handle is still blank, and we have the mutex: open the
- * index file and stuff it in the attrinfo.
- */
+ * index file and stuff it in the attrinfo.
+ */
return_value = dblayer_open_file(be, attribute_name, open_flags,
a, &pDB);
if (0 == return_value) {
@@ -2911,40 +2909,36 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag

PR_ASSERT(NULL != pDB);
/* Store the returned DB* in our own private list of
- * open files */
+ * open files */
if (NULL == prev_handle) {
/* List was empty */
inst->inst_handle_tail = handle;
inst->inst_handle_head = handle;
} else {
- /* Chain the handle onto the last structure in the
- * list */
+ /* Chain the handle onto the last structure in the list */
inst->inst_handle_tail = handle;
prev_handle->dblayer_handle_next = handle;
}
- /* Stash a pointer to our wrapper structure in the
- * attrinfo structure */
+ /* Stash a pointer to our wrapper structure in the attrinfo structure */
handle->dblayer_dbp = pDB;
/* And, most importantly, return something to the caller!*/
*ppDB = pDB;
- /* and save the hande in the attrinfo structure for
- * next time */
+ /* and save the hande in the attrinfo structure for next time */
a->ai_dblayer = handle;
/* don't need to update count -- we incr'd it already */
handle->dblayer_handle_ai_backpointer = &(a->ai_dblayer);
} else {
/* Did not open it OK ! */
/* Do nothing, because return value and fact that we didn't
- * store a DB* in the attrinfo is enough
- */
+ * store a DB* in the attrinfo is enough */
}
PR_Unlock(inst->inst_handle_list_mutex);

if (return_value != 0) {
/* some sort of error -- we didn't open a handle at all.
- * decrement the refcount back to where it was.
- */
- __atomic_sub_fetch_8(&(a->ai_dblayer_count), 1, __ATOMIC_RELEASE);
+ * decrement the refcount back to where it was.
+ */
+ slapi_atomic_decr(&(a->ai_dblayer_count), __ATOMIC_RELEASE, ATOMIC_LONG);
}

return return_value;
@@ -2956,7 +2950,7 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag
int
dblayer_release_index_file(backend *be __attribute__((unused)), struct attrinfo *a, DB *pDB __attribute__((unused)))
{
- __atomic_sub_fetch_8(&(a->ai_dblayer_count), 1, __ATOMIC_RELEASE);
+ slapi_atomic_decr(&(a->ai_dblayer_count), __ATOMIC_RELEASE, ATOMIC_LONG);
return 0;
}

@@ -3063,13 +3057,13 @@ dblayer_erase_index_file_ex(backend *be, struct attrinfo *a, PRBool use_lock, in

dblayer_release_index_file(be, a, db);

- while (__atomic_load_8(&(a->ai_dblayer_count), __ATOMIC_ACQUIRE) > 0) {
+ while (slapi_atomic_load(&(a->ai_dblayer_count), __ATOMIC_ACQUIRE, ATOMIC_LONG) > 0) {
/* someone is using this index file */
/* ASSUMPTION: you have already set the INDEX_OFFLINE flag, because
- * you intend to mess with this index. therefore no new requests
- * for this indexfile should happen, so the dblayer_count should
- * NEVER increase.
- */
+ * you intend to mess with this index. therefore no new requests
+ * for this indexfile should happen, so the dblayer_count should
+ * NEVER increase.
+ */
PR_ASSERT(a->ai_indexmask & INDEX_OFFLINE);
PR_Unlock(inst->inst_handle_list_mutex);
DS_Sleep(DBLAYER_CACHE_DELAY);
diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
index 62d10c2..289a149 100644
--- a/ldap/servers/slapd/entry.c
+++ b/ldap/servers/slapd/entry.c
@@ -2244,18 +2244,19 @@ slapi_entry_attr_find(const Slapi_Entry *e, const char *type, Slapi_Attr **a)

/* the following functions control virtual attribute cache invalidation */

-static uint32_t g_virtual_watermark = 0; /* good enough to init */
+static int32_t g_virtual_watermark = 0; /* good enough to init */

int
slapi_entry_vattrcache_watermark_isvalid(const Slapi_Entry *e)
{
- return e->e_virtual_watermark == __atomic_load_4(&g_virtual_watermark, __ATOMIC_ACQUIRE);
+ return e->e_virtual_watermark == slapi_atomic_load(&g_virtual_watermark, __ATOMIC_ACQUIRE, ATOMIC_INT);
+
}

void
slapi_entry_vattrcache_watermark_set(Slapi_Entry *e)
{
- e->e_virtual_watermark = __atomic_load_4(&g_virtual_watermark, __ATOMIC_ACQUIRE);
+ e->e_virtual_watermark = slapi_atomic_load(&g_virtual_watermark, __ATOMIC_ACQUIRE, ATOMIC_INT);
}

void
@@ -2268,8 +2269,8 @@ void
slapi_entrycache_vattrcache_watermark_invalidate()
{
/* Make sure the value is never 0 */
- if (__atomic_add_fetch_4(&g_virtual_watermark, 1, __ATOMIC_RELEASE) == 0) {
- __atomic_add_fetch_4(&g_virtual_watermark, 1, __ATOMIC_RELEASE);
+ if (slapi_atomic_incr(&g_virtual_watermark, __ATOMIC_RELEASE, ATOMIC_INT) == 0) {
+ slapi_atomic_incr(&g_virtual_watermark, __ATOMIC_RELEASE, ATOMIC_INT);
}
}

diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 0eeb16a..4c54cf7 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1335,19 +1335,19 @@ static uint64_t active_threads = 0;
void
g_incr_active_threadcnt(void)
{
- __atomic_add_fetch_8(&active_threads, 1, __ATOMIC_RELEASE);
+ slapi_atomic_incr(&active_threads, __ATOMIC_RELEASE, ATOMIC_LONG);
}

void
g_decr_active_threadcnt(void)
{
- __atomic_sub_fetch_8(&active_threads, 1, __ATOMIC_RELEASE);
+ slapi_atomic_decr(&active_threads, __ATOMIC_RELEASE, ATOMIC_LONG);
}

uint64_t
g_get_active_threadcnt(void)
{
- return __atomic_load_8(&active_threads, __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&active_threads, __ATOMIC_RELEASE, ATOMIC_LONG);
}

/*
@@ -1936,7 +1936,7 @@ config_set_ndn_cache_max_size(const char *attrname, char *value, char *errorbuf,
size = NDN_DEFAULT_SIZE;
}
if (apply) {
- __atomic_store_8(&(slapdFrontendConfig->ndn_cache_max_size), size, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->ndn_cache_max_size), &size, __ATOMIC_RELEASE, ATOMIC_LONG);
}

return retVal;
@@ -3476,7 +3476,8 @@ int32_t
config_get_dynamic_plugins(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->dynamic_plugins), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->dynamic_plugins), __ATOMIC_ACQUIRE, ATOMIC_INT);
+
}

int32_t
@@ -3498,7 +3499,7 @@ int32_t
config_get_cn_uses_dn_syntax_in_dns()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->cn_uses_dn_syntax_in_dns), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->cn_uses_dn_syntax_in_dns), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
@@ -3543,7 +3544,7 @@ config_set_onoff(const char *attrname, char *value, int32_t *configvalue, char *
newval = LDAP_OFF;
}

- __atomic_store_4(configvalue, newval, __ATOMIC_RELEASE);
+ slapi_atomic_store(configvalue, &newval, __ATOMIC_RELEASE, ATOMIC_INT);

return retVal;
}
@@ -3915,7 +3916,7 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
retVal = LDAP_OPERATIONS_ERROR;
}
if (apply) {
- __atomic_store_4(&(slapdFrontendConfig->threadnumber), threadnum, __ATOMIC_RELAXED);
+ slapi_atomic_store(&(slapdFrontendConfig->threadnumber), &threadnum, __ATOMIC_RELAXED, ATOMIC_INT);
}
return retVal;
}
@@ -3944,7 +3945,7 @@ config_set_maxthreadsperconn(const char *attrname, char *value, char *errorbuf,
}

if (apply) {
- __atomic_store_4(&(slapdFrontendConfig->maxthreadsperconn), maxthreadnum, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->maxthreadsperconn), &maxthreadnum, __ATOMIC_RELEASE, ATOMIC_INT);
}
return retVal;
}
@@ -4102,7 +4103,7 @@ config_set_ioblocktimeout(const char *attrname, char *value, char *errorbuf, int
}

if (apply) {
- __atomic_store_4(&(slapdFrontendConfig->ioblocktimeout), nValue, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->ioblocktimeout), &nValue, __ATOMIC_RELEASE, ATOMIC_INT);
}
return retVal;
}
@@ -4606,21 +4607,22 @@ int32_t
config_get_sasl_mapping_fallback()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->sasl_mapping_fallback), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->sasl_mapping_fallback), __ATOMIC_ACQUIRE, ATOMIC_INT);
+
}

int32_t
config_get_disk_monitoring()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->disk_monitoring), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->disk_monitoring), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_disk_logging_critical()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->disk_logging_critical), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->disk_logging_critical), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int
@@ -4667,14 +4669,14 @@ int32_t
config_get_ldapi_switch()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->ldapi_switch), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->ldapi_switch), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_ldapi_bind_switch()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->ldapi_bind_switch), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->ldapi_bind_switch), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

char *
@@ -4693,7 +4695,7 @@ int
config_get_ldapi_map_entries()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->ldapi_map_entries), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->ldapi_map_entries), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

char *
@@ -4763,7 +4765,8 @@ int32_t
config_get_slapi_counters()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->slapi_counters), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->slapi_counters), __ATOMIC_ACQUIRE, ATOMIC_INT);
+
}

char *
@@ -4945,7 +4948,7 @@ int32_t
config_get_pw_change(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_change), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_change), __ATOMIC_ACQUIRE, ATOMIC_INT);
}


@@ -4953,7 +4956,7 @@ int32_t
config_get_pw_history(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_history), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_history), __ATOMIC_ACQUIRE, ATOMIC_INT);
}


@@ -4961,21 +4964,21 @@ int32_t
config_get_pw_must_change(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_must_change), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_must_change), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_allow_hashed_pw(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->allow_hashed_pw), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->allow_hashed_pw), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_pw_syntax(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_syntax), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_syntax), __ATOMIC_ACQUIRE, ATOMIC_INT);
}


@@ -5164,21 +5167,21 @@ int32_t
config_get_pw_is_global_policy(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->pw_is_global_policy), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->pw_is_global_policy), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_pw_is_legacy_policy(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_is_legacy), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_is_legacy), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_pw_exp(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_exp), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_exp), __ATOMIC_ACQUIRE, ATOMIC_INT);
}


@@ -5186,14 +5189,14 @@ int32_t
config_get_pw_unlock(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_unlock), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_unlock), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_pw_lockout()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_lockout), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_lockout), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int
@@ -5213,112 +5216,112 @@ int32_t
config_get_lastmod()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->lastmod), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->lastmod), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_enquote_sup_oc()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->enquote_sup_oc), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->enquote_sup_oc), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_nagle(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->nagle), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->nagle), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_accesscontrol(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->accesscontrol), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->accesscontrol), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_return_exact_case(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->return_exact_case), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->return_exact_case), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_result_tweak(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->result_tweak), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->result_tweak), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_moddn_aci(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->moddn_aci), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->moddn_aci), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_security(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->security), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->security), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
slapi_config_get_readonly(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->readonly), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->readonly), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_schemacheck(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->schemacheck), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->schemacheck), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_schemamod(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->schemamod), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->schemamod), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_syntaxcheck(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->syntaxcheck), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->syntaxcheck), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_syntaxlogging(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->syntaxlogging), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->syntaxlogging), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_dn_validate_strict(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->dn_validate_strict), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->dn_validate_strict), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_ds4_compatible_schema(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->ds4_compatible_schema), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->ds4_compatible_schema), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_schema_ignore_trailing_spaces(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->schema_ignore_trailing_spaces), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->schema_ignore_trailing_spaces), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

char *
@@ -5402,7 +5405,7 @@ config_get_threadnumber(void)
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
int32_t retVal;

- retVal = __atomic_load_4(&(slapdFrontendConfig->threadnumber), __ATOMIC_RELAXED);
+ retVal = slapi_atomic_load(&(slapdFrontendConfig->threadnumber), __ATOMIC_RELAXED, ATOMIC_INT);

if (retVal <= 0) {
retVal = util_get_hardware_threads();
@@ -5420,7 +5423,7 @@ int32_t
config_get_maxthreadsperconn()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->maxthreadsperconn), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->maxthreadsperconn), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int
@@ -5452,7 +5455,7 @@ int32_t
config_get_ioblocktimeout()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->ioblocktimeout), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->ioblocktimeout), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int
@@ -5769,21 +5772,21 @@ int32_t
config_get_unauth_binds_switch(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->allow_unauth_binds), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->allow_unauth_binds), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_require_secure_binds(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->require_secure_binds), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->require_secure_binds), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_anon_access_switch(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->allow_anon_access), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->allow_anon_access), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int
@@ -6025,7 +6028,8 @@ int32_t
config_get_minssf_exclude_rootdse()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->minssf_exclude_rootdse), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->minssf_exclude_rootdse), __ATOMIC_ACQUIRE, ATOMIC_INT);
+
}

int
@@ -6034,18 +6038,17 @@ config_set_max_filter_nest_level(const char *attrname, char *value, char *errorb
int retVal = LDAP_SUCCESS;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
char *endp;
- long level;
+ int32_t level;

if (config_value_is_null(attrname, value, errorbuf, 0)) {
return LDAP_OPERATIONS_ERROR;
}

errno = 0;
- level = strtol(value, &endp, 10);
+ level = (int32_t)strtol(value, &endp, 10);
if (*endp != '\0' || errno == ERANGE) {
- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "(%s) value (%s) "
- "is invalid\n",
- attrname, value);
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "(%s) value (%s) is invalid\n", attrname, value);
retVal = LDAP_OPERATIONS_ERROR;
return retVal;
}
@@ -6054,7 +6057,7 @@ config_set_max_filter_nest_level(const char *attrname, char *value, char *errorb
return retVal;
}

- __atomic_store_4(&(slapdFrontendConfig->max_filter_nest_level), level, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->max_filter_nest_level), &level, __ATOMIC_RELEASE, ATOMIC_INT);
return retVal;
}

@@ -6062,29 +6065,28 @@ int32_t
config_get_max_filter_nest_level()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->max_filter_nest_level), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->max_filter_nest_level), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

uint64_t
config_get_ndn_cache_size()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
-
- return __atomic_load_8(&(slapdFrontendConfig->ndn_cache_max_size), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->ndn_cache_max_size), __ATOMIC_ACQUIRE, ATOMIC_LONG);
}

int32_t
config_get_ndn_cache_enabled()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->ndn_cache_enabled), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->ndn_cache_enabled), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_return_orig_type_switch()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->return_orig_type), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->return_orig_type), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

char *
@@ -6786,7 +6788,7 @@ int32_t
config_get_force_sasl_external(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->force_sasl_external), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->force_sasl_external), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
@@ -6808,7 +6810,7 @@ int32_t
config_get_entryusn_global(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->entryusn_global), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->entryusn_global), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
@@ -7046,21 +7048,21 @@ int32_t
config_get_enable_turbo_mode(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->enable_turbo_mode), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->enable_turbo_mode), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_connection_nocanon(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->connection_nocanon), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->connection_nocanon), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_plugin_logging(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->plugin_logging), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->plugin_logging), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
@@ -7073,21 +7075,21 @@ int32_t
config_get_unhashed_pw_switch()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->unhashed_pw_switch), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->unhashed_pw_switch), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_ignore_time_skew(void)
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->ignore_time_skew), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->ignore_time_skew), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
config_get_global_backend_lock()
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- return __atomic_load_4(&(slapdFrontendConfig->global_backend_lock), __ATOMIC_ACQUIRE);
+ return slapi_atomic_load(&(slapdFrontendConfig->global_backend_lock), __ATOMIC_ACQUIRE, ATOMIC_INT);
}

int32_t
@@ -7163,8 +7165,9 @@ config_get_connection_buffer(void)
int
config_set_connection_buffer(const char *attrname, char *value, char *errorbuf, int apply)
{
- int retVal = LDAP_SUCCESS;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ int retVal = LDAP_SUCCESS;
+ int32_t val;

if (config_value_is_null(attrname, value, errorbuf, 0)) {
return LDAP_OPERATIONS_ERROR;
@@ -7181,7 +7184,9 @@ config_set_connection_buffer(const char *attrname, char *value, char *errorbuf,
return retVal;
}

- __atomic_store_4(&(slapdFrontendConfig->connection_buffer), atoi(value), __ATOMIC_RELEASE);
+ val = atoi(value);
+ slapi_atomic_store(&(slapdFrontendConfig->connection_buffer), &val, __ATOMIC_RELEASE, ATOMIC_INT);
+
return retVal;
}

@@ -7204,7 +7209,7 @@ config_set_listen_backlog_size(const char *attrname, char *value, char *errorbuf
}

if (apply) {
- __atomic_store_4(&(slapdFrontendConfig->listen_backlog_size), size, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->listen_backlog_size), &size, __ATOMIC_RELEASE, ATOMIC_INT);
}
return LDAP_SUCCESS;
}
@@ -7617,7 +7622,7 @@ config_set_accesslog_enabled(int value)
char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
errorbuf[0] = '\0';

- __atomic_store_4(&(slapdFrontendConfig->accesslog_logging_enabled), value, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->accesslog_logging_enabled), &value, __ATOMIC_RELEASE, ATOMIC_INT);
if (value) {
log_set_logging(CONFIG_ACCESSLOG_LOGGING_ENABLED_ATTRIBUTE, "on", SLAPD_ACCESS_LOG, errorbuf, CONFIG_APPLY);
} else {
@@ -7635,7 +7640,7 @@ config_set_auditlog_enabled(int value)
char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
errorbuf[0] = '\0';

- __atomic_store_4(&(slapdFrontendConfig->auditlog_logging_enabled), value, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->auditlog_logging_enabled), &value, __ATOMIC_RELEASE, ATOMIC_INT);
if (value) {
log_set_logging(CONFIG_AUDITLOG_LOGGING_ENABLED_ATTRIBUTE, "on", SLAPD_AUDIT_LOG, errorbuf, CONFIG_APPLY);
} else {
@@ -7653,7 +7658,7 @@ config_set_auditfaillog_enabled(int value)
char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
errorbuf[0] = '\0';

- __atomic_store_4(&(slapdFrontendConfig->auditfaillog_logging_enabled), value, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->auditfaillog_logging_enabled), &value, __ATOMIC_RELEASE, ATOMIC_INT);
if (value) {
log_set_logging(CONFIG_AUDITFAILLOG_LOGGING_ENABLED_ATTRIBUTE, "on", SLAPD_AUDITFAIL_LOG, errorbuf, CONFIG_APPLY);
} else {
@@ -7744,7 +7749,7 @@ config_set_malloc_mxfast(const char *attrname, char *value, char *errorbuf, int
value, CONFIG_MALLOC_MXFAST, max);
return LDAP_OPERATIONS_ERROR;
}
- __atomic_store_4(&(slapdFrontendConfig->malloc_mxfast), mxfast, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->malloc_mxfast), &mxfast, __ATOMIC_RELEASE, ATOMIC_INT);

if ((mxfast >= 0) && (mxfast <= max)) {
mallopt(M_MXFAST, mxfast);
@@ -7784,7 +7789,7 @@ config_set_malloc_trim_threshold(const char *attrname, char *value, char *errorb
return LDAP_OPERATIONS_ERROR;
}

- __atomic_store_4(&(slapdFrontendConfig->malloc_trim_threshold), trim_threshold, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->malloc_trim_threshold), &trim_threshold, __ATOMIC_RELEASE, ATOMIC_INT);

if (trim_threshold >= -1) {
mallopt(M_TRIM_THRESHOLD, trim_threshold);
@@ -7831,7 +7836,7 @@ config_set_malloc_mmap_threshold(const char *attrname, char *value, char *errorb
return LDAP_OPERATIONS_ERROR;
}

- __atomic_store_4(&(slapdFrontendConfig->malloc_mmap_threshold), mmap_threshold, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(slapdFrontendConfig->malloc_mmap_threshold), &mmap_threshold, __ATOMIC_RELEASE, ATOMIC_INT);

if ((mmap_threshold >= 0) && (mmap_threshold <= max)) {
mallopt(M_MMAP_THRESHOLD, mmap_threshold);
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 41b5c99..4d44c87 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -4942,12 +4942,13 @@ static LogBufferInfo *
log_create_buffer(size_t sz)
{
LogBufferInfo *lbi;
+ uint64_t init_val = 0;

lbi = (LogBufferInfo *)slapi_ch_malloc(sizeof(LogBufferInfo));
lbi->top = (char *)slapi_ch_malloc(sz);
lbi->current = lbi->top;
lbi->maxsize = sz;
- __atomic_store_8(&(lbi->refcount), 0, __ATOMIC_RELEASE);
+ slapi_atomic_store(&(lbi->refcount), &init_val, __ATOMIC_RELEASE, ATOMIC_LONG);
return lbi;
}

@@ -5009,7 +5010,7 @@ log_append_buffer2(time_t tnl, LogBufferInfo *lbi, char *msg1, size_t size1, cha
insert_point = lbi->current;
lbi->current += size;
/* Increment the copy refcount */
- __atomic_add_fetch_8(&(lbi->refcount), 1, __ATOMIC_RELEASE);
+ slapi_atomic_incr(&(lbi->refcount), __ATOMIC_RELEASE, ATOMIC_LONG);
PR_Unlock(lbi->lock);

/* Now we can copy without holding the lock */
@@ -5017,7 +5018,7 @@ log_append_buffer2(time_t tnl, LogBufferInfo *lbi, char *msg1, size_t size1, cha
memcpy(insert_point + size1, msg2, size2);

/* Decrement the copy refcount */
- __atomic_sub_fetch_8(&(lbi->refcount), 1, __ATOMIC_RELEASE);
+ slapi_atomic_decr(&(lbi->refcount), __ATOMIC_RELEASE, ATOMIC_LONG);

/* If we are asked to sync to disk immediately, do so */
if (!slapdFrontendConfig->accesslogbuffering) {
@@ -5037,7 +5038,7 @@ log_flush_buffer(LogBufferInfo *lbi, int type, int sync_now)
if (type == SLAPD_ACCESS_LOG) {

/* It is only safe to flush once any other threads which are copying are finished */
- while (__atomic_load_8(&(lbi->refcount), __ATOMIC_ACQUIRE) > 0) {
+ while (slapi_atomic_load(&(lbi->refcount), __ATOMIC_ACQUIRE, ATOMIC_LONG) > 0) {
/* It's ok to sleep for a while because we only flush every second or so */
DS_Sleep(PR_MillisecondsToInterval(1));
}
diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c
index 651d70e..6621ceb 100644
--- a/ldap/servers/slapd/mapping_tree.c
+++ b/ldap/servers/slapd/mapping_tree.c
@@ -1647,7 +1647,7 @@ mapping_tree_init()

/* we call this function from a single thread, so it should be ok */

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
/* shutdown has been detected */
return 0;
}
@@ -1759,6 +1759,8 @@ mtn_free_node(mapping_tree_node **node)
void
mapping_tree_free()
{
+ int init_val = 1;
+
/* unregister dse callbacks */
slapi_config_remove_callback(SLAPI_OPERATION_MODIFY, DSE_FLAG_PREOP, MAPPING_TREE_BASE_DN, LDAP_SCOPE_BASE, "(objectclass=*)", mapping_tree_entry_modify_callback);
slapi_config_remove_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, MAPPING_TREE_BASE_DN, LDAP_SCOPE_BASE, "(objectclass=*)", mapping_tree_entry_add_callback);
@@ -1771,7 +1773,7 @@ mapping_tree_free()
slapi_unregister_backend_state_change_all();
/* recursively free tree nodes */
mtn_free_node(&mapping_tree_root);
- __atomic_store_4(&mapping_tree_freed, 1, __ATOMIC_RELAXED);
+ slapi_atomic_store(&mapping_tree_freed, &init_val, __ATOMIC_RELAXED, ATOMIC_INT);
}

/* This function returns the first node to parse when a search is done
@@ -2022,7 +2024,7 @@ slapi_dn_write_needs_referral(Slapi_DN *target_sdn, Slapi_Entry **referral)
mapping_tree_node *target_node = NULL;
int ret = 0;

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
/* shutdown detected */
goto done;
}
@@ -2093,7 +2095,7 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re
int fixup = 0;


- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
/* shutdown detected */
return LDAP_OPERATIONS_ERROR;
}
@@ -2198,7 +2200,7 @@ slapi_mapping_tree_select_all(Slapi_PBlock *pb, Slapi_Backend **be_list, Slapi_E
int flag_partial_result = 0;
int op_type;

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
return LDAP_OPERATIONS_ERROR;
}

@@ -2358,7 +2360,7 @@ slapi_mapping_tree_select_and_check(Slapi_PBlock *pb, char *newdn, Slapi_Backend
int ret;
int need_unlock = 0;

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
return LDAP_OPERATIONS_ERROR;
}

@@ -2524,7 +2526,7 @@ mtn_get_be(mapping_tree_node *target_node, Slapi_PBlock *pb, Slapi_Backend **be,
int flag_stop = 0;
struct slapi_componentid *cid = NULL;

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
/* shut down detected */
return LDAP_OPERATIONS_ERROR;
}
@@ -2712,7 +2714,7 @@ best_matching_child(mapping_tree_node *parent,
mapping_tree_node *highest_match_node = NULL;
mapping_tree_node *current;

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
/* shutdown detected */
return NULL;
}
@@ -2739,7 +2741,7 @@ mtn_get_mapping_tree_node_by_entry(mapping_tree_node *node, const Slapi_DN *dn)
{
mapping_tree_node *found_node = NULL;

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
/* shutdown detected */
return NULL;
}
@@ -2782,7 +2784,7 @@ slapi_get_mapping_tree_node_by_dn(const Slapi_DN *dn)
mapping_tree_node *current_best_match = mapping_tree_root;
mapping_tree_node *next_best_match = mapping_tree_root;

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
/* shutdown detected */
return NULL;
}
@@ -2816,7 +2818,7 @@ get_mapping_tree_node_by_name(mapping_tree_node *node, char *be_name)
int i;
mapping_tree_node *found_node = NULL;

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
/* shutdown detected */
return NULL;
}
@@ -2863,7 +2865,7 @@ slapi_get_mapping_tree_node_configdn(const Slapi_DN *root)
{
char *dn = NULL;

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
/* shutdown detected */
return NULL;
}
@@ -2890,7 +2892,7 @@ slapi_get_mapping_tree_node_configsdn(const Slapi_DN *root)
char *dn = NULL;
Slapi_DN *sdn = NULL;

- if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
+ if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
/* shutdown detected */
return NULL;
}
diff --git a/ldap/servers/slapd/object.c b/ldap/servers/slapd/object.c
|
|
|
081b2d |
index 84845d3..6a1a9a5 100644
|
|
|
081b2d |
--- a/ldap/servers/slapd/object.c
|
|
|
081b2d |
+++ b/ldap/servers/slapd/object.c
|
|
|
081b2d |
@@ -43,10 +43,12 @@ Object *
|
|
|
081b2d |
object_new(void *user_data, FNFree destructor)
|
|
|
081b2d |
{
|
|
|
081b2d |
Object *o;
|
|
|
081b2d |
+ uint64_t init_val = 1;
|
|
|
081b2d |
+
|
|
|
081b2d |
o = (object *)slapi_ch_malloc(sizeof(object));
|
|
|
081b2d |
o->destructor = destructor;
|
|
|
081b2d |
o->data = user_data;
|
|
|
081b2d |
- __atomic_store_8(&(o->refcnt), 1, __ATOMIC_RELEASE);
|
|
|
081b2d |
+ slapi_atomic_store(&(o->refcnt), &init_val, __ATOMIC_RELEASE, ATOMIC_LONG);
|
|
|
081b2d |
return o;
|
|
|
081b2d |
}
|
|
|
081b2d |
|
|
|
081b2d |
@@ -60,7 +62,7 @@ void
|
|
|
081b2d |
object_acquire(Object *o)
|
|
|
081b2d |
{
|
|
|
081b2d |
PR_ASSERT(NULL != o);
|
|
|
081b2d |
- __atomic_add_fetch_8(&(o->refcnt), 1, __ATOMIC_RELEASE);
|
|
|
081b2d |
+ slapi_atomic_incr(&(o->refcnt), __ATOMIC_RELEASE, ATOMIC_LONG);
|
|
|
081b2d |
}
|
|
|
081b2d |
|
|
|
081b2d |
|
|
|
081b2d |
@@ -75,7 +77,7 @@ object_release(Object *o)
|
|
|
081b2d |
PRInt32 refcnt_after_release;
|
|
|
081b2d |
|
|
|
081b2d |
PR_ASSERT(NULL != o);
|
|
|
081b2d |
- refcnt_after_release = __atomic_sub_fetch_8(&(o->refcnt), 1, __ATOMIC_ACQ_REL);
|
|
|
081b2d |
+ refcnt_after_release = slapi_atomic_decr(&(o->refcnt), __ATOMIC_ACQ_REL, ATOMIC_LONG);
|
|
|
081b2d |
if (refcnt_after_release == 0) {
|
|
|
081b2d |
/* Object can be destroyed */
|
|
|
081b2d |
if (o->destructor)
|
|
|
081b2d |
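The object.c hunk above keeps release ordering on the increment and acquire+release on the final decrement, so the thread that drops the last reference observes the writes of every thread that held one before the destructor runs. A minimal sketch of that pattern with the new wrappers; example_ref, example_acquire, and example_release are made-up names, and the sketch assumes the slapi_atomic_* declarations added to slapi-plugin.h later in this patch:

#include <stdint.h>
/* assumes slapi-plugin.h for slapi_atomic_incr()/slapi_atomic_decr() and ATOMIC_LONG */

typedef struct example_ref {
    uint64_t refcnt;                          /* driven as ATOMIC_LONG */
    void (*destructor)(struct example_ref *);
} example_ref;

static void
example_acquire(example_ref *r)
{
    /* publish the new reference count */
    slapi_atomic_incr(&(r->refcnt), __ATOMIC_RELEASE, ATOMIC_LONG);
}

static void
example_release(example_ref *r)
{
    /* the final decrement also acquires, so the destructor sees prior writes */
    if (slapi_atomic_decr(&(r->refcnt), __ATOMIC_ACQ_REL, ATOMIC_LONG) == 0 && r->destructor) {
        r->destructor(r);
    }
}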
diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c
|
|
|
081b2d |
index 0489122..70c530b 100644
|
|
|
081b2d |
--- a/ldap/servers/slapd/psearch.c
|
|
|
081b2d |
+++ b/ldap/servers/slapd/psearch.c
|
|
|
081b2d |
@@ -134,7 +134,7 @@ ps_stop_psearch_system()
|
|
|
081b2d |
if (PS_IS_INITIALIZED()) {
|
|
|
081b2d |
PSL_LOCK_WRITE();
|
|
|
081b2d |
for (ps = psearch_list->pl_head; NULL != ps; ps = ps->ps_next) {
|
|
|
081b2d |
- __atomic_add_fetch_8(&(ps->ps_complete), 1, __ATOMIC_RELEASE);
|
|
|
081b2d |
+ slapi_atomic_incr(&(ps->ps_complete), __ATOMIC_RELEASE, ATOMIC_LONG);
|
|
|
081b2d |
}
|
|
|
081b2d |
PSL_UNLOCK_WRITE();
|
|
|
081b2d |
ps_wakeup_all();
|
|
|
081b2d |
@@ -285,7 +285,7 @@ ps_send_results(void *arg)
|
|
|
081b2d |
|
|
|
081b2d |
PR_Lock(psearch_list->pl_cvarlock);
|
|
|
081b2d |
|
|
|
081b2d |
- while ((conn_acq_flag == 0) && __atomic_load_8(&(ps->ps_complete), __ATOMIC_ACQUIRE) == 0) {
|
|
|
081b2d |
+ while ((conn_acq_flag == 0) && slapi_atomic_load(&(ps->ps_complete), __ATOMIC_ACQUIRE, ATOMIC_LONG) == 0) {
|
|
|
081b2d |
/* Check for an abandoned operation */
|
|
|
081b2d |
if (pb_op == NULL || slapi_op_abandoned(ps->ps_pblock)) {
|
|
|
081b2d |
slapi_log_err(SLAPI_LOG_CONNS, "ps_send_results",
|
|
|
081b2d |
@@ -427,6 +427,7 @@ static PSearch *
|
|
|
081b2d |
psearch_alloc(void)
|
|
|
081b2d |
{
|
|
|
081b2d |
PSearch *ps;
|
|
|
081b2d |
+ uint64_t init_val = 0;
|
|
|
081b2d |
|
|
|
081b2d |
ps = (PSearch *)slapi_ch_calloc(1, sizeof(PSearch));
|
|
|
081b2d |
|
|
|
081b2d |
@@ -437,7 +438,7 @@ psearch_alloc(void)
|
|
|
081b2d |
slapi_ch_free((void **)&ps);
|
|
|
081b2d |
return (NULL);
|
|
|
081b2d |
}
|
|
|
081b2d |
- __atomic_store_8(&(ps->ps_complete), 0, __ATOMIC_RELEASE);
|
|
|
081b2d |
+ slapi_atomic_store(&(ps->ps_complete), &init_val, __ATOMIC_RELEASE, ATOMIC_LONG);
|
|
|
081b2d |
ps->ps_eq_head = ps->ps_eq_tail = (PSEQNode *)NULL;
|
|
|
081b2d |
ps->ps_lasttime = (time_t)0L;
|
|
|
081b2d |
ps->ps_next = NULL;
|
|
|
081b2d |
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
|
|
|
081b2d |
index 3397c63..c434add 100644
|
|
|
081b2d |
--- a/ldap/servers/slapd/slapi-plugin.h
|
|
|
081b2d |
+++ b/ldap/servers/slapd/slapi-plugin.h
|
|
|
081b2d |
@@ -8202,6 +8202,58 @@ void slapi_operation_time_initiated(Slapi_Operation *o, struct timespec *initiat
|
|
|
081b2d |
*/
|
|
|
081b2d |
#endif
|
|
|
081b2d |
|
|
|
081b2d |
+/* See: https://gcc.gnu.org/ml/gcc/2016-11/txt6ZlA_JS27i.txt */
|
|
|
081b2d |
+#define ATOMIC_GENERIC 0
|
|
|
081b2d |
+#define ATOMIC_INT 4
|
|
|
081b2d |
+#define ATOMIC_LONG 8
|
|
|
081b2d |
+#define ATOMIC_INT128 16 /* Future */
|
|
|
081b2d |
+
|
|
|
081b2d |
+/**
|
|
|
081b2d |
+ * Store an integral value atomically
|
|
|
081b2d |
+ *
|
|
|
081b2d |
+ * \param ptr - integral pointer
|
|
|
081b2d |
+ * \param val - pointer to integral value (use integral type int32_t with ATOMIC_INT, or uint64_t
|
|
|
081b2d |
+ * with ATOMIC_LONG & ATOMIC_GENERIC)
|
|
|
081b2d |
+ * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE,
|
|
|
081b2d |
+ * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST
|
|
|
081b2d |
+ * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG
|
|
|
081b2d |
+ */
|
|
|
081b2d |
+void slapi_atomic_store(void *ptr, void *val, int memorder, int type);
|
|
|
081b2d |
+
|
|
|
081b2d |
+/**
|
|
|
081b2d |
+ * Get an integral value atomically
|
|
|
081b2d |
+ *
|
|
|
081b2d |
+ * \param ptr - integral pointer
|
|
|
081b2d |
+ * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE,
|
|
|
081b2d |
+ * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST
|
|
|
081b2d |
+ * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG
|
|
|
081b2d |
+ * \return - the current value of ptr
|
|
|
081b2d |
+ */
|
|
|
081b2d |
+uint64_t slapi_atomic_load(void *ptr, int memorder, int type);
|
|
|
081b2d |
+
|
|
|
081b2d |
+/**
|
|
|
081b2d |
+ * Increment an integral value atomically
|
|
|
081b2d |
+ *
|
|
|
081b2d |
+ * \param ptr - pointer to integral to increment
|
|
|
081b2d |
+ * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE,
|
|
|
081b2d |
+ * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST
|
|
|
081b2d |
+ * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG
|
|
|
081b2d |
+ * \return - new value of ptr
|
|
|
081b2d |
+ */
|
|
|
081b2d |
+uint64_t slapi_atomic_incr(void *ptr, int memorder, int type);
|
|
|
081b2d |
+
|
|
|
081b2d |
+/**
|
|
|
081b2d |
+ * Decrement an integral value atomically
|
|
|
081b2d |
+ *
|
|
|
081b2d |
+ * \param ptr - pointer to integral to decrement
|
|
|
081b2d |
+ * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE,
|
|
|
081b2d |
+ * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST
|
|
|
081b2d |
+ * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG
|
|
|
081b2d |
+ * \return - new value of ptr
|
|
|
081b2d |
+ */
|
|
|
081b2d |
+uint64_t slapi_atomic_decr(void *ptr, int memorder, int type);
|
|
|
081b2d |
+
|
|
|
081b2d |
+
|
|
|
081b2d |
#ifdef __cplusplus
|
|
|
081b2d |
}
|
|
|
081b2d |
#endif
|
|
|
081b2d |
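A short usage sketch of the API declared above, mirroring the call sites elsewhere in this patch; example_shutdown_flag and example_usage are illustrative names only:

#include <stdint.h>
/* assumes slapi-plugin.h for slapi_atomic_store()/slapi_atomic_load() and ATOMIC_INT */

static int32_t example_shutdown_flag;  /* 4-byte flag, so ATOMIC_INT */

static void
example_usage(void)
{
    int32_t one = 1;

    /* publish the flag with release ordering ... */
    slapi_atomic_store(&example_shutdown_flag, &one, __ATOMIC_RELEASE, ATOMIC_INT);

    /* ... and poll it cheaply with a relaxed load, as mapping_tree.c does */
    if (slapi_atomic_load(&example_shutdown_flag, __ATOMIC_RELAXED, ATOMIC_INT)) {
        /* shutdown detected */
    }
}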
diff --git a/ldap/servers/slapd/slapi_counter.c b/ldap/servers/slapd/slapi_counter.c
|
|
|
081b2d |
index ba0091f..9e705b3 100644
|
|
|
081b2d |
--- a/ldap/servers/slapd/slapi_counter.c
|
|
|
081b2d |
+++ b/ldap/servers/slapd/slapi_counter.c
|
|
|
081b2d |
@@ -283,3 +283,103 @@ slapi_counter_get_value(Slapi_Counter *counter)
|
|
|
081b2d |
|
|
|
081b2d |
return value;
|
|
|
081b2d |
}
|
|
|
081b2d |
+
|
|
|
081b2d |
+
|
|
|
081b2d |
+/*
|
|
|
081b2d |
+ *
|
|
|
081b2d |
+ * Atomic functions
|
|
|
081b2d |
+ *
|
|
|
081b2d |
+ * ptr - a pointer to an integral type variable: int, uint32_t, uint64_t, etc
|
|
|
081b2d |
+ *
|
|
|
081b2d |
+ * memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE,
|
|
|
081b2d |
+ * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, or __ATOMIC_SEQ_CST
|
|
|
081b2d |
+ *
|
|
|
081b2d |
+ * See: https://gcc.gnu.org/onlinedocs/gcc-4.9.2/gcc/_005f_005fatomic-Builtins.html
|
|
|
081b2d |
+ *
|
|
|
081b2d |
+ * type_size - ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG, see slapi-plugin.h for more info
|
|
|
081b2d |
+ *
|
|
|
081b2d |
+ * Future:
|
|
|
081b2d |
+ * If we need to support ATOMIC_INT128 (not available on 32bit systems):
|
|
|
081b2d |
+ * __atomic_store_16((uint64_t *)&ptr, val, memorder);
|
|
|
081b2d |
+ * __atomic_load_16((uint64_t *)&ptr, memorder);
|
|
|
081b2d |
+ * __atomic_add_fetch_16((uint64_t *)&ptr, 1, memorder);
|
|
|
081b2d |
+ * __atomic_sub_fetch_16((uint64_t *)&ptr, 1, memorder);
|
|
|
081b2d |
+ */
|
|
|
081b2d |
+
|
|
|
081b2d |
+/*
|
|
|
081b2d |
+ * "val" must be either int32_t or uint64_t
|
|
|
081b2d |
+ */
|
|
|
081b2d |
+void
|
|
|
081b2d |
+slapi_atomic_store(void *ptr, void *val, int memorder, int type_size)
|
|
|
081b2d |
+{
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
+ if (type_size == ATOMIC_INT) {
|
|
|
081b2d |
+ __atomic_store_4((int32_t *)ptr, *(int32_t *)val, memorder);
|
|
|
081b2d |
+ } else if (type_size == ATOMIC_LONG) {
|
|
|
081b2d |
+ __atomic_store_8((uint64_t *)ptr, *(uint64_t *)val, memorder);
|
|
|
081b2d |
+ } else {
|
|
|
081b2d |
+ /* ATOMIC_GENERIC or unknown size */
|
|
|
081b2d |
+ __atomic_store((uint64_t *)&ptr, (uint64_t *)val, memorder);
|
|
|
081b2d |
+ }
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PRInt32 *pr_ptr = (PRInt32 *)ptr;
|
|
|
081b2d |
+ PR_AtomicSet(pr_ptr, *(PRInt32 *)val);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
+}
|
|
|
081b2d |
+
|
|
|
081b2d |
+uint64_t
|
|
|
081b2d |
+slapi_atomic_load(void *ptr, int memorder, int type_size)
|
|
|
081b2d |
+{
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
+ uint64_t ret;
|
|
|
081b2d |
+
|
|
|
081b2d |
+ if (type_size == ATOMIC_INT) {
|
|
|
081b2d |
+ return __atomic_load_4((int32_t *)ptr, memorder);
|
|
|
081b2d |
+ } else if (type_size == ATOMIC_LONG) {
|
|
|
081b2d |
+ return __atomic_load_8((uint64_t *)ptr, memorder);
|
|
|
081b2d |
+ } else {
|
|
|
081b2d |
+ /* ATOMIC_GENERIC or unknown size */
|
|
|
081b2d |
+ __atomic_load((uint64_t *)ptr, &ret, memorder);
|
|
|
081b2d |
+ return ret;
|
|
|
081b2d |
+ }
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PRInt32 *pr_ptr = (PRInt32 *)ptr;
|
|
|
081b2d |
+ return PR_AtomicAdd(pr_ptr, 0);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
+}
|
|
|
081b2d |
+
|
|
|
081b2d |
+uint64_t
|
|
|
081b2d |
+slapi_atomic_incr(void *ptr, int memorder, int type_size)
|
|
|
081b2d |
+{
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
+ if (type_size == ATOMIC_INT) {
|
|
|
081b2d |
+ return __atomic_add_fetch_4((int32_t *)ptr, 1, memorder);
|
|
|
081b2d |
+ } else if (type_size == ATOMIC_LONG) {
|
|
|
081b2d |
+ return __atomic_add_fetch_8((uint64_t *)ptr, 1, memorder);
|
|
|
081b2d |
+ } else {
|
|
|
081b2d |
+ /* ATOMIC_GENERIC or unknown size */
|
|
|
081b2d |
+ return __atomic_add_fetch((uint64_t *)ptr, 1, memorder);
|
|
|
081b2d |
+ }
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PRInt32 *pr_ptr = (PRInt32 *)ptr;
|
|
|
081b2d |
+ return PR_AtomicIncrement(pr_ptr);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
+}
|
|
|
081b2d |
+
|
|
|
081b2d |
+uint64_t
|
|
|
081b2d |
+slapi_atomic_decr(void *ptr, int memorder, int type_size)
|
|
|
081b2d |
+{
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
+ if (type_size == ATOMIC_INT) {
|
|
|
081b2d |
+ return __atomic_sub_fetch_4((int32_t *)ptr, 1, memorder);
|
|
|
081b2d |
+ } else if (type_size == ATOMIC_LONG) {
|
|
|
081b2d |
+ return __atomic_sub_fetch_8((uint64_t *)ptr, 1, memorder);
|
|
|
081b2d |
+ } else {
|
|
|
081b2d |
+ /* ATOMIC_GENERIC or unknown size */
|
|
|
081b2d |
+ return __atomic_sub_fetch((uint64_t *)ptr, 1, memorder);
|
|
|
081b2d |
+ }
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PRInt32 *pr_ptr = (PRInt32 *)ptr;
|
|
|
081b2d |
+ return PR_AtomicDecrement(pr_ptr);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
+}
|
|
|
081b2d |
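For readers outside the tree, a compressed stand-alone sketch of the dispatch slapi_counter.c now implements: prefer the gcc __atomic builtins where the platform provides them, otherwise fall back to NSPR's 32-bit PR_Atomic* API. example_atomic_incr_long is an illustrative name; the ATOMIC_64BIT_OPERATIONS gate and the NSPR calls are the ones used above:

#include <stdint.h>
#include <pratom.h> /* NSPR: PRInt32, PR_AtomicIncrement() */

static uint64_t
example_atomic_incr_long(void *ptr, int memorder)
{
#ifdef ATOMIC_64BIT_OPERATIONS
    /* full-width builtin, honoring the caller's memory order */
    return __atomic_add_fetch_8((uint64_t *)ptr, 1, memorder);
#else
    /* NSPR has no memory-order parameter and only a 32-bit counter type */
    (void)memorder;
    return PR_AtomicIncrement((PRInt32 *)ptr);
#endif
}

Note that the fallback path treats the value as a 32-bit PRInt32 regardless of the requested width; that is the compromise the patch makes for platforms lacking the builtins.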
diff --git a/ldap/servers/slapd/thread_data.c b/ldap/servers/slapd/thread_data.c
|
|
|
081b2d |
index 9964832..d473710 100644
|
|
|
081b2d |
--- a/ldap/servers/slapd/thread_data.c
|
|
|
081b2d |
+++ b/ldap/servers/slapd/thread_data.c
|
|
|
081b2d |
@@ -9,7 +9,7 @@
|
|
|
081b2d |
/*
|
|
|
081b2d |
* Thread Local Storage Functions
|
|
|
081b2d |
*/
|
|
|
081b2d |
-#include <slapi-plugin.h>
|
|
|
081b2d |
+#include "slap.h"
|
|
|
081b2d |
#include <prthread.h>
|
|
|
081b2d |
|
|
|
081b2d |
void td_dn_destructor(void *priv);
|
|
|
081b2d |
diff --git a/src/nunc-stans/ns/ns_thrpool.c b/src/nunc-stans/ns/ns_thrpool.c
|
|
|
081b2d |
index 7921cbc..2ad0bd7 100644
|
|
|
081b2d |
--- a/src/nunc-stans/ns/ns_thrpool.c
|
|
|
081b2d |
+++ b/src/nunc-stans/ns/ns_thrpool.c
|
|
|
081b2d |
@@ -169,7 +169,11 @@ int32_t
|
|
|
081b2d |
ns_thrpool_is_shutdown(struct ns_thrpool_t *tp)
|
|
|
081b2d |
{
|
|
|
081b2d |
int32_t result = 0;
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_load(&(tp->shutdown), &result, __ATOMIC_ACQUIRE);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ result = PR_AtomicAdd(&(tp->shutdown), 0);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
return result;
|
|
|
081b2d |
}
|
|
|
081b2d |
|
|
|
081b2d |
@@ -177,7 +181,11 @@ int32_t
|
|
|
081b2d |
ns_thrpool_is_event_shutdown(struct ns_thrpool_t *tp)
|
|
|
081b2d |
{
|
|
|
081b2d |
int32_t result = 0;
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_load(&(tp->shutdown_event_loop), &result, __ATOMIC_ACQUIRE);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ result = PR_AtomicAdd(&(tp->shutdown_event_loop), 0);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
return result;
|
|
|
081b2d |
}
|
|
|
081b2d |
|
|
|
081b2d |
@@ -1442,8 +1450,11 @@ ns_thrpool_destroy(struct ns_thrpool_t *tp)
|
|
|
081b2d |
#endif
|
|
|
081b2d |
if (tp) {
|
|
|
081b2d |
/* Set the flag to shutdown the event loop. */
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&(tp->shutdown_event_loop), 1, __ATOMIC_RELEASE);
|
|
|
081b2d |
-
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&(tp->shutdown_event_loop));
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
/* Finish the event queue wakeup job. This has the
|
|
|
081b2d |
* side effect of waking up the event loop thread, which
|
|
|
081b2d |
* will cause it to exit since we set the event loop
|
|
|
081b2d |
@@ -1532,7 +1543,11 @@ ns_thrpool_shutdown(struct ns_thrpool_t *tp)
|
|
|
081b2d |
|
|
|
081b2d |
/* Set the shutdown flag. This will cause the worker
|
|
|
081b2d |
* threads to exit after they finish all remaining work. */
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&(tp->shutdown), 1, __ATOMIC_RELEASE);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&(tp->shutdown));
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
|
|
|
081b2d |
/* Send worker shutdown jobs into the queues. This allows
|
|
|
081b2d |
* currently queued jobs to complete.
|
|
|
081b2d |
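The #else branches above turn an atomic load into PR_AtomicAdd(&flag, 0): adding zero leaves the value unchanged but returns it through an atomic read-modify-write, which is the usual way to get a read out of NSPR's add-based API when no dedicated load exists. A tiny illustration; example_nspr_load is a made-up helper name:

#include <pratom.h> /* NSPR: PRInt32, PR_AtomicAdd() */

static PRInt32
example_nspr_load(PRInt32 *ptr)
{
    /* atomic read via add-zero; heavier than a plain load but portable */
    return PR_AtomicAdd(ptr, 0);
}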
diff --git a/src/nunc-stans/test/test_nuncstans_stress_core.c b/src/nunc-stans/test/test_nuncstans_stress_core.c
|
|
|
081b2d |
index a678800..2fc4ef4 100644
|
|
|
081b2d |
--- a/src/nunc-stans/test/test_nuncstans_stress_core.c
|
|
|
081b2d |
+++ b/src/nunc-stans/test/test_nuncstans_stress_core.c
|
|
|
081b2d |
@@ -128,7 +128,11 @@ server_conn_write(struct ns_job_t *job)
|
|
|
081b2d |
assert(connctx != NULL);
|
|
|
081b2d |
if (NS_JOB_IS_TIMER(ns_job_get_output_type(job))) {
|
|
|
081b2d |
do_logging(LOG_ERR, "conn_write: job [%p] timeout\n", job);
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&server_fail_count, 1, __ATOMIC_SEQ_CST);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&server_fail_count);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
conn_ctx_free(connctx);
|
|
|
081b2d |
assert_int_equal(ns_job_done(job), 0);
|
|
|
081b2d |
return;
|
|
|
081b2d |
@@ -173,7 +177,11 @@ server_conn_read(struct ns_job_t *job)
|
|
|
081b2d |
if (NS_JOB_IS_TIMER(ns_job_get_output_type(job))) {
|
|
|
081b2d |
/* The event that triggered this call back is because we timed out waiting for IO */
|
|
|
081b2d |
do_logging(LOG_ERR, "conn_read: job [%p] timed out\n", job);
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&server_fail_count, 1, __ATOMIC_SEQ_CST);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&server_fail_count);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
conn_ctx_free(connctx);
|
|
|
081b2d |
assert_int_equal(ns_job_done(job), 0);
|
|
|
081b2d |
return;
|
|
|
081b2d |
@@ -204,7 +212,11 @@ server_conn_read(struct ns_job_t *job)
|
|
|
081b2d |
return;
|
|
|
081b2d |
} else {
|
|
|
081b2d |
do_logging(LOG_ERR, "conn_read: read error for job [%p] %d: %s\n", job, PR_GetError(), PR_ErrorToString(PR_GetError(), PR_LANGUAGE_I_DEFAULT));
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&server_fail_count, 1, __ATOMIC_SEQ_CST);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&server_fail_count);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
conn_ctx_free(connctx);
|
|
|
081b2d |
assert_int_equal(ns_job_done(job), 0);
|
|
|
081b2d |
return;
|
|
|
081b2d |
@@ -214,7 +226,11 @@ server_conn_read(struct ns_job_t *job)
|
|
|
081b2d |
/* Didn't read anything */
|
|
|
081b2d |
do_logging(LOG_DEBUG, "conn_read: job [%p] closed\n", job);
|
|
|
081b2d |
/* Increment the success */
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&server_success_count, 1, __ATOMIC_SEQ_CST);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&server_success_count);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
conn_ctx_free(connctx);
|
|
|
081b2d |
assert_int_equal(ns_job_done(job), 0);
|
|
|
081b2d |
return;
|
|
|
081b2d |
@@ -314,26 +330,41 @@ client_response_cb(struct ns_job_t *job)
|
|
|
081b2d |
if (len < 0) {
|
|
|
081b2d |
/* PRErrorCode prerr = PR_GetError(); */
|
|
|
081b2d |
do_logging(LOG_ERR, "FAIL: connection error, no data \n");
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&client_fail_count);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
goto done;
|
|
|
081b2d |
} else if (len == 0) {
|
|
|
081b2d |
do_logging(LOG_ERR, "FAIL: connection closed, no data \n");
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&client_fail_count);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
goto done;
|
|
|
081b2d |
} else {
|
|
|
081b2d |
/* Be paranoid, force last byte null */
|
|
|
081b2d |
buffer[buflen - 1] = '\0';
|
|
|
081b2d |
if (strncmp("this is a test!\n", buffer, strlen("this is a test!\n")) != 0) {
|
|
|
081b2d |
do_logging(LOG_ERR, "FAIL: connection incorrect response, no data \n");
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&client_fail_count);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
goto done;
|
|
|
081b2d |
}
|
|
|
081b2d |
}
|
|
|
081b2d |
|
|
|
081b2d |
struct timespec ts;
|
|
|
081b2d |
clock_gettime(CLOCK_MONOTONIC, &ts);
|
|
|
081b2d |
-
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&client_success_count, 1, __ATOMIC_SEQ_CST);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&client_success_count);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
do_logging(LOG_ERR, "PASS: %ld.%ld %d\n", ts.tv_sec, ts.tv_nsec, client_success_count);
|
|
|
081b2d |
|
|
|
081b2d |
done:
|
|
|
081b2d |
@@ -354,7 +385,11 @@ client_initiate_connection_cb(struct ns_job_t *job)
|
|
|
081b2d |
char *err = NULL;
|
|
|
081b2d |
PR_GetErrorText(err);
|
|
|
081b2d |
do_logging(LOG_ERR, "FAIL: Socket failed, %d -> %s\n", PR_GetError(), err);
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&client_fail_count);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
goto done;
|
|
|
081b2d |
}
|
|
|
081b2d |
|
|
|
081b2d |
@@ -368,8 +403,11 @@ client_initiate_connection_cb(struct ns_job_t *job)
|
|
|
081b2d |
PR_GetErrorText(err);
|
|
|
081b2d |
do_logging(LOG_ERR, "FAIL: cannot connect, timeout %d -> %s \n", PR_GetError(), err);
|
|
|
081b2d |
/* Atomic increment fail */
|
|
|
081b2d |
+#ifdef ATOMIC_64BIT_OPERATIONS
|
|
|
081b2d |
__atomic_add_fetch(&client_timeout_count, 1, __ATOMIC_SEQ_CST);
|
|
|
081b2d |
-
|
|
|
081b2d |
+#else
|
|
|
081b2d |
+ PR_AtomicIncrement(&client_timeout_count);
|
|
|
081b2d |
+#endif
|
|
|
081b2d |
if (sock != NULL) {
|
|
|
081b2d |
PR_Close(sock);
|
|
|
081b2d |
}
|
|
|
081b2d |
--
|
|
|
081b2d |
2.9.5
|
|
|
081b2d |
|