andykimpe / rpms / 389-ds-base

Forked from rpms/389-ds-base 5 months ago
Clone

Blame SOURCES/0000-Ticket-49305-Need-to-wrap-atomic-calls.patch

b045b9
From f19dec383e24e2aaa40a6bdce2ca0e657ffc6e10 Mon Sep 17 00:00:00 2001
b045b9
From: Mark Reynolds <mreynolds@redhat.com>
b045b9
Date: Wed, 27 Sep 2017 09:26:14 -0400
b045b9
Subject: [PATCH] Ticket 49305 - Need to wrap atomic calls
b045b9
b045b9
Bug Description:  Some RHEL 7.5 platforms (ppc 32bit) still do not support
b045b9
                  all the gcc builtin atomics.  This breaks the downstream
b045b9
                  builds.
b045b9
b045b9
Fix Description:  Use wrapper functions for the atomic's using #define's
b045b9
                  to detect if builtin atomics are supported, otherwise
b045b9
                  use the generic NSPR atomic functions.
b045b9
b045b9
https://pagure.io/389-ds-base/issue/49305
b045b9
b045b9
Reviewed by: tbordaz (Thanks!)
b045b9
b045b9
(cherry picked from commit af723fd632d355642babeed1dbdb5a308c21fa79)
b045b9
---
b045b9
 ldap/servers/slapd/attrsyntax.c                  |   8 +-
b045b9
 ldap/servers/slapd/back-ldbm/dblayer.c           |  66 +++++-----
b045b9
 ldap/servers/slapd/entry.c                       |  11 +-
b045b9
 ldap/servers/slapd/libglobs.c                    | 161 ++++++++++++-----------
b045b9
 ldap/servers/slapd/log.c                         |   9 +-
b045b9
 ldap/servers/slapd/mapping_tree.c                |  28 ++--
b045b9
 ldap/servers/slapd/object.c                      |   8 +-
b045b9
 ldap/servers/slapd/psearch.c                     |   7 +-
b045b9
 ldap/servers/slapd/slapi-plugin.h                |  52 ++++++++
b045b9
 ldap/servers/slapd/slapi_counter.c               | 100 ++++++++++++++
b045b9
 ldap/servers/slapd/thread_data.c                 |   2 +-
b045b9
 src/nunc-stans/ns/ns_thrpool.c                   |  17 ++-
b045b9
 src/nunc-stans/test/test_nuncstans_stress_core.c |  42 +++++-
b045b9
 13 files changed, 361 insertions(+), 150 deletions(-)
b045b9
b045b9
diff --git a/ldap/servers/slapd/attrsyntax.c b/ldap/servers/slapd/attrsyntax.c
b045b9
index 03f05d9..a0a60c4 100644
b045b9
--- a/ldap/servers/slapd/attrsyntax.c
b045b9
+++ b/ldap/servers/slapd/attrsyntax.c
b045b9
@@ -274,7 +274,7 @@ attr_syntax_get_by_oid_locking_optional(const char *oid, PRBool use_lock, PRUint
b045b9
         }
b045b9
         asi = (struct asyntaxinfo *)PL_HashTableLookup_const(ht, oid);
b045b9
         if (asi) {
b045b9
-            __atomic_add_fetch_8(&(asi->asi_refcnt), 1, __ATOMIC_RELEASE);
b045b9
+            slapi_atomic_incr(&(asi->asi_refcnt), __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
         }
b045b9
         if (use_lock) {
b045b9
             AS_UNLOCK_READ(oid2asi_lock);
b045b9
@@ -371,7 +371,7 @@ attr_syntax_get_by_name_locking_optional(const char *name, PRBool use_lock, PRUi
b045b9
         }
b045b9
         asi = (struct asyntaxinfo *)PL_HashTableLookup_const(ht, name);
b045b9
         if (NULL != asi) {
b045b9
-            __atomic_add_fetch_8(&(asi->asi_refcnt), 1, __ATOMIC_RELEASE);
b045b9
+            slapi_atomic_incr(&(asi->asi_refcnt), __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
         }
b045b9
         if (use_lock) {
b045b9
             AS_UNLOCK_READ(name2asi_lock);
b045b9
@@ -406,7 +406,7 @@ attr_syntax_return_locking_optional(struct asyntaxinfo *asi, PRBool use_lock)
b045b9
     }
b045b9
     if (NULL != asi) {
b045b9
         PRBool delete_it = PR_FALSE;
b045b9
-        if (0 == __atomic_sub_fetch_8(&(asi->asi_refcnt), 1, __ATOMIC_ACQ_REL)) {
b045b9
+        if (0 == slapi_atomic_decr(&(asi->asi_refcnt), __ATOMIC_ACQ_REL, ATOMIC_LONG)) {
b045b9
             delete_it = asi->asi_marked_for_delete;
b045b9
         }
b045b9
 
b045b9
@@ -540,7 +540,7 @@ attr_syntax_delete_no_lock(struct asyntaxinfo *asi,
b045b9
                 PL_HashTableRemove(ht, asi->asi_aliases[i]);
b045b9
             }
b045b9
         }
b045b9
-        if (__atomic_load_8(&(asi->asi_refcnt), __ATOMIC_ACQUIRE) > 0) {
b045b9
+        if (slapi_atomic_load(&(asi->asi_refcnt), __ATOMIC_ACQUIRE, ATOMIC_LONG) > 0) {
b045b9
             asi->asi_marked_for_delete = PR_TRUE;
b045b9
         } else {
b045b9
             /* This is ok, but the correct thing is to call delete first,
b045b9
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
b045b9
index d43258d..c4c4959 100644
b045b9
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
b045b9
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
b045b9
@@ -2860,16 +2860,16 @@ int
b045b9
 dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flags)
b045b9
 {
b045b9
     /*
b045b9
-   * We either already have a DB* handle in the attrinfo structure.
b045b9
-   * in which case we simply return it to the caller, OR:
b045b9
-   * we need to make one. We do this as follows:
b045b9
-   * 1a) acquire the mutex that protects the handle list.
b045b9
-   * 1b) check that the DB* is still null.
b045b9
-   * 2) get the filename, and call libdb to open it
b045b9
-   * 3) if successful, store the result in the attrinfo stucture
b045b9
-   * 4) store the DB* in our own list so we can close it later.
b045b9
-   * 5) release the mutex.
b045b9
-   */
b045b9
+     * We either already have a DB* handle in the attrinfo structure.
b045b9
+     * in which case we simply return it to the caller, OR:
b045b9
+     * we need to make one. We do this as follows:
b045b9
+     * 1a) acquire the mutex that protects the handle list.
b045b9
+     * 1b) check that the DB* is still null.
b045b9
+     * 2) get the filename, and call libdb to open it
b045b9
+     * 3) if successful, store the result in the attrinfo stucture
b045b9
+     * 4) store the DB* in our own list so we can close it later.
b045b9
+     * 5) release the mutex.
b045b9
+     */
b045b9
     ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
b045b9
     int return_value = -1;
b045b9
     DB *pDB = NULL;
b045b9
@@ -2878,9 +2878,9 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag
b045b9
     *ppDB = NULL;
b045b9
 
b045b9
     /* it's like a semaphore -- when count > 0, any file handle that's in
b045b9
-   * the attrinfo will remain valid from here on.
b045b9
-   */
b045b9
-    __atomic_add_fetch_8(&(a->ai_dblayer_count), 1, __ATOMIC_RELEASE);
b045b9
+     * the attrinfo will remain valid from here on.
b045b9
+     */
b045b9
+    slapi_atomic_incr(&(a->ai_dblayer_count), __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
 
b045b9
     if (a->ai_dblayer && ((dblayer_handle *)(a->ai_dblayer))->dblayer_dbp) {
b045b9
         /* This means that the pointer is valid, so we should return it. */
b045b9
@@ -2888,9 +2888,7 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag
b045b9
         return 0;
b045b9
     }
b045b9
 
b045b9
-    /* attrinfo handle is NULL, at least for now -- grab the mutex and try
b045b9
-   * again.
b045b9
-   */
b045b9
+    /* attrinfo handle is NULL, at least for now -- grab the mutex and try again. */
b045b9
     PR_Lock(inst->inst_handle_list_mutex);
b045b9
     if (a->ai_dblayer && ((dblayer_handle *)(a->ai_dblayer))->dblayer_dbp) {
b045b9
         /* another thread set the handle while we were waiting on the lock */
b045b9
@@ -2900,8 +2898,8 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag
b045b9
     }
b045b9
 
b045b9
     /* attrinfo handle is still blank, and we have the mutex: open the
b045b9
-   * index file and stuff it in the attrinfo.
b045b9
-   */
b045b9
+     * index file and stuff it in the attrinfo.
b045b9
+     */
b045b9
     return_value = dblayer_open_file(be, attribute_name, open_flags,
b045b9
                                      a, &pDB);
b045b9
     if (0 == return_value) {
b045b9
@@ -2911,40 +2909,36 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag
b045b9
 
b045b9
         PR_ASSERT(NULL != pDB);
b045b9
         /* Store the returned DB* in our own private list of
b045b9
-       * open files */
b045b9
+         * open files */
b045b9
         if (NULL == prev_handle) {
b045b9
             /* List was empty */
b045b9
             inst->inst_handle_tail = handle;
b045b9
             inst->inst_handle_head = handle;
b045b9
         } else {
b045b9
-            /* Chain the handle onto the last structure in the
b045b9
-           * list */
b045b9
+            /* Chain the handle onto the last structure in the list */
b045b9
             inst->inst_handle_tail = handle;
b045b9
             prev_handle->dblayer_handle_next = handle;
b045b9
         }
b045b9
-        /* Stash a pointer to our wrapper structure in the
b045b9
-       * attrinfo structure */
b045b9
+        /* Stash a pointer to our wrapper structure in the attrinfo structure */
b045b9
         handle->dblayer_dbp = pDB;
b045b9
         /* And, most importantly, return something to the caller!*/
b045b9
         *ppDB = pDB;
b045b9
-        /* and save the hande in the attrinfo structure for
b045b9
-       * next time */
b045b9
+        /* and save the hande in the attrinfo structure for next time */
b045b9
         a->ai_dblayer = handle;
b045b9
         /* don't need to update count -- we incr'd it already */
b045b9
         handle->dblayer_handle_ai_backpointer = &(a->ai_dblayer);
b045b9
     } else {
b045b9
         /* Did not open it OK ! */
b045b9
         /* Do nothing, because return value and fact that we didn't
b045b9
-     * store a DB* in the attrinfo is enough
b045b9
-     */
b045b9
+         * store a DB* in the attrinfo is enough */
b045b9
     }
b045b9
     PR_Unlock(inst->inst_handle_list_mutex);
b045b9
 
b045b9
     if (return_value != 0) {
b045b9
         /* some sort of error -- we didn't open a handle at all.
b045b9
-     * decrement the refcount back to where it was.
b045b9
-     */
b045b9
-        __atomic_sub_fetch_8(&(a->ai_dblayer_count), 1, __ATOMIC_RELEASE);
b045b9
+         * decrement the refcount back to where it was.
b045b9
+         */
b045b9
+        slapi_atomic_decr(&(a->ai_dblayer_count), __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
     }
b045b9
 
b045b9
     return return_value;
b045b9
@@ -2956,7 +2950,7 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag
b045b9
 int
b045b9
 dblayer_release_index_file(backend *be __attribute__((unused)), struct attrinfo *a, DB *pDB __attribute__((unused)))
b045b9
 {
b045b9
-    __atomic_sub_fetch_8(&(a->ai_dblayer_count), 1, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_decr(&(a->ai_dblayer_count), __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
     return 0;
b045b9
 }
b045b9
 
b045b9
@@ -3063,13 +3057,13 @@ dblayer_erase_index_file_ex(backend *be, struct attrinfo *a, PRBool use_lock, in
b045b9
 
b045b9
             dblayer_release_index_file(be, a, db);
b045b9
 
b045b9
-            while (__atomic_load_8(&(a->ai_dblayer_count), __ATOMIC_ACQUIRE) > 0) {
b045b9
+            while (slapi_atomic_load(&(a->ai_dblayer_count), __ATOMIC_ACQUIRE, ATOMIC_LONG) > 0) {
b045b9
                 /* someone is using this index file */
b045b9
                 /* ASSUMPTION: you have already set the INDEX_OFFLINE flag, because
b045b9
-         * you intend to mess with this index.  therefore no new requests
b045b9
-         * for this indexfile should happen, so the dblayer_count should
b045b9
-         * NEVER increase.
b045b9
-         */
b045b9
+                 * you intend to mess with this index.  therefore no new requests
b045b9
+                 * for this indexfile should happen, so the dblayer_count should
b045b9
+                 * NEVER increase.
b045b9
+                 */
b045b9
                 PR_ASSERT(a->ai_indexmask & INDEX_OFFLINE);
b045b9
                 PR_Unlock(inst->inst_handle_list_mutex);
b045b9
                 DS_Sleep(DBLAYER_CACHE_DELAY);
b045b9
diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
b045b9
index 62d10c2..289a149 100644
b045b9
--- a/ldap/servers/slapd/entry.c
b045b9
+++ b/ldap/servers/slapd/entry.c
b045b9
@@ -2244,18 +2244,19 @@ slapi_entry_attr_find(const Slapi_Entry *e, const char *type, Slapi_Attr **a)
b045b9
 
b045b9
 /* the following functions control virtual attribute cache invalidation */
b045b9
 
b045b9
-static uint32_t g_virtual_watermark = 0; /* good enough to init */
b045b9
+static int32_t g_virtual_watermark = 0; /* good enough to init */
b045b9
 
b045b9
 int
b045b9
 slapi_entry_vattrcache_watermark_isvalid(const Slapi_Entry *e)
b045b9
 {
b045b9
-    return e->e_virtual_watermark == __atomic_load_4(&g_virtual_watermark, __ATOMIC_ACQUIRE);
b045b9
+    return e->e_virtual_watermark == slapi_atomic_load(&g_virtual_watermark, __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
+
b045b9
 }
b045b9
 
b045b9
 void
b045b9
 slapi_entry_vattrcache_watermark_set(Slapi_Entry *e)
b045b9
 {
b045b9
-    e->e_virtual_watermark = __atomic_load_4(&g_virtual_watermark, __ATOMIC_ACQUIRE);
b045b9
+    e->e_virtual_watermark = slapi_atomic_load(&g_virtual_watermark, __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 void
b045b9
@@ -2268,8 +2269,8 @@ void
b045b9
 slapi_entrycache_vattrcache_watermark_invalidate()
b045b9
 {
b045b9
     /* Make sure the value is never 0 */
b045b9
-    if (__atomic_add_fetch_4(&g_virtual_watermark, 1, __ATOMIC_RELEASE) == 0) {
b045b9
-        __atomic_add_fetch_4(&g_virtual_watermark, 1, __ATOMIC_RELEASE);
b045b9
+    if (slapi_atomic_incr(&g_virtual_watermark, __ATOMIC_RELEASE, ATOMIC_INT) == 0) {
b045b9
+        slapi_atomic_incr(&g_virtual_watermark, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
     }
b045b9
 }
b045b9
 
b045b9
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
b045b9
index 0eeb16a..4c54cf7 100644
b045b9
--- a/ldap/servers/slapd/libglobs.c
b045b9
+++ b/ldap/servers/slapd/libglobs.c
b045b9
@@ -1335,19 +1335,19 @@ static uint64_t active_threads = 0;
b045b9
 void
b045b9
 g_incr_active_threadcnt(void)
b045b9
 {
b045b9
-    __atomic_add_fetch_8(&active_threads, 1, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_incr(&active_threads, __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
 }
b045b9
 
b045b9
 void
b045b9
 g_decr_active_threadcnt(void)
b045b9
 {
b045b9
-    __atomic_sub_fetch_8(&active_threads, 1, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_decr(&active_threads, __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
 }
b045b9
 
b045b9
 uint64_t
b045b9
 g_get_active_threadcnt(void)
b045b9
 {
b045b9
-    return __atomic_load_8(&active_threads, __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&active_threads, __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
 }
b045b9
 
b045b9
 /*
b045b9
@@ -1936,7 +1936,7 @@ config_set_ndn_cache_max_size(const char *attrname, char *value, char *errorbuf,
b045b9
         size = NDN_DEFAULT_SIZE;
b045b9
     }
b045b9
     if (apply) {
b045b9
-        __atomic_store_8(&(slapdFrontendConfig->ndn_cache_max_size), size, __ATOMIC_RELEASE);
b045b9
+        slapi_atomic_store(&(slapdFrontendConfig->ndn_cache_max_size), &size, __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
     }
b045b9
 
b045b9
     return retVal;
b045b9
@@ -3476,7 +3476,8 @@ int32_t
b045b9
 config_get_dynamic_plugins(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->dynamic_plugins), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->dynamic_plugins), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
+
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
@@ -3498,7 +3499,7 @@ int32_t
b045b9
 config_get_cn_uses_dn_syntax_in_dns()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->cn_uses_dn_syntax_in_dns), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->cn_uses_dn_syntax_in_dns), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
@@ -3543,7 +3544,7 @@ config_set_onoff(const char *attrname, char *value, int32_t *configvalue, char *
b045b9
         newval = LDAP_OFF;
b045b9
     }
b045b9
 
b045b9
-    __atomic_store_4(configvalue, newval, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(configvalue, &newval, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
 
b045b9
     return retVal;
b045b9
 }
b045b9
@@ -3915,7 +3916,7 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
b045b9
         retVal = LDAP_OPERATIONS_ERROR;
b045b9
     }
b045b9
     if (apply) {
b045b9
-        __atomic_store_4(&(slapdFrontendConfig->threadnumber), threadnum, __ATOMIC_RELAXED);
b045b9
+        slapi_atomic_store(&(slapdFrontendConfig->threadnumber), &threadnum, __ATOMIC_RELAXED, ATOMIC_INT);
b045b9
     }
b045b9
     return retVal;
b045b9
 }
b045b9
@@ -3944,7 +3945,7 @@ config_set_maxthreadsperconn(const char *attrname, char *value, char *errorbuf,
b045b9
     }
b045b9
 
b045b9
     if (apply) {
b045b9
-        __atomic_store_4(&(slapdFrontendConfig->maxthreadsperconn), maxthreadnum, __ATOMIC_RELEASE);
b045b9
+        slapi_atomic_store(&(slapdFrontendConfig->maxthreadsperconn), &maxthreadnum, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
     }
b045b9
     return retVal;
b045b9
 }
b045b9
@@ -4102,7 +4103,7 @@ config_set_ioblocktimeout(const char *attrname, char *value, char *errorbuf, int
b045b9
     }
b045b9
 
b045b9
     if (apply) {
b045b9
-        __atomic_store_4(&(slapdFrontendConfig->ioblocktimeout), nValue, __ATOMIC_RELEASE);
b045b9
+        slapi_atomic_store(&(slapdFrontendConfig->ioblocktimeout), &nValue, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
     }
b045b9
     return retVal;
b045b9
 }
b045b9
@@ -4606,21 +4607,22 @@ int32_t
b045b9
 config_get_sasl_mapping_fallback()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->sasl_mapping_fallback), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->sasl_mapping_fallback), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
+
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_disk_monitoring()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->disk_monitoring), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->disk_monitoring), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_disk_logging_critical()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->disk_logging_critical), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->disk_logging_critical), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int
b045b9
@@ -4667,14 +4669,14 @@ int32_t
b045b9
 config_get_ldapi_switch()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->ldapi_switch), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->ldapi_switch), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_ldapi_bind_switch()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->ldapi_bind_switch), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->ldapi_bind_switch), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 char *
b045b9
@@ -4693,7 +4695,7 @@ int
b045b9
 config_get_ldapi_map_entries()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->ldapi_map_entries), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->ldapi_map_entries), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 char *
b045b9
@@ -4763,7 +4765,8 @@ int32_t
b045b9
 config_get_slapi_counters()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->slapi_counters), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->slapi_counters), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
+
b045b9
 }
b045b9
 
b045b9
 char *
b045b9
@@ -4945,7 +4948,7 @@ int32_t
b045b9
 config_get_pw_change(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_change), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_change), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 
b045b9
@@ -4953,7 +4956,7 @@ int32_t
b045b9
 config_get_pw_history(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_history), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_history), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 
b045b9
@@ -4961,21 +4964,21 @@ int32_t
b045b9
 config_get_pw_must_change(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_must_change), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_must_change), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_allow_hashed_pw(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->allow_hashed_pw), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->allow_hashed_pw), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_pw_syntax(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_syntax), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_syntax), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 
b045b9
@@ -5164,21 +5167,21 @@ int32_t
b045b9
 config_get_pw_is_global_policy(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->pw_is_global_policy), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->pw_is_global_policy), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_pw_is_legacy_policy(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_is_legacy), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_is_legacy), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_pw_exp(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_exp), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_exp), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 
b045b9
@@ -5186,14 +5189,14 @@ int32_t
b045b9
 config_get_pw_unlock(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_unlock), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_unlock), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_pw_lockout()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->pw_policy.pw_lockout), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_lockout), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int
b045b9
@@ -5213,112 +5216,112 @@ int32_t
b045b9
 config_get_lastmod()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->lastmod), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->lastmod), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_enquote_sup_oc()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->enquote_sup_oc), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->enquote_sup_oc), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_nagle(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->nagle), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->nagle), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_accesscontrol(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->accesscontrol), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->accesscontrol), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_return_exact_case(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->return_exact_case), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->return_exact_case), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_result_tweak(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->result_tweak), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->result_tweak), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_moddn_aci(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->moddn_aci), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->moddn_aci), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_security(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->security), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->security), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 slapi_config_get_readonly(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->readonly), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->readonly), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_schemacheck(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->schemacheck), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->schemacheck), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_schemamod(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->schemamod), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->schemamod), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_syntaxcheck(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->syntaxcheck), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->syntaxcheck), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_syntaxlogging(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->syntaxlogging), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->syntaxlogging), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_dn_validate_strict(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->dn_validate_strict), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->dn_validate_strict), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_ds4_compatible_schema(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->ds4_compatible_schema), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->ds4_compatible_schema), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_schema_ignore_trailing_spaces(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->schema_ignore_trailing_spaces), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->schema_ignore_trailing_spaces), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 char *
b045b9
@@ -5402,7 +5405,7 @@ config_get_threadnumber(void)
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
     int32_t retVal;
b045b9
 
b045b9
-    retVal = __atomic_load_4(&(slapdFrontendConfig->threadnumber), __ATOMIC_RELAXED);
b045b9
+    retVal = slapi_atomic_load(&(slapdFrontendConfig->threadnumber), __ATOMIC_RELAXED, ATOMIC_INT);
b045b9
 
b045b9
     if (retVal <= 0) {
b045b9
         retVal = util_get_hardware_threads();
b045b9
@@ -5420,7 +5423,7 @@ int32_t
b045b9
 config_get_maxthreadsperconn()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->maxthreadsperconn), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->maxthreadsperconn), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int
b045b9
@@ -5452,7 +5455,7 @@ int32_t
b045b9
 config_get_ioblocktimeout()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->ioblocktimeout), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->ioblocktimeout), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int
b045b9
@@ -5769,21 +5772,21 @@ int32_t
b045b9
 config_get_unauth_binds_switch(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->allow_unauth_binds), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->allow_unauth_binds), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_require_secure_binds(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->require_secure_binds), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->require_secure_binds), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_anon_access_switch(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->allow_anon_access), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->allow_anon_access), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int
b045b9
@@ -6025,7 +6028,8 @@ int32_t
b045b9
 config_get_minssf_exclude_rootdse()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->minssf_exclude_rootdse), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->minssf_exclude_rootdse), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
+
b045b9
 }
b045b9
 
b045b9
 int
b045b9
@@ -6034,18 +6038,17 @@ config_set_max_filter_nest_level(const char *attrname, char *value, char *errorb
b045b9
     int retVal = LDAP_SUCCESS;
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
     char *endp;
b045b9
-    long level;
b045b9
+    int32_t level;
b045b9
 
b045b9
     if (config_value_is_null(attrname, value, errorbuf, 0)) {
b045b9
         return LDAP_OPERATIONS_ERROR;
b045b9
     }
b045b9
 
b045b9
     errno = 0;
b045b9
-    level = strtol(value, &endp, 10);
b045b9
+    level = (int32_t)strtol(value, &endp, 10);
b045b9
     if (*endp != '\0' || errno == ERANGE) {
b045b9
-        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "(%s) value (%s) "
b045b9
-                                                                   "is invalid\n",
b045b9
-                              attrname, value);
b045b9
+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
b045b9
+                              "(%s) value (%s) is invalid\n", attrname, value);
b045b9
         retVal = LDAP_OPERATIONS_ERROR;
b045b9
         return retVal;
b045b9
     }
b045b9
@@ -6054,7 +6057,7 @@ config_set_max_filter_nest_level(const char *attrname, char *value, char *errorb
b045b9
         return retVal;
b045b9
     }
b045b9
 
b045b9
-    __atomic_store_4(&(slapdFrontendConfig->max_filter_nest_level), level, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(&(slapdFrontendConfig->max_filter_nest_level), &level, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
     return retVal;
b045b9
 }
b045b9
 
b045b9
@@ -6062,29 +6065,28 @@ int32_t
b045b9
 config_get_max_filter_nest_level()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->max_filter_nest_level), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->max_filter_nest_level), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 uint64_t
b045b9
 config_get_ndn_cache_size()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-
b045b9
-    return __atomic_load_8(&(slapdFrontendConfig->ndn_cache_max_size), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->ndn_cache_max_size), __ATOMIC_ACQUIRE, ATOMIC_LONG);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_ndn_cache_enabled()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->ndn_cache_enabled), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->ndn_cache_enabled), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_return_orig_type_switch()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->return_orig_type), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->return_orig_type), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 char *
b045b9
@@ -6786,7 +6788,7 @@ int32_t
b045b9
 config_get_force_sasl_external(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->force_sasl_external), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->force_sasl_external), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
@@ -6808,7 +6810,7 @@ int32_t
b045b9
 config_get_entryusn_global(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->entryusn_global), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->entryusn_global), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
@@ -7046,21 +7048,21 @@ int32_t
b045b9
 config_get_enable_turbo_mode(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->enable_turbo_mode), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->enable_turbo_mode), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_connection_nocanon(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->connection_nocanon), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->connection_nocanon), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_plugin_logging(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->plugin_logging), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->plugin_logging), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
@@ -7073,21 +7075,21 @@ int32_t
b045b9
 config_get_unhashed_pw_switch()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->unhashed_pw_switch), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->unhashed_pw_switch), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_ignore_time_skew(void)
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->ignore_time_skew), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->ignore_time_skew), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
 config_get_global_backend_lock()
b045b9
 {
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
-    return __atomic_load_4(&(slapdFrontendConfig->global_backend_lock), __ATOMIC_ACQUIRE);
b045b9
+    return slapi_atomic_load(&(slapdFrontendConfig->global_backend_lock), __ATOMIC_ACQUIRE, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 int32_t
b045b9
@@ -7163,8 +7165,9 @@ config_get_connection_buffer(void)
b045b9
 int
b045b9
 config_set_connection_buffer(const char *attrname, char *value, char *errorbuf, int apply)
b045b9
 {
b045b9
-    int retVal = LDAP_SUCCESS;
b045b9
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
b045b9
+    int retVal = LDAP_SUCCESS;
b045b9
+    int32_t val;
b045b9
 
b045b9
     if (config_value_is_null(attrname, value, errorbuf, 0)) {
b045b9
         return LDAP_OPERATIONS_ERROR;
b045b9
@@ -7181,7 +7184,9 @@ config_set_connection_buffer(const char *attrname, char *value, char *errorbuf,
b045b9
         return retVal;
b045b9
     }
b045b9
 
b045b9
-    __atomic_store_4(&(slapdFrontendConfig->connection_buffer), atoi(value), __ATOMIC_RELEASE);
b045b9
+    val = atoi(value);
b045b9
+    slapi_atomic_store(&(slapdFrontendConfig->connection_buffer), &val, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
+
b045b9
     return retVal;
b045b9
 }
b045b9
 
b045b9
@@ -7204,7 +7209,7 @@ config_set_listen_backlog_size(const char *attrname, char *value, char *errorbuf
b045b9
     }
b045b9
 
b045b9
     if (apply) {
b045b9
-        __atomic_store_4(&(slapdFrontendConfig->listen_backlog_size), size, __ATOMIC_RELEASE);
b045b9
+        slapi_atomic_store(&(slapdFrontendConfig->listen_backlog_size), &size, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
     }
b045b9
     return LDAP_SUCCESS;
b045b9
 }
b045b9
@@ -7617,7 +7622,7 @@ config_set_accesslog_enabled(int value)
b045b9
     char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
b045b9
     errorbuf[0] = '\0';
b045b9
 
b045b9
-    __atomic_store_4(&(slapdFrontendConfig->accesslog_logging_enabled), value, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(&(slapdFrontendConfig->accesslog_logging_enabled), &value, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
     if (value) {
b045b9
         log_set_logging(CONFIG_ACCESSLOG_LOGGING_ENABLED_ATTRIBUTE, "on", SLAPD_ACCESS_LOG, errorbuf, CONFIG_APPLY);
b045b9
     } else {
b045b9
@@ -7635,7 +7640,7 @@ config_set_auditlog_enabled(int value)
b045b9
     char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
b045b9
     errorbuf[0] = '\0';
b045b9
 
b045b9
-    __atomic_store_4(&(slapdFrontendConfig->auditlog_logging_enabled), value, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(&(slapdFrontendConfig->auditlog_logging_enabled), &value, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
     if (value) {
b045b9
         log_set_logging(CONFIG_AUDITLOG_LOGGING_ENABLED_ATTRIBUTE, "on", SLAPD_AUDIT_LOG, errorbuf, CONFIG_APPLY);
b045b9
     } else {
b045b9
@@ -7653,7 +7658,7 @@ config_set_auditfaillog_enabled(int value)
b045b9
     char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
b045b9
     errorbuf[0] = '\0';
b045b9
 
b045b9
-    __atomic_store_4(&(slapdFrontendConfig->auditfaillog_logging_enabled), value, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(&(slapdFrontendConfig->auditfaillog_logging_enabled), &value, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
     if (value) {
b045b9
         log_set_logging(CONFIG_AUDITFAILLOG_LOGGING_ENABLED_ATTRIBUTE, "on", SLAPD_AUDITFAIL_LOG, errorbuf, CONFIG_APPLY);
b045b9
     } else {
b045b9
@@ -7744,7 +7749,7 @@ config_set_malloc_mxfast(const char *attrname, char *value, char *errorbuf, int
b045b9
                               value, CONFIG_MALLOC_MXFAST, max);
b045b9
         return LDAP_OPERATIONS_ERROR;
b045b9
     }
b045b9
-    __atomic_store_4(&(slapdFrontendConfig->malloc_mxfast), mxfast, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(&(slapdFrontendConfig->malloc_mxfast), &mxfast, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
 
b045b9
     if ((mxfast >= 0) && (mxfast <= max)) {
b045b9
         mallopt(M_MXFAST, mxfast);
b045b9
@@ -7784,7 +7789,7 @@ config_set_malloc_trim_threshold(const char *attrname, char *value, char *errorb
b045b9
         return LDAP_OPERATIONS_ERROR;
b045b9
     }
b045b9
 
b045b9
-    __atomic_store_4(&(slapdFrontendConfig->malloc_trim_threshold), trim_threshold, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(&(slapdFrontendConfig->malloc_trim_threshold), &trim_threshold, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
 
b045b9
     if (trim_threshold >= -1) {
b045b9
         mallopt(M_TRIM_THRESHOLD, trim_threshold);
b045b9
@@ -7831,7 +7836,7 @@ config_set_malloc_mmap_threshold(const char *attrname, char *value, char *errorb
b045b9
         return LDAP_OPERATIONS_ERROR;
b045b9
     }
b045b9
 
b045b9
-    __atomic_store_4(&(slapdFrontendConfig->malloc_mmap_threshold), mmap_threshold, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(&(slapdFrontendConfig->malloc_mmap_threshold), &mmap_threshold, __ATOMIC_RELEASE, ATOMIC_INT);
b045b9
 
b045b9
     if ((mmap_threshold >= 0) && (mmap_threshold <= max)) {
b045b9
         mallopt(M_MMAP_THRESHOLD, mmap_threshold);
b045b9
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
b045b9
index 41b5c99..4d44c87 100644
b045b9
--- a/ldap/servers/slapd/log.c
b045b9
+++ b/ldap/servers/slapd/log.c
b045b9
@@ -4942,12 +4942,13 @@ static LogBufferInfo *
b045b9
 log_create_buffer(size_t sz)
b045b9
 {
b045b9
     LogBufferInfo *lbi;
b045b9
+    uint64_t init_val = 0;
b045b9
 
b045b9
     lbi = (LogBufferInfo *)slapi_ch_malloc(sizeof(LogBufferInfo));
b045b9
     lbi->top = (char *)slapi_ch_malloc(sz);
b045b9
     lbi->current = lbi->top;
b045b9
     lbi->maxsize = sz;
b045b9
-    __atomic_store_8(&(lbi->refcount), 0, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(&(lbi->refcount), &init_val, __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
     return lbi;
b045b9
 }
b045b9
 
b045b9
@@ -5009,7 +5010,7 @@ log_append_buffer2(time_t tnl, LogBufferInfo *lbi, char *msg1, size_t size1, cha
b045b9
     insert_point = lbi->current;
b045b9
     lbi->current += size;
b045b9
     /* Increment the copy refcount */
b045b9
-    __atomic_add_fetch_8(&(lbi->refcount), 1, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_incr(&(lbi->refcount), __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
     PR_Unlock(lbi->lock);
b045b9
 
b045b9
     /* Now we can copy without holding the lock */
b045b9
@@ -5017,7 +5018,7 @@ log_append_buffer2(time_t tnl, LogBufferInfo *lbi, char *msg1, size_t size1, cha
b045b9
     memcpy(insert_point + size1, msg2, size2);
b045b9
 
b045b9
     /* Decrement the copy refcount */
b045b9
-    __atomic_sub_fetch_8(&(lbi->refcount), 1, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_decr(&(lbi->refcount), __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
 
b045b9
     /* If we are asked to sync to disk immediately, do so */
b045b9
     if (!slapdFrontendConfig->accesslogbuffering) {
b045b9
@@ -5037,7 +5038,7 @@ log_flush_buffer(LogBufferInfo *lbi, int type, int sync_now)
b045b9
     if (type == SLAPD_ACCESS_LOG) {
b045b9
 
b045b9
         /* It is only safe to flush once any other threads which are copying are finished */
b045b9
-        while (__atomic_load_8(&(lbi->refcount), __ATOMIC_ACQUIRE) > 0) {
b045b9
+        while (slapi_atomic_load(&(lbi->refcount), __ATOMIC_ACQUIRE, ATOMIC_LONG) > 0) {
b045b9
             /* It's ok to sleep for a while because we only flush every second or so */
b045b9
             DS_Sleep(PR_MillisecondsToInterval(1));
b045b9
         }
b045b9
diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c
b045b9
index 651d70e..6621ceb 100644
b045b9
--- a/ldap/servers/slapd/mapping_tree.c
b045b9
+++ b/ldap/servers/slapd/mapping_tree.c
b045b9
@@ -1647,7 +1647,7 @@ mapping_tree_init()
b045b9
 
b045b9
     /* we call this function from a single thread, so it should be ok */
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         /* shutdown has been detected */
b045b9
         return 0;
b045b9
     }
b045b9
@@ -1759,6 +1759,8 @@ mtn_free_node(mapping_tree_node **node)
b045b9
 void
b045b9
 mapping_tree_free()
b045b9
 {
b045b9
+    int init_val = 1;
b045b9
+
b045b9
     /* unregister dse callbacks */
b045b9
     slapi_config_remove_callback(SLAPI_OPERATION_MODIFY, DSE_FLAG_PREOP, MAPPING_TREE_BASE_DN, LDAP_SCOPE_BASE, "(objectclass=*)", mapping_tree_entry_modify_callback);
b045b9
     slapi_config_remove_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, MAPPING_TREE_BASE_DN, LDAP_SCOPE_BASE, "(objectclass=*)", mapping_tree_entry_add_callback);
b045b9
@@ -1771,7 +1773,7 @@ mapping_tree_free()
b045b9
     slapi_unregister_backend_state_change_all();
b045b9
     /* recursively free tree nodes */
b045b9
     mtn_free_node(&mapping_tree_root);
b045b9
-    __atomic_store_4(&mapping_tree_freed, 1, __ATOMIC_RELAXED);
b045b9
+    slapi_atomic_store(&mapping_tree_freed, &init_val, __ATOMIC_RELAXED, ATOMIC_INT);
b045b9
 }
b045b9
 
b045b9
 /* This function returns the first node to parse when a search is done
b045b9
@@ -2022,7 +2024,7 @@ slapi_dn_write_needs_referral(Slapi_DN *target_sdn, Slapi_Entry **referral)
b045b9
     mapping_tree_node *target_node = NULL;
b045b9
     int ret = 0;
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         /* shutdown detected */
b045b9
         goto done;
b045b9
     }
b045b9
@@ -2093,7 +2095,7 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re
b045b9
     int fixup = 0;
b045b9
 
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         /* shutdown detected */
b045b9
         return LDAP_OPERATIONS_ERROR;
b045b9
     }
b045b9
@@ -2198,7 +2200,7 @@ slapi_mapping_tree_select_all(Slapi_PBlock *pb, Slapi_Backend **be_list, Slapi_E
b045b9
     int flag_partial_result = 0;
b045b9
     int op_type;
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         return LDAP_OPERATIONS_ERROR;
b045b9
     }
b045b9
 
b045b9
@@ -2358,7 +2360,7 @@ slapi_mapping_tree_select_and_check(Slapi_PBlock *pb, char *newdn, Slapi_Backend
b045b9
     int ret;
b045b9
     int need_unlock = 0;
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         return LDAP_OPERATIONS_ERROR;
b045b9
     }
b045b9
 
b045b9
@@ -2524,7 +2526,7 @@ mtn_get_be(mapping_tree_node *target_node, Slapi_PBlock *pb, Slapi_Backend **be,
b045b9
     int flag_stop = 0;
b045b9
     struct slapi_componentid *cid = NULL;
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         /* shut down detected */
b045b9
         return LDAP_OPERATIONS_ERROR;
b045b9
     }
b045b9
@@ -2712,7 +2714,7 @@ best_matching_child(mapping_tree_node *parent,
b045b9
     mapping_tree_node *highest_match_node = NULL;
b045b9
     mapping_tree_node *current;
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         /* shutdown detected */
b045b9
         return NULL;
b045b9
     }
b045b9
@@ -2739,7 +2741,7 @@ mtn_get_mapping_tree_node_by_entry(mapping_tree_node *node, const Slapi_DN *dn)
b045b9
 {
b045b9
     mapping_tree_node *found_node = NULL;
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         /* shutdown detected */
b045b9
         return NULL;
b045b9
     }
b045b9
@@ -2782,7 +2784,7 @@ slapi_get_mapping_tree_node_by_dn(const Slapi_DN *dn)
b045b9
     mapping_tree_node *current_best_match = mapping_tree_root;
b045b9
     mapping_tree_node *next_best_match = mapping_tree_root;
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         /* shutdown detected */
b045b9
         return NULL;
b045b9
     }
b045b9
@@ -2816,7 +2818,7 @@ get_mapping_tree_node_by_name(mapping_tree_node *node, char *be_name)
b045b9
     int i;
b045b9
     mapping_tree_node *found_node = NULL;
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         /* shutdown detected */
b045b9
         return NULL;
b045b9
     }
b045b9
@@ -2863,7 +2865,7 @@ slapi_get_mapping_tree_node_configdn(const Slapi_DN *root)
b045b9
 {
b045b9
     char *dn = NULL;
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         /* shutdown detected */
b045b9
         return NULL;
b045b9
     }
b045b9
@@ -2890,7 +2892,7 @@ slapi_get_mapping_tree_node_configsdn(const Slapi_DN *root)
b045b9
     char *dn = NULL;
b045b9
     Slapi_DN *sdn = NULL;
b045b9
 
b045b9
-    if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) {
b045b9
+    if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) {
b045b9
         /* shutdown detected */
b045b9
         return NULL;
b045b9
     }
b045b9
diff --git a/ldap/servers/slapd/object.c b/ldap/servers/slapd/object.c
b045b9
index 84845d3..6a1a9a5 100644
b045b9
--- a/ldap/servers/slapd/object.c
b045b9
+++ b/ldap/servers/slapd/object.c
b045b9
@@ -43,10 +43,12 @@ Object *
b045b9
 object_new(void *user_data, FNFree destructor)
b045b9
 {
b045b9
     Object *o;
b045b9
+    uint64_t init_val = 1;
b045b9
+
b045b9
     o = (object *)slapi_ch_malloc(sizeof(object));
b045b9
     o->destructor = destructor;
b045b9
     o->data = user_data;
b045b9
-    __atomic_store_8(&(o->refcnt), 1, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(&(o->refcnt), &init_val, __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
     return o;
b045b9
 }
b045b9
 
b045b9
@@ -60,7 +62,7 @@ void
b045b9
 object_acquire(Object *o)
b045b9
 {
b045b9
     PR_ASSERT(NULL != o);
b045b9
-    __atomic_add_fetch_8(&(o->refcnt), 1, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_incr(&(o->refcnt), __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
 }
b045b9
 
b045b9
 
b045b9
@@ -75,7 +77,7 @@ object_release(Object *o)
b045b9
     PRInt32 refcnt_after_release;
b045b9
 
b045b9
     PR_ASSERT(NULL != o);
b045b9
-    refcnt_after_release = __atomic_sub_fetch_8(&(o->refcnt), 1, __ATOMIC_ACQ_REL);
b045b9
+    refcnt_after_release = slapi_atomic_decr(&(o->refcnt), __ATOMIC_ACQ_REL, ATOMIC_LONG);
b045b9
     if (refcnt_after_release == 0) {
b045b9
         /* Object can be destroyed */
b045b9
         if (o->destructor)
b045b9
diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c
b045b9
index 0489122..70c530b 100644
b045b9
--- a/ldap/servers/slapd/psearch.c
b045b9
+++ b/ldap/servers/slapd/psearch.c
b045b9
@@ -134,7 +134,7 @@ ps_stop_psearch_system()
b045b9
     if (PS_IS_INITIALIZED()) {
b045b9
         PSL_LOCK_WRITE();
b045b9
         for (ps = psearch_list->pl_head; NULL != ps; ps = ps->ps_next) {
b045b9
-            __atomic_add_fetch_8(&(ps->ps_complete), 1, __ATOMIC_RELEASE);
b045b9
+            slapi_atomic_incr(&(ps->ps_complete), __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
         }
b045b9
         PSL_UNLOCK_WRITE();
b045b9
         ps_wakeup_all();
b045b9
@@ -285,7 +285,7 @@ ps_send_results(void *arg)
b045b9
 
b045b9
     PR_Lock(psearch_list->pl_cvarlock);
b045b9
 
b045b9
-    while ((conn_acq_flag == 0) && __atomic_load_8(&(ps->ps_complete), __ATOMIC_ACQUIRE) == 0) {
b045b9
+    while ((conn_acq_flag == 0) && slapi_atomic_load(&(ps->ps_complete), __ATOMIC_ACQUIRE, ATOMIC_LONG) == 0) {
b045b9
         /* Check for an abandoned operation */
b045b9
         if (pb_op == NULL || slapi_op_abandoned(ps->ps_pblock)) {
b045b9
             slapi_log_err(SLAPI_LOG_CONNS, "ps_send_results",
b045b9
@@ -427,6 +427,7 @@ static PSearch *
b045b9
 psearch_alloc(void)
b045b9
 {
b045b9
     PSearch *ps;
b045b9
+    uint64_t init_val = 0;
b045b9
 
b045b9
     ps = (PSearch *)slapi_ch_calloc(1, sizeof(PSearch));
b045b9
 
b045b9
@@ -437,7 +438,7 @@ psearch_alloc(void)
b045b9
         slapi_ch_free((void **)&ps);
b045b9
         return (NULL);
b045b9
     }
b045b9
-    __atomic_store_8(&(ps->ps_complete), 0, __ATOMIC_RELEASE);
b045b9
+    slapi_atomic_store(&(ps->ps_complete), &init_val, __ATOMIC_RELEASE, ATOMIC_LONG);
b045b9
     ps->ps_eq_head = ps->ps_eq_tail = (PSEQNode *)NULL;
b045b9
     ps->ps_lasttime = (time_t)0L;
b045b9
     ps->ps_next = NULL;
b045b9
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
b045b9
index 3397c63..c434add 100644
b045b9
--- a/ldap/servers/slapd/slapi-plugin.h
b045b9
+++ b/ldap/servers/slapd/slapi-plugin.h
b045b9
@@ -8202,6 +8202,58 @@ void slapi_operation_time_initiated(Slapi_Operation *o, struct timespec *initiat
b045b9
  */
b045b9
 #endif
b045b9
 
b045b9
+/* See: https://gcc.gnu.org/ml/gcc/2016-11/txt6ZlA_JS27i.txt */
b045b9
+#define ATOMIC_GENERIC  0
b045b9
+#define ATOMIC_INT      4
b045b9
+#define ATOMIC_LONG     8
b045b9
+#define ATOMIC_INT128  16  /* Future */
b045b9
+
b045b9
+/**
b045b9
+ * Store an integral value atomicly
b045b9
+ *
b045b9
+ * \param ptr - integral pointer
b045b9
+ * \param val - pointer to integral value (use integral type int32_t with ATOMIC_INT, or uint64_t
b045b9
+ * with ATOMIC_LONG & ATOMIC_GENERIC)
b045b9
+ * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE,
b045b9
+ * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST
b045b9
+ * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG
b045b9
+ */
b045b9
+void slapi_atomic_store(void *ptr, void *val, int memorder, int type);
b045b9
+
b045b9
+/**
b045b9
+ * Get an integral value atomicly
b045b9
+ *
b045b9
+ * \param ptr - integral pointer
b045b9
+ * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE,
b045b9
+ * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST
b045b9
+ * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG
b045b9
+ * \return -
b045b9
+ */
b045b9
+uint64_t slapi_atomic_load(void *ptr, int memorder, int type);
b045b9
+
b045b9
+/**
b045b9
+ * Increment integral atomicly
b045b9
+ *
b045b9
+ * \param ptr - pointer to integral to increment
b045b9
+ * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE,
b045b9
+ * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST
b045b9
+ * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG
b045b9
+ * \return - new value of ptr
b045b9
+ */
b045b9
+uint64_t slapi_atomic_incr(void *ptr, int memorder, int type);
b045b9
+
b045b9
+/**
b045b9
+ * Decrement integral atomicly
b045b9
+ *
b045b9
+ * \param ptr - pointer to integral to decrement
b045b9
+ * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE,
b045b9
+ * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST
b045b9
+ * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG
b045b9
+ * \return - new value of ptr
b045b9
+ */
b045b9
+uint64_t slapi_atomic_decr(void *ptr, int memorder, int type);
b045b9
+
b045b9
+
b045b9
 #ifdef __cplusplus
b045b9
 }
b045b9
 #endif
b045b9
diff --git a/ldap/servers/slapd/slapi_counter.c b/ldap/servers/slapd/slapi_counter.c
b045b9
index ba0091f..9e705b3 100644
b045b9
--- a/ldap/servers/slapd/slapi_counter.c
b045b9
+++ b/ldap/servers/slapd/slapi_counter.c
b045b9
@@ -283,3 +283,103 @@ slapi_counter_get_value(Slapi_Counter *counter)
b045b9
 
b045b9
     return value;
b045b9
 }
b045b9
+
b045b9
+
b045b9
+/*
b045b9
+ *
b045b9
+ * Atomic functions
b045b9
+ *
b045b9
+ * ptr - a pointer to an integral type variable: int, uint32_t, uint64_t, etc
b045b9
+ *
b045b9
+ * memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE,
b045b9
+ * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, or __ATOMIC_SEQ_CST
b045b9
+ *
b045b9
+ *     See: https://gcc.gnu.org/onlinedocs/gcc-4.9.2/gcc/_005f_005fatomic-Builtins.html
b045b9
+ *
b045b9
+ * type_size - ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG, see slapi-plugin.h for more info
b045b9
+ *
b045b9
+ * Future:
b045b9
+ *    If we need to support ATOMIC_INT128 (not available on 32bit systems):
b045b9
+ *         __atomic_store_16((uint64_t *)&ptr, val, memorder);
b045b9
+ *         __atomic_load_16((uint64_t *)&ptr, memorder);
b045b9
+ *         __atomic_add_fetch_16((uint64_t *)&ptr, 1, memorder);
b045b9
+ *         __atomic_sub_fetch_16((uint64_t *)&ptr, 1, memorder);
b045b9
+ */
b045b9
+
b045b9
+/*
b045b9
+ * "val" must be either int32_t or uint64_t
b045b9
+ */
b045b9
+void
b045b9
+slapi_atomic_store(void *ptr, void *val, int memorder, int type_size)
b045b9
+{
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
+    if (type_size == ATOMIC_INT) {
b045b9
+        __atomic_store_4((int32_t *)ptr, *(int32_t *)val, memorder);
b045b9
+    } else if (type_size == ATOMIC_LONG) {
b045b9
+        __atomic_store_8((uint64_t *)ptr, *(uint64_t *)val, memorder);
b045b9
+    } else {
b045b9
+        /* ATOMIC_GENERIC or unknown size */
b045b9
+        __atomic_store((uint64_t *)&ptr, (uint64_t *)val, memorder);
b045b9
+    }
b045b9
+#else
b045b9
+    PRInt32 *pr_ptr = (PRInt32 *)ptr;
b045b9
+    PR_AtomicSet(pr_ptr, *(PRInt32 *)val);
b045b9
+#endif
b045b9
+}
b045b9
+
b045b9
+uint64_t
b045b9
+slapi_atomic_load(void *ptr, int memorder, int type_size)
b045b9
+{
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
+    uint64_t ret;
b045b9
+
b045b9
+    if (type_size == ATOMIC_INT) {
b045b9
+        return __atomic_load_4((int32_t *)ptr, memorder);
b045b9
+    } else if (type_size == ATOMIC_LONG) {
b045b9
+        return __atomic_load_8((uint64_t *)ptr, memorder);
b045b9
+    } else {
b045b9
+        /* ATOMIC_GENERIC or unknown size */
b045b9
+        __atomic_load((uint64_t *)ptr, &ret, memorder);
b045b9
+        return ret;
b045b9
+    }
b045b9
+#else
b045b9
+    PRInt32 *pr_ptr = (PRInt32 *)ptr;
b045b9
+    return PR_AtomicAdd(pr_ptr, 0);
b045b9
+#endif
b045b9
+}
b045b9
+
b045b9
+uint64_t
b045b9
+slapi_atomic_incr(void *ptr, int memorder, int type_size)
b045b9
+{
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
+    if (type_size == ATOMIC_INT) {
b045b9
+        return __atomic_add_fetch_4((int32_t *)ptr, 1, memorder);
b045b9
+    } else if (type_size == ATOMIC_LONG) {
b045b9
+        return __atomic_add_fetch_8((uint64_t *)ptr, 1, memorder);
b045b9
+    } else {
b045b9
+        /* ATOMIC_GENERIC or unknown size */
b045b9
+        return __atomic_add_fetch((uint64_t *)ptr, 1, memorder);
b045b9
+    }
b045b9
+#else
b045b9
+    PRInt32 *pr_ptr = (PRInt32 *)ptr;
b045b9
+    return PR_AtomicIncrement(pr_ptr);
b045b9
+#endif
b045b9
+}
b045b9
+
b045b9
+uint64_t
b045b9
+slapi_atomic_decr(void *ptr, int memorder, int type_size)
b045b9
+{
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
+    if (type_size == ATOMIC_INT) {
b045b9
+        return __atomic_sub_fetch_4((int32_t *)ptr, 1, memorder);
b045b9
+    } else if (type_size == ATOMIC_LONG) {
b045b9
+        return __atomic_sub_fetch_8((uint64_t *)ptr, 1, memorder);
b045b9
+    } else {
b045b9
+        /* ATOMIC_GENERIC or unknown size */
b045b9
+        return __atomic_sub_fetch((uint64_t *)ptr, 1, memorder);
b045b9
+    }
b045b9
+#else
b045b9
+    PRInt32 *pr_ptr = (PRInt32 *)ptr;
b045b9
+    return PR_AtomicDecrement(pr_ptr);
b045b9
+#endif
b045b9
+}
b045b9
diff --git a/ldap/servers/slapd/thread_data.c b/ldap/servers/slapd/thread_data.c
b045b9
index 9964832..d473710 100644
b045b9
--- a/ldap/servers/slapd/thread_data.c
b045b9
+++ b/ldap/servers/slapd/thread_data.c
b045b9
@@ -9,7 +9,7 @@
b045b9
 /*
b045b9
  *   Thread Local Storage Functions
b045b9
  */
b045b9
-#include <slapi-plugin.h>
b045b9
+#include "slap.h"
b045b9
 #include <prthread.h>
b045b9
 
b045b9
 void td_dn_destructor(void *priv);
b045b9
diff --git a/src/nunc-stans/ns/ns_thrpool.c b/src/nunc-stans/ns/ns_thrpool.c
b045b9
index 7921cbc..2ad0bd7 100644
b045b9
--- a/src/nunc-stans/ns/ns_thrpool.c
b045b9
+++ b/src/nunc-stans/ns/ns_thrpool.c
b045b9
@@ -169,7 +169,11 @@ int32_t
b045b9
 ns_thrpool_is_shutdown(struct ns_thrpool_t *tp)
b045b9
 {
b045b9
     int32_t result = 0;
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
     __atomic_load(&(tp->shutdown), &result, __ATOMIC_ACQUIRE);
b045b9
+#else
b045b9
+    result = PR_AtomicAdd(&(tp->shutdown), 0);
b045b9
+#endif
b045b9
     return result;
b045b9
 }
b045b9
 
b045b9
@@ -177,7 +181,11 @@ int32_t
b045b9
 ns_thrpool_is_event_shutdown(struct ns_thrpool_t *tp)
b045b9
 {
b045b9
     int32_t result = 0;
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
     __atomic_load(&(tp->shutdown_event_loop), &result, __ATOMIC_ACQUIRE);
b045b9
+#else
b045b9
+    result = PR_AtomicAdd(&(tp->shutdown_event_loop), 0);
b045b9
+#endif
b045b9
     return result;
b045b9
 }
b045b9
 
b045b9
@@ -1442,8 +1450,11 @@ ns_thrpool_destroy(struct ns_thrpool_t *tp)
b045b9
 #endif
b045b9
     if (tp) {
b045b9
         /* Set the flag to shutdown the event loop. */
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
         __atomic_add_fetch(&(tp->shutdown_event_loop), 1, __ATOMIC_RELEASE);
b045b9
-
b045b9
+#else
b045b9
+        PR_AtomicIncrement(&(tp->shutdown_event_loop));
b045b9
+#endif
b045b9
         /* Finish the event queue wakeup job.  This has the
b045b9
          * side effect of waking up the event loop thread, which
b045b9
          * will cause it to exit since we set the event loop
b045b9
@@ -1532,7 +1543,11 @@ ns_thrpool_shutdown(struct ns_thrpool_t *tp)
b045b9
 
b045b9
     /* Set the shutdown flag.  This will cause the worker
b045b9
      * threads to exit after they finish all remaining work. */
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
     __atomic_add_fetch(&(tp->shutdown), 1, __ATOMIC_RELEASE);
b045b9
+#else
b045b9
+    PR_AtomicIncrement(&(tp->shutdown));
b045b9
+#endif
b045b9
 
b045b9
     /* Send worker shutdown jobs into the queues. This allows
b045b9
      * currently queued jobs to complete.
b045b9
diff --git a/src/nunc-stans/test/test_nuncstans_stress_core.c b/src/nunc-stans/test/test_nuncstans_stress_core.c
b045b9
index a678800..2fc4ef4 100644
b045b9
--- a/src/nunc-stans/test/test_nuncstans_stress_core.c
b045b9
+++ b/src/nunc-stans/test/test_nuncstans_stress_core.c
b045b9
@@ -128,7 +128,11 @@ server_conn_write(struct ns_job_t *job)
b045b9
     assert(connctx != NULL);
b045b9
     if (NS_JOB_IS_TIMER(ns_job_get_output_type(job))) {
b045b9
         do_logging(LOG_ERR, "conn_write: job [%p] timeout\n", job);
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
         __atomic_add_fetch(&server_fail_count, 1, __ATOMIC_SEQ_CST);
b045b9
+#else
b045b9
+        PR_AtomicIncrement(&server_fail_count);
b045b9
+#endif
b045b9
         conn_ctx_free(connctx);
b045b9
         assert_int_equal(ns_job_done(job), 0);
b045b9
         return;
b045b9
@@ -173,7 +177,11 @@ server_conn_read(struct ns_job_t *job)
b045b9
     if (NS_JOB_IS_TIMER(ns_job_get_output_type(job))) {
b045b9
         /* The event that triggered this call back is because we timed out waiting for IO */
b045b9
         do_logging(LOG_ERR, "conn_read: job [%p] timed out\n", job);
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
         __atomic_add_fetch(&server_fail_count, 1, __ATOMIC_SEQ_CST);
b045b9
+#else
b045b9
+        PR_AtomicIncrement(&server_fail_count);
b045b9
+#endif
b045b9
         conn_ctx_free(connctx);
b045b9
         assert_int_equal(ns_job_done(job), 0);
b045b9
         return;
b045b9
@@ -204,7 +212,11 @@ server_conn_read(struct ns_job_t *job)
b045b9
             return;
b045b9
         } else {
b045b9
             do_logging(LOG_ERR, "conn_read: read error for job [%p] %d: %s\n", job, PR_GetError(), PR_ErrorToString(PR_GetError(), PR_LANGUAGE_I_DEFAULT));
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
             __atomic_add_fetch(&server_fail_count, 1, __ATOMIC_SEQ_CST);
b045b9
+#else
b045b9
+            PR_AtomicIncrement(&server_fail_count);
b045b9
+#endif
b045b9
             conn_ctx_free(connctx);
b045b9
             assert_int_equal(ns_job_done(job), 0);
b045b9
             return;
b045b9
@@ -214,7 +226,11 @@ server_conn_read(struct ns_job_t *job)
b045b9
         /* Didn't read anything */
b045b9
         do_logging(LOG_DEBUG, "conn_read: job [%p] closed\n", job);
b045b9
         /* Increment the success */
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
         __atomic_add_fetch(&server_success_count, 1, __ATOMIC_SEQ_CST);
b045b9
+#else
b045b9
+        PR_AtomicIncrement(&server_success_count);
b045b9
+#endif
b045b9
         conn_ctx_free(connctx);
b045b9
         assert_int_equal(ns_job_done(job), 0);
b045b9
         return;
b045b9
@@ -314,26 +330,41 @@ client_response_cb(struct ns_job_t *job)
b045b9
     if (len < 0) {
b045b9
         /* PRErrorCode prerr = PR_GetError(); */
b045b9
         do_logging(LOG_ERR, "FAIL: connection error, no data \n");
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
         __atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
b045b9
+#else
b045b9
+        PR_AtomicIncrement(&client_fail_count);
b045b9
+#endif
b045b9
         goto done;
b045b9
     } else if (len == 0) {
b045b9
         do_logging(LOG_ERR, "FAIL: connection closed, no data \n");
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
         __atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
b045b9
+#else
b045b9
+        PR_AtomicIncrement(&client_fail_count);
b045b9
+#endif
b045b9
         goto done;
b045b9
     } else {
b045b9
         /* Be paranoid, force last byte null */
b045b9
         buffer[buflen - 1] = '\0';
b045b9
         if (strncmp("this is a test!\n", buffer, strlen("this is a test!\n")) != 0) {
b045b9
             do_logging(LOG_ERR, "FAIL: connection incorrect response, no data \n");
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
             __atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
b045b9
+#else
b045b9
+            PR_AtomicIncrement(&client_fail_count);
b045b9
+#endif
b045b9
             goto done;
b045b9
         }
b045b9
     }
b045b9
 
b045b9
     struct timespec ts;
b045b9
     clock_gettime(CLOCK_MONOTONIC, &ts);
b045b9
-
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
     __atomic_add_fetch(&client_success_count, 1, __ATOMIC_SEQ_CST);
b045b9
+#else
b045b9
+    PR_AtomicIncrement(&client_success_count);
b045b9
+#endif
b045b9
     do_logging(LOG_ERR, "PASS: %ld.%ld %d\n", ts.tv_sec, ts.tv_nsec, client_success_count);
b045b9
 
b045b9
 done:
b045b9
@@ -354,7 +385,11 @@ client_initiate_connection_cb(struct ns_job_t *job)
b045b9
         char *err = NULL;
b045b9
         PR_GetErrorText(err);
b045b9
         do_logging(LOG_ERR, "FAIL: Socket failed, %d -> %s\n", PR_GetError(), err);
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
         __atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
b045b9
+#else
b045b9
+        PR_AtomicIncrement(&client_fail_count);
b045b9
+#endif
b045b9
         goto done;
b045b9
     }
b045b9
 
b045b9
@@ -368,8 +403,11 @@ client_initiate_connection_cb(struct ns_job_t *job)
b045b9
         PR_GetErrorText(err);
b045b9
         do_logging(LOG_ERR, "FAIL: cannot connect, timeout %d -> %s \n", PR_GetError(), err);
b045b9
         /* Atomic increment fail */
b045b9
+#ifdef ATOMIC_64BIT_OPERATIONS
b045b9
         __atomic_add_fetch(&client_timeout_count, 1, __ATOMIC_SEQ_CST);
b045b9
-
b045b9
+#else
b045b9
+        PR_AtomicIncrement(&client_timeout_count);
b045b9
+#endif
b045b9
         if (sock != NULL) {
b045b9
             PR_Close(sock);
b045b9
         }
b045b9
-- 
b045b9
2.9.5
b045b9