|
|
6405db |
From 2975f68e139169ee2d2259cfbbb2a15b54dc3724 Mon Sep 17 00:00:00 2001
|
|
|
6405db |
From: William Brown <firstyear@redhat.com>
|
|
|
6405db |
Date: Wed, 26 Jul 2017 11:01:49 +1000
|
|
|
6405db |
Subject: [PATCH] Ticket 49330 - Improve ndn cache performance 1.3.6
|
|
|
6405db |
|
|
|
6405db |
Backport from 1.3.7 master.
|
|
|
6405db |
|
|
|
6405db |
Bug Description: Normalised DN's are a costly process to update
|
|
|
6405db |
and maintain. As a result, a normalised DN cache was created. Yet
|
|
|
6405db |
it was never able to perform well. In some datasets with large sets
|
|
|
6405db |
of dn attr types, the NDN cache actively hurt performance.
|
|
|
6405db |
|
|
|
6405db |
The issue stemmed from 3 major issues in the design of the NDN
|
|
|
6405db |
cache.
|
|
|
6405db |
|
|
|
6405db |
First, it is a global cache which means it exists behind
|
|
|
6405db |
a rwlock. This causes delay as threads wait behind the lock
|
|
|
6405db |
to access or update the cache (especially on a miss).
|
|
|
6405db |
|
|
|
6405db |
Second, the cache was limited to 4073 buckets. Despite the fact
|
|
|
6405db |
that a prime number on a hash causes a skew in distribution,
|
|
|
6405db |
this was in an NSPR hash - which does not grow dynamically,
|
|
|
6405db |
rather devolving a bucket to a linked list. As a result, once you
|
|
|
6405db |
passed ~3000 your lookup performance would degrade rapidly to O(n)
|
|
|
6405db |
|
|
|
6405db |
Finally, the cache's lru policy did not evict least used - it
|
|
|
6405db |
evicted the 10,000 least used. So if you tuned your cache
|
|
|
6405db |
to match the NSPR map, every inclusion that would trigger a
|
|
|
6405db |
delete of old values would effectively empty your cache. On bigger
|
|
|
6405db |
set sizes, this has to walk the map (at O(n)) to clean 10,000
|
|
|
6405db |
elements.
|
|
|
6405db |
|
|
|
6405db |
Premature optimisation strikes again ....
|
|
|
6405db |
|
|
|
6405db |
Fix Description: Throw it out. Rewrite. We now use a hash
|
|
|
6405db |
algo that has proper distribution across a set. The hash
|
|
|
6405db |
sizes slots to a power of two. Finally, each thread has
|
|
|
6405db |
a private cache rather than shared which completely eliminates
|
|
|
6405db |
a lock contention and even NUMA performance issues.
|
|
|
6405db |
|
|
|
6405db |
Interestingly this fix should have improvements for DB
|
|
|
6405db |
imports, memberof and refint performance and more.
|
|
|
6405db |
|
|
|
6405db |
Some testing has shown in simple search workloads a 10%
|
|
|
6405db |
improvement in throughput, and on complex searches a 47x
|
|
|
6405db |
improvement.
|
|
|
6405db |
|
|
|
6405db |
https://pagure.io/389-ds-base/issue/49330
|
|
|
6405db |
|
|
|
6405db |
Author: wibrown
|
|
|
6405db |
|
|
|
6405db |
Review by: lkrispen, tbordaz
|
|
|
6405db |
---
|
|
|
6405db |
ldap/servers/slapd/back-ldbm/monitor.c | 11 +-
|
|
|
6405db |
ldap/servers/slapd/dn.c | 809 +++++++++++++++++++++------------
|
|
|
6405db |
ldap/servers/slapd/slapi-private.h | 2 +-
|
|
|
6405db |
3 files changed, 527 insertions(+), 295 deletions(-)
|
|
|
6405db |
|
|
|
6405db |
diff --git a/ldap/servers/slapd/back-ldbm/monitor.c b/ldap/servers/slapd/back-ldbm/monitor.c
|
|
|
6405db |
index c58b069..aa7d709 100644
|
|
|
6405db |
--- a/ldap/servers/slapd/back-ldbm/monitor.c
|
|
|
6405db |
+++ b/ldap/servers/slapd/back-ldbm/monitor.c
|
|
|
6405db |
@@ -43,6 +43,9 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e,
|
|
|
6405db |
PRUint64 hits, tries;
|
|
|
6405db |
long nentries, maxentries, count;
|
|
|
6405db |
size_t size, maxsize;
|
|
|
6405db |
+ size_t thread_size;
|
|
|
6405db |
+ size_t evicts;
|
|
|
6405db |
+ size_t slots;
|
|
|
6405db |
/* NPCTE fix for bugid 544365, esc 0. <P.R> <04-Jul-2001> */
|
|
|
6405db |
struct stat astat;
|
|
|
6405db |
/* end of NPCTE fix for bugid 544365 */
|
|
|
6405db |
@@ -118,7 +121,7 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e,
|
|
|
6405db |
}
|
|
|
6405db |
/* normalized dn cache stats */
|
|
|
6405db |
if(ndn_cache_started()){
|
|
|
6405db |
- ndn_cache_get_stats(&hits, &tries, &size, &maxsize, &count);
|
|
|
6405db |
+ ndn_cache_get_stats(&hits, &tries, &size, &maxsize, &thread_size, &evicts, &slots, &count);
|
|
|
6405db |
sprintf(buf, "%" PRIu64, tries);
|
|
|
6405db |
MSET("normalizedDnCacheTries");
|
|
|
6405db |
sprintf(buf, "%" PRIu64, hits);
|
|
|
6405db |
@@ -127,6 +130,8 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e,
|
|
|
6405db |
MSET("normalizedDnCacheMisses");
|
|
|
6405db |
sprintf(buf, "%lu", (unsigned long)(100.0*(double)hits / (double)(tries > 0 ? tries : 1)));
|
|
|
6405db |
MSET("normalizedDnCacheHitRatio");
|
|
|
6405db |
+ sprintf(buf, "%"PRIu64, evicts);
|
|
|
6405db |
+ MSET("NormalizedDnCacheEvictions");
|
|
|
6405db |
sprintf(buf, "%lu", (long unsigned int)size);
|
|
|
6405db |
MSET("currentNormalizedDnCacheSize");
|
|
|
6405db |
if(maxsize == 0){
|
|
|
6405db |
@@ -135,6 +140,10 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e,
|
|
|
6405db |
sprintf(buf, "%lu", (long unsigned int)maxsize);
|
|
|
6405db |
}
|
|
|
6405db |
MSET("maxNormalizedDnCacheSize");
|
|
|
6405db |
+ sprintf(buf, "%"PRIu64, thread_size);
|
|
|
6405db |
+ MSET("NormalizedDnCacheThreadSize");
|
|
|
6405db |
+ sprintf(buf, "%"PRIu64, slots);
|
|
|
6405db |
+ MSET("NormalizedDnCacheThreadSlots");
|
|
|
6405db |
sprintf(buf, "%ld", count);
|
|
|
6405db |
MSET("currentNormalizedDnCacheCount");
|
|
|
6405db |
}
|
|
|
6405db |
diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
|
|
|
6405db |
index fa3909f..9cb3e7b 100644
|
|
|
6405db |
--- a/ldap/servers/slapd/dn.c
|
|
|
6405db |
+++ b/ldap/servers/slapd/dn.c
|
|
|
6405db |
@@ -22,6 +22,24 @@
|
|
|
6405db |
#include "slap.h"
|
|
|
6405db |
#include <plhash.h>
|
|
|
6405db |
|
|
|
6405db |
+#include <inttypes.h>
|
|
|
6405db |
+#include <stddef.h> /* for size_t */
|
|
|
6405db |
+
|
|
|
6405db |
+#if defined(HAVE_SYS_ENDIAN_H)
|
|
|
6405db |
+#include <sys/endian.h>
|
|
|
6405db |
+#elif defined(HAVE_ENDIAN_H)
|
|
|
6405db |
+#include <endian.h>
|
|
|
6405db |
+#else
|
|
|
6405db |
+#error platform header for endian detection not found.
|
|
|
6405db |
+#endif
|
|
|
6405db |
+
|
|
|
6405db |
+/* See: http://sourceforge.net/p/predef/wiki/Endianness/ */
|
|
|
6405db |
+#if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN
|
|
|
6405db |
+#define _le64toh(x) ((uint64_t)(x))
|
|
|
6405db |
+#else
|
|
|
6405db |
+#define _le64toh(x) le64toh(x)
|
|
|
6405db |
+#endif
|
|
|
6405db |
+
|
|
|
6405db |
#undef SDN_DEBUG
|
|
|
6405db |
|
|
|
6405db |
static void add_rdn_av( char *avstart, char *avend, int *rdn_av_countp,
|
|
|
6405db |
@@ -33,52 +51,89 @@ static void rdn_av_swap( struct berval *av1, struct berval *av2, int escape );
|
|
|
6405db |
static int does_cn_uses_dn_syntax_in_dns(char *type, char *dn);
|
|
|
6405db |
|
|
|
6405db |
/* normalized dn cache related definitions*/
|
|
|
6405db |
-struct
|
|
|
6405db |
-ndn_cache_lru
|
|
|
6405db |
-{
|
|
|
6405db |
- struct ndn_cache_lru *prev;
|
|
|
6405db |
- struct ndn_cache_lru *next;
|
|
|
6405db |
- char *key;
|
|
|
6405db |
-};
|
|
|
6405db |
-
|
|
|
6405db |
-struct
|
|
|
6405db |
-ndn_cache_ctx
|
|
|
6405db |
-{
|
|
|
6405db |
- struct ndn_cache_lru *head;
|
|
|
6405db |
- struct ndn_cache_lru *tail;
|
|
|
6405db |
+struct ndn_cache_stats {
|
|
|
6405db |
Slapi_Counter *cache_hits;
|
|
|
6405db |
Slapi_Counter *cache_tries;
|
|
|
6405db |
- Slapi_Counter *cache_misses;
|
|
|
6405db |
- size_t cache_size;
|
|
|
6405db |
- size_t cache_max_size;
|
|
|
6405db |
- long cache_count;
|
|
|
6405db |
+ Slapi_Counter *cache_count;
|
|
|
6405db |
+ Slapi_Counter *cache_size;
|
|
|
6405db |
+ Slapi_Counter *cache_evicts;
|
|
|
6405db |
+ size_t max_size;
|
|
|
6405db |
+ size_t thread_max_size;
|
|
|
6405db |
+ size_t slots;
|
|
|
6405db |
};
|
|
|
6405db |
|
|
|
6405db |
-struct
|
|
|
6405db |
-ndn_hash_val
|
|
|
6405db |
-{
|
|
|
6405db |
+struct ndn_cache_value {
|
|
|
6405db |
+ size_t size;
|
|
|
6405db |
+ size_t slot;
|
|
|
6405db |
+ char *dn;
|
|
|
6405db |
char *ndn;
|
|
|
6405db |
- size_t len;
|
|
|
6405db |
- int size;
|
|
|
6405db |
- struct ndn_cache_lru *lru_node; /* used to speed up lru shuffling */
|
|
|
6405db |
+ struct ndn_cache_value *next;
|
|
|
6405db |
+ struct ndn_cache_value *prev;
|
|
|
6405db |
+ struct ndn_cache_value *child;
|
|
|
6405db |
+};
|
|
|
6405db |
+
|
|
|
6405db |
+/*
|
|
|
6405db |
+ * This uses a similar alloc trick to IDList to keep
|
|
|
6405db |
+ * The amount of derefs small.
|
|
|
6405db |
+ */
|
|
|
6405db |
+struct ndn_cache {
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * We keep per thread stats and flush them occasionally
|
|
|
6405db |
+ */
|
|
|
6405db |
+ size_t max_size;
|
|
|
6405db |
+ /* Need to track this because we need to provide diffs to counter */
|
|
|
6405db |
+ size_t last_count;
|
|
|
6405db |
+ size_t count;
|
|
|
6405db |
+ /* Number of ops */
|
|
|
6405db |
+ size_t tries;
|
|
|
6405db |
+ /* hit vs miss. in theory miss == tries - hits.*/
|
|
|
6405db |
+ size_t hits;
|
|
|
6405db |
+ /* How many values we kicked out */
|
|
|
6405db |
+ size_t evicts;
|
|
|
6405db |
+ /* Need to track this because we need to provide diffs to counter */
|
|
|
6405db |
+ size_t last_size;
|
|
|
6405db |
+ size_t size;
|
|
|
6405db |
+
|
|
|
6405db |
+ size_t slots;
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * This is used by siphash to prevent hash bucket attacks
|
|
|
6405db |
+ */
|
|
|
6405db |
+ char key[16];
|
|
|
6405db |
+
|
|
|
6405db |
+ struct ndn_cache_value *head;
|
|
|
6405db |
+ struct ndn_cache_value *tail;
|
|
|
6405db |
+ struct ndn_cache_value *table[1];
|
|
|
6405db |
};
|
|
|
6405db |
|
|
|
6405db |
-#define NDN_FLUSH_COUNT 10000 /* number of DN's to remove when cache fills up */
|
|
|
6405db |
-#define NDN_MIN_COUNT 1000 /* the minimum number of DN's to keep in the cache */
|
|
|
6405db |
-#define NDN_CACHE_BUCKETS 2053 /* prime number */
|
|
|
6405db |
+/*
|
|
|
6405db |
+ * This means we need 1 MB minimum per thread
|
|
|
6405db |
+ *
|
|
|
6405db |
+ */
|
|
|
6405db |
+#define NDN_CACHE_MINIMUM_CAPACITY 1048576
|
|
|
6405db |
+/*
|
|
|
6405db |
+ * This helps us define the number of hashtable slots
|
|
|
6405db |
+ * to create. We assume an average DN is 64 chars long
|
|
|
6405db |
+ * This way we end up with a ht entry of:
|
|
|
6405db |
+ * 8 bytes: from the table pointing to us.
|
|
|
6405db |
+ * 8 bytes: next ptr
|
|
|
6405db |
+ * 8 bytes: prev ptr
|
|
|
6405db |
+ * 8 bytes + 64: dn
|
|
|
6405db |
+ * 8 bytes + 64: ndn itself.
|
|
|
6405db |
+ * This gives us 168 bytes. In theory this means
|
|
|
6405db |
+ * 6241 entries, but we have to clamp this to a power of
|
|
|
6405db |
+ * two, so we have 8192 slots. In reality, dns may be
|
|
|
6405db |
+ * shorter *and* the dn may be the same as the ndn
|
|
|
6405db |
+ * so we *may* store more ndns than this. Again, a good reason
|
|
|
6405db |
+ * to round the ht size up!
|
|
|
6405db |
+ */
|
|
|
6405db |
+#define NDN_ENTRY_AVG_SIZE 168
|
|
|
6405db |
+/*
|
|
|
6405db |
+ * After how many operations do we sync our per-thread stats.
|
|
|
6405db |
+ */
|
|
|
6405db |
+#define NDN_STAT_COMMIT_FREQUENCY 256
|
|
|
6405db |
|
|
|
6405db |
-static PLHashNumber ndn_hash_string(const void *key);
|
|
|
6405db |
static int ndn_cache_lookup(char *dn, size_t dn_len, char **result, char **udn, int *rc);
|
|
|
6405db |
-static void ndn_cache_update_lru(struct ndn_cache_lru **node);
|
|
|
6405db |
static void ndn_cache_add(char *dn, size_t dn_len, char *ndn, size_t ndn_len);
|
|
|
6405db |
-static void ndn_cache_delete(char *dn);
|
|
|
6405db |
-static void ndn_cache_flush(void);
|
|
|
6405db |
-static void ndn_cache_free(void);
|
|
|
6405db |
-static int ndn_started = 0;
|
|
|
6405db |
-static PRLock *lru_lock = NULL;
|
|
|
6405db |
-static Slapi_RWLock *ndn_cache_lock = NULL;
|
|
|
6405db |
-static struct ndn_cache_ctx *ndn_cache = NULL;
|
|
|
6405db |
-static PLHashTable *ndn_cache_hashtable = NULL;
|
|
|
6405db |
|
|
|
6405db |
#define ISBLANK(c) ((c) == ' ')
|
|
|
6405db |
#define ISBLANKSTR(s) (((*(s)) == '2') && (*((s)+1) == '0'))
|
|
|
6405db |
@@ -2768,166 +2823,408 @@ slapi_sdn_get_size(const Slapi_DN *sdn)
|
|
|
6405db |
*
|
|
|
6405db |
*/
|
|
|
6405db |
|
|
|
6405db |
+/* <MIT License>
|
|
|
6405db |
+ Copyright (c) 2013 Marek Majkowski <marek@popcount.org>
|
|
|
6405db |
+
|
|
|
6405db |
+ Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
6405db |
+ of this software and associated documentation files (the "Software"), to deal
|
|
|
6405db |
+ in the Software without restriction, including without limitation the rights
|
|
|
6405db |
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
6405db |
+ copies of the Software, and to permit persons to whom the Software is
|
|
|
6405db |
+ furnished to do so, subject to the following conditions:
|
|
|
6405db |
+
|
|
|
6405db |
+ The above copyright notice and this permission notice shall be included in
|
|
|
6405db |
+ all copies or substantial portions of the Software.
|
|
|
6405db |
+
|
|
|
6405db |
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
6405db |
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
6405db |
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
6405db |
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
6405db |
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
6405db |
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
6405db |
+ THE SOFTWARE.
|
|
|
6405db |
+ </MIT License>
|
|
|
6405db |
+
|
|
|
6405db |
+ Original location:
|
|
|
6405db |
+ https://github.com/majek/csiphash/
|
|
|
6405db |
+
|
|
|
6405db |
+ Solution inspired by code from:
|
|
|
6405db |
+ Samuel Neves (supercop/crypto_auth/siphash24/little)
|
|
|
6405db |
+ djb (supercop/crypto_auth/siphash24/little2)
|
|
|
6405db |
+ Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)
|
|
|
6405db |
+*/
|
|
|
6405db |
+
|
|
|
6405db |
+#define ROTATE(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
|
|
|
6405db |
+
|
|
|
6405db |
+#define HALF_ROUND(a, b, c, d, s, t) \
|
|
|
6405db |
+ a += b; \
|
|
|
6405db |
+ c += d; \
|
|
|
6405db |
+ b = ROTATE(b, s) ^ a; \
|
|
|
6405db |
+ d = ROTATE(d, t) ^ c; \
|
|
|
6405db |
+ a = ROTATE(a, 32);
|
|
|
6405db |
+
|
|
|
6405db |
+#define ROUND(v0, v1, v2, v3) \
|
|
|
6405db |
+ HALF_ROUND(v0, v1, v2, v3, 13, 16); \
|
|
|
6405db |
+ HALF_ROUND(v2, v1, v0, v3, 17, 21)
|
|
|
6405db |
+
|
|
|
6405db |
+#define cROUND(v0, v1, v2, v3) \
|
|
|
6405db |
+ ROUND(v0, v1, v2, v3)
|
|
|
6405db |
+
|
|
|
6405db |
+#define dROUND(v0, v1, v2, v3) \
|
|
|
6405db |
+ ROUND(v0, v1, v2, v3); \
|
|
|
6405db |
+ ROUND(v0, v1, v2, v3); \
|
|
|
6405db |
+ ROUND(v0, v1, v2, v3)
|
|
|
6405db |
+
|
|
|
6405db |
+
|
|
|
6405db |
+static uint64_t
|
|
|
6405db |
+sds_siphash13(const void *src, size_t src_sz, const char key[16])
|
|
|
6405db |
+{
|
|
|
6405db |
+ const uint64_t *_key = (uint64_t *)key;
|
|
|
6405db |
+ uint64_t k0 = _le64toh(_key[0]);
|
|
|
6405db |
+ uint64_t k1 = _le64toh(_key[1]);
|
|
|
6405db |
+ uint64_t b = (uint64_t)src_sz << 56;
|
|
|
6405db |
+ const uint64_t *in = (uint64_t *)src;
|
|
|
6405db |
+
|
|
|
6405db |
+ uint64_t v0 = k0 ^ 0x736f6d6570736575ULL;
|
|
|
6405db |
+ uint64_t v1 = k1 ^ 0x646f72616e646f6dULL;
|
|
|
6405db |
+ uint64_t v2 = k0 ^ 0x6c7967656e657261ULL;
|
|
|
6405db |
+ uint64_t v3 = k1 ^ 0x7465646279746573ULL;
|
|
|
6405db |
+
|
|
|
6405db |
+ while (src_sz >= 8) {
|
|
|
6405db |
+ uint64_t mi = _le64toh(*in);
|
|
|
6405db |
+ in += 1;
|
|
|
6405db |
+ src_sz -= 8;
|
|
|
6405db |
+ v3 ^= mi;
|
|
|
6405db |
+ // cround
|
|
|
6405db |
+ cROUND(v0, v1, v2, v3);
|
|
|
6405db |
+ v0 ^= mi;
|
|
|
6405db |
+ }
|
|
|
6405db |
+
|
|
|
6405db |
+ uint64_t t = 0;
|
|
|
6405db |
+ uint8_t *pt = (uint8_t *)&t;
|
|
|
6405db |
+ uint8_t *m = (uint8_t *)in;
|
|
|
6405db |
+
|
|
|
6405db |
+ switch (src_sz) {
|
|
|
6405db |
+ case 7:
|
|
|
6405db |
+ pt[6] = m[6]; /* FALLTHRU */
|
|
|
6405db |
+ case 6:
|
|
|
6405db |
+ pt[5] = m[5]; /* FALLTHRU */
|
|
|
6405db |
+ case 5:
|
|
|
6405db |
+ pt[4] = m[4]; /* FALLTHRU */
|
|
|
6405db |
+ case 4:
|
|
|
6405db |
+ *((uint32_t *)&pt[0]) = *((uint32_t *)&m[0]);
|
|
|
6405db |
+ break;
|
|
|
6405db |
+ case 3:
|
|
|
6405db |
+ pt[2] = m[2]; /* FALLTHRU */
|
|
|
6405db |
+ case 2:
|
|
|
6405db |
+ pt[1] = m[1]; /* FALLTHRU */
|
|
|
6405db |
+ case 1:
|
|
|
6405db |
+ pt[0] = m[0]; /* FALLTHRU */
|
|
|
6405db |
+ }
|
|
|
6405db |
+ b |= _le64toh(t);
|
|
|
6405db |
+
|
|
|
6405db |
+ v3 ^= b;
|
|
|
6405db |
+ // cround
|
|
|
6405db |
+ cROUND(v0, v1, v2, v3);
|
|
|
6405db |
+ v0 ^= b;
|
|
|
6405db |
+ v2 ^= 0xff;
|
|
|
6405db |
+ // dround
|
|
|
6405db |
+ dROUND(v0, v1, v2, v3);
|
|
|
6405db |
+ return (v0 ^ v1) ^ (v2 ^ v3);
|
|
|
6405db |
+}
|
|
|
6405db |
+
|
|
|
6405db |
+static pthread_key_t ndn_cache_key;
|
|
|
6405db |
+static pthread_once_t ndn_cache_key_once = PTHREAD_ONCE_INIT;
|
|
|
6405db |
+static struct ndn_cache_stats t_cache_stats = {0};
|
|
|
6405db |
/*
|
|
|
6405db |
- * Hashing function using Bernstein's method
|
|
|
6405db |
+ * WARNING: For some reason we try to use the NDN cache *before*
|
|
|
6405db |
+ * we have a chance to configure it. As a result, we need to rely
|
|
|
6405db |
+ * on a trick in the way we start, that we start in one thread
|
|
|
6405db |
+ * so we can manipulate ints as though they were atomics, then
|
|
|
6405db |
+ * we start in *one* thread, so it's set, then when threads
|
|
|
6405db |
+ * fork the get barriers, so we can go from there. However we *CANNOT*
|
|
|
6405db |
+ * change this at runtime without expensive atomics per op, so lets
|
|
|
6405db |
+ * not bother until we improve libglobs to be COW.
|
|
|
6405db |
*/
|
|
|
6405db |
-static PLHashNumber
|
|
|
6405db |
-ndn_hash_string(const void *key)
|
|
|
6405db |
-{
|
|
|
6405db |
- PLHashNumber hash = 5381;
|
|
|
6405db |
- unsigned char *x = (unsigned char *)key;
|
|
|
6405db |
- int c;
|
|
|
6405db |
+static int32_t ndn_enabled = 0;
|
|
|
6405db |
+
|
|
|
6405db |
+static struct ndn_cache *
|
|
|
6405db |
+ndn_thread_cache_create(size_t thread_max_size, size_t slots) {
|
|
|
6405db |
+ size_t t_cache_size = sizeof(struct ndn_cache) + (slots * sizeof(struct ndn_cache_value *));
|
|
|
6405db |
+ struct ndn_cache *t_cache = (struct ndn_cache *)slapi_ch_calloc(1, t_cache_size);
|
|
|
6405db |
+
|
|
|
6405db |
+ t_cache->max_size = thread_max_size;
|
|
|
6405db |
+ t_cache->slots = slots;
|
|
|
6405db |
|
|
|
6405db |
- while ((c = *x++)){
|
|
|
6405db |
- hash = ((hash << 5) + hash) ^ c;
|
|
|
6405db |
+ return t_cache;
|
|
|
6405db |
+}
|
|
|
6405db |
+
|
|
|
6405db |
+static void
|
|
|
6405db |
+ndn_thread_cache_commit_status(struct ndn_cache *t_cache) {
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * Every so often we commit these atomically. We do this infrequently
|
|
|
6405db |
+ * to avoid the costly atomics.
|
|
|
6405db |
+ */
|
|
|
6405db |
+ if (t_cache->tries % NDN_STAT_COMMIT_FREQUENCY == 0) {
|
|
|
6405db |
+ /* We can just add tries and hits. */
|
|
|
6405db |
+ slapi_counter_add(t_cache_stats.cache_evicts, t_cache->evicts);
|
|
|
6405db |
+ slapi_counter_add(t_cache_stats.cache_tries, t_cache->tries);
|
|
|
6405db |
+ slapi_counter_add(t_cache_stats.cache_hits, t_cache->hits);
|
|
|
6405db |
+ t_cache->hits = 0;
|
|
|
6405db |
+ t_cache->tries = 0;
|
|
|
6405db |
+ t_cache->evicts = 0;
|
|
|
6405db |
+ /* Count and size need diff */
|
|
|
6405db |
+ int64_t diff = (t_cache->size - t_cache->last_size);
|
|
|
6405db |
+ if (diff > 0) {
|
|
|
6405db |
+ // We have more ....
|
|
|
6405db |
+ slapi_counter_add(t_cache_stats.cache_size, (uint64_t)diff);
|
|
|
6405db |
+ } else if (diff < 0) {
|
|
|
6405db |
+ slapi_counter_subtract(t_cache_stats.cache_size, (uint64_t)llabs(diff));
|
|
|
6405db |
+ }
|
|
|
6405db |
+ t_cache->last_size = t_cache->size;
|
|
|
6405db |
+
|
|
|
6405db |
+ diff = (t_cache->count - t_cache->last_count);
|
|
|
6405db |
+ if (diff > 0) {
|
|
|
6405db |
+ // We have more ....
|
|
|
6405db |
+ slapi_counter_add(t_cache_stats.cache_count, (uint64_t)diff);
|
|
|
6405db |
+ } else if (diff < 0) {
|
|
|
6405db |
+ slapi_counter_subtract(t_cache_stats.cache_count, (uint64_t)llabs(diff));
|
|
|
6405db |
+ }
|
|
|
6405db |
+ t_cache->last_count = t_cache->count;
|
|
|
6405db |
+
|
|
|
6405db |
+ }
|
|
|
6405db |
+}
|
|
|
6405db |
+
|
|
|
6405db |
+static void
|
|
|
6405db |
+ndn_thread_cache_value_destroy(struct ndn_cache *t_cache, struct ndn_cache_value *v) {
|
|
|
6405db |
+ /* Update stats */
|
|
|
6405db |
+ t_cache->size = t_cache->size - v->size;
|
|
|
6405db |
+ t_cache->count--;
|
|
|
6405db |
+ t_cache->evicts++;
|
|
|
6405db |
+
|
|
|
6405db |
+ if (v == t_cache->head) {
|
|
|
6405db |
+ t_cache->head = v->prev;
|
|
|
6405db |
+ }
|
|
|
6405db |
+ if (v == t_cache->tail) {
|
|
|
6405db |
+ t_cache->tail = v->next;
|
|
|
6405db |
+ }
|
|
|
6405db |
+
|
|
|
6405db |
+ /* Cut the node out. */
|
|
|
6405db |
+ if (v->next != NULL) {
|
|
|
6405db |
+ v->next->prev = v->prev;
|
|
|
6405db |
+ }
|
|
|
6405db |
+ if (v->prev != NULL) {
|
|
|
6405db |
+ v->prev->next = v->next;
|
|
|
6405db |
+ }
|
|
|
6405db |
+ /* Set the pointer in the table to NULL */
|
|
|
6405db |
+ /* Now see if we were in a list */
|
|
|
6405db |
+ struct ndn_cache_value *slot_node = t_cache->table[v->slot];
|
|
|
6405db |
+ if (slot_node == v) {
|
|
|
6405db |
+ t_cache->table[v->slot] = v->child;
|
|
|
6405db |
+ } else {
|
|
|
6405db |
+ struct ndn_cache_value *former_slot_node = NULL;
|
|
|
6405db |
+ do {
|
|
|
6405db |
+ former_slot_node = slot_node;
|
|
|
6405db |
+ slot_node = slot_node->child;
|
|
|
6405db |
+ } while(slot_node != v);
|
|
|
6405db |
+ /* Okay, now slot_node is us, and former is our parent */
|
|
|
6405db |
+ former_slot_node->child = v->child;
|
|
|
6405db |
+ }
|
|
|
6405db |
+
|
|
|
6405db |
+ slapi_ch_free((void **)&(v->dn));
|
|
|
6405db |
+ slapi_ch_free((void **)&(v->ndn));
|
|
|
6405db |
+ slapi_ch_free((void **)&v);
|
|
|
6405db |
+}
|
|
|
6405db |
+
|
|
|
6405db |
+static void
|
|
|
6405db |
+ndn_thread_cache_destroy(void *v_cache) {
|
|
|
6405db |
+ struct ndn_cache *t_cache = (struct ndn_cache *)v_cache;
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * FREE ALL THE NODES!!!
|
|
|
6405db |
+ */
|
|
|
6405db |
+ struct ndn_cache_value *node = t_cache->tail;
|
|
|
6405db |
+ struct ndn_cache_value *next_node = NULL;
|
|
|
6405db |
+ while (node) {
|
|
|
6405db |
+ next_node = node->next;
|
|
|
6405db |
+ ndn_thread_cache_value_destroy(t_cache, node);
|
|
|
6405db |
+ node = next_node;
|
|
|
6405db |
+ }
|
|
|
6405db |
+ slapi_ch_free((void **)&t_cache);
|
|
|
6405db |
+}
|
|
|
6405db |
+
|
|
|
6405db |
+static void
|
|
|
6405db |
+ndn_cache_key_init() {
|
|
|
6405db |
+ if (pthread_key_create(&ndn_cache_key, ndn_thread_cache_destroy) != 0) {
|
|
|
6405db |
+ /* Log a scary warning? */
|
|
|
6405db |
+ slapi_log_err(SLAPI_LOG_ERR, "ndn_cache_init", "Failed to create pthread key, aborting.\n");
|
|
|
6405db |
}
|
|
|
6405db |
- return hash;
|
|
|
6405db |
}
|
|
|
6405db |
|
|
|
6405db |
void
|
|
|
6405db |
ndn_cache_init()
|
|
|
6405db |
{
|
|
|
6405db |
- if(!config_get_ndn_cache_enabled() || ndn_started){
|
|
|
6405db |
+ ndn_enabled = config_get_ndn_cache_enabled();
|
|
|
6405db |
+ if (ndn_enabled == 0) {
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * Don't configure the keys or anything, need a restart
|
|
|
6405db |
+ * to enable. We'll just never use ndn cache in this
|
|
|
6405db |
+ * run.
|
|
|
6405db |
+ */
|
|
|
6405db |
return;
|
|
|
6405db |
}
|
|
|
6405db |
- ndn_cache_hashtable = PL_NewHashTable( NDN_CACHE_BUCKETS, ndn_hash_string, PL_CompareStrings, PL_CompareValues, 0, 0);
|
|
|
6405db |
- ndn_cache = (struct ndn_cache_ctx *)slapi_ch_malloc(sizeof(struct ndn_cache_ctx));
|
|
|
6405db |
- ndn_cache->cache_max_size = config_get_ndn_cache_size();
|
|
|
6405db |
- ndn_cache->cache_hits = slapi_counter_new();
|
|
|
6405db |
- ndn_cache->cache_tries = slapi_counter_new();
|
|
|
6405db |
- ndn_cache->cache_misses = slapi_counter_new();
|
|
|
6405db |
- ndn_cache->cache_count = 0;
|
|
|
6405db |
- ndn_cache->cache_size = sizeof(struct ndn_cache_ctx) + sizeof(PLHashTable) + sizeof(PLHashTable);
|
|
|
6405db |
- ndn_cache->head = NULL;
|
|
|
6405db |
- ndn_cache->tail = NULL;
|
|
|
6405db |
- ndn_started = 1;
|
|
|
6405db |
- if ( NULL == ( lru_lock = PR_NewLock()) || NULL == ( ndn_cache_lock = slapi_new_rwlock())) {
|
|
|
6405db |
- ndn_cache_destroy();
|
|
|
6405db |
- slapi_log_err(SLAPI_LOG_ERR, "ndn_cache_init", "Failed to create locks. Disabling cache.\n" );
|
|
|
6405db |
+
|
|
|
6405db |
+ /* Create the pthread key */
|
|
|
6405db |
+ (void)pthread_once(&ndn_cache_key_once, ndn_cache_key_init);
|
|
|
6405db |
+
|
|
|
6405db |
+ /* Create the global stats. */
|
|
|
6405db |
+ t_cache_stats.max_size = config_get_ndn_cache_size();
|
|
|
6405db |
+ t_cache_stats.cache_evicts = slapi_counter_new();
|
|
|
6405db |
+ t_cache_stats.cache_tries = slapi_counter_new();
|
|
|
6405db |
+ t_cache_stats.cache_hits = slapi_counter_new();
|
|
|
6405db |
+ t_cache_stats.cache_count = slapi_counter_new();
|
|
|
6405db |
+ t_cache_stats.cache_size = slapi_counter_new();
|
|
|
6405db |
+ /* Get thread numbers and calc the per thread size */
|
|
|
6405db |
+ int32_t maxthreads = (int32_t)config_get_threadnumber();
|
|
|
6405db |
+ size_t tentative_size = t_cache_stats.max_size / maxthreads;
|
|
|
6405db |
+ if (tentative_size < NDN_CACHE_MINIMUM_CAPACITY) {
|
|
|
6405db |
+ tentative_size = NDN_CACHE_MINIMUM_CAPACITY;
|
|
|
6405db |
+ t_cache_stats.max_size = NDN_CACHE_MINIMUM_CAPACITY * maxthreads;
|
|
|
6405db |
+ }
|
|
|
6405db |
+ t_cache_stats.thread_max_size = tentative_size;
|
|
|
6405db |
+
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * Slots *must* be a power of two, even if the number of entries
|
|
|
6405db |
+ * we store will be *less* than this.
|
|
|
6405db |
+ */
|
|
|
6405db |
+ size_t possible_elements = tentative_size / NDN_ENTRY_AVG_SIZE;
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * So this is like 1048576 / 168, so we get 6241. Now we need to
|
|
|
6405db |
+ * shift this to get the number of bits.
|
|
|
6405db |
+ */
|
|
|
6405db |
+ size_t shifts = 0;
|
|
|
6405db |
+ while (possible_elements > 0) {
|
|
|
6405db |
+ shifts++;
|
|
|
6405db |
+ possible_elements = possible_elements >> 1;
|
|
|
6405db |
}
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * So now we can use this to make the slot count.
|
|
|
6405db |
+ */
|
|
|
6405db |
+ t_cache_stats.slots = 1 << shifts;
|
|
|
6405db |
+ /* Done? */
|
|
|
6405db |
+ return;
|
|
|
6405db |
}
|
|
|
6405db |
|
|
|
6405db |
void
|
|
|
6405db |
ndn_cache_destroy()
|
|
|
6405db |
{
|
|
|
6405db |
- if(!ndn_started){
|
|
|
6405db |
+ if (ndn_enabled == 0) {
|
|
|
6405db |
return;
|
|
|
6405db |
}
|
|
|
6405db |
- if(lru_lock){
|
|
|
6405db |
- PR_DestroyLock(lru_lock);
|
|
|
6405db |
- lru_lock = NULL;
|
|
|
6405db |
- }
|
|
|
6405db |
- if(ndn_cache_lock){
|
|
|
6405db |
- slapi_destroy_rwlock(ndn_cache_lock);
|
|
|
6405db |
- ndn_cache_lock = NULL;
|
|
|
6405db |
- }
|
|
|
6405db |
- if(ndn_cache_hashtable){
|
|
|
6405db |
- ndn_cache_free();
|
|
|
6405db |
- PL_HashTableDestroy(ndn_cache_hashtable);
|
|
|
6405db |
- ndn_cache_hashtable = NULL;
|
|
|
6405db |
- }
|
|
|
6405db |
- config_set_ndn_cache_enabled(CONFIG_NDN_CACHE, "off", NULL, 1 );
|
|
|
6405db |
- slapi_counter_destroy(&ndn_cache->cache_hits);
|
|
|
6405db |
- slapi_counter_destroy(&ndn_cache->cache_tries);
|
|
|
6405db |
- slapi_counter_destroy(&ndn_cache->cache_misses);
|
|
|
6405db |
- slapi_ch_free((void **)&ndn_cache);
|
|
|
6405db |
-
|
|
|
6405db |
- ndn_started = 0;
|
|
|
6405db |
+ slapi_counter_destroy(&(t_cache_stats.cache_tries));
|
|
|
6405db |
+ slapi_counter_destroy(&(t_cache_stats.cache_hits));
|
|
|
6405db |
+ slapi_counter_destroy(&(t_cache_stats.cache_count));
|
|
|
6405db |
+ slapi_counter_destroy(&(t_cache_stats.cache_size));
|
|
|
6405db |
+ slapi_counter_destroy(&(t_cache_stats.cache_evicts));
|
|
|
6405db |
}
|
|
|
6405db |
|
|
|
6405db |
int
|
|
|
6405db |
ndn_cache_started()
|
|
|
6405db |
{
|
|
|
6405db |
- return ndn_started;
|
|
|
6405db |
+ return ndn_enabled;
|
|
|
6405db |
}
|
|
|
6405db |
|
|
|
6405db |
/*
|
|
|
6405db |
* Look up this dn in the ndn cache
|
|
|
6405db |
*/
|
|
|
6405db |
static int
|
|
|
6405db |
-ndn_cache_lookup(char *dn, size_t dn_len, char **result, char **udn, int *rc)
|
|
|
6405db |
+ndn_cache_lookup(char *dn, size_t dn_len, char **ndn, char **udn, int *rc)
|
|
|
6405db |
{
|
|
|
6405db |
- struct ndn_hash_val *ndn_ht_val = NULL;
|
|
|
6405db |
- char *ndn, *key;
|
|
|
6405db |
- int rv = 0;
|
|
|
6405db |
-
|
|
|
6405db |
- if(NULL == udn){
|
|
|
6405db |
- return rv;
|
|
|
6405db |
+ if (ndn_enabled == 0 || NULL == udn) {
|
|
|
6405db |
+ return 0;
|
|
|
6405db |
}
|
|
|
6405db |
*udn = NULL;
|
|
|
6405db |
- if(ndn_started == 0){
|
|
|
6405db |
- return rv;
|
|
|
6405db |
- }
|
|
|
6405db |
- if(dn_len == 0){
|
|
|
6405db |
- *result = dn;
|
|
|
6405db |
+
|
|
|
6405db |
+ if (dn_len == 0) {
|
|
|
6405db |
+ *ndn = dn;
|
|
|
6405db |
*rc = 0;
|
|
|
6405db |
return 1;
|
|
|
6405db |
}
|
|
|
6405db |
- slapi_counter_increment(ndn_cache->cache_tries);
|
|
|
6405db |
- slapi_rwlock_rdlock(ndn_cache_lock);
|
|
|
6405db |
- ndn_ht_val = (struct ndn_hash_val *)PL_HashTableLookupConst(ndn_cache_hashtable, dn);
|
|
|
6405db |
- if(ndn_ht_val){
|
|
|
6405db |
- ndn_cache_update_lru(&ndn_ht_val->lru_node);
|
|
|
6405db |
- slapi_counter_increment(ndn_cache->cache_hits);
|
|
|
6405db |
- if ((ndn_ht_val->len != dn_len) ||
|
|
|
6405db |
- /* even if the lengths match, dn may not be normalized yet.
|
|
|
6405db |
- * (e.g., 'cn="o=ABC",o=XYZ' vs. 'cn=o\3DABC,o=XYZ') */
|
|
|
6405db |
- (memcmp(dn, ndn_ht_val->ndn, dn_len))){
|
|
|
6405db |
- *rc = 1; /* free result */
|
|
|
6405db |
- ndn = slapi_ch_malloc(ndn_ht_val->len + 1);
|
|
|
6405db |
- memcpy(ndn, ndn_ht_val->ndn, ndn_ht_val->len);
|
|
|
6405db |
- ndn[ndn_ht_val->len] = '\0';
|
|
|
6405db |
- *result = ndn;
|
|
|
6405db |
- } else {
|
|
|
6405db |
- /* the dn was already normalized, just return the dn as the result */
|
|
|
6405db |
- *result = dn;
|
|
|
6405db |
- *rc = 0;
|
|
|
6405db |
- }
|
|
|
6405db |
- rv = 1;
|
|
|
6405db |
- } else {
|
|
|
6405db |
- /* copy/preserve the udn, so we can use it as the key when we add dn's to the hashtable */
|
|
|
6405db |
- key = slapi_ch_malloc(dn_len + 1);
|
|
|
6405db |
- memcpy(key, dn, dn_len);
|
|
|
6405db |
- key[dn_len] = '\0';
|
|
|
6405db |
- *udn = key;
|
|
|
6405db |
+
|
|
|
6405db |
+ struct ndn_cache *t_cache = pthread_getspecific(ndn_cache_key);
|
|
|
6405db |
+ if (t_cache == NULL) {
|
|
|
6405db |
+ t_cache = ndn_thread_cache_create(t_cache_stats.thread_max_size, t_cache_stats.slots);
|
|
|
6405db |
+ pthread_setspecific(ndn_cache_key, t_cache);
|
|
|
6405db |
+ /* If we have no cache, we can't look up ... */
|
|
|
6405db |
+ return 0;
|
|
|
6405db |
}
|
|
|
6405db |
- slapi_rwlock_unlock(ndn_cache_lock);
|
|
|
6405db |
|
|
|
6405db |
- return rv;
|
|
|
6405db |
-}
|
|
|
6405db |
+ t_cache->tries++;
|
|
|
6405db |
|
|
|
6405db |
-/*
|
|
|
6405db |
- * Move this lru node to the top of the list
|
|
|
6405db |
- */
|
|
|
6405db |
-static void
|
|
|
6405db |
-ndn_cache_update_lru(struct ndn_cache_lru **node)
|
|
|
6405db |
-{
|
|
|
6405db |
- struct ndn_cache_lru *prev, *next, *curr_node = *node;
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * Hash our DN ...
|
|
|
6405db |
+ */
|
|
|
6405db |
+ uint64_t dn_hash = sds_siphash13(dn, dn_len, t_cache->key);
|
|
|
6405db |
+ /* Where should it be? */
|
|
|
6405db |
+ size_t expect_slot = dn_hash % t_cache->slots;
|
|
|
6405db |
|
|
|
6405db |
- if(curr_node == NULL){
|
|
|
6405db |
- return;
|
|
|
6405db |
- }
|
|
|
6405db |
- PR_Lock(lru_lock);
|
|
|
6405db |
- if(curr_node->prev == NULL){
|
|
|
6405db |
- /* already the top node */
|
|
|
6405db |
- PR_Unlock(lru_lock);
|
|
|
6405db |
- return;
|
|
|
6405db |
- }
|
|
|
6405db |
- prev = curr_node->prev;
|
|
|
6405db |
- next = curr_node->next;
|
|
|
6405db |
- if(next){
|
|
|
6405db |
- next->prev = prev;
|
|
|
6405db |
- prev->next = next;
|
|
|
6405db |
- } else {
|
|
|
6405db |
- /* this was the tail, so reset the tail */
|
|
|
6405db |
- ndn_cache->tail = prev;
|
|
|
6405db |
- prev->next = NULL;
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * Is it there?
|
|
|
6405db |
+ */
|
|
|
6405db |
+ if (t_cache->table[expect_slot] != NULL) {
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * Check it really matches, could be collision.
|
|
|
6405db |
+ */
|
|
|
6405db |
+ struct ndn_cache_value *node = t_cache->table[expect_slot];
|
|
|
6405db |
+ while (node != NULL) {
|
|
|
6405db |
+ if (strcmp(dn, node->dn) == 0) {
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * Update LRU
|
|
|
6405db |
+ * Are we already the tail? If so, we can just skip.
|
|
|
6405db |
+ * remember, this means in a set of 1, we will always be tail
|
|
|
6405db |
+ */
|
|
|
6405db |
+ if (t_cache->tail != node) {
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * Okay, we are *not* the tail. We could be anywhere between
|
|
|
6405db |
+ * tail -> ... -> x -> head
|
|
|
6405db |
+ * or even, we are the head ourself.
|
|
|
6405db |
+ */
|
|
|
6405db |
+ if (t_cache->head == node) {
|
|
|
6405db |
+ /* We are the head, update head to our predecessor */
|
|
|
6405db |
+ t_cache->head = node->prev;
|
|
|
6405db |
+ /* Remember, the head has no next. */
|
|
|
6405db |
+ t_cache->head->next = NULL;
|
|
|
6405db |
+ } else {
|
|
|
6405db |
+ /* Right, we aren't the head, so we have a next node. */
|
|
|
6405db |
+ node->next->prev = node->prev;
|
|
|
6405db |
+ }
|
|
|
6405db |
+ /* Because we must be in the middle somewhere, we can assume next and prev exist. */
|
|
|
6405db |
+ node->prev->next = node->next;
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * Tail can't be NULL if we have a value in the cache, so we can
|
|
|
6405db |
+ * just deref this.
|
|
|
6405db |
+ */
|
|
|
6405db |
+ node->next = t_cache->tail;
|
|
|
6405db |
+ t_cache->tail->prev = node;
|
|
|
6405db |
+ t_cache->tail = node;
|
|
|
6405db |
+ node->prev = NULL;
|
|
|
6405db |
+ }
|
|
|
6405db |
+ /* Update that we have a hit.*/
|
|
|
6405db |
+ t_cache->hits++;
|
|
|
6405db |
+ /* Cope the NDN to the caller. */
|
|
|
6405db |
+ *ndn = slapi_ch_strdup(node->ndn);
|
|
|
6405db |
+ /* Indicate to the caller to free this. */
|
|
|
6405db |
+ *rc = 1;
|
|
|
6405db |
+ ndn_thread_cache_commit_status(t_cache);
|
|
|
6405db |
+ return 1;
|
|
|
6405db |
+ }
|
|
|
6405db |
+ node = node->child;
|
|
|
6405db |
+ }
|
|
|
6405db |
}
|
|
|
6405db |
- curr_node->prev = NULL;
|
|
|
6405db |
- curr_node->next = ndn_cache->head;
|
|
|
6405db |
- ndn_cache->head->prev = curr_node;
|
|
|
6405db |
- ndn_cache->head = curr_node;
|
|
|
6405db |
- PR_Unlock(lru_lock);
|
|
|
6405db |
+ /* If we miss, we need to duplicate dn to udn here. */
|
|
|
6405db |
+ *udn = slapi_ch_strdup(dn);
|
|
|
6405db |
+ *rc = 0;
|
|
|
6405db |
+ ndn_thread_cache_commit_status(t_cache);
|
|
|
6405db |
+ return 0;
|
|
|
6405db |
}
|
|
|
6405db |
|
|
|
6405db |
/*
|
|
|
6405db |
@@ -2936,176 +3233,102 @@ ndn_cache_update_lru(struct ndn_cache_lru **node)
|
|
|
6405db |
static void
|
|
|
6405db |
ndn_cache_add(char *dn, size_t dn_len, char *ndn, size_t ndn_len)
|
|
|
6405db |
{
|
|
|
6405db |
- struct ndn_hash_val *ht_entry;
|
|
|
6405db |
- struct ndn_cache_lru *new_node = NULL;
|
|
|
6405db |
- PLHashEntry *he;
|
|
|
6405db |
- int size;
|
|
|
6405db |
-
|
|
|
6405db |
- if(ndn_started == 0 || dn_len == 0){
|
|
|
6405db |
+ if (ndn_enabled == 0) {
|
|
|
6405db |
return;
|
|
|
6405db |
}
|
|
|
6405db |
- if(strlen(ndn) > ndn_len){
|
|
|
6405db |
+ if (dn_len == 0) {
|
|
|
6405db |
+ return;
|
|
|
6405db |
+ }
|
|
|
6405db |
+ if (strlen(ndn) > ndn_len) {
|
|
|
6405db |
/* we need to null terminate the ndn */
|
|
|
6405db |
*(ndn + ndn_len) = '\0';
|
|
|
6405db |
}
|
|
|
6405db |
/*
|
|
|
6405db |
* Calculate the approximate memory footprint of the hash entry, key, and lru entry.
|
|
|
6405db |
*/
|
|
|
6405db |
- size = (dn_len * 2) + ndn_len + sizeof(PLHashEntry) + sizeof(struct ndn_hash_val) + sizeof(struct ndn_cache_lru);
|
|
|
6405db |
+ struct ndn_cache_value *new_value = (struct ndn_cache_value *)slapi_ch_calloc(1, sizeof(struct ndn_cache_value));
|
|
|
6405db |
+ new_value->size = sizeof(struct ndn_cache_value) + dn_len + ndn_len;
|
|
|
6405db |
+ /* DN is alloc for us */
|
|
|
6405db |
+ new_value->dn = dn;
|
|
|
6405db |
+ /* But we need to copy ndn */
|
|
|
6405db |
+ new_value->ndn = slapi_ch_strdup(ndn);
|
|
|
6405db |
+
|
|
|
6405db |
/*
|
|
|
6405db |
- * Create our LRU node
|
|
|
6405db |
+ * Get our local cache out.
|
|
|
6405db |
*/
|
|
|
6405db |
- new_node = (struct ndn_cache_lru *)slapi_ch_malloc(sizeof(struct ndn_cache_lru));
|
|
|
6405db |
- if(new_node == NULL){
|
|
|
6405db |
- slapi_log_err(SLAPI_LOG_ERR, "ndn_cache_add", "Failed to allocate new lru node.\n");
|
|
|
6405db |
- return;
|
|
|
6405db |
+ struct ndn_cache *t_cache = pthread_getspecific(ndn_cache_key);
|
|
|
6405db |
+ if (t_cache == NULL) {
|
|
|
6405db |
+ t_cache = ndn_thread_cache_create(t_cache_stats.thread_max_size, t_cache_stats.slots);
|
|
|
6405db |
+ pthread_setspecific(ndn_cache_key, t_cache);
|
|
|
6405db |
}
|
|
|
6405db |
- new_node->prev = NULL;
|
|
|
6405db |
- new_node->key = dn; /* dn has already been allocated */
|
|
|
6405db |
/*
|
|
|
6405db |
- * Its possible this dn was added to the hash by another thread.
|
|
|
6405db |
+ * Hash the DN
|
|
|
6405db |
*/
|
|
|
6405db |
- slapi_rwlock_wrlock(ndn_cache_lock);
|
|
|
6405db |
- ht_entry = (struct ndn_hash_val *)PL_HashTableLookupConst(ndn_cache_hashtable, dn);
|
|
|
6405db |
- if(ht_entry){
|
|
|
6405db |
- /* already exists, free the node and return */
|
|
|
6405db |
- slapi_rwlock_unlock(ndn_cache_lock);
|
|
|
6405db |
- slapi_ch_free_string(&new_node->key);
|
|
|
6405db |
- slapi_ch_free((void **)&new_node);
|
|
|
6405db |
- return;
|
|
|
6405db |
- }
|
|
|
6405db |
+ uint64_t dn_hash = sds_siphash13(new_value->dn, dn_len, t_cache->key);
|
|
|
6405db |
/*
|
|
|
6405db |
- * Create the hash entry
|
|
|
6405db |
+ * Get the insert slot: This works because the number spaces of dn_hash is
|
|
|
6405db |
+ * a 64bit int, and slots is a power of two. As a result, we end up with
|
|
|
6405db |
+ * even distribution of the values.
|
|
|
6405db |
*/
|
|
|
6405db |
- ht_entry = (struct ndn_hash_val *)slapi_ch_malloc(sizeof(struct ndn_hash_val));
|
|
|
6405db |
- if(ht_entry == NULL){
|
|
|
6405db |
- slapi_rwlock_unlock(ndn_cache_lock);
|
|
|
6405db |
- slapi_log_err(SLAPI_LOG_ERR, "ndn_cache_add", "Failed to allocate new hash entry.\n");
|
|
|
6405db |
- slapi_ch_free_string(&new_node->key);
|
|
|
6405db |
- slapi_ch_free((void **)&new_node);
|
|
|
6405db |
- return;
|
|
|
6405db |
- }
|
|
|
6405db |
- ht_entry->ndn = slapi_ch_malloc(ndn_len + 1);
|
|
|
6405db |
- memcpy(ht_entry->ndn, ndn, ndn_len);
|
|
|
6405db |
- ht_entry->ndn[ndn_len] = '\0';
|
|
|
6405db |
- ht_entry->len = ndn_len;
|
|
|
6405db |
- ht_entry->size = size;
|
|
|
6405db |
- ht_entry->lru_node = new_node;
|
|
|
6405db |
+ size_t insert_slot = dn_hash % t_cache->slots;
|
|
|
6405db |
+ /* Track this for free */
|
|
|
6405db |
+ new_value->slot = insert_slot;
|
|
|
6405db |
+
|
|
|
6405db |
/*
|
|
|
6405db |
- * Check if our cache is full
|
|
|
6405db |
+ * Okay, check if we have space, else we need to trim nodes from
|
|
|
6405db |
+ * the LRU
|
|
|
6405db |
*/
|
|
|
6405db |
- PR_Lock(lru_lock); /* grab the lru lock now, as ndn_cache_flush needs it */
|
|
|
6405db |
- if(ndn_cache->cache_max_size != 0 && ((ndn_cache->cache_size + size) > ndn_cache->cache_max_size)){
|
|
|
6405db |
- ndn_cache_flush();
|
|
|
6405db |
+ while (t_cache->head && (t_cache->size + new_value->size) > t_cache->max_size) {
|
|
|
6405db |
+ struct ndn_cache_value *trim_node = t_cache->head;
|
|
|
6405db |
+ ndn_thread_cache_value_destroy(t_cache, trim_node);
|
|
|
6405db |
}
|
|
|
6405db |
+
|
|
|
6405db |
/*
|
|
|
6405db |
- * Set the ndn cache lru nodes
|
|
|
6405db |
+ * Add it!
|
|
|
6405db |
*/
|
|
|
6405db |
- if(ndn_cache->head == NULL && ndn_cache->tail == NULL){
|
|
|
6405db |
- /* this is the first node */
|
|
|
6405db |
- ndn_cache->head = new_node;
|
|
|
6405db |
- ndn_cache->tail = new_node;
|
|
|
6405db |
- new_node->next = NULL;
|
|
|
6405db |
+ if (t_cache->table[insert_slot] == NULL) {
|
|
|
6405db |
+ t_cache->table[insert_slot] = new_value;
|
|
|
6405db |
} else {
|
|
|
6405db |
- new_node->next = ndn_cache->head;
|
|
|
6405db |
- if(ndn_cache->head)
|
|
|
6405db |
- ndn_cache->head->prev = new_node;
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * Hash collision! We need to replace the bucket then ....
|
|
|
6405db |
+ * insert at the head of the slot to make this simpler.
|
|
|
6405db |
+ */
|
|
|
6405db |
+ new_value->child = t_cache->table[insert_slot];
|
|
|
6405db |
+ t_cache->table[insert_slot] = new_value;
|
|
|
6405db |
}
|
|
|
6405db |
- ndn_cache->head = new_node;
|
|
|
6405db |
- PR_Unlock(lru_lock);
|
|
|
6405db |
+
|
|
|
6405db |
/*
|
|
|
6405db |
- * Add the new object to the hashtable, and update our stats
|
|
|
6405db |
+ * Finally, stick this onto the tail because it's the newest.
|
|
|
6405db |
*/
|
|
|
6405db |
- he = PL_HashTableAdd(ndn_cache_hashtable, new_node->key, (void *)ht_entry);
|
|
|
6405db |
- if(he == NULL){
|
|
|
6405db |
- slapi_log_err(SLAPI_LOG_ERR, "ndn_cache_add", "Failed to add new entry to hash(%s)\n",dn);
|
|
|
6405db |
- } else {
|
|
|
6405db |
- ndn_cache->cache_count++;
|
|
|
6405db |
- ndn_cache->cache_size += size;
|
|
|
6405db |
+ if (t_cache->head == NULL) {
|
|
|
6405db |
+ t_cache->head = new_value;
|
|
|
6405db |
}
|
|
|
6405db |
- slapi_rwlock_unlock(ndn_cache_lock);
|
|
|
6405db |
-}
|
|
|
6405db |
-
|
|
|
6405db |
-/*
|
|
|
6405db |
- * cache is full, remove the least used dn's. lru_lock/ndn_cache write lock are already taken
|
|
|
6405db |
- */
|
|
|
6405db |
-static void
|
|
|
6405db |
-ndn_cache_flush(void)
|
|
|
6405db |
-{
|
|
|
6405db |
- struct ndn_cache_lru *node, *next, *flush_node;
|
|
|
6405db |
- int i;
|
|
|
6405db |
-
|
|
|
6405db |
- node = ndn_cache->tail;
|
|
|
6405db |
- for(i = 0; node && i < NDN_FLUSH_COUNT && ndn_cache->cache_count > NDN_MIN_COUNT; i++){
|
|
|
6405db |
- flush_node = node;
|
|
|
6405db |
- /* update the lru */
|
|
|
6405db |
- next = node->prev;
|
|
|
6405db |
- next->next = NULL;
|
|
|
6405db |
- ndn_cache->tail = next;
|
|
|
6405db |
- node = next;
|
|
|
6405db |
- /* now update the hash */
|
|
|
6405db |
- ndn_cache->cache_count--;
|
|
|
6405db |
- ndn_cache_delete(flush_node->key);
|
|
|
6405db |
- slapi_ch_free_string(&flush_node->key);
|
|
|
6405db |
- slapi_ch_free((void **)&flush_node);
|
|
|
6405db |
+ if (t_cache->tail != NULL) {
|
|
|
6405db |
+ new_value->next = t_cache->tail;
|
|
|
6405db |
+ t_cache->tail->prev = new_value;
|
|
|
6405db |
}
|
|
|
6405db |
+ t_cache->tail = new_value;
|
|
|
6405db |
|
|
|
6405db |
- slapi_log_err(SLAPI_LOG_CACHE, "ndn_cache_flush","Flushed cache.\n");
|
|
|
6405db |
-}
|
|
|
6405db |
-
|
|
|
6405db |
-static void
|
|
|
6405db |
-ndn_cache_free(void)
|
|
|
6405db |
-{
|
|
|
6405db |
- struct ndn_cache_lru *node, *next, *flush_node;
|
|
|
6405db |
-
|
|
|
6405db |
- if(!ndn_cache){
|
|
|
6405db |
- return;
|
|
|
6405db |
- }
|
|
|
6405db |
-
|
|
|
6405db |
- node = ndn_cache->tail;
|
|
|
6405db |
- while(node && ndn_cache->cache_count){
|
|
|
6405db |
- flush_node = node;
|
|
|
6405db |
- /* update the lru */
|
|
|
6405db |
- next = node->prev;
|
|
|
6405db |
- if(next){
|
|
|
6405db |
- next->next = NULL;
|
|
|
6405db |
- }
|
|
|
6405db |
- ndn_cache->tail = next;
|
|
|
6405db |
- node = next;
|
|
|
6405db |
- /* now update the hash */
|
|
|
6405db |
- ndn_cache->cache_count--;
|
|
|
6405db |
- ndn_cache_delete(flush_node->key);
|
|
|
6405db |
- slapi_ch_free_string(&flush_node->key);
|
|
|
6405db |
- slapi_ch_free((void **)&flush_node);
|
|
|
6405db |
- }
|
|
|
6405db |
-}
|
|
|
6405db |
-
|
|
|
6405db |
-/* this is already "write" locked from ndn_cache_add */
|
|
|
6405db |
-static void
|
|
|
6405db |
-ndn_cache_delete(char *dn)
|
|
|
6405db |
-{
|
|
|
6405db |
- struct ndn_hash_val *ht_entry;
|
|
|
6405db |
+ /*
|
|
|
6405db |
+ * And update the stats.
|
|
|
6405db |
+ */
|
|
|
6405db |
+ t_cache->size = t_cache->size + new_value->size;
|
|
|
6405db |
+ t_cache->count++;
|
|
|
6405db |
|
|
|
6405db |
- ht_entry = (struct ndn_hash_val *)PL_HashTableLookupConst(ndn_cache_hashtable, dn);
|
|
|
6405db |
- if(ht_entry){
|
|
|
6405db |
- ndn_cache->cache_size -= ht_entry->size;
|
|
|
6405db |
- slapi_ch_free_string(&ht_entry->ndn);
|
|
|
6405db |
- slapi_ch_free((void **)&ht_entry);
|
|
|
6405db |
- PL_HashTableRemove(ndn_cache_hashtable, dn);
|
|
|
6405db |
- }
|
|
|
6405db |
}
|
|
|
6405db |
|
|
|
6405db |
/* stats for monitor */
|
|
|
6405db |
void
|
|
|
6405db |
-ndn_cache_get_stats(PRUint64 *hits, PRUint64 *tries, size_t *size, size_t *max_size, long *count)
|
|
|
6405db |
-{
|
|
|
6405db |
- slapi_rwlock_rdlock(ndn_cache_lock);
|
|
|
6405db |
- *hits = slapi_counter_get_value(ndn_cache->cache_hits);
|
|
|
6405db |
- *tries = slapi_counter_get_value(ndn_cache->cache_tries);
|
|
|
6405db |
- *size = ndn_cache->cache_size;
|
|
|
6405db |
- *max_size = ndn_cache->cache_max_size;
|
|
|
6405db |
- *count = ndn_cache->cache_count;
|
|
|
6405db |
- slapi_rwlock_unlock(ndn_cache_lock);
|
|
|
6405db |
+ndn_cache_get_stats(PRUint64 *hits, PRUint64 *tries, size_t *size, size_t *max_size, size_t *thread_size, size_t *evicts, size_t *slots, long *count)
|
|
|
6405db |
+{
|
|
|
6405db |
+ *max_size = t_cache_stats.max_size;
|
|
|
6405db |
+ *thread_size = t_cache_stats.thread_max_size;
|
|
|
6405db |
+ *slots = t_cache_stats.slots;
|
|
|
6405db |
+ *evicts = slapi_counter_get_value(t_cache_stats.cache_evicts);
|
|
|
6405db |
+ *hits = slapi_counter_get_value(t_cache_stats.cache_hits);
|
|
|
6405db |
+ *tries = slapi_counter_get_value(t_cache_stats.cache_tries);
|
|
|
6405db |
+ *size = slapi_counter_get_value(t_cache_stats.cache_size);
|
|
|
6405db |
+ *count = slapi_counter_get_value(t_cache_stats.cache_count);
|
|
|
6405db |
}
|
|
|
6405db |
|
|
|
6405db |
/* Common ancestor sdn is allocated.
|
|
|
6405db |
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
|
|
|
6405db |
index 3910dbe..68b59f3 100644
|
|
|
6405db |
--- a/ldap/servers/slapd/slapi-private.h
|
|
|
6405db |
+++ b/ldap/servers/slapd/slapi-private.h
|
|
|
6405db |
@@ -380,7 +380,7 @@ char *slapi_dn_normalize_case_original( char *dn );
|
|
|
6405db |
void ndn_cache_init(void);
|
|
|
6405db |
void ndn_cache_destroy(void);
|
|
|
6405db |
int ndn_cache_started(void);
|
|
|
6405db |
-void ndn_cache_get_stats(PRUint64 *hits, PRUint64 *tries, size_t *size, size_t *max_size, long *count);
|
|
|
6405db |
+void ndn_cache_get_stats(PRUint64 *hits, PRUint64 *tries, size_t *size, size_t *max_size, size_t *thread_size, size_t *evicts, size_t *slots, long *count);
|
|
|
6405db |
#define NDN_DEFAULT_SIZE 20971520 /* 20mb - size of normalized dn cache */
|
|
|
6405db |
|
|
|
6405db |
/* filter.c */
|
|
|
6405db |
--
|
|
|
6405db |
2.9.4
|
|
|
6405db |
|