From 80e8d8fc8eb44d45af5285308cda37553611f688 Mon Sep 17 00:00:00 2001
From: William Brown <firstyear@redhat.com>
Date: Sat, 9 Jul 2016 19:02:37 +1000
Subject: [PATCH 04/15] Ticket 48916 - DNA Threshold set to 0 causes SIGFPE

Bug Description: If the DNA threshold was set to 0, a divide by zero would
occur when requesting ranges.

Fix Description: Prevent the config from setting a value of 0 for the DNA
threshold.

If an existing site has a threshold of 0, we guard the divide operation and
return an operations error instead.

https://fedorahosted.org/389/ticket/48916

Author: wibrown

Review by: nhosoi, mreynolds (Thank you!)

(cherry picked from commit 05ebb6d10cf0ec8e03c59bade7f819ddb1fdcf78)
---
 .gitignore                                    |   1 +
 dirsrvtests/tests/tickets/ticket48916_test.py | 253 ++++++++++++++++++++++++++
 ldap/servers/plugins/dna/dna.c                |  40 +++-
 3 files changed, 289 insertions(+), 5 deletions(-)
 create mode 100644 dirsrvtests/tests/tickets/ticket48916_test.py

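Reviewer note (this paragraph and the sketch below are not part of the commit,
and git am ignores everything between the diffstat and the first diff header):
the failure mode and the guard added in dna.c below can be reproduced in
isolation. The struct and function names here are invented stand-ins for the
plugin's configEntry and dna_release_range(), used only to illustrate why an
unchecked threshold of 0 is fatal.

#include <stdio.h>

/* Simplified stand-in for the relevant configEntry fields. */
struct range_cfg {
    unsigned long next_range_lower;
    unsigned long next_range_upper;
    unsigned long threshold;
};

/* Number of values to hand back from the next range.  Without the
 * threshold check, a threshold of 0 makes the division below raise
 * SIGFPE, which is the crash described in ticket 48916. */
static unsigned long release_count(const struct range_cfg *cfg)
{
    if (cfg->threshold == 0) {
        return 0; /* caller treats this as "unwilling to perform" */
    }
    return (((cfg->next_range_upper - cfg->next_range_lower + 1) / 2)
            / cfg->threshold) * cfg->threshold;
}

int main(void)
{
    struct range_cfg cfg = { 80, 90, 0 }; /* dnaNextRange 80-90, dnaThreshold 0 */
    printf("values to release: %lu\n", release_count(&cfg));
    return 0;
}
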
diff --git a/.gitignore b/.gitignore
index f6583c2..f92bcd8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ autom4te.cache
 .cproject
 .project
 .settings
+.cache
 *.a
 *.dirstamp
 *.la
diff --git a/dirsrvtests/tests/tickets/ticket48916_test.py b/dirsrvtests/tests/tickets/ticket48916_test.py
new file mode 100644
index 0000000..44c96da
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket48916_test.py
@@ -0,0 +1,253 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+DEBUGGING = False
+
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+
+
+log = logging.getLogger(__name__)
+
+
+class TopologyReplication(object):
+    """The Replication Topology Class"""
+    def __init__(self, master1, master2):
+        """Init"""
+        master1.open()
+        self.master1 = master1
+        master2.open()
+        self.master2 = master2
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    """Create Replication Deployment"""
+
+    # Creating master 1...
+    if DEBUGGING:
+        master1 = DirSrv(verbose=True)
+    else:
+        master1 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_1
+    args_instance[SER_PORT] = PORT_MASTER_1
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master1.allocate(args_master)
+    instance_master1 = master1.exists()
+    if instance_master1:
+        master1.delete()
+    master1.create()
+    master1.open()
+    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+
+    # Creating master 2...
+    if DEBUGGING:
+        master2 = DirSrv(verbose=True)
+    else:
+        master2 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_2
+    args_instance[SER_PORT] = PORT_MASTER_2
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master2.allocate(args_master)
+    instance_master2 = master2.exists()
+    if instance_master2:
+        master2.delete()
+    master2.create()
+    master2.open()
+    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+    #
+    # Create all the agreements
+    #
+    # Creating agreement from master 1 to master 2
+    properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+    if not m1_m2_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m1_m2_agmt)
+
+    # Creating agreement from master 2 to master 1
+    properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+    if not m2_m1_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m2_m1_agmt)
+
+    # Allow the replicas to get situated with the new agreements...
+    time.sleep(5)
+
+    #
+    # Initialize all the agreements
+    #
+    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+    master1.waitForReplInit(m1_m2_agmt)
+
+    # Check replication is working...
+    if master1.testReplication(DEFAULT_SUFFIX, master2):
+        log.info('Replication is working.')
+    else:
+        log.fatal('Replication is not working.')
+        assert False
+
+    def fin():
+        """If we are debugging just stop the instances, otherwise remove
+        them
+        """
+        if DEBUGGING:
+            master1.stop()
+            master2.stop()
+        else:
+            master1.delete()
+            master2.delete()
+
+    request.addfinalizer(fin)
+
+    # Clear out the tmp dir
+    master1.clearTmpDir(__file__)
+
+    return TopologyReplication(master1, master2)
+
+
+def _create_user(inst, idnum):
+    inst.add_s(Entry(
+        ('uid=user%s,ou=People,%s' % (idnum, DEFAULT_SUFFIX), {
+            'objectClass' : 'top account posixAccount'.split(' '),
+            'cn' : 'user',
+            'uid' : 'user%s' % idnum,
+            'homeDirectory' : '/home/user%s' % idnum,
+            'loginShell' : '/bin/nologin',
+            'gidNumber' : '-1',
+            'uidNumber' : '-1',
+        })
+    ))
+
+def test_ticket48916(topology):
+    """
+    https://bugzilla.redhat.com/show_bug.cgi?id=1353629
+
+    This is an issue with ID exhaustion in DNA causing a crash.
+
+    To access each DirSrv instance use: topology.master1, topology.master2,
+        ..., topology.hub1, ..., topology.consumer1,...
+
+
+    """
+
+    if DEBUGGING:
+        # Add debugging steps(if any)...
+        pass
+
+    # Enable the plugin on both servers
+
+    dna_m1 = topology.master1.plugins.get('Distributed Numeric Assignment Plugin')
+    dna_m2 = topology.master2.plugins.get('Distributed Numeric Assignment Plugin')
+
+    # Configure it
+    # Create the container for the ranges to go into.
+
+    topology.master1.add_s(Entry(
+        ('ou=Ranges,%s' % DEFAULT_SUFFIX, {
+            'objectClass' : 'top organizationalUnit'.split(' '),
+            'ou' : 'Ranges',
+        })
+    ))
+
+    # Create the dnaAdmin?
+
+    # For now we just pinch the dn from the dna_m* types, and add the relevant child config
+    # but in the future, this could be a better plugin template type from lib389
+
+    config_dn = dna_m1.dn
+
+    topology.master1.add_s(Entry(
+        ('cn=uids,%s' % config_dn, {
+            'objectClass' : 'top dnaPluginConfig'.split(' '),
+            'cn': 'uids',
+            'dnatype': 'uidNumber gidNumber'.split(' '),
+            'dnafilter': '(objectclass=posixAccount)',
+            'dnascope': '%s' % DEFAULT_SUFFIX,
+            'dnaNextValue': '1',
+            'dnaMaxValue': '50',
+            'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX,
+            'dnaThreshold': '0',
+            'dnaRangeRequestTimeout': '60',
+            'dnaMagicRegen': '-1',
+            'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX,
+            'dnaRemoteBindCred': 'secret123',
+            'dnaNextRange': '80-90'
+        })
+    ))
+
+    topology.master2.add_s(Entry(
+        ('cn=uids,%s' % config_dn, {
+            'objectClass' : 'top dnaPluginConfig'.split(' '),
+            'cn': 'uids',
+            'dnatype': 'uidNumber gidNumber'.split(' '),
+            'dnafilter': '(objectclass=posixAccount)',
+            'dnascope': '%s' % DEFAULT_SUFFIX,
+            'dnaNextValue': '61',
+            'dnaMaxValue': '70',
+            'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX,
+            'dnaThreshold': '2',
+            'dnaRangeRequestTimeout': '60',
+            'dnaMagicRegen': '-1',
+            'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX,
+            'dnaRemoteBindCred': 'secret123',
+        })
+    ))
+
+
+    # Enable the plugins
+    dna_m1.enable()
+    dna_m2.enable()
+
+    # Restart the instances
+    topology.master1.restart(60)
+    topology.master2.restart(60)
+
+    # Wait for a replication .....
+    time.sleep(40)
+
+    # Allocate the 10 members to exhaust
+
+    for i in range(1,11):
+        _create_user(topology.master2, i)
+
+    # Allocate the 11th
+    _create_user(topology.master2, 11)
+
+    log.info('Test PASSED')
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
+
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 2908443..cf640d8 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1244,6 +1244,12 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry * e, int apply)
         slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM,
                         "----------> %s [%s]\n", DNA_THRESHOLD, value);
 
+        if (entry->threshold <= 0) {
+            entry->threshold = 1;
+            slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+                            "----------> %s too low, setting to [%s]\n", DNA_THRESHOLD, value);
+        }
+
         slapi_ch_free_string(&value);
     } else {
         entry->threshold = 1;
@@ -2171,7 +2177,7 @@ static int dna_dn_is_config(char *dn)
     int ret = 0;
 
     slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
-                    "--> dna_is_config\n");
+                    "--> dna_is_config %s\n", dn);
 
     if (slapi_dn_issuffix(dn, getPluginDN())) {
         ret = 1;
@@ -3404,18 +3410,21 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
 
             /* Did we already service all of these configured types? */
             if (dna_list_contains_types(generated_types, config_entry->types)) {
+                slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " no types to act upon.\n");
                 goto next;
             }
 
             /* is the entry in scope? */
             if (config_entry->scope &&
                 !slapi_dn_issuffix(dn, config_entry->scope)) {
+                slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " dn not in scope\n");
                 goto next;
             }
 
             /* is this entry in an excluded scope? */
             for (i = 0; config_entry->excludescope && config_entry->excludescope[i]; i++) {
                 if (slapi_dn_issuffix(dn, slapi_sdn_get_dn(config_entry->excludescope[i]))) {
+                    slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " dn in excluded scope\n");
                     goto next;
                 }
             }
@@ -3424,7 +3433,8 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
             if (config_entry->slapi_filter) {
                 ret = slapi_vattr_filter_test(pb, e, config_entry->slapi_filter, 0);
                 if (LDAP_SUCCESS != ret) {
-                    goto next;
+                    slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " dn does not match filter\n");
+                    goto next;
                 }
             }
 
@@ -3454,6 +3464,8 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
             }
 
             if (types_to_generate && types_to_generate[0]) {
+
+                slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " adding %s to %s as -2\n", types_to_generate[0], dn);
                 /* add - add to entry */
                 for (i = 0; types_to_generate && types_to_generate[i]; i++) {
                     slapi_entry_attr_set_charptr(e, types_to_generate[i],
@@ -3492,6 +3504,7 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
         slapi_lock_mutex(config_entry->lock);
 
         ret = dna_first_free_value(config_entry, &setval);
+        slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " retrieved value %" PRIu64 " ret %d\n", setval, ret);
         if (LDAP_SUCCESS != ret) {
             /* check if we overflowed the configured range */
             if (setval > config_entry->maxval) {
@@ -4022,18 +4035,22 @@ static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype)
                     "--> dna_be_txn_pre_op\n");
 
     if (!slapi_plugin_running(pb)) {
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing, plugin not running\n");
         goto bail;
     }
 
     if (0 == (dn = dna_get_dn(pb))) {
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing, is dna dn\n");
         goto bail;
     }
 
     if (dna_dn_is_config(dn)) {
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing is dna config dn\n");
         goto bail;
     }
 
     if (dna_isrepl(pb)) {
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing replicated operation\n");
         /* if repl, the dna values should be already in the entry. */
         goto bail;
     }
@@ -4045,6 +4062,7 @@ static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype)
     }
 
     if (e == NULL) {
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing entry is NULL\n");
         goto bail;
     } else if (LDAP_CHANGETYPE_MODIFY == modtype) {
         slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
@@ -4056,32 +4074,39 @@ static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype)
 
     if (!PR_CLIST_IS_EMPTY(dna_global_config)) {
         list = PR_LIST_HEAD(dna_global_config);
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " using global config...\n");
 
         while (list != dna_global_config && LDAP_SUCCESS == ret) {
            config_entry = (struct configEntry *) list;
 
             /* Did we already service all of these configured types? */
             if (dna_list_contains_types(generated_types, config_entry->types)) {
+                slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " All types already serviced\n");
                 goto next;
             }
 
             /* is the entry in scope? */
             if (config_entry->scope) {
-                if (!slapi_dn_issuffix(dn, config_entry->scope))
+                if (!slapi_dn_issuffix(dn, config_entry->scope)) {
+                    slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " Entry not in scope of dnaScope!\n");
                     goto next;
+                }
             }
 
             /* is this entry in an excluded scope? */
             for (i = 0; config_entry->excludescope && config_entry->excludescope[i]; i++) {
                 if (slapi_dn_issuffix(dn, slapi_sdn_get_dn(config_entry->excludescope[i]))) {
+                    slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " Entry in excluded scope, next\n");
                     goto next;
                 }
             }
-
+
             /* does the entry match the filter? */
             if (config_entry->slapi_filter) {
-                if(LDAP_SUCCESS != slapi_vattr_filter_test(pb,e,config_entry->slapi_filter, 0))
+                if(LDAP_SUCCESS != slapi_vattr_filter_test(pb,e,config_entry->slapi_filter, 0)) {
+                    slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " Entry does not match filter\n");
                     goto next;
+                }
             }
 
             if (LDAP_CHANGETYPE_ADD == modtype) {
@@ -4526,6 +4551,11 @@ dna_release_range(char *range_dn, PRUint64 *lower, PRUint64 *upper)
          * it instead of from the active range */
         if (config_entry->next_range_lower != 0) {
             /* Release up to half of our values from the next range. */
+            if (config_entry->threshold == 0) {
+                ret = LDAP_UNWILLING_TO_PERFORM;
+                goto bail;
+            }
+
             release = (((config_entry->next_range_upper - config_entry->next_range_lower + 1) /
                         2) / config_entry->threshold) * config_entry->threshold;
 
-- 
2.4.11