|
|
dc8c34 |
From 381431775cf34bad480fbb04f3d18d65d2ca6fbe Mon Sep 17 00:00:00 2001
|
|
|
dc8c34 |
From: "Thierry bordaz (tbordaz)" <tbordaz@redhat.com>
|
|
|
dc8c34 |
Date: Fri, 25 Apr 2014 15:07:08 +0200
|
|
|
dc8c34 |
Subject: [PATCH 211/225] Ticket 47787 - A replicated MOD fails (Unwilling to
|
|
|
dc8c34 |
perform) if it targets a tombstone
|
|
|
dc8c34 |
|
|
|
dc8c34 |
Bug Description:
|
|
|
dc8c34 |
A fix for https://fedorahosted.org/389/ticket/47396 prevents a MOD operation
|
|
|
dc8c34 |
on a tombstone.
|
|
|
dc8c34 |
A problem is that a legacy version may have allowed it, or if a MOD entry and a DEL entry occur on different
|
|
|
dc8c34 |
masters, it may happen that a consumer receives a MOD on a tombstone.
|
|
|
dc8c34 |
The server returns 'Unwilling to perform', which may break replication.
|
|
|
dc8c34 |
|
|
|
dc8c34 |
Fix Description:
|
|
|
dc8c34 |
Allows a MOD on tombstone, only if it comes from a replication session
|
|
|
dc8c34 |
|
|
|
dc8c34 |
https://fedorahosted.org/389/ticket/47787
|
|
|
dc8c34 |
|
|
|
dc8c34 |
Reviewed by: Rich Megginson (thanks)
|
|
|
dc8c34 |
|
|
|
dc8c34 |
Platforms tested: F17, F19
|
|
|
dc8c34 |
|
|
|
dc8c34 |
Flag Day: no
|
|
|
dc8c34 |
|
|
|
dc8c34 |
Doc impact: no
|
|
|
dc8c34 |
(cherry picked from commit 0ee8496d0e884f788041cc2e54279cfc28eabaa9)
|
|
|
dc8c34 |
---
|
|
|
dc8c34 |
dirsrvtests/tickets/ticket47787_test.py | 630 +++++++++++++++++++++++++++++
|
|
|
dc8c34 |
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 2 +-
|
|
|
dc8c34 |
2 files changed, 631 insertions(+), 1 deletion(-)
|
|
|
dc8c34 |
create mode 100644 dirsrvtests/tickets/ticket47787_test.py
|
|
|
dc8c34 |
|
|
|
dc8c34 |
diff --git a/dirsrvtests/tickets/ticket47787_test.py b/dirsrvtests/tickets/ticket47787_test.py
|
|
|
dc8c34 |
new file mode 100644
|
|
|
dc8c34 |
index 0000000..d9d3ca9
|
|
|
dc8c34 |
--- /dev/null
|
|
|
dc8c34 |
+++ b/dirsrvtests/tickets/ticket47787_test.py
|
|
|
dc8c34 |
@@ -0,0 +1,630 @@
|
|
|
dc8c34 |
+'''
|
|
|
dc8c34 |
+Created on April 14, 2014
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+@author: tbordaz
|
|
|
dc8c34 |
+'''
|
|
|
dc8c34 |
+import os
|
|
|
dc8c34 |
+import sys
|
|
|
dc8c34 |
+import time
|
|
|
dc8c34 |
+import ldap
|
|
|
dc8c34 |
+import logging
|
|
|
dc8c34 |
+import socket
|
|
|
dc8c34 |
+import time
|
|
|
dc8c34 |
+import logging
|
|
|
dc8c34 |
+import pytest
|
|
|
dc8c34 |
+import re
|
|
|
dc8c34 |
+from lib389 import DirSrv, Entry, tools, NoSuchEntryError
|
|
|
dc8c34 |
+from lib389.tools import DirSrvTools
|
|
|
dc8c34 |
+from lib389._constants import *
|
|
|
dc8c34 |
+from lib389.properties import *
|
|
|
dc8c34 |
+from constants import *
|
|
|
dc8c34 |
+from lib389._constants import REPLICAROLE_MASTER
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+logging.getLogger(__name__).setLevel(logging.DEBUG)
|
|
|
dc8c34 |
+log = logging.getLogger(__name__)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+#
|
|
|
dc8c34 |
+# important part. We can deploy Master1 and Master2 on different versions
|
|
|
dc8c34 |
+#
|
|
|
dc8c34 |
+installation1_prefix = None
|
|
|
dc8c34 |
+installation2_prefix = None
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+# set this flag to False so that it will assert on failure _status_entry_both_server
|
|
|
dc8c34 |
+DEBUG_FLAG = False
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+STAGING_CN = "staged user"
|
|
|
dc8c34 |
+PRODUCTION_CN = "accounts"
|
|
|
dc8c34 |
+EXCEPT_CN = "excepts"
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
|
|
|
dc8c34 |
+PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX)
|
|
|
dc8c34 |
+PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
|
|
|
dc8c34 |
+PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX)
|
|
|
dc8c34 |
+BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
|
|
|
dc8c34 |
+BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+BIND_CN = "bind_entry"
|
|
|
dc8c34 |
+BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX)
|
|
|
dc8c34 |
+BIND_PW = "password"
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+NEW_ACCOUNT = "new_account"
|
|
|
dc8c34 |
+MAX_ACCOUNTS = 20
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci"
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+class TopologyMaster1Master2(object):
|
|
|
dc8c34 |
+ def __init__(self, master1, master2):
|
|
|
dc8c34 |
+ master1.open()
|
|
|
dc8c34 |
+ self.master1 = master1
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ master2.open()
|
|
|
dc8c34 |
+ self.master2 = master2
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+@pytest.fixture(scope="module")
|
|
|
dc8c34 |
+def topology(request):
|
|
|
dc8c34 |
+ '''
|
|
|
dc8c34 |
+ This fixture is used to create a replicated topology for the 'module'.
|
|
|
dc8c34 |
+ The replicated topology is MASTER1 <-> Master2.
|
|
|
dc8c34 |
+ At the beginning, It may exists a master2 instance and/or a master2 instance.
|
|
|
dc8c34 |
+ It may also exists a backup for the master1 and/or the master2.
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ Principle:
|
|
|
dc8c34 |
+ If master1 instance exists:
|
|
|
dc8c34 |
+ restart it
|
|
|
dc8c34 |
+ If master2 instance exists:
|
|
|
dc8c34 |
+ restart it
|
|
|
dc8c34 |
+ If backup of master1 AND backup of master2 exists:
|
|
|
dc8c34 |
+ create or rebind to master1
|
|
|
dc8c34 |
+ create or rebind to master2
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ restore master1 from backup
|
|
|
dc8c34 |
+ restore master2 from backup
|
|
|
dc8c34 |
+ else:
|
|
|
dc8c34 |
+ Cleanup everything
|
|
|
dc8c34 |
+ remove instances
|
|
|
dc8c34 |
+ remove backups
|
|
|
dc8c34 |
+ Create instances
|
|
|
dc8c34 |
+ Initialize replication
|
|
|
dc8c34 |
+ Create backups
|
|
|
dc8c34 |
+ '''
|
|
|
dc8c34 |
+ global installation1_prefix
|
|
|
dc8c34 |
+ global installation2_prefix
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # allocate master1 on a given deployement
|
|
|
dc8c34 |
+ master1 = DirSrv(verbose=False)
|
|
|
dc8c34 |
+ if installation1_prefix:
|
|
|
dc8c34 |
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Args for the master1 instance
|
|
|
dc8c34 |
+ args_instance[SER_HOST] = HOST_MASTER_1
|
|
|
dc8c34 |
+ args_instance[SER_PORT] = PORT_MASTER_1
|
|
|
dc8c34 |
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
|
|
|
dc8c34 |
+ args_master = args_instance.copy()
|
|
|
dc8c34 |
+ master1.allocate(args_master)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # allocate master1 on a given deployement
|
|
|
dc8c34 |
+ master2 = DirSrv(verbose=False)
|
|
|
dc8c34 |
+ if installation2_prefix:
|
|
|
dc8c34 |
+ args_instance[SER_DEPLOYED_DIR] = installation2_prefix
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Args for the consumer instance
|
|
|
dc8c34 |
+ args_instance[SER_HOST] = HOST_MASTER_2
|
|
|
dc8c34 |
+ args_instance[SER_PORT] = PORT_MASTER_2
|
|
|
dc8c34 |
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
|
|
|
dc8c34 |
+ args_master = args_instance.copy()
|
|
|
dc8c34 |
+ master2.allocate(args_master)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Get the status of the backups
|
|
|
dc8c34 |
+ backup_master1 = master1.checkBackupFS()
|
|
|
dc8c34 |
+ backup_master2 = master2.checkBackupFS()
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Get the status of the instance and restart it if it exists
|
|
|
dc8c34 |
+ instance_master1 = master1.exists()
|
|
|
dc8c34 |
+ if instance_master1:
|
|
|
dc8c34 |
+ master1.stop(timeout=10)
|
|
|
dc8c34 |
+ master1.start(timeout=10)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ instance_master2 = master2.exists()
|
|
|
dc8c34 |
+ if instance_master2:
|
|
|
dc8c34 |
+ master2.stop(timeout=10)
|
|
|
dc8c34 |
+ master2.start(timeout=10)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if backup_master1 and backup_master2:
|
|
|
dc8c34 |
+ # The backups exist, assuming they are correct
|
|
|
dc8c34 |
+ # we just re-init the instances with them
|
|
|
dc8c34 |
+ if not instance_master1:
|
|
|
dc8c34 |
+ master1.create()
|
|
|
dc8c34 |
+ # Used to retrieve configuration information (dbdir, confdir...)
|
|
|
dc8c34 |
+ master1.open()
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not instance_master2:
|
|
|
dc8c34 |
+ master2.create()
|
|
|
dc8c34 |
+ # Used to retrieve configuration information (dbdir, confdir...)
|
|
|
dc8c34 |
+ master2.open()
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # restore master1 from backup
|
|
|
dc8c34 |
+ master1.stop(timeout=10)
|
|
|
dc8c34 |
+ master1.restoreFS(backup_master1)
|
|
|
dc8c34 |
+ master1.start(timeout=10)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # restore master2 from backup
|
|
|
dc8c34 |
+ master2.stop(timeout=10)
|
|
|
dc8c34 |
+ master2.restoreFS(backup_master2)
|
|
|
dc8c34 |
+ master2.start(timeout=10)
|
|
|
dc8c34 |
+ else:
|
|
|
dc8c34 |
+ # We should be here only in two conditions
|
|
|
dc8c34 |
+ # - This is the first time a test involve master-consumer
|
|
|
dc8c34 |
+ # so we need to create everything
|
|
|
dc8c34 |
+ # - Something weird happened (instance/backup destroyed)
|
|
|
dc8c34 |
+ # so we discard everything and recreate all
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Remove all the backups. So even if we have a specific backup file
|
|
|
dc8c34 |
+ # (e.g backup_master) we clear all backups that an instance my have created
|
|
|
dc8c34 |
+ if backup_master1:
|
|
|
dc8c34 |
+ master1.clearBackupFS()
|
|
|
dc8c34 |
+ if backup_master2:
|
|
|
dc8c34 |
+ master2.clearBackupFS()
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Remove all the instances
|
|
|
dc8c34 |
+ if instance_master1:
|
|
|
dc8c34 |
+ master1.delete()
|
|
|
dc8c34 |
+ if instance_master2:
|
|
|
dc8c34 |
+ master2.delete()
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Create the instances
|
|
|
dc8c34 |
+ master1.create()
|
|
|
dc8c34 |
+ master1.open()
|
|
|
dc8c34 |
+ master2.create()
|
|
|
dc8c34 |
+ master2.open()
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ #
|
|
|
dc8c34 |
+ # Now prepare the Master-Consumer topology
|
|
|
dc8c34 |
+ #
|
|
|
dc8c34 |
+ # First Enable replication
|
|
|
dc8c34 |
+ master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
|
|
|
dc8c34 |
+ master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Initialize the supplier->consumer
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ properties = {RA_NAME: r'meTo_$host:$port',
|
|
|
dc8c34 |
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
|
|
|
dc8c34 |
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
|
|
|
dc8c34 |
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
|
|
|
dc8c34 |
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
|
|
|
dc8c34 |
+ repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not repl_agreement:
|
|
|
dc8c34 |
+ log.fatal("Fail to create a replica agreement")
|
|
|
dc8c34 |
+ sys.exit(1)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ log.debug("%s created" % repl_agreement)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ properties = {RA_NAME: r'meTo_$host:$port',
|
|
|
dc8c34 |
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
|
|
|
dc8c34 |
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
|
|
|
dc8c34 |
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
|
|
|
dc8c34 |
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
|
|
|
dc8c34 |
+ master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
|
|
|
dc8c34 |
+ master1.waitForReplInit(repl_agreement)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Check replication is working fine
|
|
|
dc8c34 |
+ master1.add_s(Entry((TEST_REPL_DN, {
|
|
|
dc8c34 |
+ 'objectclass': "top person".split(),
|
|
|
dc8c34 |
+ 'sn': 'test_repl',
|
|
|
dc8c34 |
+ 'cn': 'test_repl'})))
|
|
|
dc8c34 |
+ loop = 0
|
|
|
dc8c34 |
+ while loop <= 10:
|
|
|
dc8c34 |
+ try:
|
|
|
dc8c34 |
+ ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
|
|
|
dc8c34 |
+ break
|
|
|
dc8c34 |
+ except ldap.NO_SUCH_OBJECT:
|
|
|
dc8c34 |
+ time.sleep(1)
|
|
|
dc8c34 |
+ loop += 1
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Time to create the backups
|
|
|
dc8c34 |
+ master1.stop(timeout=10)
|
|
|
dc8c34 |
+ master1.backupfile = master1.backupFS()
|
|
|
dc8c34 |
+ master1.start(timeout=10)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ master2.stop(timeout=10)
|
|
|
dc8c34 |
+ master2.backupfile = master2.backupFS()
|
|
|
dc8c34 |
+ master2.start(timeout=10)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ #
|
|
|
dc8c34 |
+ # Here we have two instances master and consumer
|
|
|
dc8c34 |
+ # with replication working. Either coming from a backup recovery
|
|
|
dc8c34 |
+ # or from a fresh (re)init
|
|
|
dc8c34 |
+ # Time to return the topology
|
|
|
dc8c34 |
+ return TopologyMaster1Master2(master1, master2)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _bind_manager(server):
|
|
|
dc8c34 |
+ server.log.info("Bind as %s " % DN_DM)
|
|
|
dc8c34 |
+ server.simple_bind_s(DN_DM, PASSWORD)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _bind_normal(server):
|
|
|
dc8c34 |
+ server.log.info("Bind as %s " % BIND_DN)
|
|
|
dc8c34 |
+ server.simple_bind_s(BIND_DN, BIND_PW)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _header(topology, label):
|
|
|
dc8c34 |
+ topology.master1.log.info("\n\n###############################################")
|
|
|
dc8c34 |
+ topology.master1.log.info("#######")
|
|
|
dc8c34 |
+ topology.master1.log.info("####### %s" % label)
|
|
|
dc8c34 |
+ topology.master1.log.info("#######")
|
|
|
dc8c34 |
+ topology.master1.log.info("###############################################")
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _status_entry_both_server(topology, name=None, desc=None, debug=True):
|
|
|
dc8c34 |
+ if not name:
|
|
|
dc8c34 |
+ return
|
|
|
dc8c34 |
+ topology.master1.log.info("\n\n######################### Tombstone on M1 ######################\n")
|
|
|
dc8c34 |
+ ent_m1 = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
|
|
|
dc8c34 |
+ assert ent_m1
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ topology.master1.log.info("\n\n######################### Tombstone on M2 ######################\n")
|
|
|
dc8c34 |
+ ent_m2 = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
|
|
|
dc8c34 |
+ assert ent_m2
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ topology.master1.log.info("\n\n######################### Description ######################\n%s\n" % desc)
|
|
|
dc8c34 |
+ topology.master1.log.info("M1 only\n")
|
|
|
dc8c34 |
+ for attr in ent_m1.getAttrs():
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not debug:
|
|
|
dc8c34 |
+ assert attr in ent_m2.getAttrs()
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not attr in ent_m2.getAttrs():
|
|
|
dc8c34 |
+ topology.master1.log.info(" %s" % attr)
|
|
|
dc8c34 |
+ for val in ent_m1.getValues(attr):
|
|
|
dc8c34 |
+ topology.master1.log.info(" %s" % val)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ topology.master1.log.info("M2 only\n")
|
|
|
dc8c34 |
+ for attr in ent_m2.getAttrs():
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not debug:
|
|
|
dc8c34 |
+ assert attr in ent_m1.getAttrs()
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not attr in ent_m1.getAttrs():
|
|
|
dc8c34 |
+ topology.master1.log.info(" %s" % attr)
|
|
|
dc8c34 |
+ for val in ent_m2.getValues(attr):
|
|
|
dc8c34 |
+ topology.master1.log.info(" %s" % val)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ topology.master1.log.info("M1 differs M2\n")
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not debug:
|
|
|
dc8c34 |
+ assert ent_m1.dn == ent_m2.dn
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if ent_m1.dn != ent_m2.dn:
|
|
|
dc8c34 |
+ topology.master1.log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn))
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ for attr1 in ent_m1.getAttrs():
|
|
|
dc8c34 |
+ if attr1 in ent_m2.getAttrs():
|
|
|
dc8c34 |
+ for val1 in ent_m1.getValues(attr1):
|
|
|
dc8c34 |
+ found = False
|
|
|
dc8c34 |
+ for val2 in ent_m2.getValues(attr1):
|
|
|
dc8c34 |
+ if val1 == val2:
|
|
|
dc8c34 |
+ found = True
|
|
|
dc8c34 |
+ break
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not debug:
|
|
|
dc8c34 |
+ assert found
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not found:
|
|
|
dc8c34 |
+ topology.master1.log.info(" M1[%s] = %s" % (attr1, val1))
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ for attr2 in ent_m2.getAttrs():
|
|
|
dc8c34 |
+ if attr2 in ent_m1.getAttrs():
|
|
|
dc8c34 |
+ for val2 in ent_m2.getValues(attr2):
|
|
|
dc8c34 |
+ found = False
|
|
|
dc8c34 |
+ for val1 in ent_m1.getValues(attr2):
|
|
|
dc8c34 |
+ if val2 == val1:
|
|
|
dc8c34 |
+ found = True
|
|
|
dc8c34 |
+ break
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not debug:
|
|
|
dc8c34 |
+ assert found
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not found:
|
|
|
dc8c34 |
+ topology.master1.log.info(" M2[%s] = %s" % (attr2, val2))
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _pause_RAs(topology):
|
|
|
dc8c34 |
+ topology.master1.log.info("\n\n######################### Pause RA M1<->M2 ######################\n")
|
|
|
dc8c34 |
+ ents = topology.master1.agreement.list(suffix=SUFFIX)
|
|
|
dc8c34 |
+ assert len(ents) == 1
|
|
|
dc8c34 |
+ topology.master1.agreement.pause(ents[0].dn)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ ents = topology.master2.agreement.list(suffix=SUFFIX)
|
|
|
dc8c34 |
+ assert len(ents) == 1
|
|
|
dc8c34 |
+ topology.master2.agreement.pause(ents[0].dn)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _resume_RAs(topology):
|
|
|
dc8c34 |
+ topology.master1.log.info("\n\n######################### resume RA M1<->M2 ######################\n")
|
|
|
dc8c34 |
+ ents = topology.master1.agreement.list(suffix=SUFFIX)
|
|
|
dc8c34 |
+ assert len(ents) == 1
|
|
|
dc8c34 |
+ topology.master1.agreement.resume(ents[0].dn)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ ents = topology.master2.agreement.list(suffix=SUFFIX)
|
|
|
dc8c34 |
+ assert len(ents) == 1
|
|
|
dc8c34 |
+ topology.master2.agreement.resume(ents[0].dn)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _find_tombstone(instance, base, attr, value):
|
|
|
dc8c34 |
+ #
|
|
|
dc8c34 |
+ # we can not use a filter with a (&(objeclass=nsTombstone)(sn=name)) because
|
|
|
dc8c34 |
+ # tombstone are not index in 'sn' so 'sn=name' will return NULL
|
|
|
dc8c34 |
+ # and even if tombstone are indexed for objectclass the '&' will set
|
|
|
dc8c34 |
+ # the candidate list to NULL
|
|
|
dc8c34 |
+ #
|
|
|
dc8c34 |
+ filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE
|
|
|
dc8c34 |
+ ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt)
|
|
|
dc8c34 |
+ found = False
|
|
|
dc8c34 |
+ for ent in ents:
|
|
|
dc8c34 |
+ if ent.hasAttr(attr):
|
|
|
dc8c34 |
+ for val in ent.getValues(attr):
|
|
|
dc8c34 |
+ if val == value:
|
|
|
dc8c34 |
+ instance.log.debug("tombstone found: %r" % ent)
|
|
|
dc8c34 |
+ return ent
|
|
|
dc8c34 |
+ return None
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _delete_entry(instance, entry_dn, name):
|
|
|
dc8c34 |
+ instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # delete the entry
|
|
|
dc8c34 |
+ instance.delete_s(entry_dn)
|
|
|
dc8c34 |
+ assert _find_tombstone(instance, SUFFIX, 'sn', name) != None
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _mod_entry(instance, entry_dn, attr, value):
|
|
|
dc8c34 |
+ instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn)
|
|
|
dc8c34 |
+ mod = [(ldap.MOD_REPLACE, attr, value)]
|
|
|
dc8c34 |
+ instance.modify_s(entry_dn, mod)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
|
|
|
dc8c34 |
+ assert instance != None
|
|
|
dc8c34 |
+ assert entry_dn != None
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ if not new_rdn:
|
|
|
dc8c34 |
+ pattern = 'cn=(.*),(.*)'
|
|
|
dc8c34 |
+ rdnre = re.compile(pattern)
|
|
|
dc8c34 |
+ match = rdnre.match(entry_dn)
|
|
|
dc8c34 |
+ old_value = match.group(1)
|
|
|
dc8c34 |
+ new_rdn_val = "%s_modrdn" % old_value
|
|
|
dc8c34 |
+ new_rdn = "cn=%s" % new_rdn_val
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn)
|
|
|
dc8c34 |
+ if new_superior:
|
|
|
dc8c34 |
+ instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
|
|
|
dc8c34 |
+ else:
|
|
|
dc8c34 |
+ instance.rename_s(entry_dn, new_rdn, delold=del_old)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _check_entry_exists(instance, entry_dn):
|
|
|
dc8c34 |
+ loop = 0
|
|
|
dc8c34 |
+ while loop <= 10:
|
|
|
dc8c34 |
+ try:
|
|
|
dc8c34 |
+ ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)")
|
|
|
dc8c34 |
+ break
|
|
|
dc8c34 |
+ except ldap.NO_SUCH_OBJECT:
|
|
|
dc8c34 |
+ time.sleep(1)
|
|
|
dc8c34 |
+ loop += 1
|
|
|
dc8c34 |
+ assert loop <= 10
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _check_mod_received(instance, base, filt, attr, value):
|
|
|
dc8c34 |
+ instance.log.info("\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid)
|
|
|
dc8c34 |
+ loop = 0
|
|
|
dc8c34 |
+ while loop <= 10:
|
|
|
dc8c34 |
+ ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt)
|
|
|
dc8c34 |
+ if ent.hasAttr(attr) and ent.getValue(attr) == value:
|
|
|
dc8c34 |
+ break
|
|
|
dc8c34 |
+ time.sleep(1)
|
|
|
dc8c34 |
+ loop += 1
|
|
|
dc8c34 |
+ assert loop <= 10
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def _check_replication(topology, entry_dn):
|
|
|
dc8c34 |
+ # prepare the filter to retrieve the entry
|
|
|
dc8c34 |
+ filt = entry_dn.split(',')[0]
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ topology.master1.log.info("\n######################### Check replicat M1->M2 ######################\n")
|
|
|
dc8c34 |
+ loop = 0
|
|
|
dc8c34 |
+ while loop <= 10:
|
|
|
dc8c34 |
+ attr = 'description'
|
|
|
dc8c34 |
+ value = 'test_value_%d' % loop
|
|
|
dc8c34 |
+ mod = [(ldap.MOD_REPLACE, attr, value)]
|
|
|
dc8c34 |
+ topology.master1.modify_s(entry_dn, mod)
|
|
|
dc8c34 |
+ _check_mod_received(topology.master2, SUFFIX, filt, attr, value)
|
|
|
dc8c34 |
+ loop += 1
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ topology.master1.log.info("\n######################### Check replicat M2->M1 ######################\n")
|
|
|
dc8c34 |
+ loop = 0
|
|
|
dc8c34 |
+ while loop <= 10:
|
|
|
dc8c34 |
+ attr = 'description'
|
|
|
dc8c34 |
+ value = 'test_value_%d' % loop
|
|
|
dc8c34 |
+ mod = [(ldap.MOD_REPLACE, attr, value)]
|
|
|
dc8c34 |
+ topology.master2.modify_s(entry_dn, mod)
|
|
|
dc8c34 |
+ _check_mod_received(topology.master1, SUFFIX, filt, attr, value)
|
|
|
dc8c34 |
+ loop += 1
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def test_ticket47787_init(topology):
|
|
|
dc8c34 |
+ """
|
|
|
dc8c34 |
+ Creates
|
|
|
dc8c34 |
+ - a staging DIT
|
|
|
dc8c34 |
+ - a production DIT
|
|
|
dc8c34 |
+ - add accounts in staging DIT
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ """
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n")
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # entry used to bind with
|
|
|
dc8c34 |
+ topology.master1.log.info("Add %s" % BIND_DN)
|
|
|
dc8c34 |
+ topology.master1.add_s(Entry((BIND_DN, {
|
|
|
dc8c34 |
+ 'objectclass': "top person".split(),
|
|
|
dc8c34 |
+ 'sn': BIND_CN,
|
|
|
dc8c34 |
+ 'cn': BIND_CN,
|
|
|
dc8c34 |
+ 'userpassword': BIND_PW})))
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # DIT for staging
|
|
|
dc8c34 |
+ topology.master1.log.info("Add %s" % STAGING_DN)
|
|
|
dc8c34 |
+ topology.master1.add_s(Entry((STAGING_DN, {
|
|
|
dc8c34 |
+ 'objectclass': "top organizationalRole".split(),
|
|
|
dc8c34 |
+ 'cn': STAGING_CN,
|
|
|
dc8c34 |
+ 'description': "staging DIT"})))
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # DIT for production
|
|
|
dc8c34 |
+ topology.master1.log.info("Add %s" % PRODUCTION_DN)
|
|
|
dc8c34 |
+ topology.master1.add_s(Entry((PRODUCTION_DN, {
|
|
|
dc8c34 |
+ 'objectclass': "top organizationalRole".split(),
|
|
|
dc8c34 |
+ 'cn': PRODUCTION_CN,
|
|
|
dc8c34 |
+ 'description': "production DIT"})))
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # enable replication error logging
|
|
|
dc8c34 |
+ mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192')]
|
|
|
dc8c34 |
+ topology.master1.modify_s(DN_CONFIG, mod)
|
|
|
dc8c34 |
+ topology.master2.modify_s(DN_CONFIG, mod)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # add dummy entries in the staging DIT
|
|
|
dc8c34 |
+ for cpt in range(MAX_ACCOUNTS):
|
|
|
dc8c34 |
+ name = "%s%d" % (NEW_ACCOUNT, cpt)
|
|
|
dc8c34 |
+ topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
|
|
|
dc8c34 |
+ 'objectclass': "top person".split(),
|
|
|
dc8c34 |
+ 'sn': name,
|
|
|
dc8c34 |
+ 'cn': name})))
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def test_ticket47787_2(topology):
|
|
|
dc8c34 |
+ '''
|
|
|
dc8c34 |
+ Disable replication so that updates are not replicated
|
|
|
dc8c34 |
+ Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior).
|
|
|
dc8c34 |
+ update a test entry on M2
|
|
|
dc8c34 |
+ Reenable the RA.
|
|
|
dc8c34 |
+ checks that entry was deleted on M2 (with the modified RDN)
|
|
|
dc8c34 |
+ checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn)
|
|
|
dc8c34 |
+ '''
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ _header(topology, "test_ticket47787_2")
|
|
|
dc8c34 |
+ _bind_manager(topology.master1)
|
|
|
dc8c34 |
+ _bind_manager(topology.master2)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ #entry to test the replication is still working
|
|
|
dc8c34 |
+ name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS -1)
|
|
|
dc8c34 |
+ test_rdn = "cn=%s" % (name)
|
|
|
dc8c34 |
+ testentry_dn = "%s,%s" % (test_rdn, STAGING_DN)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2)
|
|
|
dc8c34 |
+ test2_rdn = "cn=%s" % (name)
|
|
|
dc8c34 |
+ testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # value of updates to test the replication both ways
|
|
|
dc8c34 |
+ attr = 'description'
|
|
|
dc8c34 |
+ value = 'test_ticket47787_2'
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # entry for the modrdn
|
|
|
dc8c34 |
+ name = "%s%d" % (NEW_ACCOUNT, 1)
|
|
|
dc8c34 |
+ rdn = "cn=%s" % (name)
|
|
|
dc8c34 |
+ entry_dn = "%s,%s" % (rdn, STAGING_DN)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # created on M1, wait the entry exists on M2
|
|
|
dc8c34 |
+ _check_entry_exists(topology.master2, entry_dn)
|
|
|
dc8c34 |
+ _check_entry_exists(topology.master2, testentry_dn)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ _pause_RAs(topology)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # Delete 'entry_dn' on M1.
|
|
|
dc8c34 |
+ # dummy update is only have a first CSN before the DEL
|
|
|
dc8c34 |
+ # else the DEL will be in min_csn RUV and make diagnostic a bit more complex
|
|
|
dc8c34 |
+ _mod_entry(topology.master1, testentry2_dn, attr, 'dummy')
|
|
|
dc8c34 |
+ _delete_entry(topology.master1, entry_dn, name)
|
|
|
dc8c34 |
+ _mod_entry(topology.master1, testentry2_dn, attr, value)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ time.sleep(1) # important to have MOD.csn != DEL.csn
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # MOD 'entry_dn' on M1.
|
|
|
dc8c34 |
+ # dummy update is only have a first CSN before the MOD entry_dn
|
|
|
dc8c34 |
+ # else the DEL will be in min_csn RUV and make diagnostic a bit more complex
|
|
|
dc8c34 |
+ _mod_entry(topology.master2, testentry_dn, attr, 'dummy')
|
|
|
dc8c34 |
+ _mod_entry(topology.master2, entry_dn, attr, value)
|
|
|
dc8c34 |
+ _mod_entry(topology.master2, testentry_dn, attr, value)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ _resume_RAs(topology)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ topology.master1.log.info("\n\n######################### Check DEL replicated on M2 ######################\n")
|
|
|
dc8c34 |
+ loop = 0
|
|
|
dc8c34 |
+ while loop <= 10:
|
|
|
dc8c34 |
+ ent = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
|
|
|
dc8c34 |
+ if ent:
|
|
|
dc8c34 |
+ break
|
|
|
dc8c34 |
+ time.sleep(1)
|
|
|
dc8c34 |
+ loop += 1
|
|
|
dc8c34 |
+ assert loop <= 10
|
|
|
dc8c34 |
+ assert ent
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ # the following checks are not necessary
|
|
|
dc8c34 |
+ # as this bug is only for failing replicated MOD (entry_dn) on M1
|
|
|
dc8c34 |
+ #_check_mod_received(topology.master1, SUFFIX, "(%s)" % (test_rdn), attr, value)
|
|
|
dc8c34 |
+ #_check_mod_received(topology.master2, SUFFIX, "(%s)" % (test2_rdn), attr, value)
|
|
|
dc8c34 |
+ #
|
|
|
dc8c34 |
+ #_check_replication(topology, testentry_dn)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ _status_entry_both_server(topology, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ topology.master1.log.info("\n\n######################### Check MOD replicated on M1 ######################\n")
|
|
|
dc8c34 |
+ loop = 0
|
|
|
dc8c34 |
+ while loop <= 10:
|
|
|
dc8c34 |
+ ent = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
|
|
|
dc8c34 |
+ if ent:
|
|
|
dc8c34 |
+ break
|
|
|
dc8c34 |
+ time.sleep(1)
|
|
|
dc8c34 |
+ loop += 1
|
|
|
dc8c34 |
+ assert loop <= 10
|
|
|
dc8c34 |
+ assert ent
|
|
|
dc8c34 |
+ assert ent.hasAttr(attr)
|
|
|
dc8c34 |
+ assert ent.getValue(attr) == value
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def test_ticket47787_final(topology):
|
|
|
dc8c34 |
+ topology.master1.stop(timeout=10)
|
|
|
dc8c34 |
+ topology.master2.stop(timeout=10)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+def run_isolated():
|
|
|
dc8c34 |
+ '''
|
|
|
dc8c34 |
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
|
|
|
dc8c34 |
+ To run isolated without py.test, you need to
|
|
|
dc8c34 |
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
|
|
|
dc8c34 |
+ - set the installation prefix
|
|
|
dc8c34 |
+ - run this program
|
|
|
dc8c34 |
+ '''
|
|
|
dc8c34 |
+ global installation1_prefix
|
|
|
dc8c34 |
+ global installation2_prefix
|
|
|
dc8c34 |
+ installation1_prefix = None
|
|
|
dc8c34 |
+ installation2_prefix = None
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ topo = topology(True)
|
|
|
dc8c34 |
+ topo.master1.log.info("\n\n######################### Ticket 47787 ######################\n")
|
|
|
dc8c34 |
+ test_ticket47787_init(topo)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ test_ticket47787_2(topo)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+ test_ticket47787_final(topo)
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
+if __name__ == '__main__':
|
|
|
dc8c34 |
+ run_isolated()
|
|
|
dc8c34 |
+
|
|
|
dc8c34 |
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
|
|
dc8c34 |
index f7ebac1..b6a889f 100644
|
|
|
dc8c34 |
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
|
|
dc8c34 |
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
|
|
dc8c34 |
@@ -454,7 +454,7 @@ ldbm_back_modify( Slapi_PBlock *pb )
|
|
|
dc8c34 |
}
|
|
|
dc8c34 |
|
|
|
dc8c34 |
if (!is_fixup_operation) {
|
|
|
dc8c34 |
- if (slapi_entry_flag_is_set(e->ep_entry, SLAPI_ENTRY_FLAG_TOMBSTONE)) {
|
|
|
dc8c34 |
+ if (!repl_op && slapi_entry_flag_is_set(e->ep_entry, SLAPI_ENTRY_FLAG_TOMBSTONE)) {
|
|
|
dc8c34 |
ldap_result_code = LDAP_UNWILLING_TO_PERFORM;
|
|
|
dc8c34 |
ldap_result_message = "Operation not allowed on tombstone entry.";
|
|
|
dc8c34 |
slapi_log_error(SLAPI_LOG_FATAL, "ldbm_back_modify",
|
|
|
dc8c34 |
--
|
|
|
dc8c34 |
1.8.1.4
|
|
|
dc8c34 |
|