diff --git a/.389-ds-base.metadata b/.389-ds-base.metadata
index 46522b1..5ad189d 100644
--- a/.389-ds-base.metadata
+++ b/.389-ds-base.metadata
@@ -1,3 +1,3 @@
-672f63948af9d242034f689340f772b8e148ee3c SOURCES/389-ds-base-1.4.3.30.tar.bz2
+6dd2b4523735ae964fa5a8519ccd5be258a947c9 SOURCES/389-ds-base-1.4.3.32.tar.bz2
 1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
-dc0d2e81e54cc7e4098a829b8202d59ec471b34f SOURCES/vendor-1.4.3.30-1.tar.gz
+44d04546a521aee1e09e85924e08cbd67d0a2d0c SOURCES/vendor-1.4.3.32-1.tar.gz
diff --git a/.gitignore b/.gitignore
index 0beb6e7..604ccb6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,3 @@
-SOURCES/389-ds-base-1.4.3.30.tar.bz2
+SOURCES/389-ds-base-1.4.3.32.tar.bz2
 SOURCES/jemalloc-5.3.0.tar.bz2
-SOURCES/vendor-1.4.3.30-1.tar.gz
+SOURCES/vendor-1.4.3.32-1.tar.gz
diff --git a/SOURCES/0001-Issue-5532-Make-db-compaction-TOD-day-more-robust.patch b/SOURCES/0001-Issue-5532-Make-db-compaction-TOD-day-more-robust.patch
new file mode 100644
index 0000000..ce1aeea
--- /dev/null
+++ b/SOURCES/0001-Issue-5532-Make-db-compaction-TOD-day-more-robust.patch
@@ -0,0 +1,440 @@
+From 9cdb6cb41b9c87c44e788cd1e354b14dbf4eb5f7 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds
+Date: Wed, 16 Nov 2022 16:37:05 -0500
+Subject: [PATCH 1/3] Issue 5532 - Make db compaction TOD day more robust.
+
+Bug Description:
+
+The time of day compaction setting does not promise that the compaction
+will happen as configured. This is because the compaction interval
+starts when the server is started. Once the thread wakes up and we are
+"past" the TOD setting we compact, but that can happen at any time
+once the TOD has passed.
+
+Fix Description:
+
+Once the compaction interval is hit we create an "event" with the
+exact time the compaction should start.
+
+relates: #5532
+
+Reviewed by: tbordaz & spichugi(Thanks!!)
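For reference, the scheduling arithmetic this patch introduces in `_cl5_get_tod_expiration()` and `bdb_get_tod_expiration()` reduces to "seconds from now until the configured HH:MM", with midnight special-cased on both sides. Below is a minimal Python sketch of that same calculation; the function name and standalone form are illustrative, not part of the patch:

```python
from datetime import datetime

SEC_PER_DAY = 86400

def seconds_until_tod(expire_time, now=None):
    """Seconds from 'now' until the configured "HH:MM" time of day.

    Mirrors the patch's logic: midnight needs special treatment for
    both the configured TOD and the current time.
    """
    now = now or datetime.now()
    elapsed_today = now.hour * 3600 + now.minute * 60 + now.second
    hour, minute = (int(part) for part in expire_time.split(":"))
    expiring_time = hour * 3600 + minute * 60

    if expiring_time == 0:
        # Compaction TOD configured for midnight
        if elapsed_today == 0:
            return 0                         # it is midnight right now: fire immediately
        return SEC_PER_DAY - elapsed_today   # wait until the next midnight
    if elapsed_today == 0:
        return expiring_time                 # currently midnight: use the configured TOD as-is
    if elapsed_today > expiring_time:
        # Missed today's TOD, schedule it for tomorrow
        return SEC_PER_DAY - (elapsed_today - expiring_time)
    return expiring_time - elapsed_today     # TOD is still ahead of us today

# e.g. at 23:50:00 with a "00:10" TOD this returns 1200 (20 minutes)
```

The C code hands this delta to the event queue via `slapi_eq_once_rel(..., slapi_current_rel_time_t() + delta)`, so compaction fires as a one-shot event at the configured wall-clock time instead of whenever the trim or checkpoint thread happens to wake up after the TOD has passed.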
+--- + .../tests/suites/config/compact_test.py | 29 +++-- + ldap/servers/plugins/replication/cl5_api.c | 58 +++++---- + .../slapd/back-ldbm/db-bdb/bdb_layer.c | 118 ++++++++++++------ + .../slapd/back-ldbm/db-bdb/bdb_layer.h | 2 +- + 4 files changed, 136 insertions(+), 71 deletions(-) + +diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py +index 1f1c097e4..2e8dee4bb 100644 +--- a/dirsrvtests/tests/suites/config/compact_test.py ++++ b/dirsrvtests/tests/suites/config/compact_test.py +@@ -2,6 +2,7 @@ import logging + import pytest + import os + import time ++import datetime + from lib389.tasks import DBCompactTask + from lib389.backend import DatabaseConfig + from lib389.replica import Changelog5 +@@ -53,22 +54,34 @@ def test_compaction_interval_and_time(topo): + + inst = topo.ms["supplier1"] + +- # Configure DB compaction +- config = DatabaseConfig(inst) +- config.set([('nsslapd-db-compactdb-interval', '2'), ('nsslapd-db-compactdb-time', '00:01')]) ++ # Calculate the compaction time (2 minutes from now) ++ now = datetime.datetime.now() ++ current_hour = now.hour ++ current_minute = now.minute + 2 ++ if current_hour < 10: ++ hour = "0" + str(current_hour) ++ else: ++ hour = str(current_hour) ++ if current_minute < 10: ++ minute = "0" + str(current_minute) ++ else: ++ minute = str(current_minute) ++ compact_time = hour + ":" + minute + + # Configure changelog compaction + cl5 = Changelog5(inst) + cl5.replace_many( + ('nsslapd-changelogcompactdb-interval', '2'), +- ('nsslapd-changelogcompactdb-time', '00:01'), +- ('nsslapd-changelogtrim-interval', '2') ++ ('nsslapd-changelogcompactdb-time', compact_time), ++ ('nsslapd-changelogtrim-interval', '2') + ) + inst.deleteErrorLogs() + +- # Check is compaction occurred +- time.sleep(6) +- assert inst.searchErrorsLog("Compacting databases") ++ # Check compaction occurred as expected ++ time.sleep(60) ++ assert not inst.searchErrorsLog("compacting replication changelogs") ++ ++ time.sleep(61) + assert inst.searchErrorsLog("compacting replication changelogs") + inst.deleteErrorLogs(restart=False) + +diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c +index 43fa5bd46..5d4edea92 100644 +--- a/ldap/servers/plugins/replication/cl5_api.c ++++ b/ldap/servers/plugins/replication/cl5_api.c +@@ -103,6 +103,7 @@ + + #define NO_DISK_SPACE 1024 + #define MIN_DISK_SPACE 10485760 /* 10 MB */ ++#define _SEC_PER_DAY 86400 + + /***** Data Definitions *****/ + +@@ -293,6 +294,7 @@ static int _cl5FileEndsWith(const char *filename, const char *ext); + + static PRLock *cl5_diskfull_lock = NULL; + static int cl5_diskfull_flag = 0; ++static PRBool compacting = PR_FALSE; + + static void cl5_set_diskfull(void); + static void cl5_set_no_diskfull(void); +@@ -3099,7 +3101,7 @@ _cl5TrimCleanup(void) + static time_t + _cl5_get_tod_expiration(char *expire_time) + { +- time_t start_time, todays_elapsed_time, now = time(NULL); ++ time_t todays_elapsed_time, now = time(NULL); + struct tm *tm_struct = localtime(&now); + char hour_str[3] = {0}; + char min_str[3] = {0}; +@@ -3109,9 +3111,8 @@ _cl5_get_tod_expiration(char *expire_time) + + /* Get today's start time */ + todays_elapsed_time = (tm_struct->tm_hour * 3600) + (tm_struct->tm_min * 60) + (tm_struct->tm_sec); +- start_time = slapi_current_utc_time() - todays_elapsed_time; + +- /* Get the hour and minute and calculate the expiring time. The time was ++ /* Get the hour and minute and calculate the expiring TOD. 
The time was + * already validated in bdb_config.c: HH:MM */ + hour_str[0] = *s++; + hour_str[1] = *s++; +@@ -3122,7 +3123,34 @@ _cl5_get_tod_expiration(char *expire_time) + min = strtoll(min_str, &endp, 10); + expiring_time = (hour * 60 * 60) + (min * 60); + +- return start_time + expiring_time; ++ /* Calculate the time in seconds when the compaction should start, midnight ++ * requires special treatment (for both current time and configured TOD) */ ++ if (expiring_time == 0) { ++ /* Compaction TOD configured for midnight */ ++ if (todays_elapsed_time == 0) { ++ /* It's currently midnight, compact now! */ ++ return 0; ++ } else { ++ /* Return the time until it's midnight */ ++ return _SEC_PER_DAY - todays_elapsed_time; ++ } ++ } else if (todays_elapsed_time == 0) { ++ /* It's currently midnight, just use the configured TOD */ ++ return expiring_time; ++ } else if (todays_elapsed_time > expiring_time) { ++ /* We missed TOD today, do it tomorrow */ ++ return _SEC_PER_DAY - (todays_elapsed_time - expiring_time); ++ } else { ++ /* Compaction is coming up */ ++ return expiring_time - todays_elapsed_time; ++ } ++} ++ ++static void ++do_cl_compact(time_t when, void *arg) ++{ ++ cl5CompactDBs(); ++ compacting = PR_FALSE; + } + + static int +@@ -3131,7 +3159,6 @@ _cl5TrimMain(void *param __attribute__((unused))) + time_t timePrev = slapi_current_utc_time(); + time_t timeCompactPrev = slapi_current_utc_time(); + time_t timeNow; +- PRBool compacting = PR_FALSE; + int32_t compactdb_time = 0; + + PR_AtomicIncrement(&s_cl5Desc.threadCount); +@@ -3144,25 +3171,14 @@ _cl5TrimMain(void *param __attribute__((unused))) + _cl5DoTrimming(); + } + +- if (!compacting) { +- /* Once we know we want to compact we need to stop refreshing the +- * TOD expiration. Otherwise if the compact time is close to +- * midnight we could roll over past midnight during the checkpoint +- * sleep interval, and we'd never actually compact the databases. +- * We also need to get this value before the sleep. +- */ +- compactdb_time = _cl5_get_tod_expiration(s_cl5Desc.dbTrim.compactTime); +- } + if ((s_cl5Desc.dbTrim.compactInterval > 0) && +- (timeNow - timeCompactPrev >= s_cl5Desc.dbTrim.compactInterval)) ++ (timeNow - timeCompactPrev >= s_cl5Desc.dbTrim.compactInterval) && ++ !compacting) + { + compacting = PR_TRUE; +- if (slapi_current_utc_time() > compactdb_time) { +- /* time to trim */ +- timeCompactPrev = timeNow; +- cl5CompactDBs(); +- compacting = PR_FALSE; +- } ++ compactdb_time = _cl5_get_tod_expiration(s_cl5Desc.dbTrim.compactTime); ++ slapi_eq_once_rel(do_cl_compact, NULL, slapi_current_rel_time_t() + compactdb_time); ++ timeCompactPrev = timeNow; + } + if (NULL == s_cl5Desc.clLock) { + /* most likely, emergency */ +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +index 3e29feb50..b433fa919 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +@@ -95,6 +95,7 @@ static int trans_batch_txn_max_sleep = 50; + static PRBool log_flush_thread = PR_FALSE; + static int txn_in_progress_count = 0; + static int *txn_log_flush_pending = NULL; ++static PRBool compacting = PR_FALSE; + + static pthread_mutex_t sync_txn_log_flush; + static pthread_cond_t sync_txn_log_flush_done; +@@ -3646,13 +3647,12 @@ log_flush_threadmain(void *param) + } + + /* +- * This refreshes the TOD expiration. So live changes to the configuration +- * will take effect immediately. 
++ * Get the time in seconds when the compaction should occur + */ + static time_t + bdb_get_tod_expiration(char *expire_time) + { +- time_t start_time, todays_elapsed_time, now = time(NULL); ++ time_t todays_elapsed_time, now = time(NULL); + struct tm *tm_struct = localtime(&now); + char hour_str[3] = {0}; + char min_str[3] = {0}; +@@ -3662,9 +3662,8 @@ bdb_get_tod_expiration(char *expire_time) + + /* Get today's start time */ + todays_elapsed_time = (tm_struct->tm_hour * 3600) + (tm_struct->tm_min * 60) + (tm_struct->tm_sec); +- start_time = slapi_current_utc_time() - todays_elapsed_time; + +- /* Get the hour and minute and calculate the expiring time. The time was ++ /* Get the hour and minute and calculate the expiring TOD. The time was + * already validated in bdb_config.c: HH:MM */ + hour_str[0] = *s++; + hour_str[1] = *s++; +@@ -3675,7 +3674,55 @@ bdb_get_tod_expiration(char *expire_time) + min = strtoll(min_str, &endp, 10); + expiring_time = (hour * 60 * 60) + (min * 60); + +- return start_time + expiring_time; ++ /* Calculate the time in seconds when the compaction should start, midnight ++ * requires special treatment (for both current time and configured TOD) */ ++ if (expiring_time == 0) { ++ /* Compaction TOD configured for midnight */ ++ if (todays_elapsed_time == 0) { ++ /* It's currently midnight, compact now! */ ++ return 0; ++ } else { ++ /* Return the time until it's midnight */ ++ return _SEC_PER_DAY - todays_elapsed_time; ++ } ++ } else if (todays_elapsed_time == 0) { ++ /* It's currently midnight, just use the configured TOD */ ++ return expiring_time; ++ } else if (todays_elapsed_time > expiring_time) { ++ /* We missed TOD today, do it tomorrow */ ++ return _SEC_PER_DAY - (todays_elapsed_time - expiring_time); ++ } else { ++ /* Compaction is coming up */ ++ return expiring_time - todays_elapsed_time; ++ } ++} ++ ++static void ++bdb_compact(time_t when, void *arg) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ Object *inst_obj; ++ ldbm_instance *inst; ++ DB *db = NULL; ++ int rc = 0; ++ ++ for (inst_obj = objset_first_obj(li->li_instance_set); ++ inst_obj; ++ inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) ++ { ++ inst = (ldbm_instance *)object_get_data(inst_obj); ++ rc = dblayer_get_id2entry(inst->inst_be, &db); ++ if (!db || rc) { ++ continue; ++ } ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting DB start: %s\n", ++ inst->inst_name); ++ /* Time to compact the DB's */ ++ dblayer_force_checkpoint(li); ++ bdb_do_compact(li); ++ dblayer_force_checkpoint(li); ++ } ++ compacting = PR_FALSE; + } + + /* +@@ -3763,15 +3810,6 @@ checkpoint_threadmain(void *param) + PR_Lock(li->li_config_mutex); + checkpoint_interval_update = (time_t)BDB_CONFIG(li)->bdb_checkpoint_interval; + compactdb_interval_update = (time_t)BDB_CONFIG(li)->bdb_compactdb_interval; +- if (!compacting) { +- /* Once we know we want to compact we need to stop refreshing the +- * TOD expiration. Otherwise if the compact time is close to +- * midnight we could roll over past midnight during the checkpoint +- * sleep interval, and we'd never actually compact the databases. +- * We also need to get this value before the sleep. 
+- */ +- compactdb_time = bdb_get_tod_expiration((char *)BDB_CONFIG(li)->bdb_compactdb_time); +- } + PR_Unlock(li->li_config_mutex); + + if (compactdb_interval_update != compactdb_interval) { +@@ -3861,23 +3899,21 @@ checkpoint_threadmain(void *param) + * this could have been a bug in fact, where compactdb_interval + * was 0, if you change while running it would never take effect .... + */ +- if (slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED) { +- compacting = PR_TRUE; +- if (slapi_current_utc_time() < compactdb_time) { +- /* We have passed the interval, but we need to wait for a +- * particular TOD to pass before compacting */ +- continue; +- } ++ if (compactdb_interval_update != compactdb_interval || ++ (slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED && !compacting)) ++ { ++ /* Get the time in second when the compaction should occur */ ++ PR_Lock(li->li_config_mutex); ++ compactdb_time = bdb_get_tod_expiration((char *)BDB_CONFIG(li)->bdb_compactdb_time); ++ PR_Unlock(li->li_config_mutex); + +- /* Time to compact the DB's */ +- dblayer_force_checkpoint(li); +- bdb_compact(li); +- dblayer_force_checkpoint(li); ++ /* Start compaction event */ ++ compacting = PR_TRUE; ++ slapi_eq_once_rel(bdb_compact, (void *)li, slapi_current_rel_time_t() + compactdb_time); + +- /* Now reset the timer and compacting flag */ ++ /* reset interval timer */ + compactdb_interval = compactdb_interval_update; + slapi_timespec_expire_at(compactdb_interval, &compactdb_expire); +- compacting = PR_FALSE; + } + } + slapi_log_err(SLAPI_LOG_HOUSE, "checkpoint_threadmain", "Check point before leaving\n"); +@@ -6210,14 +6246,14 @@ ldbm_back_compact(Slapi_Backend *be) + + li = (struct ldbminfo *)be->be_database->plg_private; + dblayer_force_checkpoint(li); +- rc = bdb_compact(li); ++ rc = bdb_do_compact(li); + dblayer_force_checkpoint(li); + return rc; + } + + + int32_t +-bdb_compact(struct ldbminfo *li) ++bdb_do_compact(struct ldbminfo *li) + { + Object *inst_obj; + ldbm_instance *inst; +@@ -6237,7 +6273,7 @@ bdb_compact(struct ldbminfo *li) + if (!db || rc) { + continue; + } +- slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting DB start: %s\n", ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_do_compact", "Compacting DB start: %s\n", + inst->inst_name); + + /* +@@ -6249,15 +6285,15 @@ bdb_compact(struct ldbminfo *li) + DBTYPE type; + rc = db->get_type(db, &type); + if (rc) { +- slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", +- "compactdb: failed to determine db type for %s: db error - %d %s\n", ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_do_compact", ++ "Failed to determine db type for %s: db error - %d %s\n", + inst->inst_name, rc, db_strerror(rc)); + continue; + } + + rc = dblayer_txn_begin(inst->inst_be, NULL, &txn); + if (rc) { +- slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: transaction begin failed: %d\n", rc); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_do_compact", "Transaction begin failed: %d\n", rc); + break; + } + /* +@@ -6274,26 +6310,26 @@ bdb_compact(struct ldbminfo *li) + rc = db->compact(db, txn.back_txn_txn, NULL /*start*/, NULL /*stop*/, + &c_data, compact_flags, NULL /*end*/); + if (rc) { +- slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", +- "compactdb: failed to compact %s; db error - %d %s\n", ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_do_compact", ++ "Failed to compact %s; db error - %d %s\n", + inst->inst_name, rc, db_strerror(rc)); + if ((rc = dblayer_txn_abort(inst->inst_be, &txn))) { +- slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: failed to abort txn (%s) db error - 
%d %s\n", ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_do_compact", "Failed to abort txn (%s) db error - %d %s\n", + inst->inst_name, rc, db_strerror(rc)); + break; + } + } else { +- slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", +- "compactdb: compact %s - %d pages freed\n", ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_do_compact", ++ "compact %s - %d pages freed\n", + inst->inst_name, c_data.compact_pages_free); + if ((rc = dblayer_txn_commit(inst->inst_be, &txn))) { +- slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: failed to commit txn (%s) db error - %d %s\n", ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_do_compact", "failed to commit txn (%s) db error - %d %s\n", + inst->inst_name, rc, db_strerror(rc)); + break; + } + } + } +- slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting databases finished.\n"); ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_do_compact", "Compacting databases finished.\n"); + + return rc; + } +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h +index e3a49dbac..65a633193 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h +@@ -97,7 +97,7 @@ int bdb_db_size(Slapi_PBlock *pb); + int bdb_upgradedb(Slapi_PBlock *pb); + int bdb_upgradednformat(Slapi_PBlock *pb); + int bdb_upgradeddformat(Slapi_PBlock *pb); +-int32_t bdb_compact(struct ldbminfo *li); ++int32_t bdb_do_compact(struct ldbminfo *li); + int bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task); + int bdb_cleanup(struct ldbminfo *li); + int bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock); +-- +2.38.1 + diff --git a/SOURCES/0001-Revert-4866-cl-trimming-not-applicable-in-1.4.3.patch b/SOURCES/0001-Revert-4866-cl-trimming-not-applicable-in-1.4.3.patch deleted file mode 100644 index 294dd0c..0000000 --- a/SOURCES/0001-Revert-4866-cl-trimming-not-applicable-in-1.4.3.patch +++ /dev/null @@ -1,86 +0,0 @@ -From 36da9be6b82c96a656fa6dd1f99e5a7c41c7652a Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Mon, 23 May 2022 16:53:41 +0200 -Subject: [PATCH] Revert 4866 - cl trimming not applicable in 1.4.3 - ---- - .../suites/healthcheck/health_repl_test.py | 2 +- - .../tests/suites/replication/acceptance_test.py | 17 +---------------- - src/lib389/lib389/replica.py | 13 ------------- - 3 files changed, 2 insertions(+), 30 deletions(-) - -diff --git a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py -index 9e1af2ff8..238d25290 100644 ---- a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py -+++ b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py -@@ -74,7 +74,7 @@ def set_changelog_trimming(instance): - inst_changelog = Changelog5(instance) - - log.info('Set nsslapd-changelogmaxage to 30d') -- inst_changelog.set_max_age('30d') -+ inst_changelog.add('nsslapd-changelogmaxage', '30') - - - @pytest.mark.ds50873 -diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py -index 8b96df7a4..a5f0c4c6b 100644 ---- a/dirsrvtests/tests/suites/replication/acceptance_test.py -+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py -@@ -15,7 +15,7 @@ from lib389.topologies import topology_m4 as topo_m4 - from lib389.topologies import topology_m2 as topo_m2 - from . 
import get_repl_entries - from lib389.idm.user import UserAccount --from lib389.replica import ReplicationManager, Changelog -+from lib389.replica import ReplicationManager - from lib389._constants import * - - pytestmark = pytest.mark.tier0 -@@ -645,21 +645,6 @@ def test_csngen_task(topo_m2): - assert m1.searchErrorsLog("_csngen_gen_tester_main") - - --def test_default_cl_trimming_enabled(topo_m2): -- """Check that changelog trimming was enabled by default -- -- :id: c37b9a28-f961-4867-b8a1-e81edd7f9bf3 -- :setup: Supplier Instance -- :steps: -- 1. Check changelog has trimming set up by default -- :expectedresults: -- 1. Success -- """ -- -- # Set up changelog trimming by default -- cl = Changelog(topo_m2.ms["supplier1"], DEFAULT_SUFFIX) -- assert cl.get_attr_val_utf8("nsslapd-changelogmaxage") == "7d" -- - - if __name__ == '__main__': - # Run isolated -diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py -index c7328605b..90905dbf1 100644 ---- a/src/lib389/lib389/replica.py -+++ b/src/lib389/lib389/replica.py -@@ -1667,19 +1667,6 @@ class Replicas(DSLdapObjects): - self._childobject = Replica - self._basedn = DN_MAPPING_TREE - -- def create(self, rdn=None, properties=None): -- replica = super(Replicas, self).create(rdn, properties) -- -- # Set up changelog trimming by default -- if properties is not None: -- for attr, val in properties.items(): -- if attr.lower() == 'nsds5replicaroot': -- cl = Changelog(self._instance, val[0]) -- cl.set_max_age("7d") -- break -- -- return replica -- - def get(self, selector=[], dn=None): - """Get a child entry (DSLdapObject, Replica, etc.) with dn or selector - using a base DN and objectClasses of our object (DSLdapObjects, Replicas, etc.) --- -2.31.1 - diff --git a/SOURCES/0002-Issue-4877-RFE-EntryUUID-to-validate-UUIDs-on-fixup-.patch b/SOURCES/0002-Issue-4877-RFE-EntryUUID-to-validate-UUIDs-on-fixup-.patch deleted file mode 100644 index 55643eb..0000000 --- a/SOURCES/0002-Issue-4877-RFE-EntryUUID-to-validate-UUIDs-on-fixup-.patch +++ /dev/null @@ -1,77 +0,0 @@ -From 6c8906559cd049b14b08e4d3158338f6611f04e4 Mon Sep 17 00:00:00 2001 -From: Firstyear -Date: Fri, 20 Aug 2021 09:18:50 +1000 -Subject: [PATCH] Issue 4877 - RFE - EntryUUID to validate UUIDs on fixup - (#4878) - -Bug Description: Due to changing the syntax of EntryUUID's -to string, we may have invalid EntryUUID's imported into -the database. - -Fix Description: To resolve this during a fixup we validate -that Uuid's have a valid syntax. If they do not, we regenerate -them. - -fixes: https://github.com/389ds/389-ds-base/issues/4877 - -Author: William Brown - -Review by: @mreynolds389 ---- - src/plugins/entryuuid/src/lib.rs | 28 ++++++++++++++++++++-------- - 1 file changed, 20 insertions(+), 8 deletions(-) - -diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs -index 29a9f1258..ad3faef4b 100644 ---- a/src/plugins/entryuuid/src/lib.rs -+++ b/src/plugins/entryuuid/src/lib.rs -@@ -144,11 +144,17 @@ impl SlapiPlugin3 for EntryUuid { - // Error if the first filter is empty? - - // Now, to make things faster, we wrap the filter in a exclude term. -+ -+ // 2021 - #4877 because we allow entryuuid to be strings, on import these may -+ // be invalid. As a result, we DO need to allow the fixup to check the entryuuid -+ // value is correct, so we can not exclude these during the search. 
-+ /* - let raw_filter = if !raw_filter.starts_with('(') && !raw_filter.ends_with('(') { - format!("(&({})(!(entryuuid=*)))", raw_filter) - } else { - format!("(&{}(!(entryuuid=*)))", raw_filter) - }; -+ */ - - Ok(FixupData { basedn, raw_filter }) - } -@@ -213,14 +219,20 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError - /* Supply a modification to the entry. */ - let sdn = e.get_sdnref(); - -- /* Sanity check that entryuuid doesn't already exist */ -- if e.contains_attr("entryUUID") { -- log_error!( -- ErrorLevel::Plugin, -- "skipping fixup for -> {}", -- sdn.to_dn_string() -- ); -- return Ok(()); -+ /* Check that entryuuid doesn't already exist, and is valid */ -+ if let Some(valueset) = e.get_attr("entryUUID") { -+ if valueset.iter().all(|v| { -+ let u: Result = (&v).try_into(); -+ u.is_ok() -+ }) { -+ // All values were valid uuid, move on! -+ log_error!( -+ ErrorLevel::Plugin, -+ "skipping fixup for -> {}", -+ sdn.to_dn_string() -+ ); -+ return Ok(()); -+ } - } - - // Setup the modifications --- -2.31.1 - diff --git a/SOURCES/0002-Issue-5544-Increase-default-task-TTL.patch b/SOURCES/0002-Issue-5544-Increase-default-task-TTL.patch new file mode 100644 index 0000000..542358c --- /dev/null +++ b/SOURCES/0002-Issue-5544-Increase-default-task-TTL.patch @@ -0,0 +1,30 @@ +From adb1baa6fd9fcfa0ca6d4a84d918e25adc405afd Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 28 Nov 2022 09:47:09 -0500 +Subject: [PATCH 2/3] Issue 5544 - Increase default task TTL + +Description: Increase the Time To Live of tasks from 1 hour to 12 hours + +relates: https://github.com/389ds/389-ds-base/issues/5544 + +Reviewed by: progier(Thanks!) +--- + ldap/servers/slapd/task.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c +index 71d5a2fb5..1a8be6c85 100644 +--- a/ldap/servers/slapd/task.c ++++ b/ldap/servers/slapd/task.c +@@ -48,7 +48,7 @@ static uint64_t shutting_down = 0; + #define TASK_DATE_NAME "nsTaskCreated" + #define TASK_WARNING_NAME "nsTaskWarning" + +-#define DEFAULT_TTL "3600" /* seconds */ ++#define DEFAULT_TTL "43200" /* 12 hours in seconds */ + #define TASK_SYSCONFIG_FILE_ATTR "sysconfigfile" /* sysconfig reload task file attr */ + #define TASK_SYSCONFIG_LOGCHANGES_ATTR "logchanges" + #define TASK_TOMBSTONE_FIXUP "fixup tombstones task" +-- +2.38.1 + diff --git a/SOURCES/0003-Issue-5126-Memory-leak-in-slapi_ldap_get_lderrno-515.patch b/SOURCES/0003-Issue-5126-Memory-leak-in-slapi_ldap_get_lderrno-515.patch deleted file mode 100644 index bab2e50..0000000 --- a/SOURCES/0003-Issue-5126-Memory-leak-in-slapi_ldap_get_lderrno-515.patch +++ /dev/null @@ -1,780 +0,0 @@ -From 63e1ceac74cdfda7cf432537a18670e9562b58df Mon Sep 17 00:00:00 2001 -From: progier389 -Date: Mon, 2 May 2022 18:43:25 +0200 -Subject: [PATCH] Issue 5126 - Memory leak in slapi_ldap_get_lderrno (#5153) -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -* Issue 5126 - Memory leak in slapi_ldap_get_lderrno - -The problem is that some time ago libldap API replaced ​LDAP_OPT_ERROR_STRING whose data should not be freed by -LDAP_OPT_DIAGNOSTIC_MESSAGE whose data must be freed. -slapi_ldap_get_lderrno was adapted to use the new option but the callers were not modified to free the value. - -The Solution: - Insure that we also need to free slapi_ldap_get_lderrno value if legacy LDAP_OPT_ERROR_STRING is used (by duping the value) - Insure that the callers free the value. 
- -Added test case about replication using SASL/Digest-md5 authentication -Added test case to check this leak -Also updated test case about SASL/GSSAPI to be comapatible with current lib389 framework but marked as skipped because it requires a specific configuration (This path should be tested by IPA tests) -Fixed valgrind lib389 function to run on prefixed installation without needing to be root. -At last I also improved lib389 mapped object to have a better diagnostic when LDAP operation fails (by adding the request within the exception) - - issue: 5126 https://github.com/389ds/389-ds-base/issues/5126 - -Reviewd by: @droideck - -(cherry picked from commit 4d89e11494233d8297896540bc752cfdbab2cc69) ---- - .../suites/gssapi_repl/gssapi_repl_test.py | 31 ++- - .../tests/suites/replication/sasl_m2_test.py | 185 ++++++++++++++++++ - ldap/servers/plugins/chainingdb/cb_search.c | 6 +- - ldap/servers/plugins/passthru/ptbind.c | 2 + - .../plugins/replication/repl5_connection.c | 4 + - .../plugins/replication/windows_connection.c | 3 + - ldap/servers/slapd/ldaputil.c | 6 + - src/lib389/lib389/_mapped_object.py | 76 ++++--- - src/lib389/lib389/utils.py | 40 +++- - 9 files changed, 311 insertions(+), 42 deletions(-) - create mode 100644 dirsrvtests/tests/suites/replication/sasl_m2_test.py - -diff --git a/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py b/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py -index 41f323c06..402684aab 100644 ---- a/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py -+++ b/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py -@@ -9,6 +9,7 @@ - import pytest - from lib389.tasks import * - from lib389.utils import * -+from lib389.agreement import * - from lib389.topologies import topology_m2 - - pytestmark = pytest.mark.tier2 -@@ -65,10 +66,27 @@ def _allow_machine_account(inst, name): - # First we need to get the mapping tree dn - mt = inst.mappingtree.list(suffix=DEFAULT_SUFFIX)[0] - inst.modify_s('cn=replica,%s' % mt.dn, [ -- (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN', "uid=%s,ou=Machines,%s" % (name, DEFAULT_SUFFIX)) -+ (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN', f"uid={name},ou=Machines,{DEFAULT_SUFFIX}".encode('utf-8')) - ]) - -- -+def _verify_etc_hosts(): -+ #Check if /etc/hosts is compatible with the test -+ NEEDED_HOSTS = ( ('ldapkdc.example.com', '127.0.0.1'), -+ ('ldapkdc1.example.com', '127.0.1.1'), -+ ('ldapkdc2.example.com', '127.0.2.1')) -+ found_hosts = {} -+ with open('/etc/hosts','r') as f: -+ for l in f: -+ s = l.split() -+ if len(s) < 2: -+ continue -+ for nh in NEEDED_HOSTS: -+ if (s[0] == nh[1] and s[1] == nh[0]): -+ found_hosts[s[1]] = True -+ return len(found_hosts) == len(NEEDED_HOSTS) -+ -+@pytest.mark.skipif(not _verify_etc_hosts(), reason="/etc/hosts does not contains the needed hosts.") -+@pytest.mark.skipif(True, reason="Test disabled because it requires specific kerberos requirement (server principal, keytab, etc ...") - def test_gssapi_repl(topology_m2): - """Test gssapi authenticated replication agreement of two suppliers using KDC - -@@ -94,8 +112,6 @@ def test_gssapi_repl(topology_m2): - 6. Test User should be created on M1 and M2 both - 7. 
Test User should be created on M1 and M2 both - """ -- -- return - supplier1 = topology_m2.ms["supplier1"] - supplier2 = topology_m2.ms["supplier2"] - -@@ -121,6 +137,7 @@ def test_gssapi_repl(topology_m2): - properties = {RA_NAME: r'meTo_$host:$port', - RA_METHOD: 'SASL/GSSAPI', - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} -+ supplier1.agreement.delete(suffix=SUFFIX, consumer_host=supplier2.host, consumer_port=supplier2.port) - m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties) - if not m1_m2_agmt: - log.fatal("Fail to create a supplier -> supplier replica agreement") -@@ -133,6 +150,7 @@ def test_gssapi_repl(topology_m2): - properties = {RA_NAME: r'meTo_$host:$port', - RA_METHOD: 'SASL/GSSAPI', - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} -+ supplier2.agreement.delete(suffix=SUFFIX, consumer_host=supplier1.host, consumer_port=supplier1.port) - m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier1.host, port=supplier1.port, properties=properties) - if not m2_m1_agmt: - log.fatal("Fail to create a supplier -> supplier replica agreement") -@@ -145,8 +163,9 @@ def test_gssapi_repl(topology_m2): - # - # Initialize all the agreements - # -- supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2) -- supplier1.waitForReplInit(m1_m2_agmt) -+ agmt = Agreement(supplier1, m1_m2_agmt) -+ agmt.begin_reinit() -+ agmt.wait_reinit() - - # Check replication is working... - if supplier1.testReplication(DEFAULT_SUFFIX, supplier2): -diff --git a/dirsrvtests/tests/suites/replication/sasl_m2_test.py b/dirsrvtests/tests/suites/replication/sasl_m2_test.py -new file mode 100644 -index 000000000..d7406ac7e ---- /dev/null -+++ b/dirsrvtests/tests/suites/replication/sasl_m2_test.py -@@ -0,0 +1,185 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2022 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. 
-+# --- END COPYRIGHT BLOCK --- -+# -+import logging -+import os -+import pytest -+import ldap -+import uuid -+from lib389.utils import ds_is_older, valgrind_enable, valgrind_disable, valgrind_get_results_file, valgrind_check_file -+ -+from lib389.idm.services import ServiceAccounts -+from lib389.idm.group import Groups -+from lib389.config import CertmapLegacy, Config -+from lib389._constants import DEFAULT_SUFFIX -+from lib389.agreement import Agreements -+from lib389._mapped_object import DSLdapObject -+from lib389.replica import ReplicationManager, Replicas, BootstrapReplicationManager -+from lib389.topologies import topology_m2 as topo_m2 -+ -+pytestmark = pytest.mark.tier1 -+ -+DEBUGGING = os.getenv("DEBUGGING", default=False) -+if DEBUGGING: -+ logging.getLogger(__name__).setLevel(logging.DEBUG) -+else: -+ logging.getLogger(__name__).setLevel(logging.INFO) -+log = logging.getLogger(__name__) -+ -+def set_sasl_md5_client_auth(inst, to): -+ # Create the certmap before we restart -+ cm = CertmapLegacy(to) -+ certmaps = cm.list() -+ certmaps['default']['nsSaslMapRegexString'] = '^dn:\\(.*\\)' -+ certmaps['default']['nsSaslMapBaseDNTemplate'] = 'cn=config' -+ certmaps['default']['nsSaslMapFilterTemplate'] = '(objectclass=*)' -+ cm.set(certmaps) -+ -+ Config(to).replace("passwordStorageScheme", 'CLEAR') -+ -+ # Create a repl manager on the replica -+ replication_manager_pwd = 'secret12' -+ brm = BootstrapReplicationManager(to) -+ try: -+ brm.delete() -+ except ldap.NO_SUCH_OBJECT: -+ pass -+ brm.create(properties={ -+ 'cn': brm.common_name, -+ 'userPassword': replication_manager_pwd -+ }) -+ replication_manager_dn = brm.dn -+ -+ replica = Replicas(inst).get(DEFAULT_SUFFIX) -+ replica.set('nsDS5ReplicaBindDN', brm.dn) -+ replica.remove_all('nsDS5ReplicaBindDNgroup') -+ agmt = replica.get_agreements().list()[0] -+ agmt.replace_many( -+ ('nsDS5ReplicaBindMethod', 'SASL/DIGEST-MD5'), -+ ('nsDS5ReplicaTransportInfo', 'LDAP'), -+ ('nsDS5ReplicaPort', str(to.port)), -+ ('nsDS5ReplicaBindDN', replication_manager_dn), -+ ('nsDS5ReplicaCredentials', replication_manager_pwd), -+ ) -+ -+ -+def gen_valgrind_wrapper(dir): -+ name=f"{dir}/VALGRIND" -+ with open(name, 'w') as f: -+ f.write('#!/bin/sh\n') -+ f.write('export SASL_PATH=foo\n') -+ f.write(f'valgrind -q --tool=memcheck --leak-check=yes --leak-resolution=high --num-callers=50 --log-file=/var/tmp/slapd.vg.$$ {dir}/ns-slapd.original "$@"\n') -+ os.chmod(name, 0o755) -+ return name -+ -+@pytest.fixture -+def use_valgrind(topo_m2, request): -+ """Adds entries to the supplier1""" -+ -+ log.info("Enable valgrind") -+ m1 = topo_m2.ms['supplier1'] -+ m2 = topo_m2.ms['supplier2'] -+ if m1.has_asan(): -+ pytest.skip('Tescase using valgring cannot run on asan enabled build') -+ return -+ set_sasl_md5_client_auth(m1, m2) -+ set_sasl_md5_client_auth(m2, m1) -+ m1.stop() -+ m2.stop() -+ m1.systemd_override = False -+ m2.systemd_override = False -+ valgrind_enable(m1.ds_paths.sbin_dir, gen_valgrind_wrapper(m1.ds_paths.sbin_dir)) -+ -+ def fin(): -+ log.info("Disable valgrind") -+ valgrind_disable(m1.ds_paths.sbin_dir) -+ -+ request.addfinalizer(fin) -+ -+ -+def test_repl_sasl_md5_auth(topo_m2): -+ """Test replication with SASL digest-md5 authentication -+ -+ :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e2 -+ :setup: Two supplier replication -+ :steps: -+ 1. Set sasl digest/md4 on both suppliers -+ 2. Restart the instance -+ 3. Check that replication works -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. 
Replication works -+ """ -+ -+ m1 = topo_m2.ms['supplier1'] -+ m2 = topo_m2.ms['supplier2'] -+ -+ set_sasl_md5_client_auth(m1, m2) -+ set_sasl_md5_client_auth(m2, m1) -+ -+ m1.restart() -+ m2.restart() -+ -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ repl.test_replication_topology(topo_m2) -+ -+ -+@pytest.mark.skipif(not os.path.exists('/usr/bin/valgrind'), reason="valgrind is not installed.") -+def test_repl_sasl_leak(topo_m2, use_valgrind): -+ """Test replication with SASL digest-md5 authentication -+ -+ :id: 180e088e-841c-11ec-af4f-482ae39447e5 -+ :setup: Two supplier replication, valgrind -+ :steps: -+ 1. Set sasl digest/md4 on both suppliers -+ 2. Break sasl by setting invalid PATH -+ 3. Restart the instances -+ 4. Perform a change -+ 5. Poke replication 100 times -+ 6. Stop server -+ 7. Check presence of "SASL(-4): no mechanism available: No worthy mechs found" message in error log -+ 8 Check that there is no leak about slapi_ldap_get_lderrno -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 2. Success -+ 4. Success -+ 5. Success -+ 6. Success -+ 7. Success -+ 8. Success -+ """ -+ -+ m1 = topo_m2.ms['supplier1'] -+ m2 = topo_m2.ms['supplier2'] -+ -+ os.environ["SASL_PATH"] = 'foo' -+ -+ m1.start() -+ m2.start() -+ -+ resfile=valgrind_get_results_file(m1) -+ -+ # Perform a change -+ from_groups = Groups(m1, basedn=DEFAULT_SUFFIX, rdn=None) -+ from_group = from_groups.get('replication_managers') -+ change = str(uuid.uuid4()) -+ from_group.replace('description', change) -+ -+ # Poke replication to trigger thev leak -+ replica = Replicas(m1).get(DEFAULT_SUFFIX) -+ agmt = Agreements(m1, replica.dn).list()[0] -+ for i in range(0, 100): -+ agmt.pause() -+ agmt.resume() -+ -+ m1.stop() -+ assert m1.searchErrorsLog("worthy") -+ assert not valgrind_check_file(resfile, 'slapi_ldap_get_lderrno'); -+ -diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c -index ffc8f56f8..d6f30b357 100644 ---- a/ldap/servers/plugins/chainingdb/cb_search.c -+++ b/ldap/servers/plugins/chainingdb/cb_search.c -@@ -348,10 +348,9 @@ chainingdb_build_candidate_list(Slapi_PBlock *pb) - warned_rc = 1; - } - cb_send_ldap_result(pb, rc, NULL, ENDUSERMSG, 0, NULL); -- /* BEWARE: matched_msg and error_msg points */ -+ /* BEWARE: matched_msg points */ - /* to ld fields. */ - matched_msg = NULL; -- error_msg = NULL; - rc = -1; - } - -@@ -695,10 +694,9 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) - } - cb_send_ldap_result(pb, rc, matched_msg, ENDUSERMSG, 0, NULL); - -- /* BEWARE: Don't free matched_msg && error_msg */ -+ /* BEWARE: Don't free matched_msg */ - /* Points to the ld fields */ - matched_msg = NULL; -- error_msg = NULL; - retcode = -1; - } else { - /* Add control response sent by the farm server */ -diff --git a/ldap/servers/plugins/passthru/ptbind.c b/ldap/servers/plugins/passthru/ptbind.c -index 705ab2c3a..3e79b47f6 100644 ---- a/ldap/servers/plugins/passthru/ptbind.c -+++ b/ldap/servers/plugins/passthru/ptbind.c -@@ -33,6 +33,8 @@ passthru_simple_bind_once_s(PassThruServer *srvr, const char *dn, struct berval - * are only interested in recovering silently when the remote server is up - * but decided to close our connection, we retry without pausing between - * attempts. -+ * -+ * Note that errmsgp must be freed by the caller. 
- */ - int - passthru_simple_bind_s(Slapi_PBlock *pb, PassThruServer *srvr, int tries, const char *dn, struct berval *creds, LDAPControl **reqctrls, int *lderrnop, char **matcheddnp, char **errmsgp, struct berval ***refurlsp, LDAPControl ***resctrlsp) -diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c -index 2dd74f9e7..b6bc21c46 100644 ---- a/ldap/servers/plugins/replication/repl5_connection.c -+++ b/ldap/servers/plugins/replication/repl5_connection.c -@@ -244,6 +244,7 @@ conn_delete_internal(Repl_Connection *conn) - PR_ASSERT(NULL != conn); - close_connection_internal(conn); - /* slapi_ch_free accepts NULL pointer */ -+ slapi_ch_free_string(&conn->last_ldap_errmsg); - slapi_ch_free((void **)&conn->hostname); - slapi_ch_free((void **)&conn->binddn); - slapi_ch_free((void **)&conn->plain); -@@ -450,6 +451,7 @@ conn_read_result_ex(Repl_Connection *conn, char **retoidp, struct berval **retda - char *s = NULL; - - rc = slapi_ldap_get_lderrno(conn->ld, NULL, &s); -+ slapi_ch_free_string(&conn->last_ldap_errmsg); - conn->last_ldap_errmsg = s; - conn->last_ldap_error = rc; - /* some errors will require a disconnect and retry the connection -@@ -1937,6 +1939,7 @@ bind_and_check_pwp(Repl_Connection *conn, char *binddn, char *password) - agmt_get_long_name(conn->agmt), - mech ? mech : "SIMPLE", rc, - ldap_err2string(rc), errmsg ? errmsg : ""); -+ slapi_ch_free_string(&errmsg); - } else { - char *errmsg = NULL; - /* errmsg is a pointer directly into the ld structure - do not free */ -@@ -1946,6 +1949,7 @@ bind_and_check_pwp(Repl_Connection *conn, char *binddn, char *password) - agmt_get_long_name(conn->agmt), - mech ? mech : "SIMPLE", rc, - ldap_err2string(rc), errmsg ? errmsg : ""); -+ slapi_ch_free_string(&errmsg); - } - - return (CONN_OPERATION_FAILED); -diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c -index 5eca5fad1..d3f6a4e93 100644 ---- a/ldap/servers/plugins/replication/windows_connection.c -+++ b/ldap/servers/plugins/replication/windows_connection.c -@@ -331,6 +331,7 @@ windows_perform_operation(Repl_Connection *conn, int optype, const char *dn, LDA - "windows_perform_operation - %s: Received error %d: %s for %s operation\n", - agmt_get_long_name(conn->agmt), - rc, s ? s : "NULL", op_string); -+ slapi_ch_free_string(&s); - conn->last_ldap_error = rc; - /* some errors will require a disconnect and retry the connection - later */ -@@ -1709,6 +1710,7 @@ bind_and_check_pwp(Repl_Connection *conn, char *binddn, char *password) - agmt_get_long_name(conn->agmt), - mech ? mech : "SIMPLE", rc, - ldap_err2string(rc), errmsg); -+ slapi_ch_free_string(&errmsg); - } else { - char *errmsg = NULL; - /* errmsg is a pointer directly into the ld structure - do not free */ -@@ -1718,6 +1720,7 @@ bind_and_check_pwp(Repl_Connection *conn, char *binddn, char *password) - agmt_get_long_name(conn->agmt), - mech ? 
mech : "SIMPLE", rc, - ldap_err2string(rc), errmsg); -+ slapi_ch_free_string(&errmsg); - } - - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= bind_and_check_pwp - CONN_OPERATION_FAILED\n"); -diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c -index 336ca3912..db3300e30 100644 ---- a/ldap/servers/slapd/ldaputil.c -+++ b/ldap/servers/slapd/ldaputil.c -@@ -375,6 +375,8 @@ slapi_ldap_url_parse(const char *url, LDAPURLDesc **ludpp, int require_dn, int * - - #include - -+ -+/* Warning: caller must free s (if not NULL) */ - int - slapi_ldap_get_lderrno(LDAP *ld, char **m, char **s) - { -@@ -389,6 +391,9 @@ slapi_ldap_get_lderrno(LDAP *ld, char **m, char **s) - ldap_get_option(ld, LDAP_OPT_DIAGNOSTIC_MESSAGE, s); - #else - ldap_get_option(ld, LDAP_OPT_ERROR_STRING, s); -+ if (*s) { -+ *s = slapi_ch_strdup(*s); -+ } - #endif - } - return rc; -@@ -1517,6 +1522,7 @@ slapd_ldap_sasl_interactive_bind( - mech ? mech : "SIMPLE", - rc, ldap_err2string(rc), errmsg, - errno, slapd_system_strerror(errno)); -+ slapi_ch_free_string(&errmsg); - if (can_retry_bind(ld, mech, bindid, creds, rc, errmsg)) { - ; /* pass through to retry one time */ - } else { -diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py -index 48d3879a3..1c314322b 100644 ---- a/src/lib389/lib389/_mapped_object.py -+++ b/src/lib389/lib389/_mapped_object.py -@@ -67,6 +67,34 @@ def _gen_filter(attrtypes, values, extra=None): - return filt - - -+# Define wrappers around the ldap operation to have a clear diagnostic -+def _ldap_op_s(inst, f, fname, *args, **kwargs): -+ # f.__name__ says 'inner' so the wanted name is provided as argument -+ try: -+ return f(*args, **kwargs) -+ except ldap.LDAPError as e: -+ new_desc = f"{fname}({args},{kwargs}) on instance {inst.serverid}"; -+ if len(e.args) >= 1: -+ e.args[0]['ldap_request'] = new_desc -+ logging.getLogger().error(f"args={e.args}") -+ raise -+ -+def _add_ext_s(inst, *args, **kwargs): -+ return _ldap_op_s(inst, inst.add_ext_s, 'add_ext_s', *args, **kwargs) -+ -+def _modify_ext_s(inst, *args, **kwargs): -+ return _ldap_op_s(inst, inst.modify_ext_s, 'modify_ext_s', *args, **kwargs) -+ -+def _delete_ext_s(inst, *args, **kwargs): -+ return _ldap_op_s(inst, inst.delete_ext_s, 'delete_ext_s', *args, **kwargs) -+ -+def _search_ext_s(inst, *args, **kwargs): -+ return _ldap_op_s(inst, inst.search_ext_s, 'search_ext_s', *args, **kwargs) -+ -+def _search_s(inst, *args, **kwargs): -+ return _ldap_op_s(inst, inst.search_s, 'search_s', *args, **kwargs) -+ -+ - class DSLogging(object): - """The benefit of this is automatic name detection, and correct application - of level and verbosity to the object. 
-@@ -129,7 +157,7 @@ class DSLdapObject(DSLogging, DSLint): - :returns: Entry object - """ - -- return self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=["*"], -+ return _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=["*"], - serverctrls=self._server_controls, clientctrls=self._client_controls, - escapehatch='i am sure')[0] - -@@ -140,7 +168,7 @@ class DSLdapObject(DSLogging, DSLint): - """ - - try: -- self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1, -+ _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1, - serverctrls=self._server_controls, clientctrls=self._client_controls, - escapehatch='i am sure') - except ldap.NO_SUCH_OBJECT: -@@ -156,7 +184,7 @@ class DSLdapObject(DSLogging, DSLint): - search_scope = ldap.SCOPE_ONE - elif scope == 'subtree': - search_scope = ldap.SCOPE_SUBTREE -- return self._instance.search_ext_s(self._dn, search_scope, filter, -+ return _search_ext_s(self._instance,self._dn, search_scope, filter, - serverctrls=self._server_controls, - clientctrls=self._client_controls, - escapehatch='i am sure') -@@ -166,7 +194,7 @@ class DSLdapObject(DSLogging, DSLint): - - :returns: LDIF formatted string - """ -- e = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=attrlist, -+ e = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=attrlist, - serverctrls=self._server_controls, clientctrls=self._client_controls, - escapehatch='i am sure')[0] - return e.__repr__() -@@ -258,7 +286,7 @@ class DSLdapObject(DSLogging, DSLint): - raise ValueError("Invalid state. Cannot get presence on instance that is not ONLINE") - self._log.debug("%s present(%r) %s" % (self._dn, attr, value)) - -- self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ], -+ _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ], - serverctrls=self._server_controls, clientctrls=self._client_controls, - escapehatch='i am sure')[0] - values = self.get_attr_vals_bytes(attr) -@@ -313,7 +341,7 @@ class DSLdapObject(DSLogging, DSLint): - else: - value = [ensure_bytes(arg[1])] - mods.append((ldap.MOD_REPLACE, ensure_str(arg[0]), value)) -- return self._instance.modify_ext_s(self._dn, mods, serverctrls=self._server_controls, -+ return _modify_ext_s(self._instance,self._dn, mods, serverctrls=self._server_controls, - clientctrls=self._client_controls, escapehatch='i am sure') - - # This needs to work on key + val, and key -@@ -457,7 +485,7 @@ class DSLdapObject(DSLogging, DSLint): - elif value is not None: - value = [ensure_bytes(value)] - -- return self._instance.modify_ext_s(self._dn, [(action, key, value)], -+ return _modify_ext_s(self._instance,self._dn, [(action, key, value)], - serverctrls=self._server_controls, clientctrls=self._client_controls, - escapehatch='i am sure') - -@@ -497,7 +525,7 @@ class DSLdapObject(DSLogging, DSLint): - else: - # Error too many items - raise ValueError('Too many arguments in the mod op') -- return self._instance.modify_ext_s(self._dn, mod_list, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') -+ return _modify_ext_s(self._instance,self._dn, mod_list, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') - - def _unsafe_compare_attribute(self, other): - """Compare two attributes from two objects. 
This is currently marked unsafe as it's -@@ -593,7 +621,7 @@ class DSLdapObject(DSLogging, DSLint): - raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE") - else: - # retrieving real(*) and operational attributes(+) -- attrs_entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, -+ attrs_entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, - attrlist=["*", "+"], serverctrls=self._server_controls, - clientctrls=self._client_controls, escapehatch='i am sure')[0] - # getting dict from 'entry' object -@@ -613,7 +641,7 @@ class DSLdapObject(DSLogging, DSLint): - raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE") - else: - # retrieving real(*) and operational attributes(+) -- attrs_entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, -+ attrs_entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, - attrlist=["*", "+"], serverctrls=self._server_controls, - clientctrls=self._client_controls, escapehatch='i am sure')[0] - # getting dict from 'entry' object -@@ -627,7 +655,7 @@ class DSLdapObject(DSLogging, DSLint): - if self._instance.state != DIRSRV_STATE_ONLINE: - raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE") - else: -- entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, -+ entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, - attrlist=keys, serverctrls=self._server_controls, - clientctrls=self._client_controls, escapehatch='i am sure')[0] - return entry.getValuesSet(keys) -@@ -636,7 +664,7 @@ class DSLdapObject(DSLogging, DSLint): - self._log.debug("%s get_attrs_vals_utf8(%r)" % (self._dn, keys)) - if self._instance.state != DIRSRV_STATE_ONLINE: - raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE") -- entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=keys, -+ entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=keys, - serverctrls=self._server_controls, clientctrls=self._client_controls, - escapehatch='i am sure')[0] - vset = entry.getValuesSet(keys) -@@ -655,7 +683,7 @@ class DSLdapObject(DSLogging, DSLint): - else: - # It would be good to prevent the entry code intercepting this .... - # We have to do this in this method, because else we ignore the scope base. -- entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, -+ entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, - attrlist=[key], serverctrls=self._server_controls, - clientctrls=self._client_controls, escapehatch='i am sure')[0] - vals = entry.getValues(key) -@@ -675,7 +703,7 @@ class DSLdapObject(DSLogging, DSLint): - # In the future, I plan to add a mode where if local == true, we - # can use get on dse.ldif to get values offline. 
- else: -- entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, -+ entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, - attrlist=[key], serverctrls=self._server_controls, - clientctrls=self._client_controls, escapehatch='i am sure')[0] - return entry.getValue(key) -@@ -831,11 +859,11 @@ class DSLdapObject(DSLogging, DSLint): - # Is there a way to mark this as offline and kill it - if recursive: - filterstr = "(|(objectclass=*)(objectclass=ldapsubentry))" -- ents = self._instance.search_s(self._dn, ldap.SCOPE_SUBTREE, filterstr, escapehatch='i am sure') -+ ents = _search_s(self._instance, self._dn, ldap.SCOPE_SUBTREE, filterstr, escapehatch='i am sure') - for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True): -- self._instance.delete_ext_s(ent.dn, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') -+ _delete_ext_s(self._instance, ent.dn, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') - else: -- self._instance.delete_ext_s(self._dn, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') -+ _delete_ext_s(self._instance, self._dn, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') - - def _validate(self, rdn, properties, basedn): - """Used to validate a create request. -@@ -933,7 +961,7 @@ class DSLdapObject(DSLogging, DSLint): - # If we are running in stateful ensure mode, we need to check if the object exists, and - # we can see the state that it is in. - try: -- self._instance.search_ext_s(dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') -+ _search_ext_s(self._instance,dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') - exists = True - except ldap.NO_SUCH_OBJECT: - pass -@@ -946,7 +974,7 @@ class DSLdapObject(DSLogging, DSLint): - mods = [] - for k, v in list(valid_props.items()): - mods.append((ldap.MOD_REPLACE, k, v)) -- self._instance.modify_ext_s(self._dn, mods, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') -+ _modify_ext_s(self._instance,self._dn, mods, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') - elif not exists: - # This case is reached in two cases. One is we are in ensure mode, and we KNOW the entry - # doesn't exist. -@@ -957,7 +985,7 @@ class DSLdapObject(DSLogging, DSLint): - e.update({'objectclass': ensure_list_bytes(self._create_objectclasses)}) - e.update(valid_props) - # We rely on exceptions here to indicate failure to the parent. -- self._instance.add_ext_s(e, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') -+ _add_ext_s(self._instance, e, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure') - self._log.debug('Created entry %s : %s' % (dn, display_log_data(e.data))) - # If it worked, we need to fix our instance dn for the object's self reference. 
Because - # we may not have a self reference yet (just created), it may have changed (someone -@@ -1104,7 +1132,7 @@ class DSLdapObjects(DSLogging, DSLints): - else: - # If not paged - try: -- results = self._instance.search_ext_s( -+ results = _search_ext_s(self._instance, - base=self._basedn, - scope=self._scope, - filterstr=filterstr, -@@ -1172,7 +1200,7 @@ class DSLdapObjects(DSLogging, DSLints): - filterstr = self._get_objectclass_filter() - self._log.debug('_gen_dn filter = %s' % filterstr) - self._log.debug('_gen_dn dn = %s' % dn) -- return self._instance.search_ext_s( -+ return _search_ext_s(self._instance, - base=dn, - scope=ldap.SCOPE_BASE, - filterstr=filterstr, -@@ -1187,7 +1215,7 @@ class DSLdapObjects(DSLogging, DSLints): - # This will yield and & filter for objectClass with as many terms as needed. - filterstr = self._get_selector_filter(selector) - self._log.debug('_gen_selector filter = %s' % filterstr) -- return self._instance.search_ext_s( -+ return _search_ext_s(self._instance, - base=self._basedn, - scope=self._scope, - filterstr=filterstr, -@@ -1261,7 +1289,7 @@ class DSLdapObjects(DSLogging, DSLints): - self._list_attrlist = attrlist - self._log.debug(f'list filter = {search_filter} with scope {scope} and attribute list {attrlist}') - try: -- results = self._instance.search_ext_s( -+ results = _search_ext_s(self._instance, - base=self._basedn, - scope=scope, - filterstr=search_filter, -diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py -index 6eba2d7b9..da966ed97 100644 ---- a/src/lib389/lib389/utils.py -+++ b/src/lib389/lib389/utils.py -@@ -52,7 +52,7 @@ from ldapurl import LDAPUrl - from contextlib import closing - - import lib389 --from lib389.paths import Paths -+from lib389.paths import ( Paths, DEFAULTS_PATH ) - from lib389.dseldif import DSEldif - from lib389._constants import ( - DEFAULT_USER, VALGRIND_WRAPPER, DN_CONFIG, CFGSUFFIX, LOCALHOST, -@@ -495,8 +495,10 @@ def valgrind_enable(sbin_dir, wrapper=None): - :raise EnvironmentError: If script is not run as 'root' - ''' - -- if os.geteuid() != 0: -- log.error('This script must be run as root to use valgrind') -+ if not os.access(sbin_dir, os.W_OK): -+ # Note: valgrind has no limitation but ns-slapd must be replaced -+ # This check allows non root user to use custom install prefix -+ log.error('This script must be run as root to use valgrind (Should at least be able to write in {sbin_dir})') - raise EnvironmentError - - if not wrapper: -@@ -542,7 +544,20 @@ def valgrind_enable(sbin_dir, wrapper=None): - e.strerror) - - # Disable selinux -- os.system('setenforce 0') -+ if os.geteuid() == 0: -+ os.system('setenforce 0') -+ -+ # Disable systemd by turning off with_system in .inf file -+ old_path = Paths()._get_defaults_loc(DEFAULTS_PATH) -+ new_path = f'{old_path}.orig' -+ os.rename(old_path, new_path) -+ with open(new_path, 'rt') as fin: -+ with open(old_path, 'wt') as fout: -+ for line in fin: -+ if line.startswith('with_systemd'): -+ fout.write('with_systemd = 0\n') -+ else: -+ fout.write(line) - - log.info('Valgrind is now enabled.') - -@@ -559,8 +574,10 @@ def valgrind_disable(sbin_dir): - :raise EnvironmentError: If script is not run as 'root' - ''' - -- if os.geteuid() != 0: -- log.error('This script must be run as root to use valgrind') -+ if not os.access(sbin_dir, os.W_OK): -+ # Note: valgrind has no limitation but ns-slapd must be replaced -+ # This check allows non root user to use custom install prefix -+ log.error('This script must be run as root to use valgrind (Should at least 
be able to write in {sbin_dir})')
- raise EnvironmentError
-
- nsslapd_orig = '%s/ns-slapd' % sbin_dir
-@@ -584,7 +601,14 @@
- e.strerror)
-
- # Enable selinux
-- os.system('setenforce 1')
-+ if os.geteuid() == 0:
-+ os.system('setenforce 1')
-+
-+ # Restore .inf file (for systemd)
-+ new_path = Paths()._get_defaults_loc(DEFAULTS_PATH)
-+ old_path = f'{new_path}.orig'
-+ if os.path.exists(old_path):
-+ os.replace(old_path, new_path)
-
- log.info('Valgrind is now disabled.')
-
-@@ -610,7 +634,7 @@ def valgrind_get_results_file(dirsrv_inst):
-
- # Run the command and grab the output
- p = os.popen(cmd)
-- results_file = p.readline()
-+ results_file = p.readline().strip()
- p.close()
-
- return results_file
---
-2.31.1
-
diff --git a/SOURCES/0003-Issue-5413-Allow-mutliple-MemberOf-fixup-tasks-with-.patch b/SOURCES/0003-Issue-5413-Allow-mutliple-MemberOf-fixup-tasks-with-.patch
new file mode 100644
index 0000000..e238bb7
--- /dev/null
+++ b/SOURCES/0003-Issue-5413-Allow-mutliple-MemberOf-fixup-tasks-with-.patch
@@ -0,0 +1,219 @@
+From 59ebf6618126547f3861fbef0b9a268f40ccb2bd Mon Sep 17 00:00:00 2001
+From: Mark Reynolds
+Date: Tue, 13 Dec 2022 09:41:34 -0500
+Subject: [PATCH 3/3] Issue 5413 - Allow multiple MemberOf fixup tasks with
+ different bases/filters
+
+Description:
+
+A change was made to only allow a single fixup task at a time, but there are
+cases where you would want to run multiple tasks but on different branches/filters.
+
+Now we maintain a linked list of bases/filters of the current running tasks to
+monitor this.
+
+relates: https://github.com/389ds/389-ds-base/issues/5413
+
+Reviewed by: tbordaz(Thanks!)
+---
+ .../suites/memberof_plugin/fixup_test.py | 5 +-
+ ldap/servers/plugins/memberof/memberof.c | 101 ++++++++++++++----
+ 2 files changed, 85 insertions(+), 21 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py b/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py
+index 9566e144c..d5369439f 100644
+--- a/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py
++++ b/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py
+@@ -59,12 +59,15 @@ def test_fixup_task_limit(topo):
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+ memberof.fixup(DEFAULT_SUFFIX)
+
++ # Add second task but on different suffix which should be allowed
++ memberof.fixup("ou=people," + DEFAULT_SUFFIX)
++
+ # Wait for first task to complete
+ task.wait()
+
+ # Add new task which should be allowed now
+ memberof.fixup(DEFAULT_SUFFIX)
+-
++
+
+ if __name__ == '__main__':
+ # Run isolated
+diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
+index f3f817f89..a5f48d2c0 100644
+--- a/ldap/servers/plugins/memberof/memberof.c
++++ b/ldap/servers/plugins/memberof/memberof.c
+@@ -52,7 +52,6 @@ static Slapi_DN* _pluginDN = NULL;
+ MemberOfConfig *qsortConfig = 0;
+ static int usetxn = 0;
+ static int premodfn = 0;
+-static PRBool fixup_running = PR_FALSE;
+ static PRLock *fixup_lock = NULL;
+ static int32_t fixup_progress_count = 0;
+ static int64_t fixup_progress_elapsed = 0;
+@@ -65,6 +64,15 @@ typedef struct _memberofstringll
+ void *next;
+ } memberofstringll;
+
++typedef struct _fixup_ll
++{
++ Slapi_DN *sdn;
++ char *filter_str;
++ void *next;
++} mo_fixup_ll;
++
++static mo_fixup_ll *fixup_list = NULL;
++
+ typedef struct _memberof_get_groups_data
+ {
+ MemberOfConfig *config;
+@@ -438,6 +446,15 @@ memberof_postop_close(Slapi_PBlock *pb __attribute__((unused)))
+ PR_DestroyLock(fixup_lock);
+
fixup_lock = NULL; + ++ mo_fixup_ll *fixup_task = fixup_list; ++ while (fixup_task != NULL) { ++ mo_fixup_ll *tmp = fixup_task; ++ fixup_task = fixup_task->next; ++ slapi_sdn_free(&tmp->sdn); ++ slapi_ch_free_string(&tmp->filter_str); ++ slapi_ch_free((void**)&tmp); ++ } ++ + slapi_log_err(SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM, + "<-- memberof_postop_close\n"); + return 0; +@@ -2817,7 +2834,6 @@ memberof_fixup_task_thread(void *arg) + } + + PR_Lock(fixup_lock); +- fixup_running = PR_TRUE; + fixup_progress_count = 0; + fixup_progress_elapsed = slapi_current_rel_time_t(); + fixup_start_time = slapi_current_rel_time_t(); +@@ -2849,11 +2865,10 @@ memberof_fixup_task_thread(void *arg) + /* Mark this as a task operation */ + configCopy.fixup_task = 1; + configCopy.task = task; +- ++ Slapi_DN *sdn = slapi_sdn_new_dn_byref(td->dn); + if (usetxn) { +- Slapi_DN *sdn = slapi_sdn_new_dn_byref(td->dn); + Slapi_Backend *be = slapi_be_select_exact(sdn); +- slapi_sdn_free(&sdn); ++ + if (be) { + fixup_pb = slapi_pblock_new(); + slapi_pblock_set(fixup_pb, SLAPI_BACKEND, be); +@@ -2894,14 +2909,37 @@ done: + fixup_progress_count, slapi_current_rel_time_t() - fixup_start_time); + slapi_task_inc_progress(task); + ++ /* Cleanup task linked list */ ++ PR_Lock(fixup_lock); ++ mo_fixup_ll *prev = NULL; ++ for (mo_fixup_ll *curr = fixup_list; curr; curr = curr->next) { ++ mo_fixup_ll *next = curr->next; ++ if (slapi_sdn_compare(curr->sdn, sdn) == 0 && ++ strcasecmp(curr->filter_str, td->filter_str) == 0) ++ { ++ /* free current code */ ++ slapi_sdn_free(&curr->sdn); ++ slapi_ch_free_string(&curr->filter_str); ++ slapi_ch_free((void**)&curr); ++ ++ /* update linked list */ ++ if (prev == NULL) { ++ /* first node */ ++ fixup_list = next; ++ } else { ++ prev->next = next; ++ } ++ break; ++ } ++ prev = curr; ++ } ++ PR_Unlock(fixup_lock); ++ slapi_sdn_free(&sdn); ++ + /* this will queue the destruction of the task */ + slapi_task_finish(task, rc); + slapi_task_dec_refcount(task); + +- PR_Lock(fixup_lock); +- fixup_running = PR_FALSE; +- PR_Unlock(fixup_lock); +- + slapi_log_err(SLAPI_LOG_INFO, MEMBEROF_PLUGIN_SUBSYSTEM, + "memberof_fixup_task_thread - Memberof task finished (processed %d entries in %ld seconds)\n", + fixup_progress_count, slapi_current_rel_time_t() - fixup_start_time); +@@ -2919,23 +2957,13 @@ memberof_task_add(Slapi_PBlock *pb, + int rv = SLAPI_DSE_CALLBACK_OK; + task_data *mytaskdata = NULL; + Slapi_Task *task = NULL; ++ Slapi_DN *sdn = NULL; + char *bind_dn; + const char *filter; + const char *dn = 0; + + *returncode = LDAP_SUCCESS; + +- PR_Lock(fixup_lock); +- if (fixup_running) { +- PR_Unlock(fixup_lock); +- *returncode = LDAP_UNWILLING_TO_PERFORM; +- slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, +- "memberof_task_add - there is already a fixup task running\n"); +- rv = SLAPI_DSE_CALLBACK_ERROR; +- goto out; +- } +- PR_Unlock(fixup_lock); +- + /* get arg(s) */ + if ((dn = slapi_entry_attr_get_ref(e, "basedn")) == NULL) { + *returncode = LDAP_OBJECT_CLASS_VIOLATION; +@@ -2949,6 +2977,39 @@ memberof_task_add(Slapi_PBlock *pb, + goto out; + } + ++ PR_Lock(fixup_lock); ++ sdn = slapi_sdn_new_dn_byval(dn); ++ if (fixup_list == NULL) { ++ fixup_list = (mo_fixup_ll *)slapi_ch_calloc(1, sizeof(mo_fixup_ll)); ++ fixup_list->sdn = sdn; ++ fixup_list->filter_str = slapi_ch_strdup(filter); ++ } else { ++ for (mo_fixup_ll *fixup_task = fixup_list; fixup_task; fixup_task = fixup_task->next) { ++ if (slapi_sdn_compare(sdn, fixup_task->sdn) == 0 && ++ strcasecmp(filter, fixup_task->filter_str) == 
0) ++ { ++ /* Found an identical running task, reject it */ ++ PR_Unlock(fixup_lock); ++ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, ++ "memberof_task_add - there is already an identical fixup task running: base: %s filter: %s\n", ++ slapi_sdn_get_dn(sdn), filter); ++ slapi_sdn_free(&sdn); ++ *returncode = LDAP_UNWILLING_TO_PERFORM; ++ rv = SLAPI_DSE_CALLBACK_ERROR; ++ goto out; ++ } ++ } ++ /* Add the new task DN to the top of the list */ ++ mo_fixup_ll *head = fixup_list; ++ mo_fixup_ll *new_task = (mo_fixup_ll *)slapi_ch_calloc(1, sizeof(mo_fixup_ll)); ++ new_task->sdn = sdn; ++ new_task->filter_str = slapi_ch_strdup(filter); ++ new_task->next = head; ++ fixup_list = new_task; ++ } ++ PR_Unlock(fixup_lock); ++ ++ + /* setup our task data */ + slapi_pblock_get(pb, SLAPI_REQUESTOR_DN, &bind_dn); + mytaskdata = (task_data *)slapi_ch_malloc(sizeof(task_data)); +-- +2.38.1 + diff --git a/SOURCES/0004-Issue-5085-Race-condition-about-snmp-collator-at-sta.patch b/SOURCES/0004-Issue-5085-Race-condition-about-snmp-collator-at-sta.patch deleted file mode 100644 index d24d530..0000000 --- a/SOURCES/0004-Issue-5085-Race-condition-about-snmp-collator-at-sta.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 16536e5d306727761ffd10403f4762956f177147 Mon Sep 17 00:00:00 2001 -From: progier389 -Date: Wed, 5 Jan 2022 12:09:27 +0100 -Subject: [PATCH] Issue 5085 - Race condition about snmp collator at startup - (#5086) - ---- - ldap/servers/slapd/snmp_collator.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c -index 10a99475d..ed34d2ac4 100644 ---- a/ldap/servers/slapd/snmp_collator.c -+++ b/ldap/servers/slapd/snmp_collator.c -@@ -201,7 +201,7 @@ set_snmp_interaction_row(char *host, int port, int error) - - /* The interactions table is using the default (first) snmp_vars*/ - snmp_vars = g_get_first_thread_snmp_vars(&cookie); -- if (snmp_vars == NULL) -+ if (snmp_vars == NULL || interaction_table_mutex == NULL) - return; - - /* stevross: our servers don't have a concept of dsName as a distinguished name -@@ -856,7 +856,6 @@ snmp_update_cache_stats(void) - - if (search_result == 0) { - int cookie; -- uint64_t total; - struct snmp_vars_t *snmp_vars; - slapi_pblock_get(search_result_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, - &search_entries); --- -2.31.1 - diff --git a/SOURCES/0004-Issue-5505-Fix-compiler-warning-5506.patch b/SOURCES/0004-Issue-5505-Fix-compiler-warning-5506.patch new file mode 100644 index 0000000..4c67070 --- /dev/null +++ b/SOURCES/0004-Issue-5505-Fix-compiler-warning-5506.patch @@ -0,0 +1,28 @@ +From 7f0d007f3d15dec801acdaf3794f4e37db9c9875 Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Wed, 9 Nov 2022 09:49:47 +0000 +Subject: [PATCH 1/2] Issue 5505 - Fix compiler warning (#5506) + +relates: https://github.com/389ds/389-ds-base/issues/5505 + +Reviewed by: @Firstyear (Thanks) +--- + ldap/servers/plugins/retrocl/retrocl_trim.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c +index 37e5fbea7..d6b24c8bf 100644 +--- a/ldap/servers/plugins/retrocl/retrocl_trim.c ++++ b/ldap/servers/plugins/retrocl/retrocl_trim.c +@@ -23,7 +23,7 @@ typedef struct _trim_status + int ts_s_trimming; /* non-zero if trimming in progress */ + PRLock *ts_s_trim_mutex; /* protects ts_s_trimming */ + } trim_status; +-static trim_status ts = {0L, 0L, 0, 0, NULL}; ++static trim_status ts = {0}; + + /* + * All 
standard changeLogEntry attributes (initialized in get_cleattrs)
+--
+2.38.1
+
diff --git a/SOURCES/0005-Issue-5079-BUG-multiple-ways-to-specific-primary-508.patch b/SOURCES/0005-Issue-5079-BUG-multiple-ways-to-specific-primary-508.patch
deleted file mode 100644
index cb83187..0000000
--- a/SOURCES/0005-Issue-5079-BUG-multiple-ways-to-specific-primary-508.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From 2ee8d9d2ce8bf252287089d18e15b519f15e9538 Mon Sep 17 00:00:00 2001
-From: Firstyear
-Date: Thu, 6 Jan 2022 09:49:30 +1000
-Subject: [PATCH 1/5] Issue 5079 - BUG - multiple ways to specify primary
- (#5087)
-
-Bug Description: In a winsync environment, we can only sync
-changes to a primary replica. There are however, multiple
-ways to specify which server is a primary for a replication
-agreement, and I only accounted for one of them.
-
-Fix Description: Improve the check to account for the
-other primary replica flags.
-
-fixes: https://github.com/389ds/389-ds-base/issues/5079
-
-Author: William Brown
-
-Review by: @droideck
----
- ldap/servers/plugins/replication/repl5_agmt.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
-index 82efdcd15..a71343dec 100644
---- a/ldap/servers/plugins/replication/repl5_agmt.c
-+++ b/ldap/servers/plugins/replication/repl5_agmt.c
-@@ -482,7 +482,9 @@ agmt_new_from_entry(Slapi_Entry *e)
-
- /* DBDB: review this code */
- if (slapi_entry_attr_hasvalue(e, "objectclass", "nsDSWindowsReplicationAgreement")) {
-- if (replica && replica_get_type(replica) == REPLICA_TYPE_PRIMARY) {
-+ if (replica_get_type(replica) == REPLICA_TYPE_PRIMARY
-+ || (replica_get_type(replica) == REPLICA_TYPE_UPDATABLE && replica_is_flag_set(replica, REPLICA_LOG_CHANGES))
-+ ) {
- ra->agreement_type = REPLICA_TYPE_WINDOWS;
- windows_init_agreement_from_entry(ra, e);
- } else {
---
-2.37.1
-
diff --git a/SOURCES/0005-Issue-5565-Change-default-password-storage-scheme.patch b/SOURCES/0005-Issue-5565-Change-default-password-storage-scheme.patch
new file mode 100644
index 0000000..98c83b4
--- /dev/null
+++ b/SOURCES/0005-Issue-5565-Change-default-password-storage-scheme.patch
@@ -0,0 +1,85 @@
+From 1a192048a49fcdfa8bcfe79e2fa86153b339fac1 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds
+Date: Tue, 13 Dec 2022 17:00:28 -0500
+Subject: [PATCH 2/2] Issue 5565 - Change default password storage scheme
+
+Description: Because of replication we need to use a default storage scheme
+that works on 389-ds-base-1.3.10
+
+relates: https://github.com/389ds/389-ds-base/issues/5565
+
+Reviewed by: spichugi & firstyear(thanks!!)
+---
+ .../tests/suites/healthcheck/health_security_test.py | 8 ++++----
+ dirsrvtests/tests/suites/password/pwp_test.py | 2 +-
+ ldap/servers/slapd/pw.c | 3 ++-
+ src/lib389/lib389/config.py | 2 +-
+ 4 files changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/healthcheck/health_security_test.py b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
+index 519107365..d14b52c7a 100644
+--- a/dirsrvtests/tests/suites/healthcheck/health_security_test.py
++++ b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
+@@ -1,5 +1,5 @@
+ # --- BEGIN COPYRIGHT BLOCK ---
+-# Copyright (C) 2020 Red Hat, Inc.
++# Copyright (C) 2022 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+@@ -113,9 +113,9 @@ def test_healthcheck_insecure_pwd_hash_configured(topology_st):
+ standalone.config.set('passwordStorageScheme', 'SSHA512')
+ standalone.config.set('nsslapd-rootpwstoragescheme', 'SSHA512')
+ else:
+- log.info('Set passwordStorageScheme and nsslapd-rootpwstoragescheme to PBKDF2-SHA512')
+- standalone.config.set('passwordStorageScheme', 'PBKDF2-SHA512')
+- standalone.config.set('nsslapd-rootpwstoragescheme', 'PBKDF2-SHA512')
++ log.info('Set passwordStorageScheme and nsslapd-rootpwstoragescheme to PBKDF2_SHA256')
++ standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256')
++ standalone.config.set('nsslapd-rootpwstoragescheme', 'PBKDF2_SHA256')
+
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
+diff --git a/dirsrvtests/tests/suites/password/pwp_test.py b/dirsrvtests/tests/suites/password/pwp_test.py
+index ce45bc364..190881222 100644
+--- a/dirsrvtests/tests/suites/password/pwp_test.py
++++ b/dirsrvtests/tests/suites/password/pwp_test.py
+@@ -27,7 +27,7 @@ else:
+ if is_fips():
+ DEFAULT_PASSWORD_STORAGE_SCHEME = 'SSHA512'
+ else:
+- DEFAULT_PASSWORD_STORAGE_SCHEME = 'PBKDF2-SHA512'
++ DEFAULT_PASSWORD_STORAGE_SCHEME = 'PBKDF2_SHA256'
+
+
+ def _create_user(topo, uid, cn, uidNumber, userpassword):
+diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
+index 825498858..566ba87dd 100644
+--- a/ldap/servers/slapd/pw.c
++++ b/ldap/servers/slapd/pw.c
+@@ -280,7 +280,8 @@ pw_name2scheme(char *name)
+ } else {
+ /* if not, let's setup pbkdf2 */
+ #ifdef RUST_ENABLE
+- char *pbkdf = "PBKDF2-SHA512";
++ /* until 1.3.10 supports Rust hashers we can't use PBKDF2-SHA512 by default */
++ char *pbkdf = "PBKDF2_SHA256";
+ #else
+ char *pbkdf = "PBKDF2_SHA256";
+ #endif
+diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py
+index c7abdf778..c178eb02f 100644
+--- a/src/lib389/lib389/config.py
++++ b/src/lib389/lib389/config.py
+@@ -209,7 +209,7 @@ class Config(DSLdapObject):
+ yield report
+
+ def _lint_passwordscheme(self):
+- allowed_schemes = ['SSHA512', 'PBKDF2-SHA512', 'GOST_YESCRYPT']
++ allowed_schemes = ['SSHA512', 'PBKDF2_SHA256', 'GOST_YESCRYPT']
+ u_password_scheme = self.get_attr_val_utf8('passwordStorageScheme')
+ u_root_scheme = self.get_attr_val_utf8('nsslapd-rootpwstoragescheme')
+ if u_root_scheme not in allowed_schemes or u_password_scheme not in allowed_schemes:
+--
+2.38.1
+
diff --git a/SOURCES/0006-Issue-3903-Supplier-should-do-periodic-updates.patch b/SOURCES/0006-Issue-3903-Supplier-should-do-periodic-updates.patch
deleted file mode 100644
index f3df492..0000000
--- a/SOURCES/0006-Issue-3903-Supplier-should-do-periodic-updates.patch
+++ /dev/null
@@ -1,873 +0,0 @@
-From e65d6225398901c3319e72a460bc58e5d50df67c Mon Sep 17 00:00:00 2001
-From: Mark Reynolds
-Date: Wed, 3 Aug 2022 16:27:15 -0400
-Subject: [PATCH 2/5] Issue 3903 - Supplier should do periodic updates
-
-Description:
-
-On suppliers, update the keep alive entry periodically to keep the RUV up
-to date in case a replica is neglected for a long time. This prevents
-very long changelog scans when finally processing updates.
-
-relates: https://github.com/389ds/389-ds-base/issues/3903
-
-Reviewed by: firstyear & tbordaz(Thanks!)
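The behavior this (now removed) patch introduced is easiest to see from the client side. Below is a rough lib389 sketch, modeled on the regression test in the hunks that follow; the attribute name and the 60-second minimum come from this patch, while the running supplier instance and a replicated topology are assumed.

    import time
    import ldap
    from lib389._constants import DEFAULT_SUFFIX
    from lib389.replica import Replicas

    def check_keepalive_refresh(supplier):
        # Lower the update interval to its enforced floor of 60 seconds
        # (REPLICA_KEEPALIVE_UPDATE_INTERVAL_MIN in the repl5.h hunk below).
        replica = Replicas(supplier).get(DEFAULT_SUFFIX)
        replica.replace('nsds5ReplicaKeepAliveUpdateInterval', '60')
        supplier.restart()

        def stamps():
            # Keep alive entries are ldapsubentries directly under the suffix.
            entries = supplier.search_s(
                replica.get_suffix(), ldap.SCOPE_ONELEVEL,
                "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
                ['keepalivetimestamp'])
            return [str(e.data['keepalivetimestamp']) for e in entries]

        before = stamps()
        time.sleep(61)  # let one full update interval elapse
        assert stamps() != before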
---- - .../suites/replication/regression_m2_test.py | 96 +++++-------- - .../suites/replication/replica_config_test.py | 6 +- - ldap/schema/01core389.ldif | 3 +- - ldap/servers/plugins/replication/repl5.h | 11 +- - .../plugins/replication/repl5_inc_protocol.c | 44 +----- - .../plugins/replication/repl5_replica.c | 127 +++++++++++++----- - .../replication/repl5_replica_config.c | 12 ++ - .../plugins/replication/repl5_tot_protocol.c | 4 +- - ldap/servers/plugins/replication/repl_extop.c | 2 +- - .../plugins/replication/repl_globals.c | 1 + - .../src/lib/replication/replConfig.jsx | 32 ++++- - src/cockpit/389-console/src/replication.jsx | 6 + - src/lib389/lib389/cli_conf/replication.py | 6 +- - 13 files changed, 202 insertions(+), 148 deletions(-) - -diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py -index 466e3c2c0..7dd0f2984 100644 ---- a/dirsrvtests/tests/suites/replication/regression_m2_test.py -+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py -@@ -14,6 +14,7 @@ import ldif - import ldap - import pytest - import subprocess -+import time - from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts - from lib389.pwpolicy import PwPolicyManager - from lib389.utils import * -@@ -204,12 +205,12 @@ def rename_entry(server, idx, ou_name, new_parent): - def add_ldapsubentry(server, parent): - pwp = PwPolicyManager(server) - policy_props = {'passwordStorageScheme': 'ssha', -- 'passwordCheckSyntax': 'on', -- 'passwordInHistory': '6', -- 'passwordChange': 'on', -- 'passwordMinAge': '0', -- 'passwordExp': 'off', -- 'passwordMustChange': 'off',} -+ 'passwordCheckSyntax': 'on', -+ 'passwordInHistory': '6', -+ 'passwordChange': 'on', -+ 'passwordMinAge': '0', -+ 'passwordExp': 'off', -+ 'passwordMustChange': 'off',} - log.info('Create password policy for subtree {}'.format(parent)) - pwp.create_subtree_policy(parent, policy_props) - -@@ -742,7 +743,7 @@ def get_keepalive_entries(instance, replica): - try: - entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL, - "(&(objectclass=ldapsubentry)(cn=repl keep alive*))", -- ['cn', 'nsUniqueId', 'modifierTimestamp']) -+ ['cn', 'keepalivetimestamp', 'nsUniqueId', 'modifierTimestamp']) - except ldap.LDAPError as e: - log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e))) - assert False -@@ -761,6 +762,7 @@ def verify_keepalive_entries(topo, expected): - # (for example after: db2ldif / demote a supplier / ldif2db / init other suppliers) - # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries - # should be done. 
-+ entries = [] - for supplierId in topo.ms: - supplier = topo.ms[supplierId] - for replica in Replicas(supplier).list(): -@@ -771,6 +773,7 @@ def verify_keepalive_entries(topo, expected): - keepaliveEntries = get_keepalive_entries(supplier, replica); - expectedCount = len(topo.ms) if expected else 0 - foundCount = len(keepaliveEntries) -+ entries += keepaliveEntries - if (foundCount == expectedCount): - log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.') - else: -@@ -778,70 +781,45 @@ def verify_keepalive_entries(topo, expected): - f'while {expectedCount} were expected on {replica_info}.') - assert False - -+ return entries -+ - --def test_online_init_should_create_keepalive_entries(topo_m2): -- """Check that keep alive entries are created when initializinf a supplier from another one -+def test_keepalive_entries(topo_m2): -+ """Check that keep alive entries are created - - :id: d5940e71-d18a-4b71-aaf7-b9185361fffe - :setup: Two suppliers replication setup - :steps: -- 1. Generate ldif without replication data -- 2 Init both suppliers from that ldif -- 3 Check that keep alive entries does not exists -- 4 Perform on line init of supplier2 from supplier1 -- 5 Check that keep alive entries exists -+ 1. Keep alives entries are present -+ 2. Keep alive entries are updated every 60 seconds - :expectedresults: -- 1. No error while generating ldif -- 2. No error while importing the ldif file -- 3. No keepalive entrie should exists on any suppliers -- 4. No error while initializing supplier2 -- 5. All keepalive entries should exist on every suppliers -+ 1. Success -+ 2. Success - - """ - -- repl = ReplicationManager(DEFAULT_SUFFIX) -- m1 = topo_m2.ms["supplier1"] -- m2 = topo_m2.ms["supplier2"] -- # Step 1: Generate ldif without replication data -- m1.stop() -- m2.stop() -- ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() -- m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], -- excludeSuffixes=None, repl_data=False, -- outputfile=ldif_file, encrypt=False) -- # Remove replication metadata that are still in the ldif -- _remove_replication_data(ldif_file) -- -- # Step 2: Init both suppliers from that ldif -- m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -- m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -- m1.start() -- m2.start() -- -- """ Replica state is now as if CLI setup has been done using: -- dsconf supplier1 replication enable --suffix "${SUFFIX}" --role supplier -- dsconf supplier2 replication enable --suffix "${SUFFIX}" --role supplier -- dsconf supplier1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}" -- dsconf supplier2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}" -- dsconf supplier1 repl-agmt create --suffix "${SUFFIX}" -- dsconf supplier2 repl-agmt create --suffix "${SUFFIX}" -- """ -+ # default interval is 1 hour, too long for test, set it to the minimum of -+ # 60 seconds -+ for supplierId in topo_m2.ms: -+ supplier = topo_m2.ms[supplierId] -+ replica = Replicas(supplier).get(DEFAULT_SUFFIX) -+ replica.replace('nsds5ReplicaKeepAliveUpdateInterval', '60') -+ supplier.restart() - -- # Step 3: No keepalive entrie should exists on any suppliers -- verify_keepalive_entries(topo_m2, False) -+ # verify entries exist -+ entries = verify_keepalive_entries(topo_m2, True); - -- # Step 4: Perform on line init of supplier2 from supplier1 -- agmt = Agreements(m1).list()[0] -- agmt.begin_reinit() -- (done, error) = 
agmt.wait_reinit() -- assert done is True -- assert error is False -+ # Get current time from keep alive entry -+ keep_alive_s1 = str(entries[0].data['keepalivetimestamp']) -+ keep_alive_s2 = str(entries[1].data['keepalivetimestamp']) -+ -+ # Wait for event interval (60 secs) to pass -+ time.sleep(61) - -- # Step 5: All keepalive entries should exists on every suppliers -- # Verify the keep alive entry once replication is in sync -- # (that is the step that fails when bug is not fixed) -- repl.wait_for_ruv(m2,m1) -- verify_keepalive_entries(topo_m2, True); -+ # Check keep alives entries have been updated -+ entries = verify_keepalive_entries(topo_m2, True); -+ assert keep_alive_s1 != str(entries[0].data['keepalivetimestamp']) -+ assert keep_alive_s2 != str(entries[1].data['keepalivetimestamp']) - - - @pytest.mark.ds49915 -diff --git a/dirsrvtests/tests/suites/replication/replica_config_test.py b/dirsrvtests/tests/suites/replication/replica_config_test.py -index c2140a2ac..06ae5afcf 100644 ---- a/dirsrvtests/tests/suites/replication/replica_config_test.py -+++ b/dirsrvtests/tests/suites/replication/replica_config_test.py -@@ -50,7 +50,8 @@ repl_add_attrs = [('nsDS5ReplicaType', '-1', '4', overflow, notnum, '1'), - ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'), - ('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'), - ('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'), -- ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')] -+ ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6'), -+ ('nsds5ReplicaKeepAliveUpdateInterval', '59', too_big, overflow, notnum, '60'),] - - repl_mod_attrs = [('nsDS5Flags', '-1', '2', overflow, notnum, '1'), - ('nsds5ReplicaPurgeDelay', '-2', too_big, overflow, notnum, '1'), -@@ -59,7 +60,8 @@ repl_mod_attrs = [('nsDS5Flags', '-1', '2', overflow, notnum, '1'), - ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'), - ('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'), - ('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'), -- ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')] -+ ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6'), -+ ('nsds5ReplicaKeepAliveUpdateInterval', '59', too_big, overflow, notnum, '60'),] - - agmt_attrs = [ - ('nsds5ReplicaPort', '0', '65535', overflow, notnum, '389'), -diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif -index 0c73e5114..7a9598730 100644 ---- a/ldap/schema/01core389.ldif -+++ b/ldap/schema/01core389.ldif -@@ -327,6 +327,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2371 NAME 'nsDS5ReplicaBootstrapBindDN' - attributeTypes: ( 2.16.840.1.113730.3.1.2372 NAME 'nsDS5ReplicaBootstrapCredentials' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.2373 NAME 'nsDS5ReplicaBootstrapBindMethod' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.2374 NAME 'nsDS5ReplicaBootstrapTransportInfo' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) -+attributeTypes: ( 2.16.840.1.113730.3.1.2390 NAME 'nsds5ReplicaKeepAliveUpdateInterval' DESC '389 defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) - # - # objectclasses - 
# -@@ -336,7 +337,7 @@ objectClasses: ( 2.16.840.1.113730.3.2.44 NAME 'nsIndex' DESC 'Netscape defined - objectClasses: ( 2.16.840.1.113730.3.2.109 NAME 'nsBackendInstance' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' ) - objectClasses: ( 2.16.840.1.113730.3.2.110 NAME 'nsMappingTree' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' ) - objectClasses: ( 2.16.840.1.113730.3.2.104 NAME 'nsContainer' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' ) --objectClasses: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Replication configuration objectclass' SUP top MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY (cn $ nsds5ReplicaPreciseTombstonePurging $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsDS5ReplicaBindDNGroup $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ nsds5ReplicaChangeCount $ nsds5ReplicaLegacyConsumer $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaBackoffMin $ nsds5ReplicaBackoffMax $ nsds5ReplicaReleaseTimeout $ nsDS5ReplicaBindDnGroupCheckInterval ) X-ORIGIN 'Netscape Directory Server' ) -+objectClasses: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Replication configuration objectclass' SUP top MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY (cn $ nsds5ReplicaPreciseTombstonePurging $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsDS5ReplicaBindDNGroup $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ nsds5ReplicaChangeCount $ nsds5ReplicaLegacyConsumer $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaBackoffMin $ nsds5ReplicaBackoffMax $ nsds5ReplicaReleaseTimeout $ nsDS5ReplicaBindDnGroupCheckInterval $ nsds5ReplicaKeepAliveUpdateInterval ) X-ORIGIN 'Netscape Directory Server' ) - objectClasses: ( 2.16.840.1.113730.3.2.113 NAME 'nsTombstone' DESC 'Netscape defined objectclass' SUP top MAY ( nstombstonecsn $ nsParentUniqueId $ nscpEntryDN ) X-ORIGIN 'Netscape Directory Server' ) - objectClasses: ( 2.16.840.1.113730.3.2.103 NAME 'nsDS5ReplicationAgreement' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsds5ReplicaCleanRUVNotified $ nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicatedAttributeListTotal $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5ReplicaEnabled $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5ReplicaStripAttrs $ nsds5replicaSessionPauseTime $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaFlowControlWindow $ nsds5ReplicaFlowControlPause $ nsDS5ReplicaWaitForAsyncResults $ nsds5ReplicaIgnoreMissingChange $ nsDS5ReplicaBootstrapBindDN $ nsDS5ReplicaBootstrapCredentials $ nsDS5ReplicaBootstrapBindMethod $ nsDS5ReplicaBootstrapTransportInfo ) X-ORIGIN 'Netscape Directory Server' ) - objectClasses: ( 
2.16.840.1.113730.3.2.39 NAME 'nsslapdConfig' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' ) -diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h -index 06e747811..c2fbff8c0 100644 ---- a/ldap/servers/plugins/replication/repl5.h -+++ b/ldap/servers/plugins/replication/repl5.h -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2020 Red Hat, Inc. -+ * Copyright (C) 2022 Red Hat, Inc. - * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. - * All rights reserved. - * -@@ -120,6 +120,8 @@ - #define PROTOCOL_STATUS_TOTAL_SENDING_DATA 711 - - #define DEFAULT_PROTOCOL_TIMEOUT 120 -+#define DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL 3600 -+#define REPLICA_KEEPALIVE_UPDATE_INTERVAL_MIN 60 - - /* To Allow Consumer Initialization when adding an agreement - */ - #define STATE_PERFORMING_TOTAL_UPDATE 501 -@@ -162,6 +164,7 @@ extern const char *type_nsds5ReplicaBootstrapBindDN; - extern const char *type_nsds5ReplicaBootstrapCredentials; - extern const char *type_nsds5ReplicaBootstrapBindMethod; - extern const char *type_nsds5ReplicaBootstrapTransportInfo; -+extern const char *type_replicaKeepAliveUpdateInterval; - - /* Attribute names for windows replication agreements */ - extern const char *type_nsds7WindowsReplicaArea; -@@ -677,8 +680,8 @@ Replica *windows_replica_new(const Slapi_DN *root); - during addition of the replica over LDAP */ - int replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, Replica **r); - void replica_destroy(void **arg); --int replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid); --int replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid); -+void replica_subentry_update(time_t when, void *arg); -+int replica_subentry_check(const char *repl_root, ReplicaId rid); - PRBool replica_get_exclusive_access(Replica *r, PRBool *isInc, uint64_t connid, int opid, const char *locking_purl, char **current_purl); - void replica_relinquish_exclusive_access(Replica *r, uint64_t connid, int opid); - PRBool replica_get_tombstone_reap_active(const Replica *r); -@@ -739,6 +742,8 @@ void consumer5_set_mapping_tree_state_for_replica(const Replica *r, RUV *supplie - Replica *replica_get_for_backend(const char *be_name); - void replica_set_purge_delay(Replica *r, uint32_t purge_delay); - void replica_set_tombstone_reap_interval(Replica *r, long interval); -+void replica_set_keepalive_update_interval(Replica *r, int64_t interval); -+int64_t replica_get_keepalive_update_interval(Replica *r); - void replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv); - Slapi_Entry *get_in_memory_ruv(Slapi_DN *suffix_sdn); - int replica_write_ruv(Replica *r); -diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c -index 4bb384882..846951b9e 100644 ---- a/ldap/servers/plugins/replication/repl5_inc_protocol.c -+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2020 Red Hat, Inc. -+ * Copyright (C) 2022 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). 
-@@ -1677,13 +1677,9 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu - } else { - ConnResult replay_crc; - Replica *replica = prp->replica; -- PRBool subentry_update_needed = PR_FALSE; - PRUint64 release_timeout = replica_get_release_timeout(replica); - char csn_str[CSN_STRSIZE]; -- int skipped_updates = 0; -- int fractional_repl; - int finished = 0; --#define FRACTIONAL_SKIPPED_THRESHOLD 100 - - /* Start the results reading thread */ - rd = repl5_inc_rd_new(prp); -@@ -1700,7 +1696,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu - - memset((void *)&op, 0, sizeof(op)); - entry.op = &op; -- fractional_repl = agmt_is_fractional(prp->agmt); - do { - cl5_operation_parameters_done(entry.op); - memset((void *)entry.op, 0, sizeof(op)); -@@ -1781,14 +1776,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu - replica_id = csn_get_replicaid(entry.op->csn); - uniqueid = entry.op->target_address.uniqueid; - -- if (fractional_repl && message_id) { -- /* This update was sent no need to update the subentry -- * and restart counting the skipped updates -- */ -- subentry_update_needed = PR_FALSE; -- skipped_updates = 0; -- } -- - if (prp->repl50consumer && message_id) { - int operation, error = 0; - -@@ -1816,15 +1803,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu - agmt_get_long_name(prp->agmt), - entry.op->target_address.uniqueid, csn_str); - agmt_inc_last_update_changecount(prp->agmt, csn_get_replicaid(entry.op->csn), 1 /*skipped*/); -- if (fractional_repl) { -- skipped_updates++; -- if (skipped_updates > FRACTIONAL_SKIPPED_THRESHOLD) { -- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -- "send_updates - %s: skipped updates is too high (%d) if no other update is sent we will update the subentry\n", -- agmt_get_long_name(prp->agmt), skipped_updates); -- subentry_update_needed = PR_TRUE; -- } -- } - } - } - break; -@@ -1906,26 +1884,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu - PR_Unlock(rd->lock); - } while (!finished); - -- if (fractional_repl && subentry_update_needed) { -- ReplicaId rid = -1; /* Used to create the replica keep alive subentry */ -- Slapi_DN *replarea_sdn = NULL; -- -- if (replica) { -- rid = replica_get_rid(replica); -- } -- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -- "send_updates - %s: skipped updates was definitely too high (%d) update the subentry now\n", -- agmt_get_long_name(prp->agmt), skipped_updates); -- replarea_sdn = agmt_get_replarea(prp->agmt); -- if (!replarea_sdn) { -- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, -- "send_updates - Unknown replication area due to agreement not found."); -- agmt_set_last_update_status(prp->agmt, 0, -1, "Agreement is corrupted: missing suffix"); -- return_value = UPDATE_FATAL_ERROR; -- } else { -- replica_subentry_update(replarea_sdn, rid); -- } -- } - /* Terminate the results reading thread */ - if (!prp->repl50consumer) { - /* We need to ensure that we wait until all the responses have been received from our operations */ -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index 3bd57647f..ded4cf754 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. 
-+ * Copyright (C) 2022 Red Hat, Inc.
- * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
- * All rights reserved.
- *
-@@ -22,7 +22,6 @@
- #include "slap.h"
-
- #define RUV_SAVE_INTERVAL (30 * 1000) /* 30 seconds */
--
- #define REPLICA_RDN "cn=replica"
-
- /*
-@@ -48,6 +47,7 @@ struct replica
- PRMonitor *repl_lock; /* protects entire structure */
- Slapi_Eq_Context repl_eqcxt_rs; /* context to cancel event that saves ruv */
- Slapi_Eq_Context repl_eqcxt_tr; /* context to cancel event that reaps tombstones */
-+ Slapi_Eq_Context repl_eqcxt_ka_update; /* keep-alive entry update event */
- Object *repl_csngen; /* CSN generator for this replica */
- PRBool repl_csn_assigned; /* Flag set when new csn is assigned. */
- int64_t repl_purge_delay; /* When purgeable, CSNs are held on to for this many extra seconds */
-@@ -66,6 +66,7 @@ struct replica
- uint64_t agmt_count; /* Number of agmts */
- Slapi_Counter *release_timeout; /* The amount of time to wait before releasing active replica */
- uint64_t abort_session; /* Abort the current replica session */
-+ int64_t keepalive_update_interval; /* interval to do dummy update to keep RUV fresh */
- };
-
-
-@@ -133,8 +134,8 @@ replica_new(const Slapi_DN *root)
- &r);
-
- if (NULL == r) {
-- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_new - "
-- "Unable to configure replica %s: %s\n",
-+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
-+ "replica_new - Unable to configure replica %s: %s\n",
- slapi_sdn_get_dn(root), errorbuf);
- }
- slapi_entry_free(e);
-@@ -232,7 +233,15 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation,
- In that case the updated would fail but nothing bad would happen. The next
- scheduled update would save the state */
- r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name,
-- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL);
-+ slapi_current_rel_time_t() + START_UPDATE_DELAY,
-+ RUV_SAVE_INTERVAL);
-+
-+ /* create supplier update event */
-+ if (r->repl_eqcxt_ka_update == NULL && replica_get_type(r) == REPLICA_TYPE_UPDATABLE) {
-+ r->repl_eqcxt_ka_update = slapi_eq_repeat_rel(replica_subentry_update, r,
-+ slapi_current_rel_time_t() + START_UPDATE_DELAY,
-+ replica_get_keepalive_update_interval(r));
-+ }
-
- if (r->tombstone_reap_interval > 0) {
- /*
-@@ -302,6 +311,11 @@ replica_destroy(void **arg)
- * and ruv updates.
- */ - -+ if (r->repl_eqcxt_ka_update) { -+ slapi_eq_cancel_rel(r->repl_eqcxt_ka_update); -+ r->repl_eqcxt_ka_update = NULL; -+ } -+ - if (r->repl_eqcxt_rs) { - slapi_eq_cancel_rel(r->repl_eqcxt_rs); - r->repl_eqcxt_rs = NULL; -@@ -393,7 +407,7 @@ replica_destroy(void **arg) - - - static int --replica_subentry_create(Slapi_DN *repl_root, ReplicaId rid) -+replica_subentry_create(const char *repl_root, ReplicaId rid) - { - char *entry_string = NULL; - Slapi_Entry *e = NULL; -@@ -402,7 +416,7 @@ replica_subentry_create(Slapi_DN *repl_root, ReplicaId rid) - int rc = 0; - - entry_string = slapi_ch_smprintf("dn: cn=%s %d,%s\nobjectclass: top\nobjectclass: ldapsubentry\nobjectclass: extensibleObject\ncn: %s %d", -- KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), KEEP_ALIVE_ENTRY, rid); -+ KEEP_ALIVE_ENTRY, rid, repl_root, KEEP_ALIVE_ENTRY, rid); - if (entry_string == NULL) { - slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, - "replica_subentry_create - Failed in slapi_ch_smprintf\n"); -@@ -441,7 +455,7 @@ done: - } - - int --replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid) -+replica_subentry_check(const char *repl_root, ReplicaId rid) - { - Slapi_PBlock *pb; - char *filter = NULL; -@@ -451,7 +465,7 @@ replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid) - - pb = slapi_pblock_new(); - filter = slapi_ch_smprintf("(&(objectclass=ldapsubentry)(cn=%s %d))", KEEP_ALIVE_ENTRY, rid); -- slapi_search_internal_set_pb(pb, slapi_sdn_get_dn(repl_root), LDAP_SCOPE_ONELEVEL, -+ slapi_search_internal_set_pb(pb, repl_root, LDAP_SCOPE_ONELEVEL, - filter, NULL, 0, NULL, NULL, - repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0); - slapi_search_internal_pb(pb); -@@ -460,17 +474,19 @@ replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid) - slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries); - if (entries && (entries[0] == NULL)) { - slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name, -- "replica_subentry_check - Need to create replication keep alive entry \n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root)); -+ "replica_subentry_check - Need to create replication keep alive entry \n", -+ KEEP_ALIVE_ENTRY, rid, repl_root); - rc = replica_subentry_create(repl_root, rid); - } else { - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -- "replica_subentry_check - replication keep alive entry already exists\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root)); -+ "replica_subentry_check - replication keep alive entry already exists\n", -+ KEEP_ALIVE_ENTRY, rid, repl_root); - rc = 0; - } - } else { - slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, - "replica_subentry_check - Error accessing replication keep alive entry res=%d\n", -- KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), res); -+ KEEP_ALIVE_ENTRY, rid, repl_root, res); - /* The status of the entry is not clear, do not attempt to create it */ - rc = 1; - } -@@ -481,60 +497,59 @@ replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid) - return rc; - } - --int --replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid) -+void -+replica_subentry_update(time_t when __attribute__((unused)), void *arg) - { -- int ldrc; -- int rc = LDAP_SUCCESS; /* Optimistic default */ -+ Slapi_PBlock *modpb = NULL; -+ Replica *replica = (Replica *)arg; -+ ReplicaId rid; - LDAPMod *mods[2]; - LDAPMod mod; - struct berval *vals[2]; -- char buf[SLAPI_TIMESTAMP_BUFSIZE]; - struct berval val; -- Slapi_PBlock *modpb = NULL; -- char *dn; -+ const char *repl_root = NULL; -+ char buf[SLAPI_TIMESTAMP_BUFSIZE]; -+ char *dn = NULL; -+ int 
ldrc = 0; - -+ rid = replica_get_rid(replica); -+ repl_root = slapi_ch_strdup(slapi_sdn_get_dn(replica_get_root(replica))); - replica_subentry_check(repl_root, rid); - - slapi_timestamp_utc_hr(buf, SLAPI_TIMESTAMP_BUFSIZE); -- -- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "subentry_update called at %s\n", buf); -- -- -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "replica_subentry_update called at %s\n", buf); - val.bv_val = buf; - val.bv_len = strlen(val.bv_val); -- - vals[0] = &val; - vals[1] = NULL; - - mod.mod_op = LDAP_MOD_REPLACE | LDAP_MOD_BVALUES; - mod.mod_type = KEEP_ALIVE_ATTR; - mod.mod_bvalues = vals; -- - mods[0] = &mod; - mods[1] = NULL; - - modpb = slapi_pblock_new(); -- dn = slapi_ch_smprintf(KEEP_ALIVE_DN_FORMAT, KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root)); -- -+ dn = slapi_ch_smprintf(KEEP_ALIVE_DN_FORMAT, KEEP_ALIVE_ENTRY, rid, repl_root); - slapi_modify_internal_set_pb(modpb, dn, mods, NULL, NULL, - repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0); - slapi_modify_internal_pb(modpb); -- - slapi_pblock_get(modpb, SLAPI_PLUGIN_INTOP_RESULT, &ldrc); -- - if (ldrc != LDAP_SUCCESS) { - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -- "Failure (%d) to update replication keep alive entry \"%s: %s\"\n", ldrc, KEEP_ALIVE_ATTR, buf); -- rc = ldrc; -+ "replica_subentry_update - " -+ "Failure (%d) to update replication keep alive entry \"%s: %s\"\n", -+ ldrc, KEEP_ALIVE_ATTR, buf); - } else { - slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name, -- "Successful update of replication keep alive entry \"%s: %s\"\n", KEEP_ALIVE_ATTR, buf); -+ "replica_subentry_update - " -+ "Successful update of replication keep alive entry \"%s: %s\"\n", -+ KEEP_ALIVE_ATTR, buf); - } - - slapi_pblock_destroy(modpb); -+ slapi_ch_free_string((char **)&repl_root); - slapi_ch_free_string(&dn); -- return rc; - } - /* - * Attempt to obtain exclusive access to replica (advisory only) -@@ -1512,7 +1527,15 @@ replica_set_enabled(Replica *r, PRBool enable) - if (r->repl_eqcxt_rs == NULL) /* event is not already registered */ - { - r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name, -- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); -+ slapi_current_rel_time_t() + START_UPDATE_DELAY, -+ RUV_SAVE_INTERVAL); -+ -+ } -+ /* create supplier update event */ -+ if (r->repl_eqcxt_ka_update == NULL && replica_get_type(r) == REPLICA_TYPE_UPDATABLE) { -+ r->repl_eqcxt_ka_update = slapi_eq_repeat_rel(replica_subentry_update, r, -+ slapi_current_rel_time_t() + START_UPDATE_DELAY, -+ replica_get_keepalive_update_interval(r)); - } - } else /* disable */ - { -@@ -1521,6 +1544,11 @@ replica_set_enabled(Replica *r, PRBool enable) - slapi_eq_cancel_rel(r->repl_eqcxt_rs); - r->repl_eqcxt_rs = NULL; - } -+ /* Remove supplier update event */ -+ if (replica_get_type(r) == REPLICA_TYPE_PRIMARY) { -+ slapi_eq_cancel_rel(r->repl_eqcxt_ka_update); -+ r->repl_eqcxt_ka_update = NULL; -+ } - } - - replica_unlock(r->repl_lock); -@@ -2119,6 +2147,17 @@ _replica_init_from_config(Replica *r, Slapi_Entry *e, char *errortext) - r->tombstone_reap_interval = 3600 * 24; /* One week, in seconds */ - } - -+ if ((val = (char*)slapi_entry_attr_get_ref(e, type_replicaKeepAliveUpdateInterval))) { -+ if (repl_config_valid_num(type_replicaKeepAliveUpdateInterval, val, REPLICA_KEEPALIVE_UPDATE_INTERVAL_MIN, -+ INT_MAX, &rc, errormsg, &interval) != 0) -+ { -+ return LDAP_UNWILLING_TO_PERFORM; -+ } -+ r->keepalive_update_interval = interval; -+ } else { -+ r->keepalive_update_interval = 
DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL; -+ } -+ - r->tombstone_reap_stop = r->tombstone_reap_active = PR_FALSE; - - /* No supplier holding the replica */ -@@ -3646,6 +3685,26 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) - replica_unlock(r->repl_lock); - } - -+void -+replica_set_keepalive_update_interval(Replica *r, int64_t interval) -+{ -+ replica_lock(r->repl_lock); -+ r->keepalive_update_interval = interval; -+ replica_unlock(r->repl_lock); -+} -+ -+int64_t -+replica_get_keepalive_update_interval(Replica *r) -+{ -+ int64_t interval = DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL; -+ -+ replica_lock(r->repl_lock); -+ interval = r->keepalive_update_interval; -+ replica_unlock(r->repl_lock); -+ -+ return interval; -+} -+ - static void - replica_strip_cleaned_rids(Replica *r) - { -diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c -index 2c6d74b13..aea2cf506 100644 ---- a/ldap/servers/plugins/replication/repl5_replica_config.c -+++ b/ldap/servers/plugins/replication/repl5_replica_config.c -@@ -438,6 +438,9 @@ replica_config_modify(Slapi_PBlock *pb, - } else if (strcasecmp(config_attr, type_replicaBackoffMax) == 0) { - if (apply_mods) - replica_set_backoff_max(r, PROTOCOL_BACKOFF_MAXIMUM); -+ } else if (strcasecmp(config_attr, type_replicaKeepAliveUpdateInterval) == 0) { -+ if (apply_mods) -+ replica_set_keepalive_update_interval(r, DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL); - } else if (strcasecmp(config_attr, type_replicaPrecisePurge) == 0) { - if (apply_mods) - replica_set_precise_purging(r, 0); -@@ -472,6 +475,15 @@ replica_config_modify(Slapi_PBlock *pb, - } else { - break; - } -+ } else if (strcasecmp(config_attr, type_replicaKeepAliveUpdateInterval) == 0) { -+ int64_t interval = DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL; -+ if (repl_config_valid_num(config_attr, config_attr_value, REPLICA_KEEPALIVE_UPDATE_INTERVAL_MIN, -+ INT_MAX, returncode, errortext, &interval) == 0) -+ { -+ replica_set_keepalive_update_interval(r, interval); -+ } else { -+ break; -+ } - } else if (strcasecmp(config_attr, attr_replicaType) == 0) { - int64_t rtype; - slapi_ch_free_string(&new_repl_type); -diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c -index f67263c3e..4b2064912 100644 ---- a/ldap/servers/plugins/replication/repl5_tot_protocol.c -+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c -@@ -510,7 +510,7 @@ retry: - if (prp->replica) { - rid = replica_get_rid(prp->replica); - } -- replica_subentry_check(area_sdn, rid); -+ replica_subentry_check(slapi_sdn_get_dn(area_sdn), rid); - - /* Send the subtree of the suffix in the order of parentid index plus ldapsubentry and nstombstone. 
*/ - check_suffix_entryID(be, suffix); -@@ -531,7 +531,7 @@ retry: - if (prp->replica) { - rid = replica_get_rid(prp->replica); - } -- replica_subentry_check(area_sdn, rid); -+ replica_subentry_check(slapi_sdn_get_dn(area_sdn), rid); - - slapi_search_internal_set_pb(pb, slapi_sdn_get_dn(area_sdn), - LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL, -diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c -index ef2025dd9..8b178610b 100644 ---- a/ldap/servers/plugins/replication/repl_extop.c -+++ b/ldap/servers/plugins/replication/repl_extop.c -@@ -1176,7 +1176,7 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb) - /* now that the changelog is open and started, we can alos cretae the - * keep alive entry without risk that db and cl will not match - */ -- replica_subentry_check((Slapi_DN *)replica_get_root(r), replica_get_rid(r)); -+ replica_subentry_check(slapi_sdn_get_dn(replica_get_root(r)), replica_get_rid(r)); - } - - /* ONREPL code that dealt with new RUV, etc was moved into the code -diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c -index 000777fdd..797ca957f 100644 ---- a/ldap/servers/plugins/replication/repl_globals.c -+++ b/ldap/servers/plugins/replication/repl_globals.c -@@ -89,6 +89,7 @@ const char *type_replicaReleaseTimeout = "nsds5ReplicaReleaseTimeout"; - const char *type_replicaBackoffMin = "nsds5ReplicaBackoffMin"; - const char *type_replicaBackoffMax = "nsds5ReplicaBackoffMax"; - const char *type_replicaPrecisePurge = "nsds5ReplicaPreciseTombstonePurging"; -+const char *type_replicaKeepAliveUpdateInterval = "nsds5ReplicaKeepAliveUpdateInterval"; - - /* Attribute names for replication agreement attributes */ - const char *type_nsds5ReplicaHost = "nsds5ReplicaHost"; -diff --git a/src/cockpit/389-console/src/lib/replication/replConfig.jsx b/src/cockpit/389-console/src/lib/replication/replConfig.jsx -index 1f0dc3ec5..3dffb8f1a 100644 ---- a/src/cockpit/389-console/src/lib/replication/replConfig.jsx -+++ b/src/cockpit/389-console/src/lib/replication/replConfig.jsx -@@ -48,6 +48,7 @@ export class ReplConfig extends React.Component { - nsds5replicaprotocoltimeout: Number(this.props.data.nsds5replicaprotocoltimeout) == 0 ? 120 : Number(this.props.data.nsds5replicaprotocoltimeout), - nsds5replicabackoffmin: Number(this.props.data.nsds5replicabackoffmin) == 0 ? 3 : Number(this.props.data.nsds5replicabackoffmin), - nsds5replicabackoffmax: Number(this.props.data.nsds5replicabackoffmax) == 0 ? 300 : Number(this.props.data.nsds5replicabackoffmax), -+ nsds5replicakeepaliveupdateinterval: Number(this.props.data.nsds5replicakeepaliveupdateinterval) == 0 ? 3600 : Number(this.props.data.nsds5replicakeepaliveupdateinterval), - // Original settings - _nsds5replicabinddn: this.props.data.nsds5replicabinddn, - _nsds5replicabinddngroup: this.props.data.nsds5replicabinddngroup, -@@ -59,6 +60,7 @@ export class ReplConfig extends React.Component { - _nsds5replicaprotocoltimeout: Number(this.props.data.nsds5replicaprotocoltimeout) == 0 ? 120 : Number(this.props.data.nsds5replicaprotocoltimeout), - _nsds5replicabackoffmin: Number(this.props.data.nsds5replicabackoffmin) == 0 ? 3 : Number(this.props.data.nsds5replicabackoffmin), - _nsds5replicabackoffmax: Number(this.props.data.nsds5replicabackoffmax) == 0 ? 
300 : Number(this.props.data.nsds5replicabackoffmax), -+ _nsds5replicakeepaliveupdateinterval: Number(this.props.data.nsds5replicakeepaliveupdateinterval) == 0 ? 3600 : Number(this.props.data.nsds5replicakeepaliveupdateinterval), - }; - - this.onToggle = (isExpanded) => { -@@ -275,7 +277,7 @@ export class ReplConfig extends React.Component { - 'nsds5replicapurgedelay', 'nsds5replicatombstonepurgeinterval', - 'nsds5replicareleasetimeout', 'nsds5replicaprotocoltimeout', - 'nsds5replicabackoffmin', 'nsds5replicabackoffmax', -- 'nsds5replicaprecisetombstonepurging' -+ 'nsds5replicaprecisetombstonepurging', 'nsds5replicakeepaliveupdateinterval', - ]; - // Check if a setting was changed, if so enable the save button - for (const config_attr of config_attrs) { -@@ -301,7 +303,7 @@ export class ReplConfig extends React.Component { - 'nsds5replicapurgedelay', 'nsds5replicatombstonepurgeinterval', - 'nsds5replicareleasetimeout', 'nsds5replicaprotocoltimeout', - 'nsds5replicabackoffmin', 'nsds5replicabackoffmax', -- 'nsds5replicaprecisetombstonepurging' -+ 'nsds5replicaprecisetombstonepurging', 'nsds5replicakeepaliveupdateinterval', - ]; - // Check if a setting was changed, if so enable the save button - for (const config_attr of config_attrs) { -@@ -451,6 +453,9 @@ export class ReplConfig extends React.Component { - if (this.state.nsds5replicabinddngroupcheckinterval != this.state._nsds5replicabinddngroupcheckinterval) { - cmd.push("--repl-bind-group-interval=" + this.state.nsds5replicabinddngroupcheckinterval); - } -+ if (this.state.nsds5replicakeepaliveupdateinterval != this.state._nsds5replicakeepaliveupdateinterval) { -+ cmd.push("--repl-keepalive-update-interval=" + this.state.nsds5replicakeepaliveupdateinterval); -+ } - if (this.state.nsds5replicareleasetimeout != this.state._nsds5replicareleasetimeout) { - cmd.push("--repl-release-timeout=" + this.state.nsds5replicareleasetimeout); - } -@@ -786,6 +791,29 @@ export class ReplConfig extends React.Component { - /> - - -+ -+ -+ Refresh RUV Interval -+ -+ -+ { this.onMinusConfig("nsds5replicakeepaliveupdateinterval") }} -+ onChange={(e) => { this.onConfigChange(e, "nsds5replicakeepaliveupdateinterval", 60) }} -+ onPlus={() => { this.onPlusConfig("nsds5replicakeepaliveupdateinterval") }} -+ inputName="input" -+ inputAriaLabel="number input" -+ minusBtnAriaLabel="minus" -+ plusBtnAriaLabel="plus" -+ widthChars={8} -+ /> -+ -+ - -Date: Fri, 5 Aug 2022 10:08:45 -0700 -Subject: [PATCH 3/5] Issue 5399 - UI - LDAP Editor is not updated when we - switch instances (#5400) - -Description: We don't refresh LDAP Editor when we switch instances. -It may lead to unpleasant errors. - -Add componentDidUpdate function with the appropriate processing and -properties. - -Fixes: https://github.com/389ds/389-ds-base/issues/5399 - -Reviewed by: @mreynolds389 (Thanks!) 
---- - src/cockpit/389-console/src/LDAPEditor.jsx | 19 +++++++++++++++++++ - src/cockpit/389-console/src/ds.jsx | 1 + - 2 files changed, 20 insertions(+) - -diff --git a/src/cockpit/389-console/src/LDAPEditor.jsx b/src/cockpit/389-console/src/LDAPEditor.jsx -index 70324be39..04fc97d41 100644 ---- a/src/cockpit/389-console/src/LDAPEditor.jsx -+++ b/src/cockpit/389-console/src/LDAPEditor.jsx -@@ -60,6 +60,7 @@ export class LDAPEditor extends React.Component { - - this.state = { - activeTabKey: 0, -+ firstLoad: true, - keyIndex: 0, - suffixList: [], - changeLayout: false, -@@ -249,6 +250,12 @@ export class LDAPEditor extends React.Component { - baseDn: this.state.baseDN - }; - -+ if (this.state.firstLoad) { -+ this.setState({ -+ firstLoad: false -+ }); -+ } -+ - this.setState({ - searching: true, - loading: refresh -@@ -361,6 +368,18 @@ export class LDAPEditor extends React.Component { - }); - } - -+ componentDidUpdate(prevProps) { -+ if (this.props.wasActiveList.includes(7)) { -+ if (this.state.firstLoad) { -+ this.handleReload(true); -+ } else { -+ if (this.props.serverId !== prevProps.serverId) { -+ this.handleReload(true); -+ } -+ } -+ } -+ } -+ - getPageData (page, perPage) { - if (page === 1) { - const pagedRows = this.state.rows.slice(0, 2 * perPage); // Each parent has a single child. -diff --git a/src/cockpit/389-console/src/ds.jsx b/src/cockpit/389-console/src/ds.jsx -index e88915e41..de4385292 100644 ---- a/src/cockpit/389-console/src/ds.jsx -+++ b/src/cockpit/389-console/src/ds.jsx -@@ -764,6 +764,7 @@ export class DSInstance extends React.Component { - key="ldap-editor" - addNotification={this.addNotification} - serverId={this.state.serverId} -+ wasActiveList={this.state.wasActiveList} - setPageSectionVariant={this.setPageSectionVariant} - /> - --- -2.37.1 - diff --git a/SOURCES/0008-Issue-5397-Fix-various-memory-leaks.patch b/SOURCES/0008-Issue-5397-Fix-various-memory-leaks.patch deleted file mode 100644 index d558aa7..0000000 --- a/SOURCES/0008-Issue-5397-Fix-various-memory-leaks.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 877df07df2e41988a797778b132935b7d8acfd87 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 5 Aug 2022 14:07:18 -0400 -Subject: [PATCH 4/5] Issue 5397 - Fix various memory leaks - -Description: - -Fixed memory leaks in: - -- Filter optimizer introduced sr_norm_filter_intent which dupped a filter - but never freed it. -- Replication connections would leak the replication manager's - credentials. - -relates: https://github.com/389ds/389-ds-base/issues/5397 - -Reviewed by: progier & jchapman (Thanks!!) 
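Leak fixes like the two listed above are typically verified by replaying the affected code paths under valgrind. Here is a rough sketch using the lib389 helpers touched earlier in this diff; valgrind_check_file and the "definitely lost" marker are assumptions about the lib389 API and valgrind output, not something this patch provides.

    from lib389.utils import (valgrind_enable, valgrind_disable,
                              valgrind_get_results_file, valgrind_check_file)

    def search_for_leaks(inst, sbin_dir):
        # Swap ns-slapd for the valgrind wrapper, rerun the workload,
        # then scan the results file for leak records.
        inst.stop()
        valgrind_enable(sbin_dir)
        inst.start()
        # ... drive searches / replication traffic here ...
        results_file = valgrind_get_results_file(inst)
        inst.stop()
        valgrind_disable(sbin_dir)
        inst.start()
        return valgrind_check_file(results_file, 'definitely lost')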
----
- ldap/servers/plugins/replication/repl5_connection.c | 1 +
- ldap/servers/slapd/back-ldbm/ldbm_search.c | 1 +
- 2 files changed, 2 insertions(+)
-
-diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c
-index b6bc21c46..be8bba08e 100644
---- a/ldap/servers/plugins/replication/repl5_connection.c
-+++ b/ldap/servers/plugins/replication/repl5_connection.c
-@@ -247,6 +247,7 @@ conn_delete_internal(Repl_Connection *conn)
- slapi_ch_free_string(&conn->last_ldap_errmsg);
- slapi_ch_free((void **)&conn->hostname);
- slapi_ch_free((void **)&conn->binddn);
-+ slapi_ch_free((void **)&conn->creds);
- slapi_ch_free((void **)&conn->plain);
- }
-
-diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
-index d0f52b6f7..771c35a33 100644
---- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
-+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
-@@ -1930,6 +1930,7 @@ delete_search_result_set(Slapi_PBlock *pb, back_search_result_set **sr)
- rc, filt_errs);
- }
- slapi_filter_free((*sr)->sr_norm_filter, 1);
-+ slapi_filter_free((*sr)->sr_norm_filter_intent, 1);
- memset(*sr, 0, sizeof(back_search_result_set));
- slapi_ch_free((void **)sr);
- return;
---
-2.37.1
-
diff --git a/SOURCES/0009-Issue-3903-keep-alive-update-event-starts-too-soon.patch b/SOURCES/0009-Issue-3903-keep-alive-update-event-starts-too-soon.patch
deleted file mode 100644
index 6c3aad6..0000000
--- a/SOURCES/0009-Issue-3903-keep-alive-update-event-starts-too-soon.patch
+++ /dev/null
@@ -1,60 +0,0 @@
-From 27f0c60a54514773e3ffaa09cfbb71c350f44143 Mon Sep 17 00:00:00 2001
-From: Mark Reynolds
-Date: Sat, 6 Aug 2022 14:03:16 -0400
-Subject: [PATCH 5/5] Issue 3903 - keep alive update event starts too soon
-
-Description: The keep alive update needs a little more time to start, to
-allow the changelog and other replication protocols to start up.
-
-relates: https://github.com/389ds/389-ds-base/issues/3903
-
-Reviewed by: tbordaz (Thanks!)
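The fix visible in the repl5_replica.c hunk below swaps the generic START_UPDATE_DELAY for an explicit 30-second offset from the current relative time. As a rough standalone illustration of why the absolute first-fire time matters (the names here are invented; this is not the slapi event-queue API), a repeating event simply has to start after the things it depends on exist:

    /* Toy scheduler state: the first firing is pushed past server
     * startup so the changelog and replication protocols are already
     * running when the keep-alive update fires. */
    #include <stdio.h>
    #include <time.h>

    #define STARTUP_GRACE_SECS 30 /* mirrors the "+ 30" in the hunk below */

    struct repeat_event {
        time_t first_fire; /* absolute time of the first run */
        long interval;     /* seconds between later runs */
    };

    static struct repeat_event schedule_keepalive(long interval_secs)
    {
        struct repeat_event ev;
        /* Firing at time(NULL) would race startup; add a grace period. */
        ev.first_fire = time(NULL) + STARTUP_GRACE_SECS;
        ev.interval = interval_secs;
        return ev;
    }

    int main(void)
    {
        struct repeat_event ev = schedule_keepalive(3600);
        printf("first run in %lds, then every %lds\n",
               (long)(ev.first_fire - time(NULL)), ev.interval);
        return 0;
    }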
---- - dirsrvtests/tests/suites/replication/regression_m2_test.py | 5 +++++ - ldap/servers/plugins/replication/repl5_replica.c | 7 ++++--- - 2 files changed, 9 insertions(+), 3 deletions(-) - -diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py -index 7dd0f2984..bbf9c8486 100644 ---- a/dirsrvtests/tests/suites/replication/regression_m2_test.py -+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py -@@ -821,6 +821,11 @@ def test_keepalive_entries(topo_m2): - assert keep_alive_s1 != str(entries[0].data['keepalivetimestamp']) - assert keep_alive_s2 != str(entries[1].data['keepalivetimestamp']) - -+ # Test replication -+ supplier = topo_m2.ms['supplier1'] -+ replica = Replicas(supplier).get(DEFAULT_SUFFIX) -+ assert replica.test_replication([topo_m2.ms['supplier2']]) -+ - - @pytest.mark.ds49915 - @pytest.mark.bz1626375 -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index ded4cf754..fa6419262 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -239,7 +239,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, - /* create supplier update event */ - if (r->repl_eqcxt_ka_update == NULL && replica_get_type(r) == REPLICA_TYPE_UPDATABLE) { - r->repl_eqcxt_ka_update = slapi_eq_repeat_rel(replica_subentry_update, r, -- slapi_current_rel_time_t() + START_UPDATE_DELAY, -+ slapi_current_rel_time_t() + 30, - replica_get_keepalive_update_interval(r)); - } - -@@ -415,8 +415,9 @@ replica_subentry_create(const char *repl_root, ReplicaId rid) - int return_value; - int rc = 0; - -- entry_string = slapi_ch_smprintf("dn: cn=%s %d,%s\nobjectclass: top\nobjectclass: ldapsubentry\nobjectclass: extensibleObject\ncn: %s %d", -- KEEP_ALIVE_ENTRY, rid, repl_root, KEEP_ALIVE_ENTRY, rid); -+ entry_string = slapi_ch_smprintf("dn: cn=%s %d,%s\nobjectclass: top\nobjectclass: ldapsubentry\n" -+ "objectclass: extensibleObject\n%s: 0\ncn: %s %d", -+ KEEP_ALIVE_ENTRY, rid, repl_root, KEEP_ALIVE_ATTR, KEEP_ALIVE_ENTRY, rid); - if (entry_string == NULL) { - slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, - "replica_subentry_create - Failed in slapi_ch_smprintf\n"); --- -2.37.1 - diff --git a/SOURCES/0010-Issue-5397-Fix-check-pick-error.patch b/SOURCES/0010-Issue-5397-Fix-check-pick-error.patch deleted file mode 100644 index 94f66e5..0000000 --- a/SOURCES/0010-Issue-5397-Fix-check-pick-error.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 1b2cc62c0802af650f80eebcc716b5d5db87030e Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Mon, 8 Aug 2022 13:56:49 -0400 -Subject: [PATCH] Issue 5397 - Fix check pick error - -Description: - -Original commit included a free for a new filter, but that filter was -not implemented in 1.4.3 ---- - ldap/servers/slapd/back-ldbm/ldbm_search.c | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c -index 771c35a33..d0f52b6f7 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_search.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c -@@ -1930,7 +1930,6 @@ delete_search_result_set(Slapi_PBlock *pb, back_search_result_set **sr) - rc, filt_errs); - } - slapi_filter_free((*sr)->sr_norm_filter, 1); -- slapi_filter_free((*sr)->sr_norm_filter_intent, 1); - memset(*sr, 0, sizeof(back_search_result_set)); - slapi_ch_free((void **)sr); - return; --- -2.37.1 - diff --git 
a/SOURCES/0011-Issue-5397-Fix-check-pick-error-2.patch b/SOURCES/0011-Issue-5397-Fix-check-pick-error-2.patch
deleted file mode 100644
index 1831d93..0000000
--- a/SOURCES/0011-Issue-5397-Fix-check-pick-error-2.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 1203808f59614f3bace1631cc713dcaa89026dde Mon Sep 17 00:00:00 2001
-From: Mark Reynolds
-Date: Mon, 8 Aug 2022 14:19:36 -0400
-Subject: [PATCH] Issue 5397 - Fix check pick error #2
-
-Description:
-
-Original commit included a free for the repl conn creds field, which does not exist
-in 1.4.3.
----
- ldap/servers/plugins/replication/repl5_connection.c | 1 -
- 1 file changed, 1 deletion(-)
-
-diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c
-index be8bba08e..b6bc21c46 100644
---- a/ldap/servers/plugins/replication/repl5_connection.c
-+++ b/ldap/servers/plugins/replication/repl5_connection.c
-@@ -247,7 +247,6 @@ conn_delete_internal(Repl_Connection *conn)
- slapi_ch_free_string(&conn->last_ldap_errmsg);
- slapi_ch_free((void **)&conn->hostname);
- slapi_ch_free((void **)&conn->binddn);
-- slapi_ch_free((void **)&conn->creds);
- slapi_ch_free((void **)&conn->plain);
- }
-
---
-2.37.1
-
diff --git a/SOURCES/0012-Issue-3903-Fix-another-cherry-pick-error.patch b/SOURCES/0012-Issue-3903-Fix-another-cherry-pick-error.patch
deleted file mode 100644
index 704e7e9..0000000
--- a/SOURCES/0012-Issue-3903-Fix-another-cherry-pick-error.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From 4e712bcb7ce7bd972515d996b5659fc607e09e2f Mon Sep 17 00:00:00 2001
-From: Mark Reynolds
-Date: Mon, 8 Aug 2022 14:41:47 -0400
-Subject: [PATCH] Issue 3903 - Fix another cherry-pick error
-
-Description: An erroneous ")" was added to the replica struct, which broke
-the build.
----
- ldap/servers/plugins/replication/repl5_replica.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
-index fa6419262..5dab57de4 100644
---- a/ldap/servers/plugins/replication/repl5_replica.c
-+++ b/ldap/servers/plugins/replication/repl5_replica.c
-@@ -66,7 +66,7 @@ struct replica
- uint64_t agmt_count; /* Number of agmts */
- Slapi_Counter *release_timeout; /* The amount of time to wait before releasing active replica */
- uint64_t abort_session; /* Abort the current replica session */
-- int64_t keepalive_update_interval; /* interval to do dummy update to keep RUV fresh */)
-+ int64_t keepalive_update_interval; /* interval to do dummy update to keep RUV fresh */
- };
-
-
---
-2.37.1
-
diff --git a/SOURCES/0013-Issue-5329-Improve-replication-extended-op-logging.patch b/SOURCES/0013-Issue-5329-Improve-replication-extended-op-logging.patch
deleted file mode 100644
index 927af90..0000000
--- a/SOURCES/0013-Issue-5329-Improve-replication-extended-op-logging.patch
+++ /dev/null
@@ -1,508 +0,0 @@
-From 508a6dd02986024b03eeef62d135f7e16b0c85e9 Mon Sep 17 00:00:00 2001
-From: Mark Reynolds
-Date: Thu, 2 Jun 2022 16:57:07 -0400
-Subject: [PATCH 1/4] Issue 5329 - Improve replication extended op logging
-
-Description:
-
-We need logging around parsing the extended op payload; right now, when it
-fails, we have no idea why.
-
-relates: https://github.com/389ds/389-ds-base/issues/5329
-
-Reviewed by: progier, firstyear, and spichugi (Thanks!!!)
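The patch below applies one mechanical pattern: every decode step that can fail logs which field it was reading before returning -1, instead of failing silently. A self-contained sketch of that pattern (toy payload format and logging macro; the real code uses libldap's BER decoder and slapi_log_err):

    /* Each failing step names the field it was parsing, so a bad
     * payload produces an explanatory log line, not a bare error. */
    #include <stdio.h>
    #include <string.h>

    #define LOG_DECODE_ERR(msg) \
        fprintf(stderr, "decode_payload - decoding failed: %s\n", (msg))

    /* Toy wire format: "protocol_oid|repl_root". */
    static int decode_payload(const char *buf, char *oid, char *root, size_t n)
    {
        const char *sep;

        if (buf == NULL || *buf == '\0') {
            LOG_DECODE_ERR("empty payload");
            return -1;
        }
        if ((sep = strchr(buf, '|')) == NULL) {
            LOG_DECODE_ERR("missing field (protocol_oid)");
            return -1;
        }
        snprintf(oid, n, "%.*s", (int)(sep - buf), buf);
        if (sep[1] == '\0') {
            LOG_DECODE_ERR("missing field (repl_root)");
            return -1;
        }
        snprintf(root, n, "%s", sep + 1);
        return 0;
    }

    int main(void)
    {
        char oid[64], root[64];
        if (decode_payload("2.16.840.1.113730.3.6.1|dc=example,dc=com",
                           oid, root, sizeof(oid)) == 0)
            printf("oid=%s root=%s\n", oid, root);
        decode_payload("no-separator", oid, root, sizeof(oid)); /* logs why */
        return 0;
    }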
---- - ldap/servers/plugins/replication/repl_extop.c | 207 +++++++++++++++++- - ldap/servers/slapd/slapi2runtime.c | 1 - - 2 files changed, 197 insertions(+), 11 deletions(-) - -diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c -index 8b178610b..70c45ec50 100644 ---- a/ldap/servers/plugins/replication/repl_extop.c -+++ b/ldap/servers/plugins/replication/repl_extop.c -@@ -73,6 +73,18 @@ done: - return rc; - } - -+static void -+ruv_dump_to_log(const RUV *ruv, char *log_name) -+{ -+ if (!ruv) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "%s: RUV: None\n", log_name); -+ } else { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "%s: RUV:\n", log_name); -+ ruv_dump(ruv, log_name, NULL); -+ } -+} -+ -+ - /* The data_guid and data parameters should only be set if we - * are talking with a 9.0 replica. */ - static struct berval * -@@ -95,33 +107,60 @@ create_ReplicationExtopPayload(const char *protocol_oid, - PR_ASSERT(protocol_oid != NULL || send_end); - PR_ASSERT(repl_root != NULL); - -- /* Create the request data */ -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - " -+ "encoding '%s' payload...\n", -+ send_end ? "End Replication" : "Start Replication"); -+ } - -+ /* Create the request data */ - if ((tmp_bere = der_alloc()) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: der_alloc failed\n"); - rc = LDAP_ENCODING_ERROR; - goto loser; - } - if (!send_end) { - if (ber_printf(tmp_bere, "{ss", protocol_oid, repl_root) == -1) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: ber_printf failed - protocol_oid (%s) repl_root (%s)\n", -+ protocol_oid, repl_root); - rc = LDAP_ENCODING_ERROR; - goto loser; - } -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - " -+ "encoding protocol_oid: %s\n", protocol_oid); -+ } - } else { - if (ber_printf(tmp_bere, "{s", repl_root) == -1) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: ber_printf failed - repl_root (%s)\n", -+ repl_root); - rc = LDAP_ENCODING_ERROR; - goto loser; - } -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - " -+ "encoding repl_root: %s\n", repl_root); -+ } - } - - sdn = slapi_sdn_new_dn_byref(repl_root); - repl = replica_get_replica_from_dn(sdn); - if (repl == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: failed to get replica from dn (%s)\n", -+ slapi_sdn_get_dn(sdn)); - rc = LDAP_OPERATIONS_ERROR; - goto loser; - } - - ruv_obj = replica_get_ruv(repl); - if (ruv_obj == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: failed to get ruv from replica suffix (%s)\n", -+ slapi_sdn_get_dn(sdn)); - rc = LDAP_OPERATIONS_ERROR; - goto loser; - } -@@ -134,8 +173,14 @@ create_ReplicationExtopPayload(const char *protocol_oid, - /* We need to encode and send each time the local ruv in case we have changed it */ - rc = encode_ruv(tmp_bere, ruv); - if (rc != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: encode_ruv failed for replica suffix (%s)\n", -+ slapi_sdn_get_dn(sdn)); - goto loser; - } -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ ruv_dump_to_log(ruv, 
"create_ReplicationExtopPayload"); -+ } - - if (!send_end) { - char s[CSN_STRSIZE]; -@@ -157,36 +202,67 @@ create_ReplicationExtopPayload(const char *protocol_oid, - charray_merge(&referrals_to_send, local_replica_referral, 0); - if (NULL != referrals_to_send) { - if (ber_printf(tmp_bere, "[v]", referrals_to_send) == -1) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: ber_printf (referrals_to_send)\n"); - rc = LDAP_ENCODING_ERROR; - goto loser; - } -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ for (size_t i = 0; referrals_to_send[i]; i++) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - " -+ "encoding ref: %s\n", referrals_to_send[i]); -+ } -+ } - slapi_ch_free((void **)&referrals_to_send); - } - /* Add the CSN */ - PR_ASSERT(NULL != csn); - if (ber_printf(tmp_bere, "s", csn_as_string(csn, PR_FALSE, s)) == -1) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: ber_printf (csnstr)\n"); - rc = LDAP_ENCODING_ERROR; - goto loser; - } -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - " -+ "encoding csn: %s\n", csn_as_string(csn, PR_FALSE, s)); -+ } - } - - /* If we have data to send to a 9.0 style replica, set it here. */ - if (data_guid && data) { - if (ber_printf(tmp_bere, "sO", data_guid, data) == -1) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: ber_printf (data_guid, data)\n"); - rc = LDAP_ENCODING_ERROR; - goto loser; - } -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - " -+ "encoding data_guid (%s) data (%s:%ld)\n", -+ data_guid, data->bv_val, data->bv_len); -+ } - } - -+ - if (ber_printf(tmp_bere, "}") == -1) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: ber_printf\n"); - rc = LDAP_ENCODING_ERROR; - goto loser; - } - - if (ber_flatten(tmp_bere, &req_data) == -1) { -+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload", -+ "encoding failed: ber_flatten failed\n"); - rc = LDAP_LOCAL_ERROR; - goto loser; - } -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - " -+ "Encoding finished\n"); -+ } -+ - /* Success */ - goto done; - -@@ -293,8 +369,14 @@ decode_startrepl_extop(Slapi_PBlock *pb, char **protocol_oid, char **repl_root, - if ((NULL == extop_oid) || - ((strcmp(extop_oid, REPL_START_NSDS50_REPLICATION_REQUEST_OID) != 0) && - (strcmp(extop_oid, REPL_START_NSDS90_REPLICATION_REQUEST_OID) != 0)) || -- !BV_HAS_DATA(extop_value)) { -+ !BV_HAS_DATA(extop_value)) -+ { - /* bogus */ -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: extop_oid (%s) (%s) extop_value (%s)\n", -+ NULL == extop_oid ? "NULL" : "Ok", -+ extop_oid ? extop_oid : "", -+ extop_value ? !BV_HAS_DATA(extop_value) ? 
"No data" : "Ok" : "No data"); - rc = -1; - goto free_and_return; - } -@@ -307,25 +389,36 @@ decode_startrepl_extop(Slapi_PBlock *pb, char **protocol_oid, char **repl_root, - } - - if ((tmp_bere = ber_init(extop_value)) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: ber_init for extop_value (%s:%lu)\n", -+ extop_value->bv_val, extop_value->bv_len); - rc = -1; - goto free_and_return; - } - if (ber_scanf(tmp_bere, "{") == LBER_ERROR) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: ber_scanf 1\n"); - rc = -1; - goto free_and_return; - } - /* Get the required protocol OID and root of replicated subtree */ - if (ber_get_stringa(tmp_bere, protocol_oid) == LBER_DEFAULT) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: ber_get_stringa (protocol_oid)\n"); - rc = -1; - goto free_and_return; - } - if (ber_get_stringa(tmp_bere, repl_root) == LBER_DEFAULT) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: ber_get_stringa (repl_root)\n"); - rc = -1; - goto free_and_return; - } - - /* get supplier's ruv */ - if (decode_ruv(tmp_bere, supplier_ruv) == -1) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: decode_ruv (supplier_ruv)\n"); - rc = -1; - goto free_and_return; - } -@@ -333,33 +426,45 @@ decode_startrepl_extop(Slapi_PBlock *pb, char **protocol_oid, char **repl_root, - /* Get the optional set of referral URLs */ - if (ber_peek_tag(tmp_bere, &len) == LBER_SET) { - if (ber_scanf(tmp_bere, "[v]", extra_referrals) == LBER_ERROR) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: ber_scanf (extra_referrals)\n"); - rc = -1; - goto free_and_return; - } - } - /* Get the CSN */ - if (ber_get_stringa(tmp_bere, csnstr) == LBER_ERROR) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: ber_get_stringa (csnstr)\n"); - rc = -1; - goto free_and_return; - } - /* Get the optional replication session callback data. */ - if (ber_peek_tag(tmp_bere, &len) == LBER_OCTETSTRING) { - if (ber_get_stringa(tmp_bere, data_guid) == LBER_ERROR) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: ber_get_stringa (data_guid)\n"); - rc = -1; - goto free_and_return; - } - /* If a data_guid was specified, data must be specified as well. 
*/ - if (ber_peek_tag(tmp_bere, &len) == LBER_OCTETSTRING) { - if (ber_get_stringal(tmp_bere, data) == LBER_ERROR) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: ber_get_stringal (data)\n"); - rc = -1; - goto free_and_return; - } - } else { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: ber_peek_tag\n"); - rc = -1; - goto free_and_return; - } - } - if (ber_scanf(tmp_bere, "}") == LBER_ERROR) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop", -+ "decoding failed: ber_scanf 2\n"); - rc = -1; - goto free_and_return; - } -@@ -378,6 +483,22 @@ free_and_return: - if (*supplier_ruv) { - ruv_destroy(supplier_ruv); - } -+ } else if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -+ "decode_startrepl_extop - decoding payload...\n"); -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -+ "decode_startrepl_extop - decoded protocol_oid: %s\n", *protocol_oid); -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -+ "decode_startrepl_extop - decoded repl_root: %s\n", *repl_root); -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -+ "decode_startrepl_extop - decoded csn: %s\n", *csnstr); -+ ruv_dump_to_log(*supplier_ruv, "decode_startrepl_extop"); -+ for (size_t i = 0; *extra_referrals && *extra_referrals[i]; i++) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "decode_startrepl_extop - " -+ "decoded referral: %s\n", *extra_referrals[i]); -+ } -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -+ "decode_startrepl_extop - Finshed decoding payload.\n"); - } - if (NULL != tmp_bere) { - ber_free(tmp_bere, 1); -@@ -406,30 +527,54 @@ decode_endrepl_extop(Slapi_PBlock *pb, char **repl_root) - - if ((NULL == extop_oid) || - (strcmp(extop_oid, REPL_END_NSDS50_REPLICATION_REQUEST_OID) != 0) || -- !BV_HAS_DATA(extop_value)) { -+ !BV_HAS_DATA(extop_value)) -+ { - /* bogus */ -+ slapi_log_err(SLAPI_LOG_ERR, "decode_endrepl_extop", -+ "decoding failed: extop_oid (%s) correct oid (%s) extop_value data (%s)\n", -+ extop_oid ? extop_oid : "NULL", -+ extop_oid ? strcmp(extop_oid, REPL_END_NSDS50_REPLICATION_REQUEST_OID) != 0 ? "wrong oid" : "correct oid" : "NULL", -+ !BV_HAS_DATA(extop_value) ? 
"No data" : "Has data"); - rc = -1; - goto free_and_return; - } - - if ((tmp_bere = ber_init(extop_value)) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_endrepl_extop", -+ "decoding failed: ber_init failed: extop_value (%s:%lu)\n", -+ extop_value->bv_val, extop_value->bv_len); - rc = -1; - goto free_and_return; - } - if (ber_scanf(tmp_bere, "{") == LBER_DEFAULT) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_endrepl_extop", -+ "decoding failed: ber_scanf failed1\n"); - rc = -1; - goto free_and_return; - } - /* Get the required root of replicated subtree */ - if (ber_get_stringa(tmp_bere, repl_root) == LBER_DEFAULT) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_endrepl_extop", -+ "decoding failed: ber_get_stringa failed\n"); - rc = -1; - goto free_and_return; - } - if (ber_scanf(tmp_bere, "}") == LBER_DEFAULT) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_endrepl_extop", -+ "decoding failed: ber_scanf2 failed\n"); - rc = -1; - goto free_and_return; - } - -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, "decode_endrepl_extop", -+ "Decoding payload...\n"); -+ slapi_log_err(SLAPI_LOG_REPL, "decode_endrepl_extop", -+ "Decoded repl_root: %s\n", *repl_root); -+ slapi_log_err(SLAPI_LOG_REPL, "decode_endrepl_extop", -+ "Finished decoding payload.\n"); -+ } -+ - free_and_return: - if (NULL != tmp_bere) { - ber_free(tmp_bere, 1); -@@ -461,27 +606,46 @@ decode_repl_ext_response(struct berval *bvdata, int *response_code, struct berva - PR_ASSERT(NULL != ruv_bervals); - - if ((NULL == response_code) || (NULL == ruv_bervals) || -- (NULL == data_guid) || (NULL == data) || !BV_HAS_DATA(bvdata)) { -+ (NULL == data_guid) || (NULL == data) || !BV_HAS_DATA(bvdata)) -+ { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response", -+ "decoding failed: response_code (%s) ruv_bervals (%s) data_guid (%s) data (%s) bvdata (%s)\n", -+ NULL == response_code ? "NULL" : "Ok", -+ NULL == ruv_bervals ? "NULL" : "Ok", -+ NULL == data_guid ? "NULL" : "Ok", -+ NULL == data ? "NULL" : "Ok", -+ !BV_HAS_DATA(bvdata) ? 
"No data" : "Ok"); - return_value = -1; - } else { - ber_len_t len; - ber_int_t temp_response_code = 0; - *ruv_bervals = NULL; - if ((tmp_bere = ber_init(bvdata)) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response", -+ "decoding failed: ber_init failed from bvdata (%s:%lu)\n", -+ bvdata->bv_val, bvdata->bv_len); - return_value = -1; - } else if (ber_scanf(tmp_bere, "{e", &temp_response_code) == LBER_ERROR) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response", -+ "decoding failed: ber_scanf failed\n"); - return_value = -1; - } else if (ber_peek_tag(tmp_bere, &len) == LBER_SEQUENCE) { - if (ber_scanf(tmp_bere, "{V}", ruv_bervals) == LBER_ERROR) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response", -+ "decoding failed: ber_scanf2 failed from ruv_bervals\n"); - return_value = -1; - } - } - /* Check for optional data from replication session callback */ - if (ber_peek_tag(tmp_bere, &len) == LBER_OCTETSTRING) { - if (ber_scanf(tmp_bere, "aO}", data_guid, data) == LBER_ERROR) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response", -+ "decoding failed: ber_scanf3 failed from data_guid & data\n"); - return_value = -1; - } - } else if (ber_scanf(tmp_bere, "}") == LBER_ERROR) { -+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response", -+ "decoding failed: ber_scanf4 failed\n"); - return_value = -1; - } - -@@ -934,17 +1098,36 @@ send_response: - /* ONREPL - not sure what we suppose to do here */ - } - ber_printf(resp_bere, "{e", response); -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -+ "multisupplier_extop_StartNSDS50ReplicationRequest - encoded response: %d\n", -+ response); -+ } - if (NULL != ruv_bervals) { - ber_printf(resp_bere, "{V}", ruv_bervals); -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ ruv_dump_to_log(ruv, "multisupplier_extop_StartNSDS50ReplicationRequest"); -+ } - } -+ - /* Add extra data from replication session callback if necessary */ - if (is90 && data_guid && data) { - ber_printf(resp_bere, "sO", data_guid, data); -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -+ "multisupplier_extop_StartNSDS50ReplicationRequest - encoded data_guid (%s) data (%s:%ld)\n", -+ data_guid, data->bv_val, data->bv_len); -+ } - } - - ber_printf(resp_bere, "}"); - ber_flatten(resp_bere, &resp_bval); - -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -+ "multisupplier_extop_StartNSDS50ReplicationRequest - Finished encoding payload\n"); -+ } -+ - if (is90) { - slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, REPL_NSDS90_REPLICATION_RESPONSE_OID); - } else { -@@ -1005,8 +1188,8 @@ send_response: - * sending this request). - * The situation is confused - */ -- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multimaster_extop_StartNSDS50ReplicationRequest - " -- "already acquired replica: replica not ready (%d) (replica=%s)\n", -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multisupplier_extop_StartNSDS50ReplicationRequest - " -+ "already acquired replica: replica not ready (%d) (replica=%s)\n", - response, replica_get_name(r) ? 
replica_get_name(r) : "no name"); - - /* -@@ -1016,8 +1199,8 @@ send_response: - if (r) { - - r_locking_conn = replica_get_locking_conn(r); -- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multimaster_extop_StartNSDS50ReplicationRequest - " -- "already acquired replica: locking_conn=%" PRIu64 ", current connid=%" PRIu64 "\n", -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multisupplier_extop_StartNSDS50ReplicationRequest - " -+ "already acquired replica: locking_conn=%" PRIu64 ", current connid=%" PRIu64 "\n", - r_locking_conn, connid); - - if ((r_locking_conn != ULONG_MAX) && (r_locking_conn == connid)) { -@@ -1032,8 +1215,8 @@ send_response: - * On the supplier, we need to close the connection so - * that the RA will restart a new session in a clear state - */ -- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multimaster_extop_StartNSDS50ReplicationRequest - " -- "already acquired replica: disconnect conn=%" PRIu64 "\n", -+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multisupplier_extop_StartNSDS50ReplicationRequest - " -+ "already acquired replica: disconnect conn=%" PRIu64 "\n", - connid); - slapi_disconnect_server(conn); - } -@@ -1210,6 +1393,10 @@ send_response: - if ((resp_bere = der_alloc()) == NULL) { - goto free_and_return; - } -+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { -+ slapi_log_err(SLAPI_LOG_REPL, "multisupplier_extop_EndNSDS50ReplicationRequest", -+ "encoded response: %d\n", response); -+ } - ber_printf(resp_bere, "{e}", response); - ber_flatten(resp_bere, &resp_bval); - slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, REPL_NSDS50_REPLICATION_RESPONSE_OID); -diff --git a/ldap/servers/slapd/slapi2runtime.c b/ldap/servers/slapd/slapi2runtime.c -index 53927934a..e622f1b65 100644 ---- a/ldap/servers/slapd/slapi2runtime.c -+++ b/ldap/servers/slapd/slapi2runtime.c -@@ -88,7 +88,6 @@ slapi_lock_mutex(Slapi_Mutex *mutex) - inline int __attribute__((always_inline)) - slapi_unlock_mutex(Slapi_Mutex *mutex) - { -- PR_ASSERT(mutex != NULL); - if (mutex == NULL || pthread_mutex_unlock((pthread_mutex_t *)mutex) != 0) { - return (0); - } else { --- -2.37.1 - diff --git a/SOURCES/0014-Issue-5412-lib389-do-not-set-backend-name-to-lowerca.patch b/SOURCES/0014-Issue-5412-lib389-do-not-set-backend-name-to-lowerca.patch deleted file mode 100644 index e1bab86..0000000 --- a/SOURCES/0014-Issue-5412-lib389-do-not-set-backend-name-to-lowerca.patch +++ /dev/null @@ -1,88 +0,0 @@ -From 6fd4fd082424838f7d06e0de8683d28f04ec0d43 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 10 Aug 2022 08:59:15 -0400 -Subject: [PATCH 2/4] Issue 5412 - lib389 - do not set backend name to - lowercase - -Description: - -There is no reason to set a new suffix to lowercase. The server -will correctly handle the case, and some customers, especially -with migrations, want to have the base suffix a certain case. - -relates: https://github.com/389ds/389-ds-base/issues/5412 - -Reviewed by: spichugi(Thanks!) 
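The Python change in the hunk below just drops the .lower() call before exploding and re-joining the suffix. For a standalone feel of the same contract (a toy splitter, not python-ldap's explode_dn or the server's DN normalizer): normalization may trim and re-join the RDN components, but it must not case-fold them:

    /* Re-join the comma-separated components of a suffix, trimming
     * spaces around each one but leaving the case untouched. */
    #include <stdio.h>
    #include <string.h>

    static void normalize_suffix(const char *dn, char *out, size_t n)
    {
        size_t used = 0;
        const char *p = dn;

        out[0] = '\0';
        while (*p != '\0' && used < n) {
            const char *end = strchr(p, ',');
            size_t len = end ? (size_t)(end - p) : strlen(p);

            /* Trim surrounding spaces of this component only. */
            while (len > 0 && *p == ' ') { p++; len--; }
            while (len > 0 && p[len - 1] == ' ') len--;

            used += snprintf(out + used, n - used, "%s%.*s",
                             used ? "," : "", (int)len, p);
            if (end == NULL)
                break;
            p = end + 1;
        }
    }

    int main(void)
    {
        char out[128];
        normalize_suffix("dc=UPPER_CASE , dc=Example", out, sizeof(out));
        printf("%s\n", out); /* prints dc=UPPER_CASE,dc=Example */
        return 0;
    }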
----
- dirsrvtests/tests/suites/basic/basic_test.py | 33 ++++++++++++++++++--
- src/lib389/lib389/backend.py | 3 +-
- 2 files changed, 32 insertions(+), 4 deletions(-)
-
-diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
-index 003cd8f28..6fa4dea25 100644
---- a/dirsrvtests/tests/suites/basic/basic_test.py
-+++ b/dirsrvtests/tests/suites/basic/basic_test.py
-@@ -22,6 +22,8 @@ from lib389.idm.directorymanager import DirectoryManager
- from lib389.config import LDBMConfig
- from lib389.dseldif import DSEldif
- from lib389.rootdse import RootDSE
-+from lib389.backend import Backends
-+from lib389.idm.domain import Domain
-
-
- pytestmark = pytest.mark.tier0
-@@ -1410,8 +1412,35 @@ def test_ldbm_modification_audit_log(topology_st):
- assert conn.searchAuditLog('%s: %s' % (attr, VALUE))
-
-
--@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'),
-- reason="This test is only required if perl is enabled, and requires root.")
-+def test_suffix_case(topology_st):
-+ """Test that the suffix case is preserved when creating a new backend
-+
-+ :id: 4eff15be-6cde-4312-b492-c88941876bda
-+ :setup: Standalone Instance
-+ :steps:
-+ 1. Create backend with uppercase characters
-+ 2. Create root node entry
-+ 3. Search should return suffix with upper case characters
-+ :expectedresults:
-+ 1. Success
-+ 2. Success
-+ 3. Success
-+ """
-+
-+ # Start with a clean slate
-+ topology_st.standalone.restart()
-+
-+ TEST_SUFFIX = 'dc=UPPER_CASE'
-+
-+ backends = Backends(topology_st.standalone)
-+ backends.create(properties={'nsslapd-suffix': TEST_SUFFIX,
-+ 'name': 'upperCaseRoot',
-+ 'sample_entries': '001004002'})
-+
-+ domain = Domain(topology_st.standalone, TEST_SUFFIX)
-+ assert domain.dn == TEST_SUFFIX
-+
-+
- def test_dscreate(request):
- """Test that dscreate works, we need this for now until setup-ds.pl is
- fully discontinued.
-diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
-index cbd2810e9..24613655d 100644
---- a/src/lib389/lib389/backend.py
-+++ b/src/lib389/lib389/backend.py
-@@ -608,8 +608,7 @@ class Backend(DSLdapObject):
- dn = ",".join(dn_comps)
-
- if properties is not None:
-- suffix_dn = properties['nsslapd-suffix'].lower()
-- dn_comps = ldap.dn.explode_dn(suffix_dn)
-+ dn_comps = ldap.dn.explode_dn(properties['nsslapd-suffix'])
- ndn = ",".join(dn_comps)
- properties['nsslapd-suffix'] = ndn
- sample_entries = properties.pop(BACKEND_SAMPLE_ENTRIES, False)
---
-2.37.1
-
diff --git a/SOURCES/0015-Issue-5418-Sync_repl-may-crash-while-managing-invali.patch b/SOURCES/0015-Issue-5418-Sync_repl-may-crash-while-managing-invali.patch
deleted file mode 100644
index 4bce950..0000000
--- a/SOURCES/0015-Issue-5418-Sync_repl-may-crash-while-managing-invali.patch
+++ /dev/null
@@ -1,110 +0,0 @@
-From 48ef747b731b5debfefc20757f3b3775828504c2 Mon Sep 17 00:00:00 2001
-From: tbordaz
-Date: Thu, 18 Aug 2022 11:17:30 +0200
-Subject: [PATCH 3/4] Issue 5418 - Sync_repl may crash while managing invalid
- cookie (#5420)
-
-Bug description:
- If the server receives an invalid cookie without the separator '#',
- it parses it into an empty cookie (Sync_Cookie) instead of a NULL
- cookie (failure).
- Later it crashes with SIGSEGV when using the empty cookie.
-
-Fix description:
- If the parsing fails, return NULL.
-
-relates: #5418
-
-Reviewed by: Viktor Ashirov, Mark Reynolds, William Brown, Simon
- Pichugin (thanks!)
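Only the regression test ships in the hunk below; the plugin-side fix is the rule stated in the message above. A minimal standalone sketch of that rule (invented struct and fields, not the plugin's actual Sync_Cookie API): when the expected '#' separators are missing, return NULL so the caller sees a failure, never an allocated-but-empty cookie it will later dereference:

    /* Parse "server_sig#client_sig#change_nr"; any malformed input
     * yields NULL instead of a half-initialized cookie. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct sync_cookie {
        char *server_signature;
        char *client_signature;
        unsigned long change_nr;
    };

    static struct sync_cookie *cookie_parse(const char *s)
    {
        const char *p1 = s ? strchr(s, '#') : NULL;
        const char *p2 = p1 ? strchr(p1 + 1, '#') : NULL;
        struct sync_cookie *c;

        /* Both separators and a trailing change number are required. */
        if (p1 == NULL || p2 == NULL || p2[1] == '\0')
            return NULL;

        c = calloc(1, sizeof(*c));
        if (c == NULL)
            return NULL;
        c->server_signature = strndup(s, p1 - s); /* POSIX strndup */
        c->client_signature = strndup(p1 + 1, p2 - (p1 + 1));
        c->change_nr = strtoul(p2 + 1, NULL, 10);
        return c;
    }

    int main(void)
    {
        const char *bad[] = { "#", "##", "foo" }; /* cases from the test */
        struct sync_cookie *ok;

        for (size_t i = 0; i < sizeof(bad) / sizeof(bad[0]); i++)
            printf("'%s' -> %s\n", bad[i],
                   cookie_parse(bad[i]) ? "parsed" : "rejected (NULL)");

        ok = cookie_parse("localhost:389#uid=sync,ou=people#42");
        if (ok != NULL) {
            printf("well-formed -> change number %lu\n", ok->change_nr);
            free(ok->server_signature);
            free(ok->client_signature);
            free(ok);
        }
        return 0;
    }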
---- - .../suites/syncrepl_plugin/basic_test.py | 76 +++++++++++++++++++ - 1 file changed, 76 insertions(+) - -diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -index 533460e8f..375517693 100644 ---- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -@@ -594,3 +594,79 @@ def test_sync_repl_cenotaph(topo_m2, request): - pass - - request.addfinalizer(fin) -+ -+def test_sync_repl_invalid_cookie(topology, request): -+ """Test sync_repl with invalid cookie -+ -+ :id: 8fa4a8f8-acf4-42a5-90f1-6ba1d8080e46 -+ :setup: install a standalone instance -+ :steps: -+ 1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin) -+ 2. Enable retroCL/content_sync -+ 3. Establish a sync_repl connection -+ 4. Tests servers results to search with invalid cookie -+ 5. Add/delete an user entry to check the server is up and running -+ :expectedresults: -+ 1. Should succeeds -+ 2. Should succeeds -+ 3. Should succeeds -+ 4. Should succeeds -+ 5. Should succeeds -+ """ -+ -+ # Reset the instance in a default config -+ # Disable content sync plugin -+ topology.standalone.restart() -+ topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC) -+ -+ # Disable retro changelog -+ topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG) -+ -+ # Disable dynamic plugins -+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')]) -+ topology.standalone.restart() -+ -+ # Enable retro changelog -+ topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) -+ -+ # Enbale content sync plugin -+ topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC) -+ topology.standalone.restart() -+ -+ # Setup the syncer -+ sync = ISyncRepl(topology.standalone) -+ -+ # Test invalid cookies -+ cookies = ('#', '##', 'a#a#a', 'a#a#1', 'foo') -+ for invalid_cookie in cookies: -+ log.info('Testing cookie: %s' % invalid_cookie) -+ try: -+ ldap_search = sync.syncrepl_search(base=DEFAULT_SUFFIX, -+ scope=ldap.SCOPE_SUBTREE, -+ attrlist=['objectclass', 'cn', 'homedirectory', 'sn','uid'], -+ filterstr='(|(objectClass=groupofnames)(objectClass=person))', -+ mode='refreshOnly', -+ cookie=invalid_cookie) -+ poll_result = sync.syncrepl_poll(all=1) -+ -+ log.fatal('Invalid cookie accepted!') -+ assert False -+ except Exception as e: -+ log.info('Invalid cookie correctly rejected: {}'.format(e.args[0]['info'])) -+ pass -+ -+ # check that the server is still up and running -+ users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) -+ user = users.create_test_user(uid=1000) -+ -+ # Success -+ log.info('Test complete') -+ -+ def fin(): -+ topology.standalone.restart() -+ try: -+ user.delete() -+ except: -+ pass -+ -+ request.addfinalizer(fin) --- -2.37.1 - diff --git a/SOURCES/0016-Issue-3903-fix-repl-keep-alive-event-interval.patch b/SOURCES/0016-Issue-3903-fix-repl-keep-alive-event-interval.patch deleted file mode 100644 index 57c16c2..0000000 --- a/SOURCES/0016-Issue-3903-fix-repl-keep-alive-event-interval.patch +++ /dev/null @@ -1,2510 +0,0 @@ -From 01e941e3eadd7a208982d20c0ca9c104142f2b91 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 10 Aug 2022 08:58:28 -0400 -Subject: [PATCH 4/4] Issue 3903 - fix repl keep alive event interval - -Description: Previously we passed the interval as seconds to the - event queue, but it is supposed to be milliseconds. - - Fixed a crash with repl logging and decoding extended - op payload (referrals). 
- - Also reworked alot of the replication CI tests that - were flaky. - -relates: https://github.com/389ds/389-ds-base/issues/3903 - -Reviewed by: tbordaz & spichugi(Thanks!) ---- - .../suites/replication/acceptance_test.py | 52 +- - .../cleanallruv_abort_certify_test.py | 136 ++++ - .../cleanallruv_abort_restart_test.py | 146 ++++ - .../replication/cleanallruv_abort_test.py | 123 +++ - .../replication/cleanallruv_force_test.py | 187 +++++ - .../cleanallruv_multiple_force_test.py | 214 +++++ - .../replication/cleanallruv_restart_test.py | 161 ++++ - .../cleanallruv_shutdown_crash_test.py | 123 +++ - .../replication/cleanallruv_stress_test.py | 216 +++++ - .../suites/replication/cleanallruv_test.py | 742 +----------------- - .../suites/replication/regression_m2_test.py | 13 +- - .../replication/regression_m2c2_test.py | 1 + - .../plugins/replication/repl5_replica.c | 12 +- - ldap/servers/plugins/replication/repl_extop.c | 4 +- - ldap/servers/slapd/task.c | 8 +- - src/lib389/lib389/instance/remove.py | 6 + - 16 files changed, 1385 insertions(+), 759 deletions(-) - create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_abort_certify_test.py - create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_abort_restart_test.py - create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_abort_test.py - create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_force_test.py - create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_multiple_force_test.py - create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_restart_test.py - create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py - create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_stress_test.py - -diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py -index a5f0c4c6b..863ee2553 100644 ---- a/dirsrvtests/tests/suites/replication/acceptance_test.py -+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py -@@ -8,6 +8,7 @@ - # - import pytest - import logging -+import time - from lib389.replica import Replicas - from lib389.tasks import * - from lib389.utils import * -@@ -124,12 +125,16 @@ def test_modify_entry(topo_m4, create_entry): - 8. Some time should pass - 9. 
The change should be present on all suppliers - """ -+ if DEBUGGING: -+ sleep_time = 8 -+ else: -+ sleep_time = 2 - - log.info('Modifying entry {} - add operation'.format(TEST_ENTRY_DN)) - - test_user = UserAccount(topo_m4.ms["supplier1"], TEST_ENTRY_DN) - test_user.add('mail', '{}@redhat.com'.format(TEST_ENTRY_NAME)) -- time.sleep(1) -+ time.sleep(sleep_time) - - all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) - for u in all_user: -@@ -137,7 +142,7 @@ def test_modify_entry(topo_m4, create_entry): - - log.info('Modifying entry {} - replace operation'.format(TEST_ENTRY_DN)) - test_user.replace('mail', '{}@greenhat.com'.format(TEST_ENTRY_NAME)) -- time.sleep(1) -+ time.sleep(sleep_time) - - all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) - for u in all_user: -@@ -145,7 +150,7 @@ def test_modify_entry(topo_m4, create_entry): - - log.info('Modifying entry {} - delete operation'.format(TEST_ENTRY_DN)) - test_user.remove('mail', '{}@greenhat.com'.format(TEST_ENTRY_NAME)) -- time.sleep(1) -+ time.sleep(sleep_time) - - all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) - for u in all_user: -@@ -167,7 +172,10 @@ def test_delete_entry(topo_m4, create_entry): - - log.info('Deleting entry {} during the test'.format(TEST_ENTRY_DN)) - topo_m4.ms["supplier1"].delete_s(TEST_ENTRY_DN) -- -+ if DEBUGGING: -+ time.sleep(8) -+ else: -+ time.sleep(1) - entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) - assert not entries, "Entry deletion {} wasn't replicated successfully".format(TEST_ENTRY_DN) - -@@ -231,6 +239,11 @@ def test_modrdn_after_pause(topo_m4): - 5. The change should be present on all suppliers - """ - -+ if DEBUGGING: -+ sleep_time = 8 -+ else: -+ sleep_time = 3 -+ - newrdn_name = 'newrdn' - newrdn_dn = 'uid={},{}'.format(newrdn_name, DEFAULT_SUFFIX) - -@@ -264,7 +277,7 @@ def test_modrdn_after_pause(topo_m4): - topo_m4.resume_all_replicas() - - log.info('Wait for replication to happen') -- time.sleep(3) -+ time.sleep(sleep_time) - - try: - entries_new = get_repl_entries(topo_m4, newrdn_name, ["uid"]) -@@ -354,6 +367,11 @@ def test_many_attrs(topo_m4, create_entry): - for add_name in add_list: - test_user.add('description', add_name) - -+ if DEBUGGING: -+ time.sleep(10) -+ else: -+ time.sleep(1) -+ - log.info('Check that everything was properly replicated after an add operation') - entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["description"]) - for entry in entries: -@@ -363,6 +381,11 @@ def test_many_attrs(topo_m4, create_entry): - for delete_name in delete_list: - test_user.remove('description', delete_name) - -+ if DEBUGGING: -+ time.sleep(10) -+ else: -+ time.sleep(1) -+ - log.info('Check that everything was properly replicated after a delete operation') - entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["description"]) - for entry in entries: -@@ -386,12 +409,22 @@ def test_double_delete(topo_m4, create_entry): - log.info('Deleting entry {} from supplier1'.format(TEST_ENTRY_DN)) - topo_m4.ms["supplier1"].delete_s(TEST_ENTRY_DN) - -+ if DEBUGGING: -+ time.sleep(5) -+ else: -+ time.sleep(1) -+ - log.info('Deleting entry {} from supplier2'.format(TEST_ENTRY_DN)) - try: - topo_m4.ms["supplier2"].delete_s(TEST_ENTRY_DN) - except ldap.NO_SUCH_OBJECT: - log.info("Entry {} wasn't found supplier2. 
It is expected.".format(TEST_ENTRY_DN)) - -+ if DEBUGGING: -+ time.sleep(5) -+ else: -+ time.sleep(1) -+ - log.info('Make searches to check if server is alive') - entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) - assert not entries, "Entry deletion {} wasn't replicated successfully".format(TEST_ENTRY_DN) -@@ -436,6 +469,11 @@ def test_password_repl_error(topo_m4, create_entry): - m3_conn = test_user_m3.bind(TEST_ENTRY_NEW_PASS) - m4_conn = test_user_m4.bind(TEST_ENTRY_NEW_PASS) - -+ if DEBUGGING: -+ time.sleep(5) -+ else: -+ time.sleep(1) -+ - log.info('Check the error log for the error with {}'.format(TEST_ENTRY_DN)) - assert not m2.ds_error_log.match('.*can.t add a change for uid={}.*'.format(TEST_ENTRY_NAME)) - -@@ -552,7 +590,7 @@ def test_csnpurge_large_valueset(topo_m2): - replica = replicas.list()[0] - log.info('nsds5ReplicaPurgeDelay to 5') - replica.set('nsds5ReplicaPurgeDelay', '5') -- time.sleep(6) -+ time.sleep(10) - - # add some new values to the valueset containing entries that should be purged - for i in range(21,25): -@@ -612,7 +650,7 @@ def test_urp_trigger_substring_search(topo_m2): - break - else: - log.info('Entry not yet replicated on M2, wait a bit') -- time.sleep(2) -+ time.sleep(3) - - # check that M2 access logs does not "(&(objectclass=nstombstone)(nscpentrydn=uid=asterisk_*_in_value,dc=example,dc=com))" - log.info('Check that on M2, URP as not triggered such internal search') -diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_abort_certify_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_abort_certify_test.py -new file mode 100644 -index 000000000..603693b9e ---- /dev/null -+++ b/dirsrvtests/tests/suites/replication/cleanallruv_abort_certify_test.py -@@ -0,0 +1,136 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2022 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import logging -+import pytest -+import os -+import time -+from lib389._constants import DEFAULT_SUFFIX -+from lib389.topologies import topology_m4 -+from lib389.tasks import CleanAllRUVTask -+from lib389.replica import ReplicationManager, Replicas -+ -+log = logging.getLogger(__name__) -+ -+ -+def remove_supplier4_agmts(msg, topology_m4): -+ """Remove all the repl agmts to supplier4. """ -+ -+ log.info('%s: remove all the agreements to supplier 4...' % msg) -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ # This will delete m4 from the topo *and* remove all incoming agreements -+ # to m4. 
-+ repl.remove_supplier(topology_m4.ms["supplier4"], -+ [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]]) -+ -+def task_done(topology_m4, task_dn, timeout=60): -+ """Check if the task is complete""" -+ -+ attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', -+ 'nsTaskCurrentItem', 'nsTaskTotalItems'] -+ done = False -+ count = 0 -+ -+ while not done and count < timeout: -+ try: -+ entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) -+ if entry is not None: -+ if entry.hasAttr('nsTaskExitCode'): -+ done = True -+ break -+ else: -+ done = True -+ break -+ except ldap.NO_SUCH_OBJECT: -+ done = True -+ break -+ except ldap.LDAPError: -+ break -+ time.sleep(1) -+ count += 1 -+ -+ return done -+ -+@pytest.mark.flaky(max_runs=2, min_passes=1) -+def test_abort_certify(topology_m4): -+ """Test the abort task with a replica-certify-all option -+ -+ :id: 78959966-d644-44a8-b98c-1fcf21b45eb0 -+ :setup: Replication setup with four suppliers -+ :steps: -+ 1. Disable replication on supplier 4 -+ 2. Remove agreements to supplier 4 from other suppliers -+ 3. Stop supplier 2 -+ 4. Run a cleanallruv task on supplier 1 -+ 5. Run a cleanallruv abort task on supplier 1 with a replica-certify-all option -+ :expectedresults: No hanging tasks left -+ 1. Replication on supplier 4 should be disabled -+ 2. Agreements to supplier 4 should be removed -+ 3. Supplier 2 should be stopped -+ 4. Operation should be successful -+ 5. Operation should be successful -+ """ -+ -+ log.info('Running test_abort_certify...') -+ -+ # Remove the agreements from the other suppliers that point to supplier 4 -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ m4rid = repl.get_rid(topology_m4.ms["supplier4"]) -+ remove_supplier4_agmts("test_abort_certify", topology_m4) -+ -+ # Stop supplier 2 -+ log.info('test_abort_certify: stop supplier 2 to freeze the cleanAllRUV task...') -+ topology_m4.ms["supplier2"].stop() -+ -+ # Run the task -+ log.info('test_abort_certify: add the cleanAllRUV task...') -+ cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -+ cruv_task.create(properties={ -+ 'replica-id': m4rid, -+ 'replica-base-dn': DEFAULT_SUFFIX, -+ 'replica-force-cleaning': 'no', -+ 'replica-certify-all': 'yes' -+ }) -+ # Wait a bit -+ time.sleep(2) -+ -+ # Abort the task -+ log.info('test_abort_certify: abort the cleanAllRUV task...') -+ abort_task = cruv_task.abort(certify=True) -+ -+ # Wait a while and make sure the abort task is still running -+ log.info('test_abort_certify...') -+ -+ if task_done(topology_m4, abort_task.dn, 10): -+ log.fatal('test_abort_certify: abort task incorrectly finished') -+ assert False -+ -+ # Now start supplier 2 so it can be aborted -+ log.info('test_abort_certify: start supplier 2 to allow the abort task to finish...') -+ topology_m4.ms["supplier2"].start() -+ -+ # Wait for the abort task to stop -+ if not task_done(topology_m4, abort_task.dn, 90): -+ log.fatal('test_abort_certify: The abort CleanAllRUV task was not aborted') -+ assert False -+ -+ # Check supplier 1 does not have the clean task running -+ log.info('test_abort_certify: check supplier 1 no longer has a cleanAllRUV task...') -+ if not task_done(topology_m4, cruv_task.dn): -+ log.fatal('test_abort_certify: CleanAllRUV task was not aborted') -+ assert False -+ -+ log.info('test_abort_certify PASSED') -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git 
a/dirsrvtests/tests/suites/replication/cleanallruv_abort_restart_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_abort_restart_test.py -new file mode 100644 -index 000000000..1406c6553 ---- /dev/null -+++ b/dirsrvtests/tests/suites/replication/cleanallruv_abort_restart_test.py -@@ -0,0 +1,146 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2022 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import logging -+import pytest -+import os -+import time -+from lib389._constants import DEFAULT_SUFFIX -+from lib389.topologies import topology_m4 -+from lib389.tasks import CleanAllRUVTask -+from lib389.replica import ReplicationManager -+ -+log = logging.getLogger(__name__) -+ -+ -+def remove_supplier4_agmts(msg, topology_m4): -+ """Remove all the repl agmts to supplier4. """ -+ -+ log.info('%s: remove all the agreements to supplier 4...' % msg) -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ # This will delete m4 from the topo *and* remove all incoming agreements -+ # to m4. -+ repl.remove_supplier(topology_m4.ms["supplier4"], -+ [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]]) -+ -+def task_done(topology_m4, task_dn, timeout=60): -+ """Check if the task is complete""" -+ -+ attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', -+ 'nsTaskCurrentItem', 'nsTaskTotalItems'] -+ done = False -+ count = 0 -+ -+ while not done and count < timeout: -+ try: -+ entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) -+ if entry is not None: -+ if entry.hasAttr('nsTaskExitCode'): -+ done = True -+ break -+ else: -+ done = True -+ break -+ except ldap.NO_SUCH_OBJECT: -+ done = True -+ break -+ except ldap.LDAPError: -+ break -+ time.sleep(1) -+ count += 1 -+ -+ return done -+ -+ -+@pytest.mark.flaky(max_runs=2, min_passes=1) -+def test_abort_restart(topology_m4): -+ """Test the abort task can handle a restart, and then resume -+ -+ :id: b66e33d4-fe85-4e1c-b882-75da80f70ab3 -+ :setup: Replication setup with four suppliers -+ :steps: -+ 1. Disable replication on supplier 4 -+ 2. Remove agreements to supplier 4 from other suppliers -+ 3. Stop supplier 3 -+ 4. Run a cleanallruv task on supplier 1 -+ 5. Run a cleanallruv abort task on supplier 1 -+ 6. Restart supplier 1 -+ 7. Make sure that no crash happened -+ 8. Start supplier 3 -+ 9. Check supplier 1 does not have the clean task running -+ 10. Check that errors log doesn't have 'Aborting abort task' message -+ :expectedresults: -+ 1. Replication on supplier 4 should be disabled -+ 2. Agreements to supplier 4 should be removed -+ 3. Supplier 3 should be stopped -+ 4. Operation should be successful -+ 5. Operation should be successful -+ 6. Supplier 1 should be restarted -+ 7. No crash should happened -+ 8. Supplier 3 should be started -+ 9. Check supplier 1 shouldn't have the clean task running -+ 10. 
Errors log shouldn't have 'Aborting abort task' message -+ """ -+ -+ log.info('Running test_abort_restart...') -+ # Remove the agreements from the other suppliers that point to supplier 4 -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ m4rid = repl.get_rid(topology_m4.ms["supplier4"]) -+ remove_supplier4_agmts("test_abort", topology_m4) -+ -+ # Stop supplier 3 -+ log.info('test_abort_restart: stop supplier 3 to freeze the cleanAllRUV task...') -+ topology_m4.ms["supplier3"].stop() -+ -+ # Run the task -+ log.info('test_abort_restart: add the cleanAllRUV task...') -+ cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -+ cruv_task.create(properties={ -+ 'replica-id': m4rid, -+ 'replica-base-dn': DEFAULT_SUFFIX, -+ 'replica-force-cleaning': 'no', -+ 'replica-certify-all': 'yes' -+ }) -+ # Wait a bit -+ time.sleep(2) -+ -+ # Abort the task -+ cruv_task.abort(certify=True) -+ -+ # Check supplier 1 does not have the clean task running -+ log.info('test_abort_abort: check supplier 1 no longer has a cleanAllRUV task...') -+ if not task_done(topology_m4, cruv_task.dn): -+ log.fatal('test_abort_restart: CleanAllRUV task was not aborted') -+ assert False -+ -+ # Now restart supplier 1, and make sure the abort process completes -+ topology_m4.ms["supplier1"].restart() -+ if topology_m4.ms["supplier1"].detectDisorderlyShutdown(): -+ log.fatal('test_abort_restart: Supplier 1 previously crashed!') -+ assert False -+ -+ # Start supplier 3 -+ topology_m4.ms["supplier3"].start() -+ -+ # Need to wait 5 seconds before server processes any leftover tasks -+ time.sleep(6) -+ -+ # Check supplier 1 tried to run abort task. We expect the abort task to be aborted. -+ if not topology_m4.ms["supplier1"].searchErrorsLog('Aborting abort task'): -+ log.fatal('test_abort_restart: Abort task did not restart') -+ assert False -+ -+ log.info('test_abort_restart PASSED') -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_abort_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_abort_test.py -new file mode 100644 -index 000000000..f89188165 ---- /dev/null -+++ b/dirsrvtests/tests/suites/replication/cleanallruv_abort_test.py -@@ -0,0 +1,123 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2022 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import logging -+import pytest -+import os -+import time -+from lib389._constants import DEFAULT_SUFFIX -+from lib389.topologies import topology_m4 -+from lib389.tasks import CleanAllRUVTask -+from lib389.replica import ReplicationManager -+ -+log = logging.getLogger(__name__) -+ -+ -+def remove_supplier4_agmts(msg, topology_m4): -+ """Remove all the repl agmts to supplier4. """ -+ -+ log.info('%s: remove all the agreements to supplier 4...' % msg) -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ # This will delete m4 from the topo *and* remove all incoming agreements -+ # to m4. 
-+ repl.remove_supplier(topology_m4.ms["supplier4"], -+ [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]]) -+ -+def task_done(topology_m4, task_dn, timeout=60): -+ """Check if the task is complete""" -+ -+ attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', -+ 'nsTaskCurrentItem', 'nsTaskTotalItems'] -+ done = False -+ count = 0 -+ -+ while not done and count < timeout: -+ try: -+ entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) -+ if entry is not None: -+ if entry.hasAttr('nsTaskExitCode'): -+ done = True -+ break -+ else: -+ done = True -+ break -+ except ldap.NO_SUCH_OBJECT: -+ done = True -+ break -+ except ldap.LDAPError: -+ break -+ time.sleep(1) -+ count += 1 -+ -+ return done -+ -+ -+@pytest.mark.flaky(max_runs=2, min_passes=1) -+def test_abort(topology_m4): -+ """Test the abort task basic functionality -+ -+ :id: b09a6887-8de0-4fac-8e41-73ccbaaf7a08 -+ :setup: Replication setup with four suppliers -+ :steps: -+ 1. Disable replication on supplier 4 -+ 2. Remove agreements to supplier 4 from other suppliers -+ 3. Stop supplier 2 -+ 4. Run a cleanallruv task on supplier 1 -+ 5. Run a cleanallruv abort task on supplier 1 -+ :expectedresults: No hanging tasks left -+ 1. Replication on supplier 4 should be disabled -+ 2. Agreements to supplier 4 should be removed -+ 3. Supplier 2 should be stopped -+ 4. Operation should be successful -+ 5. Operation should be successful -+ """ -+ -+ log.info('Running test_abort...') -+ # Remove the agreements from the other suppliers that point to supplier 4 -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ m4rid = repl.get_rid(topology_m4.ms["supplier4"]) -+ remove_supplier4_agmts("test_abort", topology_m4) -+ -+ # Stop supplier 2 -+ log.info('test_abort: stop supplier 2 to freeze the cleanAllRUV task...') -+ topology_m4.ms["supplier2"].stop() -+ -+ # Run the task -+ log.info('test_abort: add the cleanAllRUV task...') -+ cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -+ cruv_task.create(properties={ -+ 'replica-id': m4rid, -+ 'replica-base-dn': DEFAULT_SUFFIX, -+ 'replica-force-cleaning': 'no', -+ 'replica-certify-all': 'yes' -+ }) -+ # Wait a bit -+ time.sleep(2) -+ -+ # Abort the task -+ cruv_task.abort() -+ -+ # Check supplier 1 does not have the clean task running -+ log.info('test_abort: check supplier 1 no longer has a cleanAllRUV task...') -+ if not task_done(topology_m4, cruv_task.dn): -+ log.fatal('test_abort: CleanAllRUV task was not aborted') -+ assert False -+ -+ # Start supplier 2 -+ log.info('test_abort: start supplier 2 to begin the restore process...') -+ topology_m4.ms["supplier2"].start() -+ -+ log.info('test_abort PASSED') -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_force_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_force_test.py -new file mode 100644 -index 000000000..d5b930584 ---- /dev/null -+++ b/dirsrvtests/tests/suites/replication/cleanallruv_force_test.py -@@ -0,0 +1,187 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2022 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. 
-+# --- END COPYRIGHT BLOCK --- -+# -+import logging -+import pytest -+import os -+import time -+import random -+import threading -+from lib389._constants import DEFAULT_SUFFIX -+from lib389.topologies import topology_m4 -+from lib389.tasks import CleanAllRUVTask -+from lib389.replica import Replicas, ReplicationManager -+from lib389.idm.directorymanager import DirectoryManager -+from lib389.idm.user import UserAccounts -+ -+log = logging.getLogger(__name__) -+ -+ -+class AddUsers(threading.Thread): -+ def __init__(self, inst, num_users): -+ threading.Thread.__init__(self) -+ self.daemon = True -+ self.inst = inst -+ self.num_users = num_users -+ -+ def run(self): -+ """Start adding users""" -+ -+ dm = DirectoryManager(self.inst) -+ conn = dm.bind() -+ -+ users = UserAccounts(conn, DEFAULT_SUFFIX) -+ -+ u_range = list(range(self.num_users)) -+ random.shuffle(u_range) -+ -+ for idx in u_range: -+ try: -+ users.create(properties={ -+ 'uid': 'testuser%s' % idx, -+ 'cn' : 'testuser%s' % idx, -+ 'sn' : 'user%s' % idx, -+ 'uidNumber' : '%s' % (1000 + idx), -+ 'gidNumber' : '%s' % (1000 + idx), -+ 'homeDirectory' : '/home/testuser%s' % idx -+ }) -+ # One of the suppliers was probably put into read only mode - just break out -+ except ldap.UNWILLING_TO_PERFORM: -+ break -+ except ldap.ALREADY_EXISTS: -+ pass -+ conn.close() -+ -+def remove_some_supplier4_agmts(msg, topology_m4): -+ """Remove all the repl agmts to supplier4 except from supplier3. Used by -+ the force tests.""" -+ -+ log.info('%s: remove the agreements to supplier 4...' % msg) -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ # This will delete m4 from the topo *and* remove all incoming agreements -+ # to m4. -+ repl.remove_supplier(topology_m4.ms["supplier4"], -+ [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"]]) -+ -+def task_done(topology_m4, task_dn, timeout=60): -+ """Check if the task is complete""" -+ -+ attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', -+ 'nsTaskCurrentItem', 'nsTaskTotalItems'] -+ done = False -+ count = 0 -+ -+ while not done and count < timeout: -+ try: -+ entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) -+ if entry is not None: -+ if entry.hasAttr('nsTaskExitCode'): -+ done = True -+ break -+ else: -+ done = True -+ break -+ except ldap.NO_SUCH_OBJECT: -+ done = True -+ break -+ except ldap.LDAPError: -+ break -+ time.sleep(1) -+ count += 1 -+ -+ return done -+ -+def check_ruvs(msg, topology_m4, m4rid): -+ """Check suppliers 1-3 for supplier 4's rid.""" -+ for inst in (topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]): -+ clean = False -+ replicas = Replicas(inst) -+ replica = replicas.get(DEFAULT_SUFFIX) -+ log.info('check_ruvs for replica %s:%s (suffix:rid)' % (replica.get_suffix(), replica.get_rid())) -+ -+ count = 0 -+ while not clean and count < 20: -+ ruv = replica.get_ruv() -+ if m4rid in ruv._rids: -+ time.sleep(5) -+ count = count + 1 -+ else: -+ clean = True -+ if not clean: -+ raise Exception("Supplier %s was not cleaned in time." % inst.serverid) -+ return True -+ -+def test_clean_force(topology_m4): -+ """Check that multiple tasks with a 'force' option work properly -+ -+ :id: f8810dfe-d2d2-4dd9-ba03-5fc14896fabe -+ :setup: Replication setup with four suppliers -+ :steps: -+ 1. Stop supplier 3 -+ 2. Add a bunch of updates to supplier 4 -+ 3. Disable replication on supplier 4 -+ 4. Start supplier 3 -+ 5. Remove agreements to supplier 4 from other suppliers -+ 6. 
Run a cleanallruv task on supplier 1 with a 'force' option 'on' -+ 7. Check that everything was cleaned -+ :expectedresults: -+ 1. Supplier 3 should be stopped -+ 2. Operation should be successful -+ 3. Replication on supplier 4 should be disabled -+ 4. Supplier 3 should be started -+ 5. Agreements to supplier 4 should be removed -+ 6. Operation should be successful -+ 7. Everything should be cleaned -+ """ -+ -+ log.info('Running test_clean_force...') -+ -+ # Stop supplier 3, while we update supplier 4, so that 3 is behind the other suppliers -+ topology_m4.ms["supplier3"].stop() -+ -+ # Add a bunch of updates to supplier 4 -+ m4_add_users = AddUsers(topology_m4.ms["supplier4"], 10) -+ m4_add_users.start() -+ m4_add_users.join() -+ -+ # Remove the agreements from the other suppliers that point to supplier 4 -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ m4rid = repl.get_rid(topology_m4.ms["supplier4"]) -+ remove_some_supplier4_agmts("test_clean_force", topology_m4) -+ -+ # Start supplier 3, it should be out of sync with the other replicas... -+ topology_m4.ms["supplier3"].start() -+ -+ # Remove the agreement to replica 4 -+ replica = Replicas(topology_m4.ms["supplier3"]).get(DEFAULT_SUFFIX) -+ replica.get_agreements().get("004").delete() -+ -+ # Run the task, use "force" because supplier 3 is not in sync with the other replicas -+ # in regards to the replica 4 RUV -+ log.info('test_clean: run the cleanAllRUV task...') -+ cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -+ cruv_task.create(properties={ -+ 'replica-id': m4rid, -+ 'replica-base-dn': DEFAULT_SUFFIX, -+ 'replica-force-cleaning': 'yes' -+ }) -+ cruv_task.wait() -+ -+ # Check the other supplier's RUV for 'replica 4' -+ log.info('test_clean_force: check all the suppliers have been cleaned...') -+ clean = check_ruvs("test_clean_force", topology_m4, m4rid) -+ assert clean -+ -+ log.info('test_clean_force PASSED') -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_multiple_force_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_multiple_force_test.py -new file mode 100644 -index 000000000..0a0848bda ---- /dev/null -+++ b/dirsrvtests/tests/suites/replication/cleanallruv_multiple_force_test.py -@@ -0,0 +1,214 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2022 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. 
-+# --- END COPYRIGHT BLOCK --- -+# -+import ldap -+import logging -+import os -+import pytest -+import random -+import time -+import threading -+from lib389._constants import DEFAULT_SUFFIX -+from lib389.topologies import topology_m4 -+from lib389.tasks import CleanAllRUVTask -+from lib389.idm.directorymanager import DirectoryManager -+from lib389.idm.user import UserAccounts -+from lib389.replica import ReplicationManager, Replicas -+ -+log = logging.getLogger(__name__) -+ -+ -+class AddUsers(threading.Thread): -+ def __init__(self, inst, num_users): -+ threading.Thread.__init__(self) -+ self.daemon = True -+ self.inst = inst -+ self.num_users = num_users -+ -+ def run(self): -+ """Start adding users""" -+ -+ dm = DirectoryManager(self.inst) -+ conn = dm.bind() -+ -+ users = UserAccounts(conn, DEFAULT_SUFFIX) -+ -+ u_range = list(range(self.num_users)) -+ random.shuffle(u_range) -+ -+ for idx in u_range: -+ try: -+ users.create(properties={ -+ 'uid': 'testuser%s' % idx, -+ 'cn' : 'testuser%s' % idx, -+ 'sn' : 'user%s' % idx, -+ 'uidNumber' : '%s' % (1000 + idx), -+ 'gidNumber' : '%s' % (1000 + idx), -+ 'homeDirectory' : '/home/testuser%s' % idx -+ }) -+ # One of the suppliers was probably put into read only mode - just break out -+ except ldap.UNWILLING_TO_PERFORM: -+ break -+ except ldap.ALREADY_EXISTS: -+ pass -+ conn.close() -+ -+def remove_some_supplier4_agmts(msg, topology_m4): -+ """Remove all the repl agmts to supplier4 except from supplier3. Used by -+ the force tests.""" -+ -+ log.info('%s: remove the agreements to supplier 4...' % msg) -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ # This will delete m4 from the topo *and* remove all incoming agreements -+ # to m4. -+ repl.remove_supplier(topology_m4.ms["supplier4"], -+ [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"]]) -+ -+def task_done(topology_m4, task_dn, timeout=60): -+ """Check if the task is complete""" -+ -+ attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', -+ 'nsTaskCurrentItem', 'nsTaskTotalItems'] -+ done = False -+ count = 0 -+ -+ while not done and count < timeout: -+ try: -+ entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) -+ if entry is not None: -+ if entry.hasAttr('nsTaskExitCode'): -+ done = True -+ break -+ else: -+ done = True -+ break -+ except ldap.NO_SUCH_OBJECT: -+ done = True -+ break -+ except ldap.LDAPError: -+ break -+ time.sleep(1) -+ count += 1 -+ -+ return done -+ -+def check_ruvs(msg, topology_m4, m4rid): -+ """Check suppliers 1-3 for supplier 4's rid.""" -+ for inst in (topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]): -+ clean = False -+ replicas = Replicas(inst) -+ replica = replicas.get(DEFAULT_SUFFIX) -+ log.info('check_ruvs for replica %s:%s (suffix:rid)' % (replica.get_suffix(), replica.get_rid())) -+ -+ count = 0 -+ while not clean and count < 20: -+ ruv = replica.get_ruv() -+ if m4rid in ruv._rids: -+ time.sleep(5) -+ count = count + 1 -+ else: -+ clean = True -+ if not clean: -+ raise Exception("Supplier %s was not cleaned in time." % inst.serverid) -+ return True -+ -+ -+def test_multiple_tasks_with_force(topology_m4): -+ """Check that multiple tasks with a 'force' option work properly -+ -+ :id: eb76a93d-8d1c-405e-9f25-6e8d5a781098 -+ :setup: Replication setup with four suppliers -+ :steps: -+ 1. Stop supplier 3 -+ 2. Add a bunch of updates to supplier 4 -+ 3. Disable replication on supplier 4 -+ 4. Start supplier 3 -+ 5. Remove agreements to supplier 4 from other suppliers -+ 6. 
Run a cleanallruv task on supplier 1 with a 'force' option 'on' -+ 7. Run one more cleanallruv task on supplier 1 with a 'force' option 'off' -+ 8. Check that everything was cleaned -+ :expectedresults: -+ 1. Supplier 3 should be stopped -+ 2. Operation should be successful -+ 3. Replication on supplier 4 should be disabled -+ 4. Supplier 3 should be started -+ 5. Agreements to supplier 4 should be removed -+ 6. Operation should be successful -+ 7. Operation should be successful -+ 8. Everything should be cleaned -+ """ -+ -+ log.info('Running test_multiple_tasks_with_force...') -+ -+ # Stop supplier 3, while we update supplier 4, so that 3 is behind the other suppliers -+ topology_m4.ms["supplier3"].stop() -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ m4rid = repl.get_rid(topology_m4.ms["supplier4"]) -+ -+ # Add a bunch of updates to supplier 4 -+ m4_add_users = AddUsers(topology_m4.ms["supplier4"], 10) -+ m4_add_users.start() -+ m4_add_users.join() -+ -+ # Disable supplier 4 -+ # Remove the agreements from the other suppliers that point to supplier 4 -+ remove_some_supplier4_agmts("test_multiple_tasks_with_force", topology_m4) -+ -+ # Start supplier 3, it should be out of sync with the other replicas... -+ topology_m4.ms["supplier3"].start() -+ -+ # Remove the agreement to replica 4 -+ replica = Replicas(topology_m4.ms["supplier3"]).get(DEFAULT_SUFFIX) -+ replica.get_agreements().get("004").delete() -+ -+ # Run the task, use "force" because supplier 3 is not in sync with the other replicas -+ # in regards to the replica 4 RUV -+ log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" on...') -+ cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -+ cruv_task.create(properties={ -+ 'replica-id': m4rid, -+ 'replica-base-dn': DEFAULT_SUFFIX, -+ 'replica-force-cleaning': 'yes', -+ 'replica-certify-all': 'no' -+ }) -+ -+ log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" off...') -+ -+ # NOTE: This must be try not py.test raises, because the above may or may -+ # not have completed yet .... -+ try: -+ cruv_task_fail = CleanAllRUVTask(topology_m4.ms["supplier1"]) -+ cruv_task_fail.create(properties={ -+ 'replica-id': m4rid, -+ 'replica-base-dn': DEFAULT_SUFFIX, -+ 'replica-force-cleaning': 'no', -+ 'replica-certify-all': 'no' -+ }) -+ cruv_task_fail.wait() -+ except ldap.UNWILLING_TO_PERFORM: -+ pass -+ # Wait for the force task .... -+ cruv_task.wait() -+ -+ # Check the other supplier's RUV for 'replica 4' -+ log.info('test_multiple_tasks_with_force: check all the suppliers have been cleaned...') -+ clean = check_ruvs("test_clean_force", topology_m4, m4rid) -+ assert clean -+ # Check supplier 1 does not have the clean task running -+ log.info('test_abort: check supplier 1 no longer has a cleanAllRUV task...') -+ if not task_done(topology_m4, cruv_task.dn): -+ log.fatal('test_abort: CleanAllRUV task was not aborted') -+ assert False -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_restart_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_restart_test.py -new file mode 100644 -index 000000000..2e8d7e4a6 ---- /dev/null -+++ b/dirsrvtests/tests/suites/replication/cleanallruv_restart_test.py -@@ -0,0 +1,161 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2022 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). 
-+# See LICENSE for details.
-+# --- END COPYRIGHT BLOCK ---
-+#
-+import ldap
-+import logging
-+import pytest
-+import os
-+import time
-+from lib389._constants import DEFAULT_SUFFIX
-+from lib389.topologies import topology_m4
-+from lib389.tasks import CleanAllRUVTask
-+from lib389.replica import ReplicationManager, Replicas
-+
-+log = logging.getLogger(__name__)
-+
-+
-+def remove_supplier4_agmts(msg, topology_m4):
-+    """Remove all the repl agmts to supplier4. """
-+
-+    log.info('%s: remove all the agreements to supplier 4...' % msg)
-+    repl = ReplicationManager(DEFAULT_SUFFIX)
-+    # This will delete m4 from the topo *and* remove all incoming agreements
-+    # to m4.
-+    repl.remove_supplier(topology_m4.ms["supplier4"],
-+                         [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]])
-+
-+def task_done(topology_m4, task_dn, timeout=60):
-+    """Check if the task is complete"""
-+
-+    attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
-+                'nsTaskCurrentItem', 'nsTaskTotalItems']
-+    done = False
-+    count = 0
-+
-+    while not done and count < timeout:
-+        try:
-+            entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist)
-+            if entry is not None:
-+                if entry.hasAttr('nsTaskExitCode'):
-+                    done = True
-+                    break
-+            else:
-+                done = True
-+                break
-+        except ldap.NO_SUCH_OBJECT:
-+            done = True
-+            break
-+        except ldap.LDAPError:
-+            break
-+        time.sleep(1)
-+        count += 1
-+
-+    return done
-+
-+
-+def check_ruvs(msg, topology_m4, m4rid):
-+    """Check suppliers 1-3 for supplier 4's rid."""
-+    for inst in (topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]):
-+        clean = False
-+        replicas = Replicas(inst)
-+        replica = replicas.get(DEFAULT_SUFFIX)
-+        log.info('check_ruvs for replica %s:%s (suffix:rid)' % (replica.get_suffix(), replica.get_rid()))
-+
-+        count = 0
-+        while not clean and count < 20:
-+            ruv = replica.get_ruv()
-+            if m4rid in ruv._rids:
-+                time.sleep(5)
-+                count = count + 1
-+            else:
-+                clean = True
-+        if not clean:
-+            raise Exception("Supplier %s was not cleaned in time." % inst.serverid)
-+    return True
-+
-+
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
-+def test_clean_restart(topology_m4):
-+    """Check that cleanallruv task works properly after a restart
-+
-+    :id: c6233bb3-092c-4919-9ac9-80dd02cc6e02
-+    :setup: Replication setup with four suppliers
-+    :steps:
-+        1. Disable replication on supplier 4
-+        2. Remove agreements to supplier 4 from other suppliers
-+        3. Stop supplier 3
-+        4. Run a cleanallruv task on supplier 1
-+        5. Stop supplier 1
-+        6. Start supplier 3
-+        7. Make sure that no crash happened
-+        8. Start supplier 1
-+        9. Make sure that no crash happened
-+        10. Check that everything was cleaned
-+    :expectedresults:
-+        1. Operation should be successful
-+        2. Agreements to supplier 4 should be removed
-+        3. Supplier 3 should be stopped
-+        4. Cleanallruv task should be successfully executed
-+        5. Supplier 1 should be stopped
-+        6. Supplier 3 should be started
-+        7. No crash should have happened
-+        8. Supplier 1 should be started
-+        9. No crash should have happened
-+        10. Everything should be cleaned
-+    """
-+    log.info('Running test_clean_restart...')
-+
-+    # Disable supplier 4
-+    log.info('test_clean_restart: disable supplier 4...')
-+
-+    # Remove the agreements from the other suppliers that point to supplier 4
-+    repl = ReplicationManager(DEFAULT_SUFFIX)
-+    m4rid = repl.get_rid(topology_m4.ms["supplier4"])
-+    remove_supplier4_agmts("test_clean_restart", topology_m4)
-+
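Each of the split-out test modules carries its own copy of the task_done poller defined above: it re-reads the task entry under cn=tasks once per second and treats either the appearance of nsTaskExitCode or the disappearance of the entry as completion. A minimal standalone sketch of the same polling pattern, assuming only a lib389-style instance object with getEntry (the inst and task_dn names here are illustrative):

import time
import ldap

def wait_for_task(inst, task_dn, timeout=60):
    """Poll a cn=tasks entry until it finishes, vanishes, or times out."""
    for _ in range(timeout):
        try:
            entry = inst.getEntry(task_dn, attrlist=['nsTaskExitCode'])
        except ldap.NO_SUCH_OBJECT:
            return True   # the finished task entry was already reaped
        except ldap.LDAPError:
            return False  # unexpected search failure, give up
        if entry is None or entry.hasAttr('nsTaskExitCode'):
            return True   # an exit code means the task completed
        time.sleep(1)     # still running, poll again in a second
    return False

A caller would then assert wait_for_task(supplier1, cruv_task.dn), exactly as these files assert task_done(topology_m4, cruv_task.dn).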
-+ topology_m4.ms["supplier3"].stop() -+ -+ # Run the task -+ log.info('test_clean: run the cleanAllRUV task...') -+ cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -+ cruv_task.create(properties={ -+ 'replica-id': m4rid, -+ 'replica-base-dn': DEFAULT_SUFFIX, -+ 'replica-force-cleaning': 'no', -+ 'replica-certify-all': 'yes' -+ }) -+ -+ # Sleep a bit, then stop supplier 1 -+ time.sleep(5) -+ topology_m4.ms["supplier1"].stop() -+ -+ # Now start supplier 3 & 1, and make sure we didn't crash -+ topology_m4.ms["supplier3"].start() -+ if topology_m4.ms["supplier3"].detectDisorderlyShutdown(): -+ log.fatal('test_clean_restart: Supplier 3 previously crashed!') -+ assert False -+ -+ topology_m4.ms["supplier1"].start(timeout=30) -+ if topology_m4.ms["supplier1"].detectDisorderlyShutdown(): -+ log.fatal('test_clean_restart: Supplier 1 previously crashed!') -+ assert False -+ -+ # Check the other supplier's RUV for 'replica 4' -+ log.info('test_clean_restart: check all the suppliers have been cleaned...') -+ clean = check_ruvs("test_clean_restart", topology_m4, m4rid) -+ assert clean -+ -+ log.info('test_clean_restart PASSED, restoring supplier 4...') -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py -new file mode 100644 -index 000000000..b4b74e339 ---- /dev/null -+++ b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py -@@ -0,0 +1,123 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2022 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import logging -+import pytest -+import os -+import time -+from lib389._constants import DEFAULT_SUFFIX -+from lib389.topologies import topology_m4 -+from lib389.tasks import CleanAllRUVTask -+from lib389.replica import ReplicationManager, Replicas -+from lib389.config import CertmapLegacy -+from lib389.idm.services import ServiceAccounts -+ -+log = logging.getLogger(__name__) -+ -+ -+def test_clean_shutdown_crash(topology_m2): -+ """Check that server didn't crash after shutdown when running CleanAllRUV task -+ -+ :id: c34d0b40-3c3e-4f53-8656-5e4c2a310aaf -+ :setup: Replication setup with two suppliers -+ :steps: -+ 1. Enable TLS on both suppliers -+ 2. Reconfigure both agreements to use TLS Client auth -+ 3. Stop supplier2 -+ 4. Run the CleanAllRUV task -+ 5. Restart supplier1 -+ 6. Check if supplier1 didn't crash -+ 7. Restart supplier1 again -+ 8. Check if supplier1 didn't crash -+ -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ 5. Success -+ 6. Success -+ 7. Success -+ 8. 
Success -+ """ -+ -+ m1 = topology_m2.ms["supplier1"] -+ m2 = topology_m2.ms["supplier2"] -+ -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ -+ cm_m1 = CertmapLegacy(m1) -+ cm_m2 = CertmapLegacy(m2) -+ -+ certmaps = cm_m1.list() -+ certmaps['default']['DNComps'] = None -+ certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' -+ -+ cm_m1.set(certmaps) -+ cm_m2.set(certmaps) -+ -+ log.info('Enabling TLS') -+ [i.enable_tls() for i in topology_m2] -+ -+ log.info('Creating replication dns') -+ services = ServiceAccounts(m1, DEFAULT_SUFFIX) -+ repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) -+ repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) -+ -+ repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) -+ repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) -+ -+ log.info('Changing auth type') -+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) -+ agmt_m1 = replica_m1.get_agreements().list()[0] -+ agmt_m1.replace_many( -+ ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), -+ ('nsDS5ReplicaTransportInfo', 'SSL'), -+ ('nsDS5ReplicaPort', '%s' % m2.sslport), -+ ) -+ -+ agmt_m1.remove_all('nsDS5ReplicaBindDN') -+ -+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) -+ agmt_m2 = replica_m2.get_agreements().list()[0] -+ -+ agmt_m2.replace_many( -+ ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), -+ ('nsDS5ReplicaTransportInfo', 'SSL'), -+ ('nsDS5ReplicaPort', '%s' % m1.sslport), -+ ) -+ agmt_m2.remove_all('nsDS5ReplicaBindDN') -+ -+ log.info('Stopping supplier2') -+ m2.stop() -+ -+ log.info('Run the cleanAllRUV task') -+ cruv_task = CleanAllRUVTask(m1) -+ cruv_task.create(properties={ -+ 'replica-id': repl.get_rid(m1), -+ 'replica-base-dn': DEFAULT_SUFFIX, -+ 'replica-force-cleaning': 'no', -+ 'replica-certify-all': 'yes' -+ }) -+ -+ m1.restart() -+ -+ log.info('Check if supplier1 crashed') -+ assert not m1.detectDisorderlyShutdown() -+ -+ log.info('Repeat') -+ m1.restart() -+ assert not m1.detectDisorderlyShutdown() -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_stress_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_stress_test.py -new file mode 100644 -index 000000000..0d43dd7d4 ---- /dev/null -+++ b/dirsrvtests/tests/suites/replication/cleanallruv_stress_test.py -@@ -0,0 +1,216 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2022 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. 
-+# --- END COPYRIGHT BLOCK --- -+# -+import ldap -+import logging -+import pytest -+import os -+import random -+import time -+import threading -+from lib389._constants import DEFAULT_SUFFIX -+from lib389.topologies import topology_m4 -+from lib389.tasks import CleanAllRUVTask -+from lib389.idm.directorymanager import DirectoryManager -+from lib389.idm.user import UserAccounts -+from lib389.replica import ReplicationManager, Replicas -+from lib389.config import LDBMConfig -+ -+log = logging.getLogger(__name__) -+ -+ -+class AddUsers(threading.Thread): -+ def __init__(self, inst, num_users): -+ threading.Thread.__init__(self) -+ self.daemon = True -+ self.inst = inst -+ self.num_users = num_users -+ -+ def run(self): -+ """Start adding users""" -+ -+ dm = DirectoryManager(self.inst) -+ conn = dm.bind() -+ -+ users = UserAccounts(conn, DEFAULT_SUFFIX) -+ -+ u_range = list(range(self.num_users)) -+ random.shuffle(u_range) -+ -+ for idx in u_range: -+ try: -+ users.create(properties={ -+ 'uid': 'testuser%s' % idx, -+ 'cn' : 'testuser%s' % idx, -+ 'sn' : 'user%s' % idx, -+ 'uidNumber' : '%s' % (1000 + idx), -+ 'gidNumber' : '%s' % (1000 + idx), -+ 'homeDirectory' : '/home/testuser%s' % idx -+ }) -+ # One of the suppliers was probably put into read only mode - just break out -+ except ldap.UNWILLING_TO_PERFORM: -+ break -+ except ldap.ALREADY_EXISTS: -+ pass -+ conn.close() -+ -+def remove_supplier4_agmts(msg, topology_m4): -+ """Remove all the repl agmts to supplier4. """ -+ -+ log.info('%s: remove all the agreements to supplier 4...' % msg) -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ # This will delete m4 from the topo *and* remove all incoming agreements -+ # to m4. -+ repl.remove_supplier(topology_m4.ms["supplier4"], -+ [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]]) -+ -+def task_done(topology_m4, task_dn, timeout=60): -+ """Check if the task is complete""" -+ -+ attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', -+ 'nsTaskCurrentItem', 'nsTaskTotalItems'] -+ done = False -+ count = 0 -+ -+ while not done and count < timeout: -+ try: -+ entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) -+ if entry is not None: -+ if entry.hasAttr('nsTaskExitCode'): -+ done = True -+ break -+ else: -+ done = True -+ break -+ except ldap.NO_SUCH_OBJECT: -+ done = True -+ break -+ except ldap.LDAPError: -+ break -+ time.sleep(1) -+ count += 1 -+ -+ return done -+ -+def check_ruvs(msg, topology_m4, m4rid): -+ """Check suppliers 1-3 for supplier 4's rid.""" -+ for inst in (topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]): -+ clean = False -+ replicas = Replicas(inst) -+ replica = replicas.get(DEFAULT_SUFFIX) -+ log.info('check_ruvs for replica %s:%s (suffix:rid)' % (replica.get_suffix(), replica.get_rid())) -+ -+ count = 0 -+ while not clean and count < 20: -+ ruv = replica.get_ruv() -+ if m4rid in ruv._rids: -+ time.sleep(5) -+ count = count + 1 -+ else: -+ clean = True -+ if not clean: -+ raise Exception("Supplier %s was not cleaned in time." % inst.serverid) -+ return True -+ -+ -+@pytest.mark.flaky(max_runs=2, min_passes=1) -+def test_stress_clean(topology_m4): -+ """Put each server(m1 - m4) under a stress, and perform the entire clean process -+ -+ :id: a8263cd6-f068-4357-86e0-e7c34504c8c5 -+ :setup: Replication setup with four suppliers -+ :steps: -+ 1. Add a bunch of updates to all suppliers -+ 2. Put supplier 4 to read-only mode -+ 3. Disable replication on supplier 4 -+ 4. 
Remove agreements to supplier 4 from other suppliers
-+        5. Run a cleanallruv task on supplier 1
-+        6. Check that everything was cleaned
-+    :expectedresults:
-+        1. Operation should be successful
-+        2. Supplier 4 should be put to read-only mode
-+        3. Replication on supplier 4 should be disabled
-+        4. Agreements to supplier 4 should be removed
-+        5. Operation should be successful
-+        6. Everything should be cleaned
-+    """
-+
-+    log.info('Running test_stress_clean...')
-+    log.info('test_stress_clean: put all the suppliers under load...')
-+
-+    ldbm_config = LDBMConfig(topology_m4.ms["supplier4"])
-+
-+    # Put all the suppliers under load
-+    # not too high a load, else it takes a long time to converge and
-+    # the test result becomes unstable
-+    m1_add_users = AddUsers(topology_m4.ms["supplier1"], 200)
-+    m1_add_users.start()
-+    m2_add_users = AddUsers(topology_m4.ms["supplier2"], 200)
-+    m2_add_users.start()
-+    m3_add_users = AddUsers(topology_m4.ms["supplier3"], 200)
-+    m3_add_users.start()
-+    m4_add_users = AddUsers(topology_m4.ms["supplier4"], 200)
-+    m4_add_users.start()
-+
-+    # Allow some time to get replication flowing in all directions
-+    log.info('test_stress_clean: allow some time for replication to get flowing...')
-+    time.sleep(5)
-+
-+    # Put supplier 4 into read only mode
-+    ldbm_config.set('nsslapd-readonly', 'on')
-+    # We need to wait for supplier 4 to push its changes out
-+    log.info('test_stress_clean: allow some time for supplier 4 to push changes out (60 seconds)...')
-+    time.sleep(60)
-+
-+    # Remove the agreements from the other suppliers that point to supplier 4
-+    repl = ReplicationManager(DEFAULT_SUFFIX)
-+    m4rid = repl.get_rid(topology_m4.ms["supplier4"])
-+    remove_supplier4_agmts("test_stress_clean", topology_m4)
-+
-+    # Run the task
-+    cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"])
-+    cruv_task.create(properties={
-+        'replica-id': m4rid,
-+        'replica-base-dn': DEFAULT_SUFFIX,
-+        'replica-force-cleaning': 'no'
-+    })
-+    cruv_task.wait()
-+
-+    # Wait for the updates to finish
-+    log.info('test_stress_clean: wait for all the updates to finish...')
-+    m1_add_users.join()
-+    m2_add_users.join()
-+    m3_add_users.join()
-+    m4_add_users.join()
-+
-+    # Check the other suppliers' RUVs for 'replica 4'
-+    log.info('test_stress_clean: check if all the replicas have been cleaned...')
-+    clean = check_ruvs("test_stress_clean", topology_m4, m4rid)
-+    assert clean
-+
-+    log.info('test_stress_clean: PASSED, restoring supplier 4...')
-+
-+    # Sleep for a bit to let replication complete
-+    log.info("Sleep for 120 seconds to allow replication to complete...")
-+    repl = ReplicationManager(DEFAULT_SUFFIX)
-+    repl.test_replication_topology([
-+        topology_m4.ms["supplier1"],
-+        topology_m4.ms["supplier2"],
-+        topology_m4.ms["supplier3"],
-+        ], timeout=120)
-+
-+    # Turn off readonly mode
-+    ldbm_config.set('nsslapd-readonly', 'off')
-+
-+
-+if __name__ == '__main__':
-+    # Run isolated
-+    # -s for DEBUG mode
-+    CURRENT_FILE = os.path.realpath(__file__)
-+    pytest.main(["-s", CURRENT_FILE])
-+
-diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
-index 1e9cd7c28..6d7141ada 100644
---- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
-+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
-@@ -1,27 +1,20 @@
- # --- BEGIN COPYRIGHT BLOCK ---
---# Copyright (C) 2019 Red Hat, Inc.
--+# Copyright (C) 2022 Red Hat, Inc.
- # All rights reserved.
- #
- # License: GPL (version 3 or any later version).
- # See LICENSE for details. - # --- END COPYRIGHT BLOCK --- - # --import threading - import pytest --import random - from lib389 import DirSrv - from lib389.tasks import * - from lib389.utils import * - from lib389.topologies import topology_m4, topology_m2 --from lib389._constants import * -- --from lib389.idm.directorymanager import DirectoryManager -+from lib389._constants import DEFAULT_SUFFIX - from lib389.replica import ReplicationManager, Replicas - from lib389.tasks import CleanAllRUVTask --from lib389.idm.user import UserAccounts --from lib389.config import LDBMConfig --from lib389.config import CertmapLegacy --from lib389.idm.services import ServiceAccounts -+ - - pytestmark = pytest.mark.tier1 - -@@ -29,42 +22,6 @@ logging.getLogger(__name__).setLevel(logging.DEBUG) - log = logging.getLogger(__name__) - - --class AddUsers(threading.Thread): -- def __init__(self, inst, num_users): -- threading.Thread.__init__(self) -- self.daemon = True -- self.inst = inst -- self.num_users = num_users -- -- def run(self): -- """Start adding users""" -- -- dm = DirectoryManager(self.inst) -- conn = dm.bind() -- -- users = UserAccounts(conn, DEFAULT_SUFFIX) -- -- u_range = list(range(self.num_users)) -- random.shuffle(u_range) -- -- for idx in u_range: -- try: -- users.create(properties={ -- 'uid': 'testuser%s' % idx, -- 'cn' : 'testuser%s' % idx, -- 'sn' : 'user%s' % idx, -- 'uidNumber' : '%s' % (1000 + idx), -- 'gidNumber' : '%s' % (1000 + idx), -- 'homeDirectory' : '/home/testuser%s' % idx -- }) -- # One of the suppliers was probably put into read only mode - just break out -- except ldap.UNWILLING_TO_PERFORM: -- break -- except ldap.ALREADY_EXISTS: -- pass -- conn.close() -- -- - def remove_supplier4_agmts(msg, topology_m4): - """Remove all the repl agmts to supplier4. """ - -@@ -96,92 +53,7 @@ def check_ruvs(msg, topology_m4, m4rid): - return True - - --def task_done(topology_m4, task_dn, timeout=60): -- """Check if the task is complete""" -- -- attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', -- 'nsTaskCurrentItem', 'nsTaskTotalItems'] -- done = False -- count = 0 -- -- while not done and count < timeout: -- try: -- entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) -- if entry is not None: -- if entry.hasAttr('nsTaskExitCode'): -- done = True -- break -- else: -- done = True -- break -- except ldap.NO_SUCH_OBJECT: -- done = True -- break -- except ldap.LDAPError: -- break -- time.sleep(1) -- count += 1 -- -- return done -- -- --def restore_supplier4(topology_m4): -- """In our tests will always be removing supplier 4, so we need a common -- way to restore it for another test -- """ -- -- # Restart the remaining suppliers to allow rid 4 to be reused. -- for inst in topology_m4.ms.values(): -- inst.restart() -- -- repl = ReplicationManager(DEFAULT_SUFFIX) -- repl.join_supplier(topology_m4.ms["supplier1"], topology_m4.ms["supplier4"]) -- -- # Add the 2,3 -> 4 agmt. -- repl.ensure_agreement(topology_m4.ms["supplier2"], topology_m4.ms["supplier4"]) -- repl.ensure_agreement(topology_m4.ms["supplier3"], topology_m4.ms["supplier4"]) -- # And in reverse ... 
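The restore_supplier4 helper and the m4rid fixture being removed here were shared, stateful plumbing for the whole module; in the split-out files, each test instead resolves supplier 4's rid inline before removing it. A short sketch of that inline replacement, using only calls that appear in the new tests:

from lib389._constants import DEFAULT_SUFFIX
from lib389.replica import ReplicationManager

def get_supplier4_rid(topology_m4):
    """Look up supplier 4's replica id the way each new test file does."""
    repl = ReplicationManager(DEFAULT_SUFFIX)
    # The rid must be captured *before* the supplier is removed from the topology
    return repl.get_rid(topology_m4.ms["supplier4"])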
-- repl.ensure_agreement(topology_m4.ms["supplier4"], topology_m4.ms["supplier2"]) -- repl.ensure_agreement(topology_m4.ms["supplier4"], topology_m4.ms["supplier3"]) -- -- log.info('Supplier 4 has been successfully restored.') -- -- --@pytest.fixture() --def m4rid(request, topology_m4): -- log.debug("Wait a bit before the reset - it is required for the slow machines") -- time.sleep(5) -- log.debug("-------------- BEGIN RESET of m4 -----------------") -- repl = ReplicationManager(DEFAULT_SUFFIX) -- repl.test_replication_topology(topology_m4.ms.values()) -- # What is supplier4's rid? -- m4rid = repl.get_rid(topology_m4.ms["supplier4"]) -- -- def fin(): -- try: -- # Restart the suppliers and rerun cleanallruv -- for inst in topology_m4.ms.values(): -- inst.restart() -- -- cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -- cruv_task.create(properties={ -- 'replica-id': m4rid, -- 'replica-base-dn': DEFAULT_SUFFIX, -- 'replica-force-cleaning': 'no', -- }) -- cruv_task.wait() -- except ldap.UNWILLING_TO_PERFORM: -- # In some casse we already cleaned rid4, so if we fail, it's okay -- pass -- restore_supplier4(topology_m4) -- # Make sure everything works. -- repl.test_replication_topology(topology_m4.ms.values()) -- request.addfinalizer(fin) -- log.debug("-------------- FINISH RESET of m4 -----------------") -- return m4rid -- -- --def test_clean(topology_m4, m4rid): -+def test_clean(topology_m4): - """Check that cleanallruv task works properly - - :id: e9b3ce5c-e17c-409e-aafc-e97d630f2878 -@@ -204,6 +76,8 @@ def test_clean(topology_m4, m4rid): - # Disable supplier 4 - # Remove the agreements from the other suppliers that point to supplier 4 - log.info('test_clean: disable supplier 4...') -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ m4rid = repl.get_rid(topology_m4.ms["supplier4"]) - remove_supplier4_agmts("test_clean", topology_m4) - - # Run the task -@@ -221,610 +95,6 @@ def test_clean(topology_m4, m4rid): - clean = check_ruvs("test_clean", topology_m4, m4rid) - assert clean - -- log.info('test_clean PASSED, restoring supplier 4...') -- --@pytest.mark.flaky(max_runs=2, min_passes=1) --def test_clean_restart(topology_m4, m4rid): -- """Check that cleanallruv task works properly after a restart -- -- :id: c6233bb3-092c-4919-9ac9-80dd02cc6e02 -- :setup: Replication setup with four suppliers -- :steps: -- 1. Disable replication on supplier 4 -- 2. Remove agreements to supplier 4 from other suppliers -- 3. Stop supplier 3 -- 4. Run a cleanallruv task on supplier 1 -- 5. Stop supplier 1 -- 6. Start supplier 3 -- 7. Make sure that no crash happened -- 8. Start supplier 1 -- 9. Make sure that no crash happened -- 10. Check that everything was cleaned -- :expectedresults: -- 1. Operation should be successful -- 2. Agreements to supplier 4 should be removed -- 3. Supplier 3 should be stopped -- 4. Cleanallruv task should be successfully executed -- 5. Supplier 1 should be stopped -- 6. Supplier 3 should be started -- 7. No crash should happened -- 8. Supplier 1 should be started -- 9. No crash should happened -- 10. Everything should be cleaned -- """ -- log.info('Running test_clean_restart...') -- -- # Disable supplier 4 -- log.info('test_clean: disable supplier 4...') -- # Remove the agreements from the other suppliers that point to supplier 4 -- remove_supplier4_agmts("test_clean", topology_m4) -- -- # Stop supplier 3 to keep the task running, so we can stop supplier 1... 
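Every variant of these tests drives the server-side task through the same lib389 surface: CleanAllRUVTask.create() with the replica-id and replica-base-dn properties plus the two cleaning knobs, then wait() (or abort()). A condensed sketch of that invocation; the defaults and the comments simply mirror how the surrounding tests use each property:

from lib389._constants import DEFAULT_SUFFIX
from lib389.tasks import CleanAllRUVTask

def run_cleanallruv(supplier, rid, force='no', certify='yes'):
    """Create a cleanAllRUV task on one supplier and wait for it to finish."""
    task = CleanAllRUVTask(supplier)
    task.create(properties={
        'replica-id': rid,                  # rid of the supplier being removed
        'replica-base-dn': DEFAULT_SUFFIX,  # replicated suffix to clean
        'replica-force-cleaning': force,    # 'yes': clean even when replicas are out of sync
        'replica-certify-all': certify,     # 'yes': wait until every replica confirms
    })
    task.wait()
    return task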
-- topology_m4.ms["supplier3"].stop() -- -- # Run the task -- log.info('test_clean: run the cleanAllRUV task...') -- cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -- cruv_task.create(properties={ -- 'replica-id': m4rid, -- 'replica-base-dn': DEFAULT_SUFFIX, -- 'replica-force-cleaning': 'no', -- 'replica-certify-all': 'yes' -- }) -- -- # Sleep a bit, then stop supplier 1 -- time.sleep(5) -- topology_m4.ms["supplier1"].stop() -- -- # Now start supplier 3 & 1, and make sure we didn't crash -- topology_m4.ms["supplier3"].start() -- if topology_m4.ms["supplier3"].detectDisorderlyShutdown(): -- log.fatal('test_clean_restart: Supplier 3 previously crashed!') -- assert False -- -- topology_m4.ms["supplier1"].start(timeout=30) -- if topology_m4.ms["supplier1"].detectDisorderlyShutdown(): -- log.fatal('test_clean_restart: Supplier 1 previously crashed!') -- assert False -- -- # Check the other supplier's RUV for 'replica 4' -- log.info('test_clean_restart: check all the suppliers have been cleaned...') -- clean = check_ruvs("test_clean_restart", topology_m4, m4rid) -- assert clean -- -- log.info('test_clean_restart PASSED, restoring supplier 4...') -- -- --@pytest.mark.flaky(max_runs=2, min_passes=1) --def test_clean_force(topology_m4, m4rid): -- """Check that multiple tasks with a 'force' option work properly -- -- :id: f8810dfe-d2d2-4dd9-ba03-5fc14896fabe -- :setup: Replication setup with four suppliers -- :steps: -- 1. Stop supplier 3 -- 2. Add a bunch of updates to supplier 4 -- 3. Disable replication on supplier 4 -- 4. Start supplier 3 -- 5. Remove agreements to supplier 4 from other suppliers -- 6. Run a cleanallruv task on supplier 1 with a 'force' option 'on' -- 7. Check that everything was cleaned -- :expectedresults: -- 1. Supplier 3 should be stopped -- 2. Operation should be successful -- 3. Replication on supplier 4 should be disabled -- 4. Supplier 3 should be started -- 5. Agreements to supplier 4 should be removed -- 6. Operation should be successful -- 7. Everything should be cleaned -- """ -- -- log.info('Running test_clean_force...') -- -- # Stop supplier 3, while we update supplier 4, so that 3 is behind the other suppliers -- topology_m4.ms["supplier3"].stop() -- -- # Add a bunch of updates to supplier 4 -- m4_add_users = AddUsers(topology_m4.ms["supplier4"], 1500) -- m4_add_users.start() -- m4_add_users.join() -- -- # Start supplier 3, it should be out of sync with the other replicas... 
-- topology_m4.ms["supplier3"].start() -- -- # Remove the agreements from the other suppliers that point to supplier 4 -- remove_supplier4_agmts("test_clean_force", topology_m4) -- -- # Run the task, use "force" because supplier 3 is not in sync with the other replicas -- # in regards to the replica 4 RUV -- log.info('test_clean: run the cleanAllRUV task...') -- cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -- cruv_task.create(properties={ -- 'replica-id': m4rid, -- 'replica-base-dn': DEFAULT_SUFFIX, -- 'replica-force-cleaning': 'yes' -- }) -- cruv_task.wait() -- -- # Check the other supplier's RUV for 'replica 4' -- log.info('test_clean_force: check all the suppliers have been cleaned...') -- clean = check_ruvs("test_clean_force", topology_m4, m4rid) -- assert clean -- -- log.info('test_clean_force PASSED, restoring supplier 4...') -- -- --@pytest.mark.flaky(max_runs=2, min_passes=1) --def test_abort(topology_m4, m4rid): -- """Test the abort task basic functionality -- -- :id: b09a6887-8de0-4fac-8e41-73ccbaaf7a08 -- :setup: Replication setup with four suppliers -- :steps: -- 1. Disable replication on supplier 4 -- 2. Remove agreements to supplier 4 from other suppliers -- 3. Stop supplier 2 -- 4. Run a cleanallruv task on supplier 1 -- 5. Run a cleanallruv abort task on supplier 1 -- :expectedresults: No hanging tasks left -- 1. Replication on supplier 4 should be disabled -- 2. Agreements to supplier 4 should be removed -- 3. Supplier 2 should be stopped -- 4. Operation should be successful -- 5. Operation should be successful -- """ -- -- log.info('Running test_abort...') -- # Remove the agreements from the other suppliers that point to supplier 4 -- remove_supplier4_agmts("test_abort", topology_m4) -- -- # Stop supplier 2 -- log.info('test_abort: stop supplier 2 to freeze the cleanAllRUV task...') -- topology_m4.ms["supplier2"].stop() -- -- # Run the task -- log.info('test_abort: add the cleanAllRUV task...') -- cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -- cruv_task.create(properties={ -- 'replica-id': m4rid, -- 'replica-base-dn': DEFAULT_SUFFIX, -- 'replica-force-cleaning': 'no', -- 'replica-certify-all': 'yes' -- }) -- # Wait a bit -- time.sleep(2) -- -- # Abort the task -- cruv_task.abort() -- -- # Check supplier 1 does not have the clean task running -- log.info('test_abort: check supplier 1 no longer has a cleanAllRUV task...') -- if not task_done(topology_m4, cruv_task.dn): -- log.fatal('test_abort: CleanAllRUV task was not aborted') -- assert False -- -- # Start supplier 2 -- log.info('test_abort: start supplier 2 to begin the restore process...') -- topology_m4.ms["supplier2"].start() -- -- log.info('test_abort PASSED, restoring supplier 4...') -- -- --@pytest.mark.flaky(max_runs=2, min_passes=1) --def test_abort_restart(topology_m4, m4rid): -- """Test the abort task can handle a restart, and then resume -- -- :id: b66e33d4-fe85-4e1c-b882-75da80f70ab3 -- :setup: Replication setup with four suppliers -- :steps: -- 1. Disable replication on supplier 4 -- 2. Remove agreements to supplier 4 from other suppliers -- 3. Stop supplier 3 -- 4. Run a cleanallruv task on supplier 1 -- 5. Run a cleanallruv abort task on supplier 1 -- 6. Restart supplier 1 -- 7. Make sure that no crash happened -- 8. Start supplier 3 -- 9. Check supplier 1 does not have the clean task running -- 10. Check that errors log doesn't have 'Aborting abort task' message -- :expectedresults: -- 1. Replication on supplier 4 should be disabled -- 2. 
Agreements to supplier 4 should be removed -- 3. Supplier 3 should be stopped -- 4. Operation should be successful -- 5. Operation should be successful -- 6. Supplier 1 should be restarted -- 7. No crash should happened -- 8. Supplier 3 should be started -- 9. Check supplier 1 shouldn't have the clean task running -- 10. Errors log shouldn't have 'Aborting abort task' message -- """ -- -- log.info('Running test_abort_restart...') -- # Remove the agreements from the other suppliers that point to supplier 4 -- remove_supplier4_agmts("test_abort", topology_m4) -- -- # Stop supplier 3 -- log.info('test_abort_restart: stop supplier 3 to freeze the cleanAllRUV task...') -- topology_m4.ms["supplier3"].stop() -- -- # Run the task -- log.info('test_abort_restart: add the cleanAllRUV task...') -- cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -- cruv_task.create(properties={ -- 'replica-id': m4rid, -- 'replica-base-dn': DEFAULT_SUFFIX, -- 'replica-force-cleaning': 'no', -- 'replica-certify-all': 'yes' -- }) -- # Wait a bit -- time.sleep(2) -- -- # Abort the task -- cruv_task.abort(certify=True) -- -- # Check supplier 1 does not have the clean task running -- log.info('test_abort_abort: check supplier 1 no longer has a cleanAllRUV task...') -- if not task_done(topology_m4, cruv_task.dn): -- log.fatal('test_abort_restart: CleanAllRUV task was not aborted') -- assert False -- -- # Now restart supplier 1, and make sure the abort process completes -- topology_m4.ms["supplier1"].restart() -- if topology_m4.ms["supplier1"].detectDisorderlyShutdown(): -- log.fatal('test_abort_restart: Supplier 1 previously crashed!') -- assert False -- -- # Start supplier 3 -- topology_m4.ms["supplier3"].start() -- -- # Need to wait 5 seconds before server processes any leftover tasks -- time.sleep(6) -- -- # Check supplier 1 tried to run abort task. We expect the abort task to be aborted. -- if not topology_m4.ms["supplier1"].searchErrorsLog('Aborting abort task'): -- log.fatal('test_abort_restart: Abort task did not restart') -- assert False -- -- log.info('test_abort_restart PASSED, restoring supplier 4...') -- -- --@pytest.mark.flaky(max_runs=2, min_passes=1) --def test_abort_certify(topology_m4, m4rid): -- """Test the abort task with a replica-certify-all option -- -- :id: 78959966-d644-44a8-b98c-1fcf21b45eb0 -- :setup: Replication setup with four suppliers -- :steps: -- 1. Disable replication on supplier 4 -- 2. Remove agreements to supplier 4 from other suppliers -- 3. Stop supplier 2 -- 4. Run a cleanallruv task on supplier 1 -- 5. Run a cleanallruv abort task on supplier 1 with a replica-certify-all option -- :expectedresults: No hanging tasks left -- 1. Replication on supplier 4 should be disabled -- 2. Agreements to supplier 4 should be removed -- 3. Supplier 2 should be stopped -- 4. Operation should be successful -- 5. 
Operation should be successful -- """ -- -- log.info('Running test_abort_certify...') -- -- # Remove the agreements from the other suppliers that point to supplier 4 -- remove_supplier4_agmts("test_abort_certify", topology_m4) -- -- # Stop supplier 2 -- log.info('test_abort_certify: stop supplier 2 to freeze the cleanAllRUV task...') -- topology_m4.ms["supplier2"].stop() -- -- # Run the task -- log.info('test_abort_certify: add the cleanAllRUV task...') -- cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -- cruv_task.create(properties={ -- 'replica-id': m4rid, -- 'replica-base-dn': DEFAULT_SUFFIX, -- 'replica-force-cleaning': 'no', -- 'replica-certify-all': 'yes' -- }) -- # Wait a bit -- time.sleep(2) -- -- # Abort the task -- log.info('test_abort_certify: abort the cleanAllRUV task...') -- abort_task = cruv_task.abort(certify=True) -- -- # Wait a while and make sure the abort task is still running -- log.info('test_abort_certify...') -- -- if task_done(topology_m4, abort_task.dn, 10): -- log.fatal('test_abort_certify: abort task incorrectly finished') -- assert False -- -- # Now start supplier 2 so it can be aborted -- log.info('test_abort_certify: start supplier 2 to allow the abort task to finish...') -- topology_m4.ms["supplier2"].start() -- -- # Wait for the abort task to stop -- if not task_done(topology_m4, abort_task.dn, 90): -- log.fatal('test_abort_certify: The abort CleanAllRUV task was not aborted') -- assert False -- -- # Check supplier 1 does not have the clean task running -- log.info('test_abort_certify: check supplier 1 no longer has a cleanAllRUV task...') -- if not task_done(topology_m4, cruv_task.dn): -- log.fatal('test_abort_certify: CleanAllRUV task was not aborted') -- assert False -- -- log.info('test_abort_certify PASSED, restoring supplier 4...') -- -- --@pytest.mark.flaky(max_runs=2, min_passes=1) --def test_stress_clean(topology_m4, m4rid): -- """Put each server(m1 - m4) under a stress, and perform the entire clean process -- -- :id: a8263cd6-f068-4357-86e0-e7c34504c8c5 -- :setup: Replication setup with four suppliers -- :steps: -- 1. Add a bunch of updates to all suppliers -- 2. Put supplier 4 to read-only mode -- 3. Disable replication on supplier 4 -- 4. Remove agreements to supplier 4 from other suppliers -- 5. Run a cleanallruv task on supplier 1 -- 6. Check that everything was cleaned -- :expectedresults: -- 1. Operation should be successful -- 2. Supplier 4 should be put to read-only mode -- 3. Replication on supplier 4 should be disabled -- 4. Agreements to supplier 4 should be removed -- 5. Operation should be successful -- 6. 
Everything should be cleaned -- """ -- -- log.info('Running test_stress_clean...') -- log.info('test_stress_clean: put all the suppliers under load...') -- -- ldbm_config = LDBMConfig(topology_m4.ms["supplier4"]) -- -- # not too high load else it takes a long time to converge and -- # the test result becomes instable -- m1_add_users = AddUsers(topology_m4.ms["supplier1"], 500) -- m1_add_users.start() -- m2_add_users = AddUsers(topology_m4.ms["supplier2"], 500) -- m2_add_users.start() -- m3_add_users = AddUsers(topology_m4.ms["supplier3"], 500) -- m3_add_users.start() -- m4_add_users = AddUsers(topology_m4.ms["supplier4"], 500) -- m4_add_users.start() -- -- # Allow sometime to get replication flowing in all directions -- log.info('test_stress_clean: allow some time for replication to get flowing...') -- time.sleep(5) -- -- # Put supplier 4 into read only mode -- ldbm_config.set('nsslapd-readonly', 'on') -- # We need to wait for supplier 4 to push its changes out -- log.info('test_stress_clean: allow some time for supplier 4 to push changes out (60 seconds)...') -- time.sleep(30) -- -- # Remove the agreements from the other suppliers that point to supplier 4 -- remove_supplier4_agmts("test_stress_clean", topology_m4) -- -- # Run the task -- cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -- cruv_task.create(properties={ -- 'replica-id': m4rid, -- 'replica-base-dn': DEFAULT_SUFFIX, -- 'replica-force-cleaning': 'no' -- }) -- cruv_task.wait() -- -- # Wait for the update to finish -- log.info('test_stress_clean: wait for all the updates to finish...') -- m1_add_users.join() -- m2_add_users.join() -- m3_add_users.join() -- m4_add_users.join() -- -- # Check the other supplier's RUV for 'replica 4' -- log.info('test_stress_clean: check if all the replicas have been cleaned...') -- clean = check_ruvs("test_stress_clean", topology_m4, m4rid) -- assert clean -- -- log.info('test_stress_clean: PASSED, restoring supplier 4...') -- -- # Sleep for a bit to replication complete -- log.info("Sleep for 120 seconds to allow replication to complete...") -- repl = ReplicationManager(DEFAULT_SUFFIX) -- repl.test_replication_topology([ -- topology_m4.ms["supplier1"], -- topology_m4.ms["supplier2"], -- topology_m4.ms["supplier3"], -- ], timeout=120) -- -- # Turn off readonly mode -- ldbm_config.set('nsslapd-readonly', 'off') -- -- --@pytest.mark.flaky(max_runs=2, min_passes=1) --def test_multiple_tasks_with_force(topology_m4, m4rid): -- """Check that multiple tasks with a 'force' option work properly -- -- :id: eb76a93d-8d1c-405e-9f25-6e8d5a781098 -- :setup: Replication setup with four suppliers -- :steps: -- 1. Stop supplier 3 -- 2. Add a bunch of updates to supplier 4 -- 3. Disable replication on supplier 4 -- 4. Start supplier 3 -- 5. Remove agreements to supplier 4 from other suppliers -- 6. Run a cleanallruv task on supplier 1 with a 'force' option 'on' -- 7. Run one more cleanallruv task on supplier 1 with a 'force' option 'off' -- 8. Check that everything was cleaned -- :expectedresults: -- 1. Supplier 3 should be stopped -- 2. Operation should be successful -- 3. Replication on supplier 4 should be disabled -- 4. Supplier 3 should be started -- 5. Agreements to supplier 4 should be removed -- 6. Operation should be successful -- 7. Operation should be successful -- 8. 
Everything should be cleaned -- """ -- -- log.info('Running test_multiple_tasks_with_force...') -- -- # Stop supplier 3, while we update supplier 4, so that 3 is behind the other suppliers -- topology_m4.ms["supplier3"].stop() -- -- # Add a bunch of updates to supplier 4 -- m4_add_users = AddUsers(topology_m4.ms["supplier4"], 1500) -- m4_add_users.start() -- m4_add_users.join() -- -- # Start supplier 3, it should be out of sync with the other replicas... -- topology_m4.ms["supplier3"].start() -- -- # Disable supplier 4 -- # Remove the agreements from the other suppliers that point to supplier 4 -- remove_supplier4_agmts("test_multiple_tasks_with_force", topology_m4) -- -- # Run the task, use "force" because supplier 3 is not in sync with the other replicas -- # in regards to the replica 4 RUV -- log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" on...') -- cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) -- cruv_task.create(properties={ -- 'replica-id': m4rid, -- 'replica-base-dn': DEFAULT_SUFFIX, -- 'replica-force-cleaning': 'yes', -- 'replica-certify-all': 'no' -- }) -- -- log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" off...') -- -- # NOTE: This must be try not py.test raises, because the above may or may -- # not have completed yet .... -- try: -- cruv_task_fail = CleanAllRUVTask(topology_m4.ms["supplier1"]) -- cruv_task_fail.create(properties={ -- 'replica-id': m4rid, -- 'replica-base-dn': DEFAULT_SUFFIX, -- 'replica-force-cleaning': 'no', -- 'replica-certify-all': 'no' -- }) -- cruv_task_fail.wait() -- except ldap.UNWILLING_TO_PERFORM: -- pass -- # Wait for the force task .... -- cruv_task.wait() -- -- # Check the other supplier's RUV for 'replica 4' -- log.info('test_multiple_tasks_with_force: check all the suppliers have been cleaned...') -- clean = check_ruvs("test_clean_force", topology_m4, m4rid) -- assert clean -- # Check supplier 1 does not have the clean task running -- log.info('test_abort: check supplier 1 no longer has a cleanAllRUV task...') -- if not task_done(topology_m4, cruv_task.dn): -- log.fatal('test_abort: CleanAllRUV task was not aborted') -- assert False -- -- --@pytest.mark.bz1466441 --@pytest.mark.ds50370 --def test_clean_shutdown_crash(topology_m2): -- """Check that server didn't crash after shutdown when running CleanAllRUV task -- -- :id: c34d0b40-3c3e-4f53-8656-5e4c2a310aaf -- :setup: Replication setup with two suppliers -- :steps: -- 1. Enable TLS on both suppliers -- 2. Reconfigure both agreements to use TLS Client auth -- 3. Stop supplier2 -- 4. Run the CleanAllRUV task -- 5. Restart supplier1 -- 6. Check if supplier1 didn't crash -- 7. Restart supplier1 again -- 8. Check if supplier1 didn't crash -- -- :expectedresults: -- 1. Success -- 2. Success -- 3. Success -- 4. Success -- 5. Success -- 6. Success -- 7. Success -- 8. 
Success -- """ -- -- m1 = topology_m2.ms["supplier1"] -- m2 = topology_m2.ms["supplier2"] -- -- repl = ReplicationManager(DEFAULT_SUFFIX) -- -- cm_m1 = CertmapLegacy(m1) -- cm_m2 = CertmapLegacy(m2) -- -- certmaps = cm_m1.list() -- certmaps['default']['DNComps'] = None -- certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' -- -- cm_m1.set(certmaps) -- cm_m2.set(certmaps) -- -- log.info('Enabling TLS') -- [i.enable_tls() for i in topology_m2] -- -- log.info('Creating replication dns') -- services = ServiceAccounts(m1, DEFAULT_SUFFIX) -- repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) -- repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) -- -- repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) -- repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) -- -- log.info('Changing auth type') -- replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) -- agmt_m1 = replica_m1.get_agreements().list()[0] -- agmt_m1.replace_many( -- ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), -- ('nsDS5ReplicaTransportInfo', 'SSL'), -- ('nsDS5ReplicaPort', '%s' % m2.sslport), -- ) -- -- agmt_m1.remove_all('nsDS5ReplicaBindDN') -- -- replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) -- agmt_m2 = replica_m2.get_agreements().list()[0] -- -- agmt_m2.replace_many( -- ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), -- ('nsDS5ReplicaTransportInfo', 'SSL'), -- ('nsDS5ReplicaPort', '%s' % m1.sslport), -- ) -- agmt_m2.remove_all('nsDS5ReplicaBindDN') -- -- log.info('Stopping supplier2') -- m2.stop() -- -- log.info('Run the cleanAllRUV task') -- cruv_task = CleanAllRUVTask(m1) -- cruv_task.create(properties={ -- 'replica-id': repl.get_rid(m1), -- 'replica-base-dn': DEFAULT_SUFFIX, -- 'replica-force-cleaning': 'no', -- 'replica-certify-all': 'yes' -- }) -- -- m1.restart() -- -- log.info('Check if supplier1 crashed') -- assert not m1.detectDisorderlyShutdown() -- -- log.info('Repeat') -- m1.restart() -- assert not m1.detectDisorderlyShutdown() -- - - if __name__ == '__main__': - # Run isolated -diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py -index bbf9c8486..65c299a0c 100644 ---- a/dirsrvtests/tests/suites/replication/regression_m2_test.py -+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py -@@ -240,8 +240,12 @@ def test_double_delete(topo_m2, create_entry): - log.info('Deleting entry {} from supplier1'.format(create_entry.dn)) - topo_m2.ms["supplier1"].delete_s(create_entry.dn) - -- log.info('Deleting entry {} from supplier2'.format(create_entry.dn)) -- topo_m2.ms["supplier2"].delete_s(create_entry.dn) -+ try: -+ log.info('Deleting entry {} from supplier2'.format(create_entry.dn)) -+ topo_m2.ms["supplier2"].delete_s(create_entry.dn) -+ except ldap.NO_SUCH_OBJECT: -+ # replication was too fast (DEBUGGING is probably set) -+ pass - - repl.enable_to_supplier(m2, [m1]) - repl.enable_to_supplier(m1, [m2]) -@@ -813,8 +817,9 @@ def test_keepalive_entries(topo_m2): - keep_alive_s1 = str(entries[0].data['keepalivetimestamp']) - keep_alive_s2 = str(entries[1].data['keepalivetimestamp']) - -- # Wait for event interval (60 secs) to pass -- time.sleep(61) -+ # Wait for event interval (60 secs) to pass, but first update doesn't -+ # start until 30 seconds after startup -+ time.sleep(91) - - # Check keep alives entries have been updated - entries = verify_keepalive_entries(topo_m2, True); -diff --git a/dirsrvtests/tests/suites/replication/regression_m2c2_test.py b/dirsrvtests/tests/suites/replication/regression_m2c2_test.py -index 
97b35c7ab..f9de7383c 100644 ---- a/dirsrvtests/tests/suites/replication/regression_m2c2_test.py -+++ b/dirsrvtests/tests/suites/replication/regression_m2c2_test.py -@@ -289,6 +289,7 @@ def test_csngen_state_not_updated_if_different_uuid(topo_m2c2): - log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}") - assert False - c1.start() -+ time.sleep(5) - - # Step 8: Check that c2 has time skew - # Stop server to insure that dse.ldif is uptodate -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index 5dab57de4..d67f1bc71 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -239,8 +239,8 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, - /* create supplier update event */ - if (r->repl_eqcxt_ka_update == NULL && replica_get_type(r) == REPLICA_TYPE_UPDATABLE) { - r->repl_eqcxt_ka_update = slapi_eq_repeat_rel(replica_subentry_update, r, -- slapi_current_rel_time_t() + 30, -- replica_get_keepalive_update_interval(r)); -+ slapi_current_rel_time_t() + 30, -+ 1000 * replica_get_keepalive_update_interval(r)); - } - - if (r->tombstone_reap_interval > 0) { -@@ -518,7 +518,7 @@ replica_subentry_update(time_t when __attribute__((unused)), void *arg) - replica_subentry_check(repl_root, rid); - - slapi_timestamp_utc_hr(buf, SLAPI_TIMESTAMP_BUFSIZE); -- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "replica_subentry_update called at %s\n", buf); -+ slapi_log_err(SLAPI_LOG_REPL, "NSMMReplicationPlugin", "replica_subentry_update called at %s\n", buf); - val.bv_val = buf; - val.bv_len = strlen(val.bv_val); - vals[0] = &val; -@@ -542,7 +542,7 @@ replica_subentry_update(time_t when __attribute__((unused)), void *arg) - "Failure (%d) to update replication keep alive entry \"%s: %s\"\n", - ldrc, KEEP_ALIVE_ATTR, buf); - } else { -- slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name, -+ slapi_log_err(SLAPI_LOG_REPL, "NSMMReplicationPlugin", - "replica_subentry_update - " - "Successful update of replication keep alive entry \"%s: %s\"\n", - KEEP_ALIVE_ATTR, buf); -@@ -1536,7 +1536,7 @@ replica_set_enabled(Replica *r, PRBool enable) - if (r->repl_eqcxt_ka_update == NULL && replica_get_type(r) == REPLICA_TYPE_UPDATABLE) { - r->repl_eqcxt_ka_update = slapi_eq_repeat_rel(replica_subentry_update, r, - slapi_current_rel_time_t() + START_UPDATE_DELAY, -- replica_get_keepalive_update_interval(r)); -+ 1000 * replica_get_keepalive_update_interval(r)); - } - } else /* disable */ - { -@@ -1546,7 +1546,7 @@ replica_set_enabled(Replica *r, PRBool enable) - r->repl_eqcxt_rs = NULL; - } - /* Remove supplier update event */ -- if (replica_get_type(r) == REPLICA_TYPE_PRIMARY) { -+ if (replica_get_type(r) == REPLICA_TYPE_UPDATABLE) { - slapi_eq_cancel_rel(r->repl_eqcxt_ka_update); - r->repl_eqcxt_ka_update = NULL; - } -diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c -index 70c45ec50..b32d00941 100644 ---- a/ldap/servers/plugins/replication/repl_extop.c -+++ b/ldap/servers/plugins/replication/repl_extop.c -@@ -493,7 +493,7 @@ free_and_return: - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "decode_startrepl_extop - decoded csn: %s\n", *csnstr); - ruv_dump_to_log(*supplier_ruv, "decode_startrepl_extop"); -- for (size_t i = 0; *extra_referrals && *extra_referrals[i]; i++) { -+ for (size_t i = 0; *extra_referrals && extra_referrals[i]; i++) { - 
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "decode_startrepl_extop - " - "decoded referral: %s\n", *extra_referrals[i]); - } -@@ -1661,7 +1661,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb) - * Launch the cleanruv monitoring thread. Once all the replicas are cleaned it will release the rid - */ - -- cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread..."); -+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Launching cleanAllRUV thread..."); - data = (cleanruv_data *)slapi_ch_calloc(1, sizeof(cleanruv_data)); - if (data == NULL) { - slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Failed to allocate " -diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c -index 4c7262ab3..71d5a2fb5 100644 ---- a/ldap/servers/slapd/task.c -+++ b/ldap/servers/slapd/task.c -@@ -742,7 +742,7 @@ get_internal_entry(Slapi_PBlock *pb, char *dn) - slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &ret); - if (ret != LDAP_SUCCESS) { - slapi_log_err(SLAPI_LOG_WARNING, "get_internal_entry", -- "Can't find task entry '%s'\n", dn); -+ "Failed to search for task entry '%s' error: %d\n", dn, ret); - return NULL; - } - -@@ -786,9 +786,9 @@ modify_internal_entry(char *dn, LDAPMod **mods) - * entry -- try at least 3 times before giving up. - */ - tries++; -- if (tries == 5) { -- slapi_log_err(SLAPI_LOG_WARNING, "modify_internal_entry", "Can't modify task " -- "entry '%s'; %s (%d)\n", -+ if (tries == 5) { -+ slapi_log_err(SLAPI_LOG_WARNING, "modify_internal_entry", -+ "Can't modify task entry '%s'; %s (%d)\n", - dn, ldap_err2string(ret), ret); - slapi_pblock_destroy(pb); - return; -diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py -index e96db3896..5668f375b 100644 ---- a/src/lib389/lib389/instance/remove.py -+++ b/src/lib389/lib389/instance/remove.py -@@ -90,6 +90,12 @@ def remove_ds_instance(dirsrv, force=False): - # Remove parent (/var/lib/dirsrv/slapd-INST) - shutil.rmtree(remove_paths['db_dir'].replace('db', ''), ignore_errors=True) - -+ # Remove /run/slapd-instance -+ try: -+ os.remove(f'/run/slapd-{dirsrv.serverid}.socket') -+ except OSError as e: -+ _log.debug("Failed to remove socket file: " + str(e)) -+ - # We can not assume we have systemd ... 
- if dirsrv.ds_paths.with_systemd: - # Remove the systemd symlink --- -2.37.1 - diff --git a/SOURCES/Cargo.lock b/SOURCES/Cargo.lock index fe8e0b9..18078a8 100644 --- a/SOURCES/Cargo.lock +++ b/SOURCES/Cargo.lock @@ -3,6 +3,17 @@ version = 3 [[package]] +name = "ahash" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom", + "once_cell", + "version_check", +] + +[[package]] name = "ansi_term" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -30,9 +41,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "bitflags" @@ -65,9 +76,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.73" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f" dependencies = [ "jobserver", ] @@ -94,6 +105,90 @@ dependencies = [ ] [[package]] +name = "concread" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcc9816f5ac93ebd51c37f7f9a6bf2b40dfcd42978ad2aea5d542016e9244cf6" +dependencies = [ + "ahash", + "crossbeam", + "crossbeam-epoch", + "crossbeam-utils", + "lru", + "parking_lot", + "rand", + "smallvec", + "tokio", +] + +[[package]] +name = "crossbeam" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" +dependencies = [ + "cfg-if", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +dependencies = [ + "cfg-if", +] + +[[package]] name = "entryuuid" version = "0.1.0" dependencies = [ @@ -117,9 +212,9 @@ 
dependencies = [ [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -154,9 +249,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "getrandom" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "libc", @@ -164,6 +259,15 @@ dependencies = [ ] [[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash", +] + +[[package]] name = "hermit-abi" version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -183,30 +287,24 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "jobserver" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" dependencies = [ "libc", ] [[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] name = "libc" -version = "0.2.125" +version = "0.2.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" [[package]] name = "librnsslapd" @@ -222,11 +320,22 @@ name = "librslapd" version = "0.1.0" dependencies = [ "cbindgen", + "concread", "libc", "slapd", ] [[package]] +name = "lock_api" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] name = "log" version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -236,16 +345,34 @@ dependencies = [ ] [[package]] +name = "lru" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" +dependencies = [ + "hashbrown", +] + +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + +[[package]] name = "once_cell" -version = "1.10.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" +checksum = 
"86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" [[package]] name = "openssl" -version = "0.10.40" +version = "0.10.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" +checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" dependencies = [ "bitflags", "cfg-if", @@ -269,9 +396,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.73" +version = "0.9.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5fd19fb3e0a8191c1e34935718976a3e70c112ab9a24af6d7cadccd9d90bc0" +checksum = "b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" dependencies = [ "autocfg", "cc", @@ -281,6 +408,31 @@ dependencies = [ ] [[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall", + "smallvec", + "winapi", +] + +[[package]] name = "paste" version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -300,10 +452,22 @@ dependencies = [ ] [[package]] +name = "pin-project-lite" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" + +[[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-hack" @@ -313,27 +477,70 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.38" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9027b48e9d4c9175fa2218adf3557f91c1137021739951d4932f5f8268ac48aa" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" dependencies = [ - "unicode-xid", + "unicode-ident", +] + +[[package]] +name = "pwdchan" +version = "0.1.0" +dependencies = [ + "base64", + "cc", + "libc", + "openssl", + "paste", + "slapi_r_plugin", + "uuid", ] [[package]] name = "quote" -version = "1.0.18" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] [[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] @@ -353,24 +560,30 @@ version = "0.1.0" [[package]] name = "ryu" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.137" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", @@ -379,9 +592,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.81" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" dependencies = [ "itoa", "ryu", @@ -399,13 +612,18 @@ dependencies = [ name = "slapi_r_plugin" version = "0.1.0" dependencies = [ - "lazy_static", "libc", "paste", "uuid", ] [[package]] +name = "smallvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" + +[[package]] name = "strsim" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -413,13 +631,13 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "syn" -version = "1.0.94" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a07e33e919ebcd69113d5be0e4d70c5707004ff45188910106854f38b960df4a" +checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] @@ -458,6 +676,28 @@ dependencies = [ ] [[package]] +name = "tokio" +version = "1.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +dependencies = [ + "autocfg", + "pin-project-lite", + "tokio-macros", +] + 
+[[package]] +name = "tokio-macros" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] name = "toml" version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -467,16 +707,22 @@ dependencies = [ ] [[package]] +name = "unicode-ident" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" + +[[package]] name = "unicode-width" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode-xid" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "uuid" @@ -500,10 +746,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "winapi" @@ -529,9 +781,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "zeroize" -version = "1.5.5" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" dependencies = [ "zeroize_derive", ] diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec index c4742ad..80ac92e 100644 --- a/SPECS/389-ds-base.spec +++ b/SPECS/389-ds-base.spec @@ -47,9 +47,9 @@ ExcludeArch: i686 Summary: 389 Directory Server (base) Name: 389-ds-base -Version: 1.4.3.30 -Release: %{?relprefix}6%{?prerel}%{?dist} -License: GPLv3+ and MIT and (ASL 2.0 or MIT) and (ASL 2.0 or Boost) and MPLv2.0 and ASL 2.0 and BSD and (ASL 2.0 with exceptions or ASL 2.0 or MIT) and (Unlicense or MIT) +Version: 1.4.3.32 +Release: %{?relprefix}3%{?prerel}%{?dist} +License: GPLv3+ and ASL 2.0 and MIT URL: https://www.port389.org Group: System Environment/Daemons Conflicts: selinux-policy-base < 3.9.8 @@ -58,67 +58,92 @@ Obsoletes: %{name} <= 1.4.0.9 Provides: ldif2ldbm >= 0 ##### Bundled cargo crates list - START ##### +Provides: bundled(crate(ahash)) = 0.7.6 Provides: bundled(crate(ansi_term)) = 0.12.1 Provides: bundled(crate(atty)) = 0.2.14 Provides: bundled(crate(autocfg)) = 1.1.0 -Provides: bundled(crate(base64)) = 0.13.0 +Provides: bundled(crate(base64)) = 0.13.1 Provides: bundled(crate(bitflags)) = 1.3.2 Provides: bundled(crate(byteorder)) = 1.4.3 Provides: 
bundled(crate(cbindgen)) = 0.9.1 -Provides: bundled(crate(cc)) = 1.0.73 +Provides: bundled(crate(cc)) = 1.0.76 Provides: bundled(crate(cfg-if)) = 1.0.0 Provides: bundled(crate(clap)) = 2.34.0 +Provides: bundled(crate(concread)) = 0.2.21 +Provides: bundled(crate(crossbeam)) = 0.8.2 +Provides: bundled(crate(crossbeam-channel)) = 0.5.6 +Provides: bundled(crate(crossbeam-deque)) = 0.8.2 +Provides: bundled(crate(crossbeam-epoch)) = 0.9.11 +Provides: bundled(crate(crossbeam-queue)) = 0.3.6 +Provides: bundled(crate(crossbeam-utils)) = 0.8.12 Provides: bundled(crate(entryuuid)) = 0.1.0 Provides: bundled(crate(entryuuid_syntax)) = 0.1.0 -Provides: bundled(crate(fastrand)) = 1.7.0 +Provides: bundled(crate(fastrand)) = 1.8.0 Provides: bundled(crate(fernet)) = 0.1.4 Provides: bundled(crate(foreign-types)) = 0.3.2 Provides: bundled(crate(foreign-types-shared)) = 0.1.1 -Provides: bundled(crate(getrandom)) = 0.2.6 +Provides: bundled(crate(getrandom)) = 0.2.8 +Provides: bundled(crate(hashbrown)) = 0.12.3 Provides: bundled(crate(hermit-abi)) = 0.1.19 Provides: bundled(crate(instant)) = 0.1.12 -Provides: bundled(crate(itoa)) = 1.0.1 -Provides: bundled(crate(jobserver)) = 0.1.24 -Provides: bundled(crate(lazy_static)) = 1.4.0 -Provides: bundled(crate(libc)) = 0.2.125 +Provides: bundled(crate(itoa)) = 1.0.4 +Provides: bundled(crate(jobserver)) = 0.1.25 +Provides: bundled(crate(libc)) = 0.2.137 Provides: bundled(crate(librnsslapd)) = 0.1.0 Provides: bundled(crate(librslapd)) = 0.1.0 +Provides: bundled(crate(lock_api)) = 0.4.9 Provides: bundled(crate(log)) = 0.4.17 -Provides: bundled(crate(once_cell)) = 1.10.0 -Provides: bundled(crate(openssl)) = 0.10.40 +Provides: bundled(crate(lru)) = 0.7.8 +Provides: bundled(crate(memoffset)) = 0.6.5 +Provides: bundled(crate(once_cell)) = 1.16.0 +Provides: bundled(crate(openssl)) = 0.10.42 Provides: bundled(crate(openssl-macros)) = 0.1.0 -Provides: bundled(crate(openssl-sys)) = 0.9.73 +Provides: bundled(crate(openssl-sys)) = 0.9.77 +Provides: bundled(crate(parking_lot)) = 0.11.2 +Provides: bundled(crate(parking_lot_core)) = 0.8.5 Provides: bundled(crate(paste)) = 0.1.18 Provides: bundled(crate(paste-impl)) = 0.1.18 -Provides: bundled(crate(pkg-config)) = 0.3.25 +Provides: bundled(crate(pin-project-lite)) = 0.2.9 +Provides: bundled(crate(pkg-config)) = 0.3.26 +Provides: bundled(crate(ppv-lite86)) = 0.2.17 Provides: bundled(crate(proc-macro-hack)) = 0.5.19 -Provides: bundled(crate(proc-macro2)) = 1.0.38 -Provides: bundled(crate(quote)) = 1.0.18 -Provides: bundled(crate(redox_syscall)) = 0.2.13 +Provides: bundled(crate(proc-macro2)) = 1.0.47 +Provides: bundled(crate(pwdchan)) = 0.1.0 +Provides: bundled(crate(quote)) = 1.0.21 +Provides: bundled(crate(rand)) = 0.8.5 +Provides: bundled(crate(rand_chacha)) = 0.3.1 +Provides: bundled(crate(rand_core)) = 0.6.4 +Provides: bundled(crate(redox_syscall)) = 0.2.16 Provides: bundled(crate(remove_dir_all)) = 0.5.3 Provides: bundled(crate(rsds)) = 0.1.0 -Provides: bundled(crate(ryu)) = 1.0.9 -Provides: bundled(crate(serde)) = 1.0.137 -Provides: bundled(crate(serde_derive)) = 1.0.137 -Provides: bundled(crate(serde_json)) = 1.0.81 +Provides: bundled(crate(ryu)) = 1.0.11 +Provides: bundled(crate(scopeguard)) = 1.1.0 +Provides: bundled(crate(serde)) = 1.0.147 +Provides: bundled(crate(serde_derive)) = 1.0.147 +Provides: bundled(crate(serde_json)) = 1.0.87 Provides: bundled(crate(slapd)) = 0.1.0 Provides: bundled(crate(slapi_r_plugin)) = 0.1.0 +Provides: bundled(crate(smallvec)) = 1.10.0 Provides: bundled(crate(strsim)) = 0.8.0 -Provides: 
bundled(crate(syn)) = 1.0.94 +Provides: bundled(crate(syn)) = 1.0.103 Provides: bundled(crate(synstructure)) = 0.12.6 Provides: bundled(crate(tempfile)) = 3.3.0 Provides: bundled(crate(textwrap)) = 0.11.0 +Provides: bundled(crate(tokio)) = 1.21.2 +Provides: bundled(crate(tokio-macros)) = 1.8.0 Provides: bundled(crate(toml)) = 0.5.9 -Provides: bundled(crate(unicode-width)) = 0.1.9 -Provides: bundled(crate(unicode-xid)) = 0.2.3 +Provides: bundled(crate(unicode-ident)) = 1.0.5 +Provides: bundled(crate(unicode-width)) = 0.1.10 +Provides: bundled(crate(unicode-xid)) = 0.2.4 Provides: bundled(crate(uuid)) = 0.8.2 Provides: bundled(crate(vcpkg)) = 0.2.15 Provides: bundled(crate(vec_map)) = 0.8.2 -Provides: bundled(crate(wasi)) = 0.10.2+wasi_snapshot_preview1 +Provides: bundled(crate(version_check)) = 0.9.4 +Provides: bundled(crate(wasi)) = 0.11.0+wasi_snapshot_preview1 Provides: bundled(crate(winapi)) = 0.3.9 Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0 Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0 -Provides: bundled(crate(zeroize)) = 1.5.5 +Provides: bundled(crate(zeroize)) = 1.5.7 Provides: bundled(crate(zeroize_derive)) = 1.3.2 ##### Bundled cargo crates list - END ##### @@ -247,23 +272,11 @@ Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download Source4: vendor-%{version}-1.tar.gz Source5: Cargo.lock %endif - -Patch01: 0001-Revert-4866-cl-trimming-not-applicable-in-1.4.3.patch -Patch02: 0002-Issue-4877-RFE-EntryUUID-to-validate-UUIDs-on-fixup-.patch -Patch03: 0003-Issue-5126-Memory-leak-in-slapi_ldap_get_lderrno-515.patch -Patch04: 0004-Issue-5085-Race-condition-about-snmp-collator-at-sta.patch -Patch05: 0005-Issue-5079-BUG-multiple-ways-to-specific-primary-508.patch -Patch06: 0006-Issue-3903-Supplier-should-do-periodic-updates.patch -Patch07: 0007-Issue-5399-UI-LDAP-Editor-is-not-updated-when-we-swi.patch -Patch08: 0008-Issue-5397-Fix-various-memory-leaks.patch -Patch09: 0009-Issue-3903-keep-alive-update-event-starts-too-soon.patch -Patch10: 0010-Issue-5397-Fix-check-pick-error.patch -Patch11: 0011-Issue-5397-Fix-check-pick-error-2.patch -Patch12: 0012-Issue-3903-Fix-another-cherry-pick-error.patch -Patch13: 0013-Issue-5329-Improve-replication-extended-op-logging.patch -Patch14: 0014-Issue-5412-lib389-do-not-set-backend-name-to-lowerca.patch -Patch15: 0015-Issue-5418-Sync_repl-may-crash-while-managing-invali.patch -Patch16: 0016-Issue-3903-fix-repl-keep-alive-event-interval.patch +Patch01: 0001-Issue-5532-Make-db-compaction-TOD-day-more-robust.patch +Patch02: 0002-Issue-5544-Increase-default-task-TTL.patch +Patch03: 0003-Issue-5413-Allow-mutliple-MemberOf-fixup-tasks-with-.patch +Patch04: 0004-Issue-5505-Fix-compiler-warning-5506.patch +Patch05: 0005-Issue-5565-Change-default-password-storage-scheme.patch %description @@ -885,20 +898,19 @@ exit 0 %doc README.md %changelog -* Thu Aug 18 2022 Mark Reynolds - 1.4.3.20-6 -- Bump version to 1.4.3.30-6 -- Resolves: Bug 2113002 - ipa-replica-manage --connect --winsync fails with traceback -- Resolves: Bug 2118763 - SIGSEGV in sync_repl - -* Mon Aug 8 2022 Mark Reynolds - 1.4.3.20-5 -- Bump version to 1.4.3.30-5 -- Resolves: Bug 2113002 - ipa-replica-manage --connect --winsync fails with traceback - -* Thu Jul 28 2022 Thierry Bordaz - 1.4.3.30-4 -- Bump version to 1.4.3.30-4 -- Resolves: Bug 2085562 - Rebase 389-ds-base in 8.7 - -* Fri Jun 10 2022 Thierry Bordaz - 1.4.3.30-3 -- Bump version to 1.4.3.30-3 -- Resolves: Bug 2085562 - Rebase 389-ds-base in 8.7 +* Tue Dec 13 2022 Mark Reynolds - 
1.4.3.32-3 +- Bump version to 1.4.3.32-3 +- Resolves: Bug 2149956 - change default password storage scheme to be backwards compatible with RHEL 7 + +* Tue Dec 13 2022 Mark Reynolds - 1.4.3.32-2 +- Bump version to 1.4.3.32-2 +- Resolves: Bug 2149956 - ipa-server-install displays mdd failure Server is unwilling to perform + +* Tue Nov 15 2022 Mark Reynolds - 1.4.3.32-1 +- Bump version to 1.4.3.32-1 +- Resolves: Bug 2098138 - broken nsslapd-subtree-rename-switch option in rhds11 +- Resolves: Bug 2119063 - entryuuid fixup tasks fails because entryUUID is not mutable +- Resolves: Bug 2136610 - [RFE] Add 'cn' attribute to IPA audit logs +- Resolves: Bug 2142638 - pam mutex lock causing high etimes, affecting red hat internal sso +- Resolves: Bug 2096795 - [RFE] Support ECDSA private keys for TLS
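
Two of the C-level fixes in the removed combined patch above are easy to misread in unified-diff form. The standalone sketch below illustrates the logic involved; it is illustrative only and not part of the shipped patches. It rests on two assumptions inferred from the hunks themselves rather than from any documented API contract: that slapi_eq_repeat_rel() takes its repeat interval in milliseconds while the keep-alive interval is configured in seconds (hence the added "1000 *"), and that the referral list is a NULL-terminated array of strings (hence dropping the leading "*" from the loop condition, since "*extra_referrals[i]" parses as "*(extra_referrals[i])" and tests the first byte of a string instead of the pointer). The sketch uses a plain char *[] for simplicity.

/*
 * keepalive_sketch.c -- illustrative only, not shipped 389-ds-base code.
 *
 * 1. repl5_replica.c: the configured keep-alive update interval is in
 *    seconds, but the event-queue repeat argument is (by assumption,
 *    inferred from the fix) in milliseconds, so the raw value is
 *    multiplied by 1000 before the event is scheduled.
 * 2. repl_extop.c: the corrected loop condition checks the pointer
 *    itself rather than its first byte, so iteration stops at the
 *    array's NULL terminator.
 */
#include <stdio.h>

int
main(void)
{
    /* 1. Seconds-to-milliseconds conversion before scheduling an event. */
    long keepalive_interval_sec = 60; /* stand-in for replica_get_keepalive_update_interval() */
    long interval_ms = 1000 * keepalive_interval_sec;
    printf("keep-alive event repeats every %ld ms\n", interval_ms);

    /* 2. Walking a NULL-terminated array of referral strings. */
    char *extra_referrals[] = {"ldap://host1", "ldap://host2", NULL};
    for (size_t i = 0; extra_referrals[i]; i++) {
        printf("decoded referral: %s\n", extra_referrals[i]);
    }
    return 0;
}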