From e65d6225398901c3319e72a460bc58e5d50df67c Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 3 Aug 2022 16:27:15 -0400
Subject: [PATCH 2/5] Issue 3903 - Supplier should do periodic updates
Description:
On suppliers, update the keep alive entry periodically to keep the RUV up
to date in case a replica is neglected for a long time. This prevents
very long changelog scans when updates are finally processed.
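
For reference, a minimal lib389 sketch of tuning the new attribute on an
already-connected supplier (the "supplier" handle and the example suffix are
assumptions; the attribute name, its 60-second floor, and the 3600-second
default come from this patch):

    from lib389.replica import Replicas

    # 'supplier' is assumed to be a connected lib389 DirSrv handle for a
    # supplier replica serving an example suffix.
    replica = Replicas(supplier).get('dc=example,dc=com')

    # nsds5ReplicaKeepAliveUpdateInterval: seconds between internal
    # keep-alive updates; values below 60 are rejected, default is 3600.
    replica.replace('nsds5ReplicaKeepAliveUpdateInterval', '300')

    # Restart so the repeating keep-alive event is re-registered with the
    # new interval.
    supplier.restart()
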
relates: https://github.com/389ds/389-ds-base/issues/3903
Reviewed by: firstyear & tbordaz (Thanks!)
---
.../suites/replication/regression_m2_test.py | 96 +++++--------
.../suites/replication/replica_config_test.py | 6 +-
ldap/schema/01core389.ldif | 3 +-
ldap/servers/plugins/replication/repl5.h | 11 +-
.../plugins/replication/repl5_inc_protocol.c | 44 +-----
.../plugins/replication/repl5_replica.c | 127 +++++++++++++-----
.../replication/repl5_replica_config.c | 12 ++
.../plugins/replication/repl5_tot_protocol.c | 4 +-
ldap/servers/plugins/replication/repl_extop.c | 2 +-
.../plugins/replication/repl_globals.c | 1 +
.../src/lib/replication/replConfig.jsx | 32 ++++-
src/cockpit/389-console/src/replication.jsx | 6 +
src/lib389/lib389/cli_conf/replication.py | 6 +-
13 files changed, 202 insertions(+), 148 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
index 466e3c2c0..7dd0f2984 100644
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
@@ -14,6 +14,7 @@ import ldif
import ldap
import pytest
import subprocess
+import time
from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
from lib389.pwpolicy import PwPolicyManager
from lib389.utils import *
@@ -204,12 +205,12 @@ def rename_entry(server, idx, ou_name, new_parent):
def add_ldapsubentry(server, parent):
pwp = PwPolicyManager(server)
policy_props = {'passwordStorageScheme': 'ssha',
- 'passwordCheckSyntax': 'on',
- 'passwordInHistory': '6',
- 'passwordChange': 'on',
- 'passwordMinAge': '0',
- 'passwordExp': 'off',
- 'passwordMustChange': 'off',}
+ 'passwordCheckSyntax': 'on',
+ 'passwordInHistory': '6',
+ 'passwordChange': 'on',
+ 'passwordMinAge': '0',
+ 'passwordExp': 'off',
+ 'passwordMustChange': 'off',}
log.info('Create password policy for subtree {}'.format(parent))
pwp.create_subtree_policy(parent, policy_props)
@@ -742,7 +743,7 @@ def get_keepalive_entries(instance, replica):
try:
entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
"(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
- ['cn', 'nsUniqueId', 'modifierTimestamp'])
+ ['cn', 'keepalivetimestamp', 'nsUniqueId', 'modifierTimestamp'])
except ldap.LDAPError as e:
log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e)))
assert False
@@ -761,6 +762,7 @@ def verify_keepalive_entries(topo, expected):
# (for example after: db2ldif / demote a supplier / ldif2db / init other suppliers)
# ==> if the function is somehow pushed in lib389, a check better than simply counting the entries
# should be done.
+ entries = []
for supplierId in topo.ms:
supplier = topo.ms[supplierId]
for replica in Replicas(supplier).list():
@@ -771,6 +773,7 @@ def verify_keepalive_entries(topo, expected):
keepaliveEntries = get_keepalive_entries(supplier, replica);
expectedCount = len(topo.ms) if expected else 0
foundCount = len(keepaliveEntries)
+ entries += keepaliveEntries
if (foundCount == expectedCount):
log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
else:
@@ -778,70 +781,45 @@ def verify_keepalive_entries(topo, expected):
f'while {expectedCount} were expected on {replica_info}.')
assert False
+ return entries
+
-def test_online_init_should_create_keepalive_entries(topo_m2):
- """Check that keep alive entries are created when initializinf a supplier from another one
+def test_keepalive_entries(topo_m2):
+ """Check that keep alive entries are created
:id: d5940e71-d18a-4b71-aaf7-b9185361fffe
:setup: Two suppliers replication setup
:steps:
- 1. Generate ldif without replication data
- 2 Init both suppliers from that ldif
- 3 Check that keep alive entries does not exists
- 4 Perform on line init of supplier2 from supplier1
- 5 Check that keep alive entries exists
+        1. Keep alive entries are present
+ 2. Keep alive entries are updated every 60 seconds
:expectedresults:
- 1. No error while generating ldif
- 2. No error while importing the ldif file
- 3. No keepalive entrie should exists on any suppliers
- 4. No error while initializing supplier2
- 5. All keepalive entries should exist on every suppliers
+ 1. Success
+ 2. Success
"""
- repl = ReplicationManager(DEFAULT_SUFFIX)
- m1 = topo_m2.ms["supplier1"]
- m2 = topo_m2.ms["supplier2"]
- # Step 1: Generate ldif without replication data
- m1.stop()
- m2.stop()
- ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
- m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
- excludeSuffixes=None, repl_data=False,
- outputfile=ldif_file, encrypt=False)
- # Remove replication metadata that are still in the ldif
- _remove_replication_data(ldif_file)
-
- # Step 2: Init both suppliers from that ldif
- m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
- m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
- m1.start()
- m2.start()
-
- """ Replica state is now as if CLI setup has been done using:
- dsconf supplier1 replication enable --suffix "${SUFFIX}" --role supplier
- dsconf supplier2 replication enable --suffix "${SUFFIX}" --role supplier
- dsconf supplier1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
- dsconf supplier2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
- dsconf supplier1 repl-agmt create --suffix "${SUFFIX}"
- dsconf supplier2 repl-agmt create --suffix "${SUFFIX}"
- """
+ # default interval is 1 hour, too long for test, set it to the minimum of
+ # 60 seconds
+ for supplierId in topo_m2.ms:
+ supplier = topo_m2.ms[supplierId]
+ replica = Replicas(supplier).get(DEFAULT_SUFFIX)
+ replica.replace('nsds5ReplicaKeepAliveUpdateInterval', '60')
+ supplier.restart()
- # Step 3: No keepalive entrie should exists on any suppliers
- verify_keepalive_entries(topo_m2, False)
+ # verify entries exist
+ entries = verify_keepalive_entries(topo_m2, True);
- # Step 4: Perform on line init of supplier2 from supplier1
- agmt = Agreements(m1).list()[0]
- agmt.begin_reinit()
- (done, error) = agmt.wait_reinit()
- assert done is True
- assert error is False
+ # Get current time from keep alive entry
+ keep_alive_s1 = str(entries[0].data['keepalivetimestamp'])
+ keep_alive_s2 = str(entries[1].data['keepalivetimestamp'])
+
+ # Wait for event interval (60 secs) to pass
+ time.sleep(61)
- # Step 5: All keepalive entries should exists on every suppliers
- # Verify the keep alive entry once replication is in sync
- # (that is the step that fails when bug is not fixed)
- repl.wait_for_ruv(m2,m1)
- verify_keepalive_entries(topo_m2, True);
+    # Check that the keep alive entries have been updated
+ entries = verify_keepalive_entries(topo_m2, True);
+ assert keep_alive_s1 != str(entries[0].data['keepalivetimestamp'])
+ assert keep_alive_s2 != str(entries[1].data['keepalivetimestamp'])
@pytest.mark.ds49915
diff --git a/dirsrvtests/tests/suites/replication/replica_config_test.py b/dirsrvtests/tests/suites/replication/replica_config_test.py
index c2140a2ac..06ae5afcf 100644
--- a/dirsrvtests/tests/suites/replication/replica_config_test.py
+++ b/dirsrvtests/tests/suites/replication/replica_config_test.py
@@ -50,7 +50,8 @@ repl_add_attrs = [('nsDS5ReplicaType', '-1', '4', overflow, notnum, '1'),
('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'),
('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'),
('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'),
- ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')]
+ ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6'),
+ ('nsds5ReplicaKeepAliveUpdateInterval', '59', too_big, overflow, notnum, '60'),]
repl_mod_attrs = [('nsDS5Flags', '-1', '2', overflow, notnum, '1'),
('nsds5ReplicaPurgeDelay', '-2', too_big, overflow, notnum, '1'),
@@ -59,7 +60,8 @@ repl_mod_attrs = [('nsDS5Flags', '-1', '2', overflow, notnum, '1'),
('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'),
('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'),
('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'),
- ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')]
+ ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6'),
+ ('nsds5ReplicaKeepAliveUpdateInterval', '59', too_big, overflow, notnum, '60'),]
agmt_attrs = [
('nsds5ReplicaPort', '0', '65535', overflow, notnum, '389'),
diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif
index 0c73e5114..7a9598730 100644
--- a/ldap/schema/01core389.ldif
+++ b/ldap/schema/01core389.ldif
@@ -327,6 +327,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2371 NAME 'nsDS5ReplicaBootstrapBindDN'
attributeTypes: ( 2.16.840.1.113730.3.1.2372 NAME 'nsDS5ReplicaBootstrapCredentials' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2373 NAME 'nsDS5ReplicaBootstrapBindMethod' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2374 NAME 'nsDS5ReplicaBootstrapTransportInfo' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2390 NAME 'nsds5ReplicaKeepAliveUpdateInterval' DESC '389 defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
#
# objectclasses
#
@@ -336,7 +337,7 @@ objectClasses: ( 2.16.840.1.113730.3.2.44 NAME 'nsIndex' DESC 'Netscape defined
objectClasses: ( 2.16.840.1.113730.3.2.109 NAME 'nsBackendInstance' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.110 NAME 'nsMappingTree' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.104 NAME 'nsContainer' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' )
-objectClasses: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Replication configuration objectclass' SUP top MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY (cn $ nsds5ReplicaPreciseTombstonePurging $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsDS5ReplicaBindDNGroup $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ nsds5ReplicaChangeCount $ nsds5ReplicaLegacyConsumer $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaBackoffMin $ nsds5ReplicaBackoffMax $ nsds5ReplicaReleaseTimeout $ nsDS5ReplicaBindDnGroupCheckInterval ) X-ORIGIN 'Netscape Directory Server' )
+objectClasses: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Replication configuration objectclass' SUP top MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY (cn $ nsds5ReplicaPreciseTombstonePurging $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsDS5ReplicaBindDNGroup $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ nsds5ReplicaChangeCount $ nsds5ReplicaLegacyConsumer $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaBackoffMin $ nsds5ReplicaBackoffMax $ nsds5ReplicaReleaseTimeout $ nsDS5ReplicaBindDnGroupCheckInterval $ nsds5ReplicaKeepAliveUpdateInterval ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.113 NAME 'nsTombstone' DESC 'Netscape defined objectclass' SUP top MAY ( nstombstonecsn $ nsParentUniqueId $ nscpEntryDN ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.103 NAME 'nsDS5ReplicationAgreement' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsds5ReplicaCleanRUVNotified $ nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicatedAttributeListTotal $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5ReplicaEnabled $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5ReplicaStripAttrs $ nsds5replicaSessionPauseTime $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaFlowControlWindow $ nsds5ReplicaFlowControlPause $ nsDS5ReplicaWaitForAsyncResults $ nsds5ReplicaIgnoreMissingChange $ nsDS5ReplicaBootstrapBindDN $ nsDS5ReplicaBootstrapCredentials $ nsDS5ReplicaBootstrapBindMethod $ nsDS5ReplicaBootstrapTransportInfo ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.39 NAME 'nsslapdConfig' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' )
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 06e747811..c2fbff8c0 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -1,6 +1,6 @@
/** BEGIN COPYRIGHT BLOCK
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2020 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* All rights reserved.
*
@@ -120,6 +120,8 @@
#define PROTOCOL_STATUS_TOTAL_SENDING_DATA 711
#define DEFAULT_PROTOCOL_TIMEOUT 120
+#define DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL 3600
+#define REPLICA_KEEPALIVE_UPDATE_INTERVAL_MIN 60
/* To Allow Consumer Initialization when adding an agreement - */
#define STATE_PERFORMING_TOTAL_UPDATE 501
@@ -162,6 +164,7 @@ extern const char *type_nsds5ReplicaBootstrapBindDN;
extern const char *type_nsds5ReplicaBootstrapCredentials;
extern const char *type_nsds5ReplicaBootstrapBindMethod;
extern const char *type_nsds5ReplicaBootstrapTransportInfo;
+extern const char *type_replicaKeepAliveUpdateInterval;
/* Attribute names for windows replication agreements */
extern const char *type_nsds7WindowsReplicaArea;
@@ -677,8 +680,8 @@ Replica *windows_replica_new(const Slapi_DN *root);
during addition of the replica over LDAP */
int replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, Replica **r);
void replica_destroy(void **arg);
-int replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid);
-int replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid);
+void replica_subentry_update(time_t when, void *arg);
+int replica_subentry_check(const char *repl_root, ReplicaId rid);
PRBool replica_get_exclusive_access(Replica *r, PRBool *isInc, uint64_t connid, int opid, const char *locking_purl, char **current_purl);
void replica_relinquish_exclusive_access(Replica *r, uint64_t connid, int opid);
PRBool replica_get_tombstone_reap_active(const Replica *r);
@@ -739,6 +742,8 @@ void consumer5_set_mapping_tree_state_for_replica(const Replica *r, RUV *supplie
Replica *replica_get_for_backend(const char *be_name);
void replica_set_purge_delay(Replica *r, uint32_t purge_delay);
void replica_set_tombstone_reap_interval(Replica *r, long interval);
+void replica_set_keepalive_update_interval(Replica *r, int64_t interval);
+int64_t replica_get_keepalive_update_interval(Replica *r);
void replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv);
Slapi_Entry *get_in_memory_ruv(Slapi_DN *suffix_sdn);
int replica_write_ruv(Replica *r);
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 4bb384882..846951b9e 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -1,6 +1,6 @@
/** BEGIN COPYRIGHT BLOCK
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2020 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
* All rights reserved.
*
* License: GPL (version 3 or any later version).
@@ -1677,13 +1677,9 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
} else {
ConnResult replay_crc;
Replica *replica = prp->replica;
- PRBool subentry_update_needed = PR_FALSE;
PRUint64 release_timeout = replica_get_release_timeout(replica);
char csn_str[CSN_STRSIZE];
- int skipped_updates = 0;
- int fractional_repl;
int finished = 0;
-#define FRACTIONAL_SKIPPED_THRESHOLD 100
/* Start the results reading thread */
rd = repl5_inc_rd_new(prp);
@@ -1700,7 +1696,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
memset((void *)&op, 0, sizeof(op));
entry.op = &op;
- fractional_repl = agmt_is_fractional(prp->agmt);
do {
cl5_operation_parameters_done(entry.op);
memset((void *)entry.op, 0, sizeof(op));
@@ -1781,14 +1776,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
replica_id = csn_get_replicaid(entry.op->csn);
uniqueid = entry.op->target_address.uniqueid;
- if (fractional_repl && message_id) {
- /* This update was sent no need to update the subentry
- * and restart counting the skipped updates
- */
- subentry_update_needed = PR_FALSE;
- skipped_updates = 0;
- }
-
if (prp->repl50consumer && message_id) {
int operation, error = 0;
@@ -1816,15 +1803,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
agmt_get_long_name(prp->agmt),
entry.op->target_address.uniqueid, csn_str);
agmt_inc_last_update_changecount(prp->agmt, csn_get_replicaid(entry.op->csn), 1 /*skipped*/);
- if (fractional_repl) {
- skipped_updates++;
- if (skipped_updates > FRACTIONAL_SKIPPED_THRESHOLD) {
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
- "send_updates - %s: skipped updates is too high (%d) if no other update is sent we will update the subentry\n",
- agmt_get_long_name(prp->agmt), skipped_updates);
- subentry_update_needed = PR_TRUE;
- }
- }
}
}
break;
@@ -1906,26 +1884,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
PR_Unlock(rd->lock);
} while (!finished);
- if (fractional_repl && subentry_update_needed) {
- ReplicaId rid = -1; /* Used to create the replica keep alive subentry */
- Slapi_DN *replarea_sdn = NULL;
-
- if (replica) {
- rid = replica_get_rid(replica);
- }
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
- "send_updates - %s: skipped updates was definitely too high (%d) update the subentry now\n",
- agmt_get_long_name(prp->agmt), skipped_updates);
- replarea_sdn = agmt_get_replarea(prp->agmt);
- if (!replarea_sdn) {
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
- "send_updates - Unknown replication area due to agreement not found.");
- agmt_set_last_update_status(prp->agmt, 0, -1, "Agreement is corrupted: missing suffix");
- return_value = UPDATE_FATAL_ERROR;
- } else {
- replica_subentry_update(replarea_sdn, rid);
- }
- }
/* Terminate the results reading thread */
if (!prp->repl50consumer) {
/* We need to ensure that we wait until all the responses have been received from our operations */
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 3bd57647f..ded4cf754 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -1,6 +1,6 @@
/** BEGIN COPYRIGHT BLOCK
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* All rights reserved.
*
@@ -22,7 +22,6 @@
#include "slap.h"
#define RUV_SAVE_INTERVAL (30 * 1000) /* 30 seconds */
-
#define REPLICA_RDN "cn=replica"
/*
@@ -48,6 +47,7 @@ struct replica
PRMonitor *repl_lock; /* protects entire structure */
Slapi_Eq_Context repl_eqcxt_rs; /* context to cancel event that saves ruv */
Slapi_Eq_Context repl_eqcxt_tr; /* context to cancel event that reaps tombstones */
+ Slapi_Eq_Context repl_eqcxt_ka_update; /* keep-alive entry update event */
Object *repl_csngen; /* CSN generator for this replica */
PRBool repl_csn_assigned; /* Flag set when new csn is assigned. */
int64_t repl_purge_delay; /* When purgeable, CSNs are held on to for this many extra seconds */
@@ -66,6 +66,7 @@ struct replica
uint64_t agmt_count; /* Number of agmts */
Slapi_Counter *release_timeout; /* The amount of time to wait before releasing active replica */
uint64_t abort_session; /* Abort the current replica session */
+    int64_t keepalive_update_interval; /* interval to do dummy update to keep RUV fresh */
};
@@ -133,8 +134,8 @@ replica_new(const Slapi_DN *root)
&r);
if (NULL == r) {
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_new - "
- "Unable to configure replica %s: %s\n",
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
+ "replica_new - Unable to configure replica %s: %s\n",
slapi_sdn_get_dn(root), errorbuf);
}
slapi_entry_free(e);
@@ -232,7 +233,15 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation,
In that case the updated would fail but nothing bad would happen. The next
scheduled update would save the state */
r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name,
- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL);
+ slapi_current_rel_time_t() + START_UPDATE_DELAY,
+ RUV_SAVE_INTERVAL);
+
+ /* create supplier update event */
+ if (r->repl_eqcxt_ka_update == NULL && replica_get_type(r) == REPLICA_TYPE_UPDATABLE) {
+ r->repl_eqcxt_ka_update = slapi_eq_repeat_rel(replica_subentry_update, r,
+ slapi_current_rel_time_t() + START_UPDATE_DELAY,
+ replica_get_keepalive_update_interval(r));
+ }
if (r->tombstone_reap_interval > 0) {
/*
@@ -302,6 +311,11 @@ replica_destroy(void **arg)
* and ruv updates.
*/
+ if (r->repl_eqcxt_ka_update) {
+ slapi_eq_cancel_rel(r->repl_eqcxt_ka_update);
+ r->repl_eqcxt_ka_update = NULL;
+ }
+
if (r->repl_eqcxt_rs) {
slapi_eq_cancel_rel(r->repl_eqcxt_rs);
r->repl_eqcxt_rs = NULL;
@@ -393,7 +407,7 @@ replica_destroy(void **arg)
static int
-replica_subentry_create(Slapi_DN *repl_root, ReplicaId rid)
+replica_subentry_create(const char *repl_root, ReplicaId rid)
{
char *entry_string = NULL;
Slapi_Entry *e = NULL;
@@ -402,7 +416,7 @@ replica_subentry_create(Slapi_DN *repl_root, ReplicaId rid)
int rc = 0;
entry_string = slapi_ch_smprintf("dn: cn=%s %d,%s\nobjectclass: top\nobjectclass: ldapsubentry\nobjectclass: extensibleObject\ncn: %s %d",
- KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), KEEP_ALIVE_ENTRY, rid);
+ KEEP_ALIVE_ENTRY, rid, repl_root, KEEP_ALIVE_ENTRY, rid);
if (entry_string == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
"replica_subentry_create - Failed in slapi_ch_smprintf\n");
@@ -441,7 +455,7 @@ done:
}
int
-replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
+replica_subentry_check(const char *repl_root, ReplicaId rid)
{
Slapi_PBlock *pb;
char *filter = NULL;
@@ -451,7 +465,7 @@ replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
pb = slapi_pblock_new();
filter = slapi_ch_smprintf("(&(objectclass=ldapsubentry)(cn=%s %d))", KEEP_ALIVE_ENTRY, rid);
- slapi_search_internal_set_pb(pb, slapi_sdn_get_dn(repl_root), LDAP_SCOPE_ONELEVEL,
+ slapi_search_internal_set_pb(pb, repl_root, LDAP_SCOPE_ONELEVEL,
filter, NULL, 0, NULL, NULL,
repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0);
slapi_search_internal_pb(pb);
@@ -460,17 +474,19 @@ replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
if (entries && (entries[0] == NULL)) {
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name,
- "replica_subentry_check - Need to create replication keep alive entry <cn=%s %d,%s>\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+ "replica_subentry_check - Need to create replication keep alive entry <cn=%s %d,%s>\n",
+ KEEP_ALIVE_ENTRY, rid, repl_root);
rc = replica_subentry_create(repl_root, rid);
} else {
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
- "replica_subentry_check - replication keep alive entry <cn=%s %d,%s> already exists\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+ "replica_subentry_check - replication keep alive entry <cn=%s %d,%s> already exists\n",
+ KEEP_ALIVE_ENTRY, rid, repl_root);
rc = 0;
}
} else {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
"replica_subentry_check - Error accessing replication keep alive entry <cn=%s %d,%s> res=%d\n",
- KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), res);
+ KEEP_ALIVE_ENTRY, rid, repl_root, res);
/* The status of the entry is not clear, do not attempt to create it */
rc = 1;
}
@@ -481,60 +497,59 @@ replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
return rc;
}
-int
-replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid)
+void
+replica_subentry_update(time_t when __attribute__((unused)), void *arg)
{
- int ldrc;
- int rc = LDAP_SUCCESS; /* Optimistic default */
+ Slapi_PBlock *modpb = NULL;
+ Replica *replica = (Replica *)arg;
+ ReplicaId rid;
LDAPMod *mods[2];
LDAPMod mod;
struct berval *vals[2];
- char buf[SLAPI_TIMESTAMP_BUFSIZE];
struct berval val;
- Slapi_PBlock *modpb = NULL;
- char *dn;
+ const char *repl_root = NULL;
+ char buf[SLAPI_TIMESTAMP_BUFSIZE];
+ char *dn = NULL;
+ int ldrc = 0;
+ rid = replica_get_rid(replica);
+ repl_root = slapi_ch_strdup(slapi_sdn_get_dn(replica_get_root(replica)));
replica_subentry_check(repl_root, rid);
slapi_timestamp_utc_hr(buf, SLAPI_TIMESTAMP_BUFSIZE);
-
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "subentry_update called at %s\n", buf);
-
-
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "replica_subentry_update called at %s\n", buf);
val.bv_val = buf;
val.bv_len = strlen(val.bv_val);
-
vals[0] = &val;
vals[1] = NULL;
mod.mod_op = LDAP_MOD_REPLACE | LDAP_MOD_BVALUES;
mod.mod_type = KEEP_ALIVE_ATTR;
mod.mod_bvalues = vals;
-
mods[0] = &mod;
mods[1] = NULL;
modpb = slapi_pblock_new();
- dn = slapi_ch_smprintf(KEEP_ALIVE_DN_FORMAT, KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
-
+ dn = slapi_ch_smprintf(KEEP_ALIVE_DN_FORMAT, KEEP_ALIVE_ENTRY, rid, repl_root);
slapi_modify_internal_set_pb(modpb, dn, mods, NULL, NULL,
repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0);
slapi_modify_internal_pb(modpb);
-
slapi_pblock_get(modpb, SLAPI_PLUGIN_INTOP_RESULT, &ldrc);
-
if (ldrc != LDAP_SUCCESS) {
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
- "Failure (%d) to update replication keep alive entry \"%s: %s\"\n", ldrc, KEEP_ALIVE_ATTR, buf);
- rc = ldrc;
+ "replica_subentry_update - "
+ "Failure (%d) to update replication keep alive entry \"%s: %s\"\n",
+ ldrc, KEEP_ALIVE_ATTR, buf);
} else {
slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name,
- "Successful update of replication keep alive entry \"%s: %s\"\n", KEEP_ALIVE_ATTR, buf);
+ "replica_subentry_update - "
+ "Successful update of replication keep alive entry \"%s: %s\"\n",
+ KEEP_ALIVE_ATTR, buf);
}
slapi_pblock_destroy(modpb);
+ slapi_ch_free_string((char **)&repl_root);
slapi_ch_free_string(&dn);
- return rc;
}
/*
* Attempt to obtain exclusive access to replica (advisory only)
@@ -1512,7 +1527,15 @@ replica_set_enabled(Replica *r, PRBool enable)
if (r->repl_eqcxt_rs == NULL) /* event is not already registered */
{
r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name,
- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL);
+ slapi_current_rel_time_t() + START_UPDATE_DELAY,
+ RUV_SAVE_INTERVAL);
+
+ }
+ /* create supplier update event */
+ if (r->repl_eqcxt_ka_update == NULL && replica_get_type(r) == REPLICA_TYPE_UPDATABLE) {
+ r->repl_eqcxt_ka_update = slapi_eq_repeat_rel(replica_subentry_update, r,
+ slapi_current_rel_time_t() + START_UPDATE_DELAY,
+ replica_get_keepalive_update_interval(r));
}
} else /* disable */
{
@@ -1521,6 +1544,11 @@ replica_set_enabled(Replica *r, PRBool enable)
slapi_eq_cancel_rel(r->repl_eqcxt_rs);
r->repl_eqcxt_rs = NULL;
}
+ /* Remove supplier update event */
+ if (replica_get_type(r) == REPLICA_TYPE_PRIMARY) {
+ slapi_eq_cancel_rel(r->repl_eqcxt_ka_update);
+ r->repl_eqcxt_ka_update = NULL;
+ }
}
replica_unlock(r->repl_lock);
@@ -2119,6 +2147,17 @@ _replica_init_from_config(Replica *r, Slapi_Entry *e, char *errortext)
r->tombstone_reap_interval = 3600 * 24; /* One week, in seconds */
}
+ if ((val = (char*)slapi_entry_attr_get_ref(e, type_replicaKeepAliveUpdateInterval))) {
+ if (repl_config_valid_num(type_replicaKeepAliveUpdateInterval, val, REPLICA_KEEPALIVE_UPDATE_INTERVAL_MIN,
+ INT_MAX, &rc, errormsg, &interval) != 0)
+ {
+ return LDAP_UNWILLING_TO_PERFORM;
+ }
+ r->keepalive_update_interval = interval;
+ } else {
+ r->keepalive_update_interval = DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL;
+ }
+
r->tombstone_reap_stop = r->tombstone_reap_active = PR_FALSE;
/* No supplier holding the replica */
@@ -3646,6 +3685,26 @@ replica_set_tombstone_reap_interval(Replica *r, long interval)
replica_unlock(r->repl_lock);
}
+void
+replica_set_keepalive_update_interval(Replica *r, int64_t interval)
+{
+ replica_lock(r->repl_lock);
+ r->keepalive_update_interval = interval;
+ replica_unlock(r->repl_lock);
+}
+
+int64_t
+replica_get_keepalive_update_interval(Replica *r)
+{
+ int64_t interval = DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL;
+
+ replica_lock(r->repl_lock);
+ interval = r->keepalive_update_interval;
+ replica_unlock(r->repl_lock);
+
+ return interval;
+}
+
static void
replica_strip_cleaned_rids(Replica *r)
{
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 2c6d74b13..aea2cf506 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -438,6 +438,9 @@ replica_config_modify(Slapi_PBlock *pb,
} else if (strcasecmp(config_attr, type_replicaBackoffMax) == 0) {
if (apply_mods)
replica_set_backoff_max(r, PROTOCOL_BACKOFF_MAXIMUM);
+ } else if (strcasecmp(config_attr, type_replicaKeepAliveUpdateInterval) == 0) {
+ if (apply_mods)
+ replica_set_keepalive_update_interval(r, DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL);
} else if (strcasecmp(config_attr, type_replicaPrecisePurge) == 0) {
if (apply_mods)
replica_set_precise_purging(r, 0);
@@ -472,6 +475,15 @@ replica_config_modify(Slapi_PBlock *pb,
} else {
break;
}
+ } else if (strcasecmp(config_attr, type_replicaKeepAliveUpdateInterval) == 0) {
+ int64_t interval = DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL;
+ if (repl_config_valid_num(config_attr, config_attr_value, REPLICA_KEEPALIVE_UPDATE_INTERVAL_MIN,
+ INT_MAX, returncode, errortext, &interval) == 0)
+ {
+ replica_set_keepalive_update_interval(r, interval);
+ } else {
+ break;
+ }
} else if (strcasecmp(config_attr, attr_replicaType) == 0) {
int64_t rtype;
slapi_ch_free_string(&new_repl_type);
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index f67263c3e..4b2064912 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -510,7 +510,7 @@ retry:
if (prp->replica) {
rid = replica_get_rid(prp->replica);
}
- replica_subentry_check(area_sdn, rid);
+ replica_subentry_check(slapi_sdn_get_dn(area_sdn), rid);
/* Send the subtree of the suffix in the order of parentid index plus ldapsubentry and nstombstone. */
check_suffix_entryID(be, suffix);
@@ -531,7 +531,7 @@ retry:
if (prp->replica) {
rid = replica_get_rid(prp->replica);
}
- replica_subentry_check(area_sdn, rid);
+ replica_subentry_check(slapi_sdn_get_dn(area_sdn), rid);
slapi_search_internal_set_pb(pb, slapi_sdn_get_dn(area_sdn),
LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL,
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index ef2025dd9..8b178610b 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1176,7 +1176,7 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
/* now that the changelog is open and started, we can alos cretae the
* keep alive entry without risk that db and cl will not match
*/
- replica_subentry_check((Slapi_DN *)replica_get_root(r), replica_get_rid(r));
+ replica_subentry_check(slapi_sdn_get_dn(replica_get_root(r)), replica_get_rid(r));
}
/* ONREPL code that dealt with new RUV, etc was moved into the code
diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c
index 000777fdd..797ca957f 100644
--- a/ldap/servers/plugins/replication/repl_globals.c
+++ b/ldap/servers/plugins/replication/repl_globals.c
@@ -89,6 +89,7 @@ const char *type_replicaReleaseTimeout = "nsds5ReplicaReleaseTimeout";
const char *type_replicaBackoffMin = "nsds5ReplicaBackoffMin";
const char *type_replicaBackoffMax = "nsds5ReplicaBackoffMax";
const char *type_replicaPrecisePurge = "nsds5ReplicaPreciseTombstonePurging";
+const char *type_replicaKeepAliveUpdateInterval = "nsds5ReplicaKeepAliveUpdateInterval";
/* Attribute names for replication agreement attributes */
const char *type_nsds5ReplicaHost = "nsds5ReplicaHost";
diff --git a/src/cockpit/389-console/src/lib/replication/replConfig.jsx b/src/cockpit/389-console/src/lib/replication/replConfig.jsx
index 1f0dc3ec5..3dffb8f1a 100644
--- a/src/cockpit/389-console/src/lib/replication/replConfig.jsx
+++ b/src/cockpit/389-console/src/lib/replication/replConfig.jsx
@@ -48,6 +48,7 @@ export class ReplConfig extends React.Component {
nsds5replicaprotocoltimeout: Number(this.props.data.nsds5replicaprotocoltimeout) == 0 ? 120 : Number(this.props.data.nsds5replicaprotocoltimeout),
nsds5replicabackoffmin: Number(this.props.data.nsds5replicabackoffmin) == 0 ? 3 : Number(this.props.data.nsds5replicabackoffmin),
nsds5replicabackoffmax: Number(this.props.data.nsds5replicabackoffmax) == 0 ? 300 : Number(this.props.data.nsds5replicabackoffmax),
+ nsds5replicakeepaliveupdateinterval: Number(this.props.data.nsds5replicakeepaliveupdateinterval) == 0 ? 3600 : Number(this.props.data.nsds5replicakeepaliveupdateinterval),
// Original settings
_nsds5replicabinddn: this.props.data.nsds5replicabinddn,
_nsds5replicabinddngroup: this.props.data.nsds5replicabinddngroup,
@@ -59,6 +60,7 @@ export class ReplConfig extends React.Component {
_nsds5replicaprotocoltimeout: Number(this.props.data.nsds5replicaprotocoltimeout) == 0 ? 120 : Number(this.props.data.nsds5replicaprotocoltimeout),
_nsds5replicabackoffmin: Number(this.props.data.nsds5replicabackoffmin) == 0 ? 3 : Number(this.props.data.nsds5replicabackoffmin),
_nsds5replicabackoffmax: Number(this.props.data.nsds5replicabackoffmax) == 0 ? 300 : Number(this.props.data.nsds5replicabackoffmax),
+ _nsds5replicakeepaliveupdateinterval: Number(this.props.data.nsds5replicakeepaliveupdateinterval) == 0 ? 3600 : Number(this.props.data.nsds5replicakeepaliveupdateinterval),
};
this.onToggle = (isExpanded) => {
@@ -275,7 +277,7 @@ export class ReplConfig extends React.Component {
'nsds5replicapurgedelay', 'nsds5replicatombstonepurgeinterval',
'nsds5replicareleasetimeout', 'nsds5replicaprotocoltimeout',
'nsds5replicabackoffmin', 'nsds5replicabackoffmax',
- 'nsds5replicaprecisetombstonepurging'
+ 'nsds5replicaprecisetombstonepurging', 'nsds5replicakeepaliveupdateinterval',
];
// Check if a setting was changed, if so enable the save button
for (const config_attr of config_attrs) {
@@ -301,7 +303,7 @@ export class ReplConfig extends React.Component {
'nsds5replicapurgedelay', 'nsds5replicatombstonepurgeinterval',
'nsds5replicareleasetimeout', 'nsds5replicaprotocoltimeout',
'nsds5replicabackoffmin', 'nsds5replicabackoffmax',
- 'nsds5replicaprecisetombstonepurging'
+ 'nsds5replicaprecisetombstonepurging', 'nsds5replicakeepaliveupdateinterval',
];
// Check if a setting was changed, if so enable the save button
for (const config_attr of config_attrs) {
@@ -451,6 +453,9 @@ export class ReplConfig extends React.Component {
if (this.state.nsds5replicabinddngroupcheckinterval != this.state._nsds5replicabinddngroupcheckinterval) {
cmd.push("--repl-bind-group-interval=" + this.state.nsds5replicabinddngroupcheckinterval);
}
+ if (this.state.nsds5replicakeepaliveupdateinterval != this.state._nsds5replicakeepaliveupdateinterval) {
+ cmd.push("--repl-keepalive-update-interval=" + this.state.nsds5replicakeepaliveupdateinterval);
+ }
if (this.state.nsds5replicareleasetimeout != this.state._nsds5replicareleasetimeout) {
cmd.push("--repl-release-timeout=" + this.state.nsds5replicareleasetimeout);
}
@@ -786,6 +791,29 @@ export class ReplConfig extends React.Component {
/>
</GridItem>
</Grid>
+ <Grid
+                        title="The interval in seconds at which the server applies an internal update to keep the RUV from getting stale (nsds5replicakeepaliveupdateinterval)."
+ className="ds-margin-top"
+ >
+ <GridItem className="ds-label" span={3}>
+ Refresh RUV Interval
+ </GridItem>
+ <GridItem span={9}>
+ <NumberInput
+ value={this.state.nsds5replicakeepaliveupdateinterval}
+ min={60}
+ max={this.maxValue}
+ onMinus={() => { this.onMinusConfig("nsds5replicakeepaliveupdateinterval") }}
+ onChange={(e) => { this.onConfigChange(e, "nsds5replicakeepaliveupdateinterval", 60) }}
+ onPlus={() => { this.onPlusConfig("nsds5replicakeepaliveupdateinterval") }}
+ inputName="input"
+ inputAriaLabel="number input"
+ minusBtnAriaLabel="minus"
+ plusBtnAriaLabel="plus"
+ widthChars={8}
+ />
+ </GridItem>
+ </Grid>
<Grid
title="Enables faster tombstone purging (nsds5replicaprecisetombstonepurging)."
className="ds-margin-top"
diff --git a/src/cockpit/389-console/src/replication.jsx b/src/cockpit/389-console/src/replication.jsx
index 28364156a..db9d030db 100644
--- a/src/cockpit/389-console/src/replication.jsx
+++ b/src/cockpit/389-console/src/replication.jsx
@@ -553,6 +553,7 @@ export class Replication extends React.Component {
nsds5replicaprotocoltimeout: 'nsds5replicaprotocoltimeout' in config.attrs ? config.attrs.nsds5replicaprotocoltimeout[0] : "",
nsds5replicabackoffmin: 'nsds5replicabackoffmin' in config.attrs ? config.attrs.nsds5replicabackoffmin[0] : "",
nsds5replicabackoffmax: 'nsds5replicabackoffmax' in config.attrs ? config.attrs.nsds5replicabackoffmax[0] : "",
+ nsds5replicakeepaliveupdateinterval: 'nsds5replicakeepaliveupdateinterval' in config.attrs ? config.attrs.nsds5replicakeepaliveupdateinterval[0] : "3600",
},
suffixSpinning: false,
disabled: false,
@@ -695,6 +696,11 @@ export class Replication extends React.Component {
nsds5replicaprotocoltimeout: 'nsds5replicaprotocoltimeout' in config.attrs ? config.attrs.nsds5replicaprotocoltimeout[0] : "",
nsds5replicabackoffmin: 'nsds5replicabackoffmin' in config.attrs ? config.attrs.nsds5replicabackoffmin[0] : "",
nsds5replicabackoffmax: 'nsds5replicabackoffmax' in config.attrs ? config.attrs.nsds5replicabackoffmax[0] : "",
+ nsds5replicakeepaliveupdateinterval: 'nsds5replicakeepaliveupdateinterval' in config.attrs ? config.attrs.nsds5replicakeepaliveupdateinterval[0] : "3600",
+ clMaxEntries: "",
+ clMaxAge: "",
+ clTrimInt: "",
+ clEncrypt: false,
}
}, this.loadLDIFs);
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 0048cd09b..450246b3d 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -33,6 +33,7 @@ arg_to_attr = {
'repl_backoff_min': 'nsds5replicabackoffmin',
'repl_backoff_max': 'nsds5replicabackoffmax',
'repl_release_timeout': 'nsds5replicareleasetimeout',
+ 'repl_keepalive_update_interval': 'nsds5replicakeepaliveupdateinterval',
# Changelog
'cl_dir': 'nsslapd-changelogdir',
'max_entries': 'nsslapd-changelogmaxentries',
@@ -1278,6 +1279,9 @@ def create_parser(subparsers):
"while waiting to acquire the consumer. Default is 3 seconds")
repl_set_parser.add_argument('--repl-release-timeout', help="A timeout in seconds a replication supplier should send "
"updates before it yields its replication session")
+ repl_set_parser.add_argument('--repl-keepalive-update-interval', help="Interval in seconds for how often the server will apply "
+ "an internal update to keep the RUV from getting stale. "
+ "The default is 1 hour (3600 seconds)")
repl_monitor_parser = repl_subcommands.add_parser('monitor', help='Display the full replication topology report')
repl_monitor_parser.set_defaults(func=get_repl_monitor_info)
@@ -1289,7 +1293,7 @@ def create_parser(subparsers):
repl_monitor_parser.add_argument('-a', '--aliases', nargs="*",
help="Enables displaying an alias instead of host:port, if an alias is "
"assigned to a host:port combination. The format: alias=host:port")
-#
+
############################################
# Replication Agmts
############################################
--
2.37.1