This is an automated email from the git hooks/post-receive script.
spichugi pushed a change to branch master in repository 389-ds-base.
      from  7658232  Ticket 49534 - Fix coverity issues and regression
       new  e163c44  Ticket 49516 - Add python 3 support for replication suite
The 1 revision listed above as "new" is entirely new to this repository and will be described in a separate email. The revisions listed as "adds" were already present in the repository and have only been added to this reference.
Summary of changes:
 dirsrvtests/tests/suites/basic/basic_test.py       |    4 +-
 .../tests/suites/replication/acceptance_test.py    |   39 +-
 .../tests/suites/replication/changelog_test.py     |   11 +-
 .../suites/replication/changelog_trimming_test.py  |   72 +-
 .../tests/suites/replication/cleanallruv_test.py   |  616 ++++--------
 .../tests/suites/replication/regression_test.py    |  117 ++-
 .../suites/replication/replica_config_test.py      |  304 ++----
 .../tests/suites/replication/ruvstore_test.py      |    2 +-
 .../tests/suites/replication/single_master_test.py |  126 +--
 .../replication/tls_client_auth_repl_test.py       |   92 ++
 .../tests/suites/replication/tombstone_test.py     |   50 +-
 .../replication/wait_for_async_feature_test.py     |  130 +--
 ldap/ldif/template-dse.ldif.in                     |    7 +
 ldap/schema/01core389.ldif                         |    9 +-
 ldap/schema/02common.ldif                          |    1 -
 src/lib389/lib389/__init__.py                      |  105 +-
 src/lib389/lib389/_mapped_object.py                |  141 ++-
 src/lib389/lib389/agreement.py                     |  151 ++-
 src/lib389/lib389/changelog.py                     |   52 +-
 src/lib389/lib389/idm/domain.py                    |    2 +-
 src/lib389/lib389/idm/group.py                     |   18 +-
 src/lib389/lib389/idm/services.py                  |   11 +
 src/lib389/lib389/instance/remove.py               |    2 +-
 src/lib389/lib389/nss_ssl.py                       |   87 +-
 src/lib389/lib389/replica.py                       | 1060 ++++++++++++++------
 src/lib389/lib389/tasks.py                         |   72 +-
 src/lib389/lib389/tombstone.py                     |   90 ++
 src/lib389/lib389/topologies.py                    |  126 +--
 28 files changed, 1995 insertions(+), 1502 deletions(-)
 create mode 100644 dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
 create mode 100644 src/lib389/lib389/tombstone.py
This is an automated email from the git hooks/post-receive script.
spichugi pushed a commit to branch master in repository 389-ds-base.
commit e163c443052b745c81a63d9ff389b260ed0557cb
Author: William Brown <firstyear@redhat.com>
Date:   Fri Dec 8 16:28:17 2017 +0100
Ticket 49516 - Add python 3 support for replication suite
Bug Description: Add support for python 3 to the replication suite. Additionally, update many types so that they are correctly designed and created as DSLdapObjects.
Fix Description:

* Update all repl tests to use python 3
* Mark tests that cannot currently be executed due to server limitations
* Add replication manager types to allow proper coordination of replicas
* Improve support for TLS with replication tests
https://pagure.io/389-ds-base/issue/49516
Author: wibrown
Reviewed by: spichugi
Signed-off-by: Simon Pichugin <spichugi@redhat.com>
---
 dirsrvtests/tests/suites/basic/basic_test.py       |    4 +-
 .../tests/suites/replication/acceptance_test.py    |   39 +-
 .../tests/suites/replication/changelog_test.py     |   11 +-
 .../suites/replication/changelog_trimming_test.py  |   72 +-
 .../tests/suites/replication/cleanallruv_test.py   |  616 ++++--------
 .../tests/suites/replication/regression_test.py    |  117 ++-
 .../suites/replication/replica_config_test.py      |  304 ++----
 .../tests/suites/replication/ruvstore_test.py      |    2 +-
 .../tests/suites/replication/single_master_test.py |  126 +--
 .../replication/tls_client_auth_repl_test.py       |   92 ++
 .../tests/suites/replication/tombstone_test.py     |   50 +-
 .../replication/wait_for_async_feature_test.py     |  130 +--
 ldap/ldif/template-dse.ldif.in                     |    7 +
 ldap/schema/01core389.ldif                         |    9 +-
 ldap/schema/02common.ldif                          |    1 -
 src/lib389/lib389/__init__.py                      |  105 +-
 src/lib389/lib389/_mapped_object.py                |  141 ++-
 src/lib389/lib389/agreement.py                     |  151 ++-
 src/lib389/lib389/changelog.py                     |   52 +-
 src/lib389/lib389/idm/domain.py                    |    2 +-
 src/lib389/lib389/idm/group.py                     |   18 +-
 src/lib389/lib389/idm/services.py                  |   11 +
 src/lib389/lib389/instance/remove.py               |    2 +-
 src/lib389/lib389/nss_ssl.py                       |   87 +-
 src/lib389/lib389/replica.py                       | 1060 ++++++++++++++------
 src/lib389/lib389/tasks.py                         |   72 +-
 src/lib389/lib389/tombstone.py                     |   90 ++
 src/lib389/lib389/topologies.py                    |  126 +--
 28 files changed, 1995 insertions(+), 1502 deletions(-)
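For orientation, the ReplicationManager API introduced by this patch is used throughout the updated tests roughly as follows. This is a minimal sketch assembled only from the calls visible in the diff below (the test name and structure here are illustrative, not part of the patch; the actual implementation lives in src/lib389/lib389/replica.py):

    from lib389.topologies import topology_m2
    from lib389.replica import ReplicationManager
    from lib389._constants import DEFAULT_SUFFIX

    def test_repl_manager_sketch(topology_m2):
        m1 = topology_m2.ms["master1"]
        m2 = topology_m2.ms["master2"]
        # One manager object per replicated suffix; it coordinates the
        # replication manager accounts and agreement bookkeeping.
        repl = ReplicationManager(DEFAULT_SUFFIX)
        # Idempotently guarantee agreements exist in both directions.
        repl.ensure_agreement(m1, m2)
        repl.ensure_agreement(m2, m1)
        # Block until a change made on one master is visible on the other.
        repl.test_replication(m1, m2)
        repl.test_replication(m2, m1)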
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 3fb51ea..18f3767 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -44,7 +44,7 @@ def import_example_ldif(topology_st):
     log.info('Initializing the "basic" test suite')
-    ldif = '%s/Example.ldif' % get_data_dir(topology_st.standalone.prefix)
+    ldif = '%s/dirsrv/data/Example.ldif' % topology_st.standalone.get_data_dir()
     import_ldif = topology_st.standalone.get_ldif_dir() + "/Example.ldif"
     shutil.copyfile(ldif, import_ldif)
     topology_st.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
@@ -298,7 +298,7 @@ def test_basic_import_export(topology_st, import_example_ldif):
     #
     # Cleanup - Import the Example LDIF for the other tests in this suite
     #
-    ldif = '%s/Example.ldif' % get_data_dir(topology_st.standalone.prefix)
+    ldif = '%s/dirsrv/data/Example.ldif' % topology_st.standalone.get_data_dir()
     import_ldif = topology_st.standalone.get_ldif_dir() + "/Example.ldif"
     shutil.copyfile(ldif, import_ldif)
     try:
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index 824a627..c524547 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -13,6 +13,8 @@ from lib389.topologies import topology_m4 as topo_m4
 from . import get_repl_entries
 from lib389.idm.user import UserAccount
+from lib389.replica import ReplicationManager
+
 from lib389._constants import (BACKEND_NAME, DEFAULT_SUFFIX, LOG_REPLICA, REPLICA_RUV_FILTER,
                                ReplicaRole, REPLICATION_BIND_DN, REPLICATION_BIND_PW,
                                REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, defaultProperties,
@@ -312,43 +314,20 @@ def test_new_suffix(topo_m4, new_suffix):
         2. Replication should work
         3. Replication on the new suffix should be disabled
     """
     m1 = topo_m4.ms["master1"]
     m2 = topo_m4.ms["master2"]
-    log.info('Enable replication for new suffix {} on two masters'.format(NEW_SUFFIX))
-    m1.replica.enableReplication(NEW_SUFFIX, ReplicaRole.MASTER, 101)
-    m2.replica.enableReplication(NEW_SUFFIX, ReplicaRole.MASTER, 102)
-
-    log.info("Creating agreement from master1 to master2")
-    properties = {RA_NAME: 'newMeTo_{}:{}'.format(m2.host, str(m2.port)),
-                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    m1_m2_agmt = m1.agreement.create(NEW_SUFFIX, m2.host, m2.port, properties)
-
-    if not m1_m2_agmt:
-        log.fatal("Fail to create a hub -> consumer replica agreement")
-        sys.exit(1)
-    log.info("{} is created".format(m1_m2_agmt))
-
-    # Allow the replicas to get situated with the new agreements...
-    time.sleep(2)
-    log.info("Initialize the agreement")
-    m1.agreement.init(NEW_SUFFIX, m2.host, m2.port)
-    m1.waitForReplInit(m1_m2_agmt)
+    repl = ReplicationManager(NEW_SUFFIX)
-    log.info("Check the replication is working")
-    assert m1.testReplication(NEW_SUFFIX, m2), 'Replication for new suffix {} is not working.'.format(NEW_SUFFIX)
+    repl.create_first_master(m1)
-    log.info("Delete the agreement")
-    m1.agreement.delete(NEW_SUFFIX, m2.host, m2.port, m1_m2_agmt)
+    repl.join_master(m1, m2)
-    log.info("Disable replication for the new suffix")
-    m1.replica.disableReplication(NEW_SUFFIX)
-    m2.replica.disableReplication(NEW_SUFFIX)
+    repl.test_replication(m1, m2)
+    repl.test_replication(m2, m1)
+    repl.remove_master(m1)
+    repl.remove_master(m2)
 def test_many_attrs(topo_m4, test_entry):
     """Check a replication with many attributes (add and delete)
diff --git a/dirsrvtests/tests/suites/replication/changelog_test.py b/dirsrvtests/tests/suites/replication/changelog_test.py
index 0b6b886..3b5d547 100755
--- a/dirsrvtests/tests/suites/replication/changelog_test.py
+++ b/dirsrvtests/tests/suites/replication/changelog_test.py
@@ -59,10 +59,9 @@ def _create_changelog_dump(topo):
     """Dump changelog using nss5task and check if ldap operations are logged"""
     log.info('Dump changelog using nss5task and check if ldap operations are logged')
-    db_dir = os.path.dirname(topo.ms['master1'].dbdir)
-    changelog_dir = os.path.join(db_dir, DEFAULT_CHANGELOG_DB)
+    changelog_dir = topo.ms['master1'].get_changelog_dir()
     replicas = Replicas(topo.ms["master1"])
-    dn_replica = replicas.get_dn(DEFAULT_SUFFIX)
+    replica = replicas.get(DEFAULT_SUFFIX)
     log.info('Remove ldif files, if present in: {}'.format(changelog_dir))
     for files in os.listdir(changelog_dir):
         if files.endswith('.ldif'):
@@ -77,11 +76,7 @@ _create_changelog_dump(topo):
         log.info('No existing changelog ldif files present')
     log.info('Running nsds5task to dump changelog database to a file')
-    try:
-        topo.ms['master1'].modify_s(dn_replica, [(ldap.MOD_REPLACE, 'nsds5task', 'cl2ldif')])
-    except ldap.LDAPError as e:
-        log.fatal('Failed to dump changelog to ldif file')
-        raise e
+    replica.begin_task_cl2ldif()
     log.info('Check if changelog ldif file exist in: {}'.format(changelog_dir))
     for files in os.listdir(changelog_dir):
diff --git a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py
index 0c39ea9..ffab7bf 100644
--- a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py
+++ b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py
@@ -5,7 +5,9 @@ import ldap
 import time
 from lib389._constants import *
 from lib389.properties import *
-from lib389.topologies import create_topology
+from lib389.topologies import topology_m1 as topo
+from lib389.changelog import Changelog5
+from lib389.idm.domain import Domain
 DEBUGGING = os.getenv("DEBUGGING", default=False)
 if DEBUGGING:
@@ -18,15 +20,9 @@ log = logging.getLogger(__name__)
 def do_mods(master, num):
     """Perform a num of mods on the default suffix """
-    for i in xrange(num):
-        try:
-            master.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE,
-                                              "description",
-                                              "new")])
-        except ldap.LDAPError as e:
-            log.fatal("Failed to make modify: " + str(e))
-            assert False
-
+    domain = Domain(master, DEFAULT_SUFFIX)
+    for i in range(num):
+        domain.replace('description', 'change %s' % i)
 @pytest.fixture(scope="module")
 def setup_max_entries(topo, request):
@@ -35,13 +31,10 @@
     master = topo.ms["master1"]
     master.config.loglevel((LOG_REPLICA,), 'error')
-    try:
-        master.modify_s(DN_CHANGELOG, [(ldap.MOD_REPLACE, CL_MAX_ENTRIES, "2"),
-                                       (ldap.MOD_REPLACE, CL_TRIM_INTERVAL, "300")])
-    except ldap.LDAPError as e:
-        log.fatal("Failed to set change log config: " + str(e))
-        assert False
+    cl = Changelog5(master)
+    cl.set_max_entries('2')
+    cl.set_trim_interval('300')
 @pytest.fixture(scope="module")
 def setup_max_age(topo, request):
@@ -49,37 +42,10 @@
     """
     master = topo.ms["master1"]
     master.config.loglevel((LOG_REPLICA,), 'error')
-    try:
-        master.modify_s(DN_CHANGELOG, [(ldap.MOD_REPLACE, CL_MAXAGE, "5"),
-                                       (ldap.MOD_REPLACE, CL_TRIM_INTERVAL, "300")])
-    except ldap.LDAPError as e:
-        log.fatal("Failed to set change log config: " + str(e))
-        assert False
-
-
-@pytest.fixture(scope="module")
-def topo(request):
-    """Create a topology with 1 masters"""
-
-    topology = create_topology({
-        ReplicaRole.MASTER: 1,
-    })
-    # You can write replica test here. Just uncomment the block and choose instances
-    # replicas = Replicas(topology.ms["master1"])
-    # replicas.test(DEFAULT_SUFFIX, topology.cs["consumer1"])
-
-    def fin():
-        """If we are debugging just stop the instances, otherwise remove them"""
-
-        if DEBUGGING:
-            map(lambda inst: inst.stop(), topology.all_insts.values())
-        else:
-            map(lambda inst: inst.delete(), topology.all_insts.values())
-
-    request.addfinalizer(fin)
-
-    return topology
+    cl = Changelog5(master)
+    cl.set_max_age('5')
+    cl.set_trim_interval('300')
 def test_max_age(topo, setup_max_age):
     """Test changing the trimming interval works with max age
@@ -100,6 +66,7 @@
     log.info("Testing changelog triming interval with max age...")
     master = topo.ms["master1"]
+    cl = Changelog5(master)
     # Do mods to build if cl entries
     do_mods(master, 10)
@@ -109,11 +76,7 @@
         log.fatal('Trimming event unexpectedly occurred')
         assert False
-    try:
-        master.modify_s(DN_CHANGELOG, [(ldap.MOD_REPLACE, CL_TRIM_INTERVAL, "5")])
-    except ldap.LDAPError as e:
-        log.fatal("Failed to set chance log trim interval: " + str(e))
-        assert False
+    cl.set_trim_interval('5')
     time.sleep(6)  # Trimming should have occured
@@ -141,6 +104,7 @@ def test_max_entries(topo, setup_max_entries):
     log.info("Testing changelog triming interval with max entries...")
     master = topo.ms["master1"]
+    cl = Changelog5(master)
     # reset errors log
     master.deleteErrorLogs()
@@ -152,11 +116,7 @@
         log.fatal('Trimming event unexpectedly occurred')
         assert False
-    try:
-        master.modify_s(DN_CHANGELOG, [(ldap.MOD_REPLACE, CL_TRIM_INTERVAL, "5")])
-    except ldap.LDAPError as e:
-        log.fatal("Failed to set chance log trim interval: " + str(e))
-        assert False
+    cl.set_trim_interval('5')
     time.sleep(6)  # Trimming should have occured
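For reference, the Changelog5 wrapper used by the fixtures above replaces raw modify_s calls against the changelog config entry. A minimal sketch of the trimming setup (the helper name configure_trimming is illustrative; the setters and values mirror the fixtures in this diff):

    from lib389.changelog import Changelog5

    def configure_trimming(master):
        # Wraps the changelog configuration entry on this instance.
        cl = Changelog5(master)
        cl.set_max_age('5')          # trim changes older than 5 seconds
        cl.set_max_entries('2')      # or: keep at most 2 changes per replica
        cl.set_trim_interval('300')  # run the trimming thread every 300 seconds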
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index a042fbd..4486c2f 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -9,12 +9,19 @@ import threading
 import pytest
+import random
 from lib389 import DirSrv
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_m4
 from lib389._constants import *
+from lib389.idm.directorymanager import DirectoryManager
+from lib389.replica import ReplicationManager, Replicas
+from lib389.tasks import CleanAllRUVTask
+from lib389.idm.user import UserAccounts
+from lib389.config import LDBMConfig
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
@@ -26,89 +33,62 @@ class AddUsers(threading.Thread):
         self.inst = inst
         self.num_users = num_users
-    def openConnection(self, inst):
-        """Open a new connection to our LDAP server"""
-
-        server = DirSrv(verbose=False)
-        args_instance[SER_HOST] = inst.host
-        args_instance[SER_PORT] = inst.port
-        args_instance[SER_SERVERID_PROP] = inst.serverid
-        args_standalone = args_instance.copy()
-        server.allocate(args_standalone)
-        server.open()
-        return server
-
     def run(self):
         """Start adding users"""
-        conn = self.openConnection(self.inst)
-        idx = 0
+        dm = DirectoryManager(self.inst)
+        conn = dm.bind()
-        while idx < self.num_users:
-            USER_DN = 'uid=' + self.inst.serverid + '_' + str(idx) + ',' + DEFAULT_SUFFIX
-            try:
-                conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(),
-                                            'uid': 'user' + str(idx)})))
+        users = UserAccounts(conn, DEFAULT_SUFFIX)
+
+        u_range = list(range(self.num_users))
+        random.shuffle(u_range)
+        for idx in u_range:
+            try:
+                users.create(properties={
+                    'uid': 'testuser%s' % idx,
+                    'cn' : 'testuser%s' % idx,
+                    'sn' : 'user%s' % idx,
+                    'uidNumber' : '%s' % (1000 + idx),
+                    'gidNumber' : '%s' % (1000 + idx),
+                    'homeDirectory' : '/home/testuser%s' % idx
+                })
             # One of the masters was probably put into read only mode - just break out
             except ldap.UNWILLING_TO_PERFORM:
                 break
            except ldap.ALREADY_EXISTS:
                pass
-            except ldap.LDAPError as e:
-                log.error('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
-                assert False
-            idx += 1
-        conn.close()
-
 def remove_master4_agmts(msg, topology_m4):
     """Remove all the repl agmts to master4. """
     log.info('%s: remove all the agreements to master 4...' % msg)
-    for num in range(1, 4):
-        try:
-            topology_m4.ms["master{}".format(num)].agreement.delete(DEFAULT_SUFFIX,
-                                                                    topology_m4.ms["master4"].host,
-                                                                    topology_m4.ms["master4"].port)
-        except ldap.LDAPError as e:
-            log.fatal('{}: Failed to delete agmt(m{} -> m4), error: {}'.format(msg, num, str(e)))
-            assert False
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    # This will delete m4 from the topo *and* remove all incoming agreements
+    # to m4.
+    repl.remove_master(topology_m4.ms["master4"],
+                       [topology_m4.ms["master1"], topology_m4.ms["master2"], topology_m4.ms["master3"]])
-
-def check_ruvs(msg, topology_m4):
+def check_ruvs(msg, topology_m4, m4rid):
     """Check masters 1- 3 for master 4's rid."""
-
-    clean = False
-    count = 0
-    while not clean and count < 10:
-        clean = True
-
-        for num in range(1, 4):
-            try:
-                entry = topology_m4.ms["master{}".format(num)].search_s(DEFAULT_SUFFIX,
-                                                                        ldap.SCOPE_SUBTREE,
-                                                                        REPLICA_RUV_FILTER)
-                if not entry:
-                    log.error('%s: Failed to find db tombstone entry from master' %
-                              msg)
-                elements = entry[0].getValues('nsds50ruv')
-                for ruv in elements:
-                    if 'replica 4' in ruv:
-                        # Not cleaned
-                        log.error('{}: Master {} is not cleaned!'.format(msg, num))
-                        clean = False
-                if clean:
-                    log.info('{}: Master {} is cleaned!'.format(msg, num))
-            except ldap.LDAPError as e:
-                log.fatal('{}: Unable to search master {} for db tombstone: {}'.format(msg, num, str(e)))
-        # Sleep a bit and give it chance to clean up...
-        time.sleep(5)
-        count += 1
-
-    return clean
-
+    for inst in (topology_m4.ms["master1"], topology_m4.ms["master2"], topology_m4.ms["master3"]):
+        clean = False
+        replicas = Replicas(inst)
+        replica = replicas.get(DEFAULT_SUFFIX)
+
+        count = 0
+        while not clean and count < 10:
+            ruv = replica.get_ruv()
+            if m4rid in ruv._rids:
+                time.sleep(5)
+                count = count + 1
+            else:
+                clean = True
+        if not clean:
+            raise Exception("Master %s was not cleaned in time." % inst.serverid)
+    return True
 def task_done(topology_m4, task_dn, timeout=60):
     """Check if the task is complete"""
@@ -140,81 +120,49 @@ def restore_master4(topology_m4):
     way to restore it for another test
     """
-    log.info('Restoring master 4...')
-
-    # Enable replication on master 4
-    topology_m4.ms["master4"].replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER,
-                                                        replicaId=REPLICAID_MASTER_4)
-
-    for num in range(1, 4):
-        host_to = topology_m4.ms["master{}".format(num)].host
-        port_to = topology_m4.ms["master{}".format(num)].port
-        properties = {RA_NAME: 'meTo_{}:{}'.format(host_to, port_to),
-                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
-                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
-                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
-                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-        agmt = topology_m4.ms["master4"].agreement.create(suffix=SUFFIX, host=host_to,
-                                                          port=port_to, properties=properties)
-        if not agmt:
-            log.fatal("Fail to create a master -> master replica agreement")
-            assert False
-        log.debug("%s created" % agmt)
-
-        host_to = topology_m4.ms["master4"].host
-        port_to = topology_m4.ms["master4"].port
-        properties = {RA_NAME: 'meTo_{}:{}'.format(host_to, port_to),
-                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
-                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
-                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
-                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-        agmt = topology_m4.ms["master{}".format(num)].agreement.create(suffix=SUFFIX, host=host_to,
-                                                                       port=port_to, properties=properties)
-        if not agmt:
-            log.fatal("Fail to create a master -> master replica agreement")
-            assert False
-        log.debug("%s created" % agmt)
-
-    # Stop the servers - this allows the rid(for master4) to be used again
-    for num in range(1, 5):
-        topology_m4.ms["master{}".format(num)].stop(timeout=30)
-
-    # Initialize the agreements
-    topology_m4.ms["master1"].start(timeout=30)
-    for num in range(2, 5):
-        host_to = topology_m4.ms["master{}".format(num)].host
-        port_to = topology_m4.ms["master{}".format(num)].port
-        topology_m4.ms["master{}".format(num)].start(timeout=30)
-        time.sleep(5)
-        topology_m4.ms["master1"].agreement.init(SUFFIX, host_to, port_to)
-        agreement = topology_m4.ms["master1"].agreement.list(suffix=SUFFIX,
-                                                             consumer_host=host_to,
-                                                             consumer_port=port_to)[0].dn
-        topology_m4.ms["master1"].waitForReplInit(agreement)
+    # Restart the remaining masters to allow rid 4 to be reused.
+    for inst in topology_m4.ms.values():
+        inst.restart()
-        time.sleep(5)
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl.join_master(topology_m4.ms["master1"], topology_m4.ms["master4"])
-    # Test Replication is working
-    for num in range(2, 5):
-        if topology_m4.ms["master1"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master{}".format(num)]):
-            log.info('Replication is working m1 -> m{}.'.format(num))
-        else:
-            log.fatal('restore_master4: Replication is not working from m1 -> m{}.'.format(num))
-            assert False
-        time.sleep(1)
-
-    # Check replication is working from master 4 to master1...
-    if topology_m4.ms["master4"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master1"]):
-        log.info('Replication is working m4 -> m1.')
-    else:
-        log.fatal('restore_master4: Replication is not working from m4 -> 1.')
-        assert False
-    time.sleep(5)
+    # Add the 2,3 -> 4 agmt.
+    repl.ensure_agreement(topology_m4.ms["master2"], topology_m4.ms["master4"])
+    repl.ensure_agreement(topology_m4.ms["master3"], topology_m4.ms["master4"])
+    # And in reverse ...
+    repl.ensure_agreement(topology_m4.ms["master4"], topology_m4.ms["master2"])
+    repl.ensure_agreement(topology_m4.ms["master4"], topology_m4.ms["master3"])
     log.info('Master 4 has been successfully restored.')
+@pytest.fixture()
+def m4rid(request, topology_m4):
+    log.debug("-------------- BEGIN RESET of m4 -----------------")
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl.test_replication_topology(topology_m4.ms.values())
+    # What is master4's rid?
+    m4rid = repl.get_rid(topology_m4.ms["master4"])
-def test_clean(topology_m4):
+    def fin():
+        try:
+            cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
+            cruv_task.create(properties={
+                'replica-id': m4rid,
+                'replica-base-dn': DEFAULT_SUFFIX
+            })
+            cruv_task.wait()
+        except ldap.UNWILLING_TO_PERFORM:
+            # In some cases we already cleaned rid4, so if we fail, it's okay
+            pass
+        restore_master4(topology_m4)
+        # Make sure everything works.
+        repl.test_replication_topology(topology_m4.ms.values())
+    request.addfinalizer(fin)
+    log.debug("-------------- FINISH RESET of m4 -----------------")
+    return m4rid
+
+def test_clean(topology_m4, m4rid):
     """Check that cleanallruv task works properly
     :id: e9b3ce5c-e17c-409e-aafc-e97d630f2878
@@ -234,53 +182,29 @@
     """
     log.info('Running test_clean...')
-
-    log.info('Check that replication works properly on all masters')
-    agmt_nums = {"master1": ("2", "3", "4"),
-                 "master2": ("1", "3", "4"),
-                 "master3": ("1", "2", "4"),
-                 "master4": ("1", "2", "3")}
-
-    for inst_name, agmts in agmt_nums.items():
-        for num in agmts:
-            if not topology_m4.ms[inst_name].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master{}".format(num)]):
-                log.fatal(
-                    'test_replication: Replication is not working between {} and master {}.'.format(inst_name,
                                                                                                     num))
-                assert False
-    # Disable master 4
-    log.info('test_clean: disable master 4...')
-    topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
-    # Remove the agreements from the other masters that point to master 4
+    log.info('test_clean: disable master 4...')
     remove_master4_agmts("test_clean", topology_m4)
     # Run the task
     log.info('test_clean: run the cleanAllRUV task...')
-    try:
-        topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
-                                                    args={TASK_WAIT: True})
-    except ValueError as e:
-        log.fatal('test_clean: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
+    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
+    cruv_task.create(properties={
+        'replica-id': m4rid,
+        'replica-base-dn': DEFAULT_SUFFIX
+    })
+    cruv_task.wait()
     # Check the other master's RUV for 'replica 4'
     log.info('test_clean: check all the masters have been cleaned...')
-    clean = check_ruvs("test_clean", topology_m4)
-
-    if not clean:
-        log.fatal('test_clean: Failed to clean replicas')
-        assert False
+    clean = check_ruvs("test_clean", topology_m4, m4rid)
+    assert clean
     log.info('test_clean PASSED, restoring master 4...')
-    # Cleanup - restore master 4
-    restore_master4(topology_m4)
-
-def test_clean_restart(topology_m4):
+def test_clean_restart(topology_m4, m4rid):
     """Check that cleanallruv task works properly after a restart
     :id: c6233bb3-092c-4919-9ac9-80dd02cc6e02
@@ -308,36 +232,30 @@
         9. No crash should happened
        10. Everything should be cleaned
     """
-    log.info('Running test_clean_restart...')
     # Disable master 4
-    log.info('test_clean_restart: disable master 4...')
-    topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
-
+    log.info('test_clean: disable master 4...')
     # Remove the agreements from the other masters that point to master 4
-    log.info('test_clean: remove all the agreements to master 4...')
-    remove_master4_agmts("test_clean restart", topology_m4)
+    remove_master4_agmts("test_clean", topology_m4)
     # Stop master 3 to keep the task running, so we can stop master 1...
-    topology_m4.ms["master3"].stop(timeout=30)
+    topology_m4.ms["master3"].stop()
     # Run the task
-    log.info('test_clean_restart: run the cleanAllRUV task...')
-    try:
-        (task_dn, rc) = topology_m4.ms["master1"].tasks.cleanAllRUV(
-            suffix=DEFAULT_SUFFIX, replicaid='4', args={TASK_WAIT: False})
-    except ValueError as e:
-        log.fatal('test_clean_restart: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
+    log.info('test_clean: run the cleanAllRUV task...')
+    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
+    cruv_task.create(properties={
+        'replica-id': m4rid,
+        'replica-base-dn': DEFAULT_SUFFIX
+    })
     # Sleep a bit, then stop master 1
     time.sleep(5)
-    topology_m4.ms["master1"].stop(timeout=30)
+    topology_m4.ms["master1"].stop()
     # Now start master 3 & 1, and make sure we didn't crash
-    topology_m4.ms["master3"].start(timeout=30)
+    topology_m4.ms["master3"].start()
     if topology_m4.ms["master3"].detectDisorderlyShutdown():
         log.fatal('test_clean_restart: Master 3 previously crashed!')
         assert False
@@ -347,25 +265,14 @@
        log.fatal('test_clean_restart: Master 1 previously crashed!')
        assert False
-    # Wait a little for agmts/cleanallruv to wake up
-    if not task_done(topology_m4, task_dn):
-        log.fatal('test_clean_restart: cleanAllRUV task did not finish')
-        assert False
-
     # Check the other master's RUV for 'replica 4'
     log.info('test_clean_restart: check all the masters have been cleaned...')
-    clean = check_ruvs("test_clean_restart", topology_m4)
-    if not clean:
-        log.fatal('Failed to clean replicas')
-        assert False
+    clean = check_ruvs("test_clean_restart", topology_m4, m4rid)
+    assert clean
     log.info('test_clean_restart PASSED, restoring master 4...')
-    # Cleanup - restore master 4
-    restore_master4(topology_m4)
-
-
-def test_clean_force(topology_m4):
+def test_clean_force(topology_m4, m4rid):
     """Check that multiple tasks with a 'force' option work properly
     :id: eb76a93d-8d1c-405e-9f25-6e8d5a781098
@@ -391,48 +298,39 @@
     log.info('Running test_clean_force...')
     # Stop master 3, while we update master 4, so that 3 is behind the other masters
-    topology_m4.ms["master3"].stop(timeout=10)
+    topology_m4.ms["master3"].stop()
     # Add a bunch of updates to master 4
     m4_add_users = AddUsers(topology_m4.ms["master4"], 1500)
     m4_add_users.start()
     m4_add_users.join()
-    # Disable master 4
-    log.info('test_clean_force: disable master 4...')
-    topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
-
     # Start master 3, it should be out of sync with the other replicas...
-    topology_m4.ms["master3"].start(timeout=30)
+    topology_m4.ms["master3"].start()
     # Remove the agreements from the other masters that point to master 4
     remove_master4_agmts("test_clean_force", topology_m4)
     # Run the task, use "force" because master 3 is not in sync with the other replicas
     # in regards to the replica 4 RUV
-    log.info('test_clean_force: run the cleanAllRUV task...')
-    try:
-        topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
-                                                    force=True, args={TASK_WAIT: True})
-    except ValueError as e:
-        log.fatal('test_clean_force: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
+    log.info('test_clean: run the cleanAllRUV task...')
+    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
+    cruv_task.create(properties={
+        'replica-id': m4rid,
+        'replica-base-dn': DEFAULT_SUFFIX,
+        'replica-force-cleaning': 'yes'
+    })
+    cruv_task.wait()
     # Check the other master's RUV for 'replica 4'
     log.info('test_clean_force: check all the masters have been cleaned...')
-    clean = check_ruvs("test_clean_force", topology_m4)
-    if not clean:
-        log.fatal('test_clean_force: Failed to clean replicas')
-        assert False
+    clean = check_ruvs("test_clean_force", topology_m4, m4rid)
+    assert clean
     log.info('test_clean_force PASSED, restoring master 4...')
-    # Cleanup - restore master 4
-    restore_master4(topology_m4)
-
-def test_abort(topology_m4):
+def test_abort(topology_m4, m4rid):
     """Test the abort task basic functionality
     :id: b09a6887-8de0-4fac-8e41-73ccbaaf7a08
@@ -452,69 +350,39 @@
     """
     log.info('Running test_abort...')
-
-    # Disable master 4
-    log.info('test_abort: disable replication on master 4...')
-    topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
-
     # Remove the agreements from the other masters that point to master 4
     remove_master4_agmts("test_abort", topology_m4)
     # Stop master 2
     log.info('test_abort: stop master 2 to freeze the cleanAllRUV task...')
-    topology_m4.ms["master2"].stop(timeout=30)
+    topology_m4.ms["master2"].stop()
     # Run the task
     log.info('test_abort: add the cleanAllRUV task...')
-    try:
-        (clean_task_dn, rc) = topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
-                                                                          replicaid='4', args={TASK_WAIT: False})
-    except ValueError as e:
-        log.fatal('test_abort: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
-
+    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
+    cruv_task.create(properties={
+        'replica-id': m4rid,
+        'replica-base-dn': DEFAULT_SUFFIX
+    })
     # Wait a bit
-    time.sleep(5)
+    time.sleep(2)
     # Abort the task
-    log.info('test_abort: abort the cleanAllRUV task...')
-    try:
-        topology_m4.ms["master1"].tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
-                                                         args={TASK_WAIT: True})
-    except ValueError as e:
-        log.fatal('test_abort: Problem running abortCleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
+    cruv_task.abort()
     # Check master 1 does not have the clean task running
     log.info('test_abort: check master 1 no longer has a cleanAllRUV task...')
-    if not task_done(topology_m4, clean_task_dn):
+    if not task_done(topology_m4, cruv_task.dn):
         log.fatal('test_abort: CleanAllRUV task was not aborted')
         assert False
     # Start master 2
     log.info('test_abort: start master 2 to begin the restore process...')
-    topology_m4.ms["master2"].start(timeout=30)
-
-    #
-    # Now run the clean task task again to we can properly restore master 4
-    #
-    log.info('test_abort: run cleanAllRUV task so we can properly restore master 4...')
-    try:
-        topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
-                                                    replicaid='4', args={TASK_WAIT: True})
-    except ValueError as e:
-        log.fatal('test_abort: Problem running cleanAllRuv task: ' + e.message('desc'))
-        assert False
+    topology_m4.ms["master2"].start()
     log.info('test_abort PASSED, restoring master 4...')
-    # Cleanup - Restore master 4
-    restore_master4(topology_m4)
-
-
-def test_abort_restart(topology_m4):
+def test_abort_restart(topology_m4, m4rid):
     """Test the abort task can handle a restart, and then resume
     :id: b66e33d4-fe85-4e1c-b882-75da80f70ab3
@@ -544,14 +412,8 @@
     """
     log.info('Running test_abort_restart...')
-
-    # Disable master 4
-    log.info('test_abort_restart: disable replication on master 4...')
-    topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
-
     # Remove the agreements from the other masters that point to master 4
-    log.info('test_abort_restart: remove all the agreements to master 4...)')
-    remove_master4_agmts("test_abort_restart", topology_m4)
+    remove_master4_agmts("test_abort", topology_m4)
     # Stop master 3
     log.info('test_abort_restart: stop master 3 to freeze the cleanAllRUV task...')
@@ -559,34 +421,20 @@
     # Run the task
     log.info('test_abort_restart: add the cleanAllRUV task...')
-    try:
-        (clean_task_dn, rc) = topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
-                                                                          replicaid='4', args={TASK_WAIT: False})
-    except ValueError as e:
-        log.fatal('test_abort_restart: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
-
+    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
+    cruv_task.create(properties={
+        'replica-id': m4rid,
+        'replica-base-dn': DEFAULT_SUFFIX
+    })
     # Wait a bit
-    time.sleep(5)
+    time.sleep(2)
     # Abort the task
-    log.info('test_abort_restart: abort the cleanAllRUV task...')
-    try:
-        topology_m4.ms["master1"].tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
-                                                         certify=True, args={TASK_WAIT: False})
-    except ValueError as e:
-        log.fatal('test_abort_restart: Problem running test_abort_restart task: ' +
-                  e.message('desc'))
-        assert False
-
-    # Allow task to run for a bit:
-    time.sleep(5)
+    abort_task = cruv_task.abort()
     # Check master 1 does not have the clean task running
-    log.info('test_abort: check master 1 no longer has a cleanAllRUV task...')
-
-    if not task_done(topology_m4, clean_task_dn):
+    log.info('test_abort_abort: check master 1 no longer has a cleanAllRUV task...')
+    if not task_done(topology_m4, cruv_task.dn):
         log.fatal('test_abort_restart: CleanAllRUV task was not aborted')
         assert False
@@ -604,23 +452,10 @@
         log.fatal('test_abort_restart: Abort task did not restart')
         assert False
-    # Now run the clean task task again to we can properly restore master 4
-    log.info('test_abort_restart: run cleanAllRUV task so we can properly restore master 4...')
-    try:
-        topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
-                                                    replicaid='4', args={TASK_WAIT: True})
-    except ValueError as e:
-        log.fatal('test_abort_restart: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
-
     log.info('test_abort_restart PASSED, restoring master 4...')
-    # Cleanup - Restore master 4
-    restore_master4(topology_m4)
-
-def test_abort_certify(topology_m4):
+def test_abort_certify(topology_m4, m4rid):
     """Test the abort task with a replica-certify-all option
     :id: 78959966-d644-44a8-b98c-1fcf21b45eb0
@@ -641,10 +476,6 @@
     log.info('Running test_abort_certify...')
-    # Disable master 4
-    log.info('test_abort_certify: disable replication on master 4...')
-    topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
-
     # Remove the agreements from the other masters that point to master 4
     remove_master4_agmts("test_abort_certify", topology_m4)
@@ -654,33 +485,23 @@
     # Run the task
     log.info('test_abort_certify: add the cleanAllRUV task...')
-    try:
-        (clean_task_dn, rc) = topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
-                                                                          replicaid='4', args={TASK_WAIT: False})
-    except ValueError as e:
-        log.fatal('test_abort_certify: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
-
-    # Allow the clean task to get started...
-    time.sleep(5)
+    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
+    cruv_task.create(properties={
+        'replica-id': m4rid,
+        'replica-base-dn': DEFAULT_SUFFIX
+    })
+    # Wait a bit
+    time.sleep(2)
     # Abort the task
     log.info('test_abort_certify: abort the cleanAllRUV task...')
-    try:
-        (abort_task_dn, rc) = topology_m4.ms["master1"].tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX,
-                                                                               replicaid='4', certify=True,
-                                                                               args={TASK_WAIT: False})
-    except ValueError as e:
-        log.fatal('test_abort_certify: Problem running abortCleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
+    abort_task = cruv_task.abort(certify=True)
     # Wait a while and make sure the abort task is still running
     log.info('test_abort_certify: sleep for 5 seconds')
     time.sleep(5)
-    if task_done(topology_m4, abort_task_dn, 60):
+    if task_done(topology_m4, abort_task.dn, 60):
         log.fatal('test_abort_certify: abort task incorrectly finished')
         assert False
@@ -689,37 +510,20 @@
     topology_m4.ms["master2"].start()
     # Wait for the abort task to stop
-    if not task_done(topology_m4, abort_task_dn, 60):
+    if not task_done(topology_m4, abort_task.dn, 60):
         log.fatal('test_abort_certify: The abort CleanAllRUV task was not aborted')
         assert False
     # Check master 1 does not have the clean task running
     log.info('test_abort_certify: check master 1 no longer has a cleanAllRUV task...')
-    if not task_done(topology_m4, clean_task_dn):
+    if not task_done(topology_m4, cruv_task.dn):
         log.fatal('test_abort_certify: CleanAllRUV task was not aborted')
         assert False
-    # Start master 2
-    log.info('test_abort_certify: start master 2 to begin the restore process...')
-    topology_m4.ms["master2"].start()
-
-    # Now run the clean task task again to we can properly restore master 4
-    log.info('test_abort_certify: run cleanAllRUV task so we can properly restore master 4...')
-    try:
-        topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
-                                                    replicaid='4', args={TASK_WAIT: True})
-    except ValueError as e:
-        log.fatal('test_abort_certify: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
-
     log.info('test_abort_certify PASSED, restoring master 4...')
-    # Cleanup - Restore master 4
-    restore_master4(topology_m4)
-
-def test_stress_clean(topology_m4):
+def test_stress_clean(topology_m4, m4rid):
     """Put each server(m1 - m4) under a stress, and perform the entire clean process
     :id: a8263cd6-f068-4357-86e0-e7c34504c8c5
@@ -744,6 +548,8 @@
     log.info('Running test_stress_clean...')
     log.info('test_stress_clean: put all the masters under load...')
+    ldbm_config = LDBMConfig(topology_m4.ms["master4"])
+
     # Put all the masters under load
     m1_add_users = AddUsers(topology_m4.ms["master1"], 2000)
     m1_add_users.start()
@@ -759,38 +565,21 @@
     time.sleep(5)
     # Put master 4 into read only mode
-    log.info('test_stress_clean: put master 4 into read-only mode...')
-    try:
-        topology_m4.ms["master4"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'on')])
-    except ldap.LDAPError as e:
-        log.fatal('test_stress_clean: Failed to put master 4 into read-only mode: error ' +
-                  e.message['desc'])
-        assert False
-
+    ldbm_config.set('nsslapd-readonly', 'on')
     # We need to wait for master 4 to push its changes out
     log.info('test_stress_clean: allow some time for master 4 to push changes out (60 seconds)...')
-    time.sleep(60)
-
-    # Disable master 4
-    log.info('test_stress_clean: disable replication on master 4...')
-    try:
-        topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
-    except:
-        log.fatal('test_stress_clean: failed to diable replication')
-        assert False
+    time.sleep(30)
     # Remove the agreements from the other masters that point to master 4
     remove_master4_agmts("test_stress_clean", topology_m4)
     # Run the task
-    log.info('test_stress_clean: Run the cleanAllRUV task...')
-    try:
-        topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
-                                                    args={TASK_WAIT: True})
-    except ValueError as e:
-        log.fatal('test_stress_clean: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
+    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
+    cruv_task.create(properties={
+        'replica-id': m4rid,
+        'replica-base-dn': DEFAULT_SUFFIX
+    })
+    cruv_task.wait()
     # Wait for the update to finish
     log.info('test_stress_clean: wait for all the updates to finish...')
@@ -801,30 +590,25 @@
     # Check the other master's RUV for 'replica 4'
     log.info('test_stress_clean: check if all the replicas have been cleaned...')
-    clean = check_ruvs("test_stress_clean", topology_m4)
-    if not clean:
-        log.fatal('test_stress_clean: Failed to clean replicas')
-        assert False
+    clean = check_ruvs("test_stress_clean", topology_m4, m4rid)
+    assert clean
     log.info('test_stress_clean: PASSED, restoring master 4...')
-    # Cleanup - restore master 4
     # Sleep for a bit to replication complete
     log.info("Sleep for 120 seconds to allow replication to complete...")
-    time.sleep(120)
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl.test_replication_topology([
+        topology_m4.ms["master1"],
+        topology_m4.ms["master2"],
+        topology_m4.ms["master3"],
+        ], timeout=120)
     # Turn off readonly mode
-    try:
-        topology_m4.ms["master4"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'off')])
-    except ldap.LDAPError as e:
-        log.fatal('test_stress_clean: Failed to put master 4 into read-only mode: error ' +
-                  e.message['desc'])
-        assert False
+    ldbm_config.set('nsslapd-readonly', 'off')
-    restore_master4(topology_m4)
-
-def test_multiple_tasks_with_force(topology_m4):
+def test_multiple_tasks_with_force(topology_m4, m4rid):
     """Check that multiple tasks with a 'force' option work properly
     :id: eb76a93d-8d1c-405e-9f25-6e8d5a781098
@@ -852,60 +636,56 @@
     log.info('Running test_multiple_tasks_with_force...')
     # Stop master 3, while we update master 4, so that 3 is behind the other masters
-    topology_m4.ms["master3"].stop(timeout=10)
+    topology_m4.ms["master3"].stop()
     # Add a bunch of updates to master 4
     m4_add_users = AddUsers(topology_m4.ms["master4"], 1500)
     m4_add_users.start()
     m4_add_users.join()
-    # Disable master 4
-    log.info('test_multiple_tasks_with_force: disable master 4...')
-    topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
-
     # Start master 3, it should be out of sync with the other replicas...
-    topology_m4.ms["master3"].start(timeout=30)
+    topology_m4.ms["master3"].start()
+    # Disable master 4
     # Remove the agreements from the other masters that point to master 4
     remove_master4_agmts("test_multiple_tasks_with_force", topology_m4)
     # Run the task, use "force" because master 3 is not in sync with the other replicas
     # in regards to the replica 4 RUV
     log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" on...')
-    try:
-        (clean_task_dn, rc) = topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
-                                                                          force=True, args={TASK_WAIT: False})
-    except ValueError as e:
-        log.fatal('test_multiple_tasks_with_force: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
+    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
+    cruv_task.create(properties={
+        'replica-id': m4rid,
+        'replica-base-dn': DEFAULT_SUFFIX,
+        'replica-force-cleaning': 'yes'
+    })
     log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" off...')
+
+    # NOTE: This must be a plain try/except rather than pytest.raises, because
+    # the force task above may or may not have completed yet ....
     try:
-        topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
-                                                    args={TASK_WAIT: True})
-    except ValueError as e:
-        log.fatal('test_multiple_tasks_with_force: Problem running cleanAllRuv task: ' +
-                  e.message('desc'))
-        assert False
+        cruv_task_fail = CleanAllRUVTask(topology_m4.ms["master1"])
+        cruv_task_fail.create(properties={
+            'replica-id': m4rid,
+            'replica-base-dn': DEFAULT_SUFFIX
+        })
+        cruv_task_fail.wait()
+    except ldap.UNWILLING_TO_PERFORM:
+        pass
+    # Wait for the force task ....
+    cruv_task.wait()
     # Check the other master's RUV for 'replica 4'
     log.info('test_multiple_tasks_with_force: check all the masters have been cleaned...')
-    clean = check_ruvs("test_clean_force", topology_m4)
-    if not clean:
-        log.fatal('test_multiple_tasks_with_force: Failed to clean replicas')
-        assert False
-
+    clean = check_ruvs("test_clean_force", topology_m4, m4rid)
+    assert clean
     # Check master 1 does not have the clean task running
     log.info('test_abort: check master 1 no longer has a cleanAllRUV task...')
-    if not task_done(topology_m4, clean_task_dn):
+    if not task_done(topology_m4, cruv_task.dn):
        log.fatal('test_abort: CleanAllRUV task was not aborted')
        assert False
-    # Cleanup - restore master 4
-    restore_master4(topology_m4)
-
-
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index a9dfbed..3fa4d51 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -13,7 +13,8 @@ from lib389.topologies import topology_m2 as topo_m2, TopologyMain
 from lib389._constants import *
 from . import get_repl_entries
 from lib389.idm.organisationalunit import OrganisationalUnits
-from lib389.replica import Replicas
+from lib389.idm.user import UserAccount
+from lib389.replica import Replicas, ReplicationManager
 NEW_SUFFIX_NAME = 'test_repl'
 NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME)
@@ -27,22 +28,13 @@ else:
 log = logging.getLogger(__name__)
-@pytest.fixture(scope="function")
+@pytest.fixture()
 def test_entry(topo_m2, request):
     """Add test entry using UserAccounts"""
     log.info('Adding a test entry user')
     users = UserAccounts(topo_m2.ms["master1"], DEFAULT_SUFFIX)
-    tuser = users.create(properties=TEST_USER_PROPERTIES)
-
-    def fin():
-        if users.list():
-            log.info('Deleting user-{}'.format(tuser.dn))
-            tuser.delete()
-        else:
-            log.info('There is no user to delete')
-
-    request.addfinalizer(fin)
+    tuser = users.ensure_state(properties=TEST_USER_PROPERTIES)
     return tuser
@@ -50,7 +42,7 @@
 def test_double_delete(topo_m2, test_entry):
     """Check that double delete of the entry doesn't crash server
     :id: 3496c82d-636a-48c9-973c-2455b12164cc
-    :setup: Four masters replication setup, a test entry
+    :setup: Two masters replication setup, a test entry
     :steps:
         1. Delete the entry on the first master
         2. Delete the entry on the second master
@@ -61,21 +53,24 @@
        3. Server should me alive
     """
-    test_entry_rdn = test_entry.rdn
+    m1 = topo_m2.ms["master1"]
+    m2 = topo_m2.ms["master2"]
+
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl.disable_to_master(m1, [m2])
+    repl.disable_to_master(m2, [m1])
     log.info('Deleting entry {} from master1'.format(test_entry.dn))
     topo_m2.ms["master1"].delete_s(test_entry.dn)
     log.info('Deleting entry {} from master2'.format(test_entry.dn))
-    try:
-        topo_m2.ms["master2"].delete_s(test_entry.dn)
-    except ldap.NO_SUCH_OBJECT:
-        log.info("Entry {} wasn't found master2. It is expected.".format(test_entry.dn))
+    topo_m2.ms["master2"].delete_s(test_entry.dn)
-    log.info('Make searches to check if server is alive')
-    entries = get_repl_entries(topo_m2, test_entry_rdn, ["uid"])
-    assert not entries, "Entry deletion {} wasn't replicated successfully".format(test_entry.dn)
+    repl.enable_to_master(m2, [m1])
+    repl.enable_to_master(m1, [m2])
+    repl.test_replication(m1, m2)
+    repl.test_replication(m2, m1)
 @pytest.mark.bz1506831
 def test_repl_modrdn(topo_m2):
@@ -108,6 +103,8 @@
     master1 = topo_m2.ms["master1"]
     master2 = topo_m2.ms["master2"]
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+
     log.info("Add test entries - Add 3 OUs and 2 same users under 2 different OUs")
     OUs = OrganisationalUnits(master1, DEFAULT_SUFFIX)
     OU_A = OUs.create(properties={
@@ -129,7 +126,8 @@
     users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn))
     tuser_B = users.create(properties=TEST_USER_PROPERTIES)
-    time.sleep(10)
+    repl.test_replication(master1, master2)
+    repl.test_replication(master2, master1)
     log.info("Stop Replication")
     topo_m2.pause_all_replicas()
@@ -144,7 +142,8 @@
     topo_m2.resume_all_replicas()
     log.info("Wait for sometime for repl to resume")
-    time.sleep(10)
+    repl.test_replication(master1, master2)
+    repl.test_replication(master2, master1)
     log.info("Check that there should be only one test entry under ou=C on both masters")
     users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
@@ -154,10 +153,9 @@
     assert len(users.list()) == 1
     log.info("Check that the replication is working fine both ways, M1 <-> M2")
-    replicas_m1 = Replicas(master1)
-    replicas_m2 = Replicas(master2)
-    replicas_m1.test(DEFAULT_SUFFIX, master2)
-    replicas_m2.test(DEFAULT_SUFFIX, master1)
+    repl.test_replication(master1, master2)
+    repl.test_replication(master2, master1)
+
 def test_password_repl_error(topo_m2, test_entry):
@@ -186,30 +184,24 @@
     m2.setLogLevel(LOG_REPLICA)
     log.info('Modifying entry {} - change userpassword on master 1'.format(test_entry.dn))
-    try:
-        m1.modify_s(test_entry.dn, [(ldap.MOD_REPLACE, 'userpassword', TEST_ENTRY_NEW_PASS)])
-    except ldap.LDAPError as e:
-        log.error('Failed to modify entry (%s): error (%s)' % (test_entry.dn,
-                                                               e.message['desc']))
-        raise e
+
+    test_entry.set('userpassword', TEST_ENTRY_NEW_PASS)
+
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl.wait_for_replication(m1, m2)
     log.info('Restart the servers to flush the logs')
     for num in range(1, 3):
-        topo_m2.ms["master{}".format(num)].restart(timeout=10)
-
-    time.sleep(5)
+        topo_m2.ms["master{}".format(num)].restart()
     try:
         log.info('Check that password works on master 2')
-        m2.simple_bind_s(test_entry.dn, TEST_ENTRY_NEW_PASS)
-        m2.simple_bind_s(DN_DM, PASSWORD)
+        test_entry_m2 = UserAccount(m2, test_entry.dn)
+        test_entry_m2.bind(TEST_ENTRY_NEW_PASS)
         log.info('Check the error log for the error with {}'.format(test_entry.dn))
         assert not m2.ds_error_log.match('.*can.t add a change for {}.*'.format(test_entry.dn))
     finally:
-        log.info('Reset bind DN to Directory manager')
-        for num in range(1, 3):
-            topo_m2.ms["master{}".format(num)].simple_bind_s(DN_DM, PASSWORD)
         log.info('Set the default loglevel')
         m2.setLogLevel(LOG_DEFAULT)
@@ -228,29 +220,34 @@
     """
     m1 = topo_m2.ms["master1"]
+    m2 = topo_m2.ms["master2"]
+
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+
+    replicas = Replicas(m1)
+    replica = replicas.get(DEFAULT_SUFFIX)
+    agmts = replica.get_agreements()
     # Add invalid agreement (nsds5ReplicaEnabled set to invalid value)
-    AGMT_DN = 'cn=whatever,cn=replica,cn="dc=example,dc=com",cn=mapping tree,cn=config'
-    try:
-        invalid_props = {RA_ENABLED: 'True',  # Invalid value
-                         RA_SCHEDULE: '0001-2359 0123456'}
-        m1.agreement.create(suffix=DEFAULT_SUFFIX, host='localhost', port=389, properties=invalid_props)
-    except ldap.UNWILLING_TO_PERFORM:
-        m1.log.info('Invalid repl agreement correctly rejected')
-    except ldap.LDAPError as e:
-        m1.log.fatal('Got unexpected error adding invalid agreement: ' + str(e))
-        assert False
-    else:
-        m1.log.fatal('Invalid agreement was incorrectly accepted by the server')
-        assert False
+    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+        agmts.create(properties={
+            'cn': 'whatever',
+            'nsDS5ReplicaRoot': DEFAULT_SUFFIX,
+            'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
+            'nsDS5ReplicaBindMethod': 'simple' ,
+            'nsDS5ReplicaTransportInfo': 'LDAP',
+            'nsds5replicaTimeout': '5',
+            'description': "test agreement",
+            'nsDS5ReplicaHost': m2.host,
+            'nsDS5ReplicaPort': str(m2.port),
+            'nsDS5ReplicaCredentials': 'whatever',
+            'nsds5ReplicaEnabled': 'YEAH MATE, LETS REPLICATE'
+        })
     # Verify the server is still running
-    try:
-        m1.simple_bind_s(DN_DM, PASSWORD)
-    except ldap.LDAPError as e:
-        m1.log.fatal('Failed to bind: ' + str(e))
-        assert False
-
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl.test_replication(m1, m2)
+    repl.test_replication(m2, m1)
 if __name__ == '__main__':
     # Run isolated
diff --git a/dirsrvtests/tests/suites/replication/replica_config_test.py b/dirsrvtests/tests/suites/replication/replica_config_test.py
index add4d80..c6ef80a 100644
--- a/dirsrvtests/tests/suites/replication/replica_config_test.py
+++ b/dirsrvtests/tests/suites/replication/replica_config_test.py
@@ -7,6 +7,9 @@ from lib389._constants import *
 from lib389 import Entry
 from lib389.topologies import topology_st as topo
+from lib389.replica import Replicas
+from lib389.agreement import Agreements
+
 DEBUGGING = os.getenv("DEBUGGING", default=False)
 if DEBUGGING:
     logging.getLogger(__name__).setLevel(logging.DEBUG)
@@ -14,14 +17,11 @@ else:
     logging.getLogger(__name__).setLevel(logging.INFO)
 log = logging.getLogger(__name__)
-REPLICA_DN = 'cn=replica,cn="dc=example,dc=com",cn=mapping tree,cn=config'
-AGMT_DN = 'cn=test_agreement,cn=replica,cn="dc=example,dc=com",cn=mapping tree,cn=config'
 notnum = 'invalid'
 too_big = '9223372036854775807'
 overflow = '9999999999999999999999999999999999999999999999999999999999999999999'
-replica_dict = {'objectclass': 'top nsDS5Replica'.split(),
-                'nsDS5ReplicaRoot': 'dc=example,dc=com',
+replica_dict = {'nsDS5ReplicaRoot': 'dc=example,dc=com',
                 'nsDS5ReplicaType': '3',
                 'nsDS5Flags': '1',
                 'nsDS5ReplicaId': '65535',
@@ -29,8 +29,7 @@
                 'nsDS5ReplicaBindDN': 'cn=u',
                 'cn': 'replica'}
-agmt_dict = {'objectClass': 'top nsDS5ReplicationAgreement'.split(),
-             'cn': 'test_agreement',
+agmt_dict = {'cn': 'test_agreement',
             'nsDS5ReplicaRoot': 'dc=example,dc=com',
             'nsDS5ReplicaHost': 'localhost.localdomain',
             'nsDS5ReplicaPort': '5555',
@@ -60,58 +59,55 @@ repl_mod_attrs = [('nsDS5Flags', '-1', '2', overflow, notnum, '1'),
                  ('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'),
                  ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')]
-agmt_attrs = [('nsds5ReplicaPort', '0', '65536', overflow, notnum, '389'),
+agmt_attrs = [
+    ('nsds5ReplicaPort', '0', '65536', overflow, notnum, '389'),
     ('nsds5ReplicaTimeout', '-1', too_big, overflow, notnum, '6'),
     ('nsds5ReplicaBusyWaitTime', '-1', too_big, overflow, notnum, '6'),
     ('nsds5ReplicaSessionPauseTime', '-1', too_big, overflow, notnum, '6'),
     ('nsds5ReplicaFlowControlWindow', '-1', too_big, overflow, notnum, '6'),
     ('nsds5ReplicaFlowControlPause', '-1', too_big, overflow, notnum, '6'),
-    ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '6')]
+    ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '6')
+    ]
+def replica_reset(topo):
+    """Purge all existing replica details"""
+    replicas = Replicas(topo.standalone)
+    for r in replicas.list():
+        r.delete()
 def replica_setup(topo):
     """Add a valid replica config entry to modify
     """
-    try:
-        topo.standalone.delete_s(REPLICA_DN)
-    except:
-        pass
-
-    try:
-        topo.standalone.add_s(Entry((REPLICA_DN, replica_dict)))
-    except ldap.LDAPError as e:
-        log.fatal("Failed to add replica entry: " + str(e))
-        assert False
-
-
-def replica_reset(topo):
-    try:
-        topo.standalone.delete_s(REPLICA_DN)
-    except:
-        pass
+    replicas = Replicas(topo.standalone)
+    for r in replicas.list():
+        r.delete()
+    return replicas.create(properties=replica_dict)
+def agmt_reset(topo):
+    """Purge all existing agreements for testing"""
+    agmts = Agreements(topo.standalone)
+    for a in agmts.list():
+        a.delete()
 def agmt_setup(topo):
     """Add a valid replica config entry to modify
     """
-    try:
-        topo.standalone.delete_s(AGMT_DN)
-    except:
-        pass
-
-    try:
-        topo.standalone.add_s(Entry((AGMT_DN, agmt_dict)))
-    except ldap.LDAPError as e:
-        log.fatal("Failed to add agreement entry: " + str(e))
-        assert False
-
-
-def agmt_reset(topo):
-    try:
-        topo.standalone.delete_s(AGMT_DN)
-    except:
-        pass
-
+    # Reset the agreements too.
+    replica = replica_setup(topo)
+    agmts = Agreements(topo.standalone, basedn=replica.dn)
+    for a in agmts.list():
+        a.delete()
+    return agmts.create(properties=agmt_dict)
+
+def perform_invalid_create(many, properties, attr, value):
+    my_properties = copy.deepcopy(properties)
+    my_properties[attr] = value
+    with pytest.raises(ldap.LDAPError):
+        many.create(properties=my_properties)
+
+def perform_invalid_modify(o, attr, value):
+    with pytest.raises(ldap.LDAPError):
+        o.replace(attr, value)
 @pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", repl_add_attrs)
 def test_replica_num_add(topo, attr, too_small, too_big, overflow, notnum, valid):
@@ -132,59 +128,22 @@
         4. Add is rejected
         5. Add is allowed
     """
-    replica_reset(topo)
-    # Test too small
-    my_replica = copy.deepcopy(replica_dict)
-    my_replica[attr] = too_small
-    try:
-        topo.standalone.add_s(Entry((REPLICA_DN, my_replica)))
-        log.fatal("Incorrectly allowed to add replica entry with {}:{}".format(attr, too_small))
-        assert False
-    except ldap.LDAPError as e:
-        log.info("Correctly failed to add replica entry with {}:{} error: {}".format(attr, too_small, str(e)))
+    replicas = Replicas(topo.standalone)
+    # Test too small
+    perform_invalid_create(replicas, replica_dict, attr, too_small)
     # Test too big
-    my_replica = copy.deepcopy(replica_dict)
-    my_replica[attr] = too_big
-    try:
-        topo.standalone.add_s(Entry((REPLICA_DN, my_replica)))
-        log.fatal("Incorrectly allowed to add replica entry with {}:{}".format(attr, too_big))
-        assert False
-    except ldap.LDAPError as e:
-        log.info("Correctly failed to add replica entry with {}:{} error: {}".format(attr, too_big, str(e)))
-
+    perform_invalid_create(replicas, replica_dict, attr, too_big)
     # Test overflow
-    my_replica = copy.deepcopy(replica_dict)
-    my_replica[attr] = overflow
-    try:
-        topo.standalone.add_s(Entry((REPLICA_DN, my_replica)))
-        log.fatal("Incorrectly allowed to add replica entry with {}:{}".format(attr, overflow))
-        assert False
-    except ldap.LDAPError as e:
-        log.info("Correctly failed to add replica entry with {}:{} error: {}".format(attr, overflow, str(e)))
-
+    perform_invalid_create(replicas, replica_dict, attr, overflow)
     # test not a number
-    my_replica = copy.deepcopy(replica_dict)
-    my_replica[attr] = notnum
-    try:
-        topo.standalone.add_s(Entry((REPLICA_DN, my_replica)))
-        log.fatal("Incorrectly allowed to add replica entry with {}:{}".format(attr, notnum))
-        assert False
-    except ldap.LDAPError as e:
-        log.info("Correctly failed to add replica entry with {}:{} error: {}".format(attr, notnum, str(e)))
-
+    perform_invalid_create(replicas, replica_dict, attr, notnum)
     # Test valid value
     my_replica = copy.deepcopy(replica_dict)
     my_replica[attr] = valid
-    try:
-        topo.standalone.add_s(Entry((REPLICA_DN, my_replica)))
-        log.info("Correctly allowed to add replica entry with {}: {}".format(attr, valid))
-    except ldap.LDAPError as e:
-        log.fatal("Incorrectly failed to add replica entry with {}: {} error: {}".format(attr, valid, str(e)))
-        assert False
-
+    replicas.create(properties=my_replica)
 @pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", repl_mod_attrs)
 def test_replica_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid):
@@ -205,53 +164,21 @@
         4. Value is rejected
         5. Value is allowed
     """
+    replica = replica_setup(topo)
     # Value too small
-    replica_setup(topo)
-    try:
-        topo.standalone.modify_s(REPLICA_DN, [(ldap.MOD_REPLACE, attr, too_small)])
-        log.fatal('Invalid value for {}:{} was incorrectly allowed'.format(attr, too_small))
-        assert False
-    except:
-        log.info('Invalid value for {}:{} was correctly rejected'.format(attr, too_small))
-
+    perform_invalid_modify(replica, attr, too_small)
     # Value too big
-    replica_setup(topo)
-    try:
-        topo.standalone.modify_s(REPLICA_DN, [(ldap.MOD_REPLACE, attr, too_big)])
-        log.fatal('Invalid value for {}:{} was incorrectly allowed'.format(attr, too_big))
-        assert False
-    except:
-        log.info('Invalid value for {}:{} was correctly rejected'.format(attr, too_big))
-
+    perform_invalid_modify(replica, attr, too_big)
     # Value overflow
-    replica_setup(topo)
-    try:
-        topo.standalone.modify_s(REPLICA_DN, [(ldap.MOD_REPLACE, attr, overflow)])
-        log.fatal('Invalid value for {}:{} was incorrectly allowed'.format(attr, overflow))
-        assert False
-    except:
-        log.info('Invalid value for {}:{} was correctly rejected'.format(attr, overflow))
-
+    perform_invalid_modify(replica, attr, overflow)
     # Value not a number
-    replica_setup(topo)
-    try:
-        topo.standalone.modify_s(REPLICA_DN, [(ldap.MOD_REPLACE, attr, notnum)])
-        log.fatal('Invalid value for {}:{} was incorrectly allowed'.format(attr, notnum))
-        assert False
-    except:
-        log.info('Invalid value for {}:{} was correctly rejected'.format(attr, notnum))
-
+    perform_invalid_modify(replica, attr, notnum)
     # Value is valid
-    replica_setup(topo)
-    try:
-        topo.standalone.modify_s(REPLICA_DN, [(ldap.MOD_REPLACE, attr, valid)])
-        log.info('Correctly added valid agreement attribute value: {}:{}'.format(attr, valid))
-    except ldap.LDAPError as e:
-        log.fatal('Valid value for {}:{} was incorrectly rejected. Error {}'.format(attr, valid, str(e)))
-        assert False
+    replica.replace(attr, valid)
+@pytest.mark.skip(reason="Agreement validation currently does not work.") @pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs) def test_agmt_num_add(topo, attr, too_small, too_big, overflow, notnum, valid): """Test all the number values you can set for a replica config entry @@ -272,58 +199,25 @@ def test_agmt_num_add(topo, attr, too_small, too_big, overflow, notnum, valid): 5. Add is allowed """ agmt_reset(topo) + replica = replica_setup(topo)
- # Test too small - my_agmt = copy.deepcopy(agmt_dict) - my_agmt[attr] = too_small - try: - topo.standalone.add_s(Entry((AGMT_DN, my_agmt))) - log.fatal("Incorrectly allowed to add agreement entry with {}:{}".format(attr, too_small)) - assert False - except ldap.LDAPError as e: - log.info("Correctly failed to add agreement entry with {}:{} error: {}".format(attr, too_small, str(e))) + agmts = Agreements(topo.standalone, basedn=replica.dn)
+ # Test too small + perform_invalid_create(agmts, agmt_dict, attr, too_small) # Test too big - my_agmt = copy.deepcopy(agmt_dict) - my_agmt[attr] = too_big - try: - topo.standalone.add_s(Entry((AGMT_DN, my_agmt))) - log.fatal("Incorrectly allowed to add agreement entry with {}:{}".format(attr, too_big)) - assert False - except ldap.LDAPError as e: - log.info("Correctly failed to add agreement entry with {}:{} error: {}".format(attr, too_big, str(e))) - + perform_invalid_create(agmts, agmt_dict, attr, too_big) # Test overflow - my_agmt = copy.deepcopy(agmt_dict) - my_agmt[attr] = overflow - try: - topo.standalone.add_s(Entry((AGMT_DN, my_agmt))) - log.fatal("Incorrectly allowed to add agreement entry with {}:{}".format(attr, overflow)) - assert False - except ldap.LDAPError as e: - log.info("Correctly failed to add agreement entry with {}:{} error: {}".format(attr, overflow, str(e))) - + perform_invalid_create(agmts, agmt_dict, attr, overflow) # test not a number - my_agmt = copy.deepcopy(agmt_dict) - my_agmt[attr] = notnum - try: - topo.standalone.add_s(Entry((AGMT_DN, my_agmt))) - log.fatal("Incorrectly allowed to add agreement entry with {}:{}".format(attr, notnum)) - assert False - except ldap.LDAPError as e: - log.info("Correctly failed to add agreement entry with {}:{} error: {}".format(attr, notnum, str(e))) - + perform_invalid_create(agmts, agmt_dict, attr, notnum) # Test valid value my_agmt = copy.deepcopy(agmt_dict) my_agmt[attr] = valid - try: - topo.standalone.add_s(Entry((AGMT_DN, my_agmt))) - log.info("Correctly allowed to add agreement entry with {}: {}".format(attr, valid)) - except ldap.LDAPError as e: - log.fatal("Incorrectly failed to add agreement entry with {}: {} error: {}".format(attr, valid, str(e))) - assert False + agmts.create(properties=my_agmt)
+@pytest.mark.skip(reason="Agreement validation currently does not work.") @pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs) def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid): """Test all the number values you can set for a replica config entry @@ -344,78 +238,18 @@ def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid 5. Value is allowed """
- # Value too small - agmt_setup(topo) - try: - topo.standalone.modify_s(AGMT_DN, [(ldap.MOD_REPLACE, attr, too_small)]) - log.fatal('Invalid value for {}:{} was incorrectly allowed'.format(attr, too_small)) - assert False - except: - log.info('Invalid value for {}:{} was correctly rejected'.format(attr, too_small)) + agmt = agmt_setup(topo)
+ # Value too small + perform_invalid_modify(agmt, attr, too_small) # Value too big - agmt_setup(topo) - try: - topo.standalone.modify_s(AGMT_DN, [(ldap.MOD_REPLACE, attr, too_big)]) - log.fatal('Invalid value for {}:{} was incorrectly allowed'.format(attr, too_big)) - assert False - except: - log.info('Invalid value for {}:{} was correctly rejected'.format(attr, too_big)) - + perform_invalid_modify(agmt, attr, too_big) # Value overflow - agmt_setup(topo) - try: - topo.standalone.modify_s(AGMT_DN, [(ldap.MOD_REPLACE, attr, overflow)]) - log.fatal('Invalid value for {}:{} was incorrectly allowed'.format(attr, overflow)) - assert False - except: - log.info('Invalid value for {}:{} was correctly rejected'.format(attr, overflow)) - + perform_invalid_modify(agmt, attr, overflow) # Value not a number - agmt_setup(topo) - try: - topo.standalone.modify_s(AGMT_DN, [(ldap.MOD_REPLACE, attr, notnum)]) - log.fatal('Invalid value for {}:{} was incorrectly allowed'.format(attr, notnum)) - assert False - except: - log.info('Invalid value for {}:{} was correctly rejected'.format(attr, notnum)) - + perform_invalid_modify(agmt, attr, notnum) # Value is valid - agmt_setup(topo) - try: - topo.standalone.modify_s(AGMT_DN, [(ldap.MOD_REPLACE, attr, valid)]) - except ldap.LDAPError as e: - log.fatal('Valid value for {}:{} was incorrectly rejected. Error {}'.format(attr, valid, str(e))) - assert False - - -def test_replicaid_modification(topo): - """Check that nsDS5ReplicaId accepts only valid values - - :id: 7dcab36f-2113-4e24-ab3d-01843ce65cac - :setup: Standalone instance - :steps: - 1. Enable replication - 2. Try to set valid values to nsDS5ReplicaId - 3. Try to set invalid values to nsDS5ReplicaId - :expectedresults: - 1. Operation should be successful - 2. Operation should be successful - 3. Unwilling to perform error should be raised - """ - - log.info('Add a replica entry') - replica = topo.standalone.replicas.enable(suffix=DEFAULT_SUFFIX, - role=ReplicaRole.MASTER, - replicaID=REPLICAID_MASTER_1) - - log.info('Set {} to valid value'.format(REPL_ID)) - replica.set(REPL_ID, str(REPLICAID_MASTER_1 + 1)) - - log.info('Set {} to invalid value'.format(REPL_ID)) - with pytest.raises(ldap.UNWILLING_TO_PERFORM): - replica.set(REPL_ID, "wrong_id") - + agmt.replace(attr, valid)
if __name__ == '__main__': # Run isolated diff --git a/dirsrvtests/tests/suites/replication/ruvstore_test.py b/dirsrvtests/tests/suites/replication/ruvstore_test.py index 6baf850..096670b 100755 --- a/dirsrvtests/tests/suites/replication/ruvstore_test.py +++ b/dirsrvtests/tests/suites/replication/ruvstore_test.py @@ -113,7 +113,7 @@ def test_ruv_entry_backup(topo): parser = MyLDIF(ldif_file) parser.parse()
- +@pytest.mark.skip(reason="No method to safely access the DB ruv currently exists online.") def test_memoryruv_sync_with_databaseruv(topo): """Check if memory ruv and database ruv are synced
diff --git a/dirsrvtests/tests/suites/replication/single_master_test.py b/dirsrvtests/tests/suites/replication/single_master_test.py index b1cba70..1af37cc 100644 --- a/dirsrvtests/tests/suites/replication/single_master_test.py +++ b/dirsrvtests/tests/suites/replication/single_master_test.py @@ -9,6 +9,12 @@ import pytest from lib389.tasks import * from lib389.utils import * + +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +from lib389.replica import ReplicationManager, Replicas +from lib389.backend import Backends + from lib389.topologies import topology_m1c1 as topo_r # Replication from lib389.topologies import topology_i2 as topo_nr # No replication
@@ -25,60 +31,7 @@ else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__)
-TEST_USER_NAME = 'smrepl_test' -TEST_USER_DN = 'uid={},{}'.format(TEST_USER_NAME, DEFAULT_SUFFIX) -TEST_USER_PWD = 'smrepl_test' - - -@pytest.fixture -def test_user(topo_r, request): - """User for binding operation""" - - log.info('Adding user {}'.format(TEST_USER_DN)) - try: - topo_r.ms["master1"].add_s(Entry((TEST_USER_DN, { - 'objectclass': 'top person'.split(), - 'objectclass': 'organizationalPerson', - 'objectclass': 'inetorgperson', - 'cn': TEST_USER_NAME, - 'sn': TEST_USER_NAME, - 'userpassword': TEST_USER_PWD, - 'mail': '{}@redhat.com'.format(TEST_USER_NAME), - 'uid': TEST_USER_NAME - }))) - except ldap.LDAPError as e: - log.error('Failed to add user (%s): error (%s)' % (TEST_USER_DN, - e.message['desc'])) - raise e - - def fin(): - log.info('Deleting user {}'.format(TEST_USER_DN)) - topo_r.ms["master1"].delete_s(TEST_USER_DN) - - request.addfinalizer(fin) - - -@pytest.fixture(scope="module") -def replica_without_init(topo_nr): - """Enable replica without initialization""" - - master = topo_nr.ins["standalone1"] - consumer = topo_nr.ins["standalone2"] - - master.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=ReplicaRole.MASTER, - replicaId=REPLICAID_MASTER_1) - consumer.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=ReplicaRole.CONSUMER) - properties = {RA_NAME: 'meTo_{}:{}'.format(consumer.host, str(consumer.port)), - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - agmt = master.agreement.create(suffix=DEFAULT_SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - - return agmt - - -def test_mail_attr_repl(topo_r, test_user): +def test_mail_attr_repl(topo_r): """Check that no crash happens during mail attribute replication
:id: 959edc84-05be-4bf9-a541-53afae482052 @@ -102,18 +55,23 @@ def test_mail_attr_repl(topo_r, test_user):
master = topo_r.ms["master1"] consumer = topo_r.cs["consumer1"] + repl = ReplicationManager(DEFAULT_SUFFIX)
- log.info("Wait for a user to be replicated") - time.sleep(3) + m_users = UserAccounts(topo_r.ms["master1"], DEFAULT_SUFFIX) + m_user = m_users.ensure_state(properties=TEST_USER_PROPERTIES) + m_user.ensure_present('mail', 'testuser@redhat.com')
log.info("Check that replication is working") - entries = consumer.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid={}".format(TEST_USER_NAME), - ["uid"]) - assert entries, "User {} wasn't replicated successfully".format(TEST_USER_NAME) + repl.wait_for_replication(master, consumer) + c_users = UserAccounts(topo_r.cs["consumer1"], DEFAULT_SUFFIX) + c_user = c_users.get('testuser')
- entries = consumer.backend.list(DEFAULT_SUFFIX) - db_dir = entries[0]["nsslapd-directory"] - mail_db = filter(lambda fl: fl.startswith("mail"), os.listdir(db_dir)) + c_bes = Backends(consumer) + c_be = c_bes.get(DEFAULT_SUFFIX) + + db_dir = c_be.get_attr_val_utf8('nsslapd-directory') + + mail_db = list(filter(lambda fl: fl.startswith("mail"), os.listdir(db_dir))) assert mail_db, "mail.* wasn't found in {}" mail_db_path = os.path.join(db_dir, mail_db[0]) backup_path = os.path.join(DEFAULT_BACKUPDIR, mail_db[0]) @@ -124,14 +82,10 @@ def test_mail_attr_repl(topo_r, test_user): consumer.start()
log.info("Remove 'mail' attr from master") - try: - master.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'mail', '{}@redhat.com'.format(TEST_USER_NAME))]) - except ldap.LDAPError as e: - log.error('Failed to delete att user {}: error {}'.format(TEST_USER_DN, e.message['desc'])) - raise e + m_user.remove_all('mail')
log.info("Wait for the replication to happen") - time.sleep(5) + repl.wait_for_replication(master, consumer)
consumer.stop() log.info("Restore {} to {}".format(backup_path, mail_db_path)) @@ -139,15 +93,13 @@ def test_mail_attr_repl(topo_r, test_user): consumer.start()
log.info("Make a search for mail attribute in attempt to crash server") - consumer.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "mail={}*".format(TEST_USER_NAME), ["mail"]) + consumer.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(mail=testuser@redhat.com)", ["mail"])
log.info("Make sure that server hasn't crashed") - entries = consumer.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid={}".format(TEST_USER_NAME), - ["uid"]) - assert entries, "User {} wasn't replicated successfully".format(TEST_USER_NAME) + repl.test_replication(master, consumer)
-def test_lastupdate_attr_before_init(topo_nr, replica_without_init): +def test_lastupdate_attr_before_init(topo_nr): """Check that LastUpdate replica attributes show right values
:id: bc8ce431-ff65-41f5-9331-605cbcaaa887 @@ -167,16 +119,28 @@ def test_lastupdate_attr_before_init(topo_nr, replica_without_init): master = topo_nr.ins["standalone1"] consumer = topo_nr.ins["standalone2"]
- assert not master.testReplication(DEFAULT_SUFFIX, consumer) + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.create_first_master(master) + + # Manually create an un-synced consumer. + + consumer_replicas = Replicas(consumer) + consumer_replicas.create(properties={ + 'cn': 'replica', + 'nsDS5ReplicaRoot': DEFAULT_SUFFIX, + 'nsDS5ReplicaId': '65535', + 'nsDS5Flags': '0', + 'nsDS5ReplicaType': '2', + }) + + agmt = repl.ensure_agreement(master, consumer) + with pytest.raises(Exception): + repl.wait_for_replication(master, consumer, timeout=5)
- agmt = master.search_s(replica_without_init, ldap.SCOPE_BASE, "(objectClass=*)", - ["nsds5replicaLastUpdateStart", - "nsds5replicaLastUpdateEnd", - "nsds5replicaLastUpdateStatus"])[0] + assert agmt.get_attr_val_bytes('nsds5replicaLastUpdateStart') == b"19700101000000Z" + assert agmt.get_attr_val_bytes("nsds5replicaLastUpdateEnd") == b"19700101000000Z" + assert b"Replica acquired successfully" not in agmt.get_attr_val_bytes("nsds5replicaLastUpdateStatus")
- assert agmt["nsds5replicaLastUpdateStart"] == b"19700101000000Z" - assert agmt["nsds5replicaLastUpdateEnd"] == b"19700101000000Z" - assert b"Replica acquired successfully" not in agmt["nsds5replicaLastUpdateStatus"]
if __name__ == '__main__': diff --git a/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py new file mode 100644 index 0000000..a27ee11 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py @@ -0,0 +1,92 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.topologies import topology_m2 as topo_m2 + +from lib389.idm.organisationalunit import OrganisationalUnits +from lib389.idm.group import Groups +from lib389.idm.services import ServiceAccounts +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +from lib389.nss_ssl import NssSsl + +from lib389.config import CertmapLegacy + +from lib389._constants import DEFAULT_SUFFIX + +from lib389.replica import ReplicationManager, Replicas + +def test_tls_client_auth(topo_m2): + """Test that TLS client authentication between two masters operates + as expected. + + :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e6 + :steps: + 1. Enable TLS on both masters + 2. Reconfigure both agreements to use TLS Client auth + 3. Ensure replication events work + :expectedresults: + 1. TLS is set up + 2. The configuration works, and authentication works + 3. Replication ... replicates. + """ + m1 = topo_m2.ms['master1'] + m2 = topo_m2.ms['master2'] + # Create the certmap before we restart for enable_tls + cm_m1 = CertmapLegacy(m1) + cm_m2 = CertmapLegacy(m2) + + # We need to configure the same maps for both .... + certmaps = cm_m1.list() + certmaps['default']['DNComps'] = None + certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' + + cm_m1.set(certmaps) + cm_m2.set(certmaps) + + [i.enable_tls() for i in topo_m2] + + # Create the replication dns + services = ServiceAccounts(m1, DEFAULT_SUFFIX) + repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) + repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) + + repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) + repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) + + # Check the replication is "done". + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(m1, m2) + # Now change the auth type + + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + agmt_m1 = replica_m1.get_agreements().list()[0] + + agmt_m1.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m2.sslport), + ) + agmt_m1.remove_all('nsDS5ReplicaBindDN') + + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + agmt_m2 = replica_m2.get_agreements().list()[0] + + agmt_m2.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m1.sslport), + ) + agmt_m2.remove_all('nsDS5ReplicaBindDN') + + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + + diff --git a/dirsrvtests/tests/suites/replication/tombstone_test.py b/dirsrvtests/tests/suites/replication/tombstone_test.py index a701148..10b6ca3 100644 --- a/dirsrvtests/tests/suites/replication/tombstone_test.py +++ b/dirsrvtests/tests/suites/replication/tombstone_test.py @@ -9,12 +9,13 @@ import pytest from lib389.tasks import * from lib389.utils import * -from lib389.topologies import topology_st +from lib389.topologies import topology_m1
-from lib389._constants import DEFAULT_SUFFIX, ReplicaRole, REPLICAID_MASTER_1 +from lib389.tombstone import Tombstones +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
-def test_purge_success(topology_st): +def test_purge_success(topology_m1): """Verify that tombstones are created successfully
:id: adb86f50-ae76-4ed6-82b4-3cdc30ccab78 @@ -30,37 +31,28 @@ def test_purge_success(topology_st): 3. The entry should be successfully deleted 4. Tombstone entry should exist """ + m1 = topology_m1.ms['master1']
- log.info('Setting up replication...') - topology_st.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, - role=ReplicaRole.MASTER, - replicaId=REPLICAID_MASTER_1) + users = UserAccounts(m1, DEFAULT_SUFFIX) + user = users.create(properties=TEST_USER_PROPERTIES)
- log.info("Add and then delete an entry to create a tombstone...") - try: - topology_st.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', { - 'objectclass': 'top person'.split(), - 'sn': 'user', - 'cn': 'entry1'}))) - except ldap.LDAPError as e: - log.error('Failed to add entry: {}'.format(e.message['desc'])) - assert False + tombstones = Tombstones(m1, DEFAULT_SUFFIX)
- try: - topology_st.standalone.delete_s('cn=entry1,dc=example,dc=com') - except ldap.LDAPError as e: - log.error('Failed to delete entry: {}'.format(e.message['desc'])) - assert False + assert len(tombstones.list()) == 0
- log.info('Search for tombstone entries...') - try: - entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, - '(objectclass=nsTombstone)') - assert entries - except ldap.LDAPError as e: - log.fatal('Search failed: {}'.format(e.message['desc'])) - assert False + user.delete()
+ assert len(tombstones.list()) == 1 + assert len(users.list()) == 0 + + ts = tombstones.get('testuser') + assert ts.exists() + + if not ds_is_older('1.4.0'): + ts.revive() + + assert len(users.list()) == 1 + user_revived = users.get('testuser')
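The new Tombstones type wraps the raw '(objectclass=nsTombstone)' subtree search that the old test issued by hand. A condensed sketch of the lifecycle exercised above, assuming m1 is a master from the topology_m1 fixture:

    from lib389._constants import DEFAULT_SUFFIX
    from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
    from lib389.tombstone import Tombstones

    users = UserAccounts(m1, DEFAULT_SUFFIX)
    user = users.create(properties=TEST_USER_PROPERTIES)
    user.delete()  # on a replica, the delete leaves a tombstone behind
    ts = Tombstones(m1, DEFAULT_SUFFIX).get('testuser')
    ts.revive()    # 1.4.0 and later can turn the tombstone back into a live entry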
if __name__ == '__main__': # Run isolated diff --git a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py index 21edbee..c5a9939 100644 --- a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py +++ b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py @@ -15,14 +15,14 @@ from lib389.topologies import topology_m2
from lib389._constants import SUFFIX, DEFAULT_SUFFIX, LOG_REPLICA
+from lib389.agreement import Agreements +from lib389.idm.organisationalunit import OrganisationalUnits + logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__)
installation1_prefix = None
-WAITFOR_ASYNC_ATTR = "nsDS5ReplicaWaitForAsyncResults" - - @pytest.fixture(params=[(None, (4, 11)), ('2000', (0, 2)), ('0', (4, 11)), @@ -34,22 +34,18 @@ def waitfor_async_attr(topology_m2, request): expected_result = request.param[1]
# Run through all masters - for num in range(1, 3): - master = topology_m2.ms["master{}".format(num)] - agmt = master.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn - try: - if attr_value: - log.info("Set %s: %s on %s" % ( - WAITFOR_ASYNC_ATTR, attr_value, master.serverid)) - mod = [(ldap.MOD_REPLACE, WAITFOR_ASYNC_ATTR, attr_value)] - else: - log.info("Delete %s from %s" % ( - WAITFOR_ASYNC_ATTR, master.serverid)) - mod = [(ldap.MOD_DELETE, WAITFOR_ASYNC_ATTR, None)] - master.modify_s(agmt, mod) - except ldap.LDAPError as e: - log.error('Failed to set or delete %s attribute: (%s)' % ( - WAITFOR_ASYNC_ATTR, e.message['desc'])) + + for master in topology_m2.ms.values(): + agmt = Agreements(master).list()[0] + + if attr_value: + agmt.set_wait_for_async_results(attr_value) + else: + try: + # Sometimes we can double remove this. + agmt.remove_wait_for_async_results() + except ldap.NO_SUCH_ATTRIBUTE: + pass
return (attr_value, expected_result)
@@ -60,32 +56,19 @@ def entries(topology_m2, request):
master1 = topology_m2.ms["master1"]
- TEST_OU = "test" - test_dn = SUFFIX test_list = []
log.info("Add 100 nested entries under replicated suffix on %s" % master1.serverid) + ous = OrganisationalUnits(master1, DEFAULT_SUFFIX) for i in range(100): - test_dn = 'ou=%s%s,%s' % (TEST_OU, i, test_dn) - test_list.insert(0, test_dn) - try: - master1.add_s(Entry((test_dn, - {'objectclass': 'top', - 'objectclass': 'organizationalUnit', - 'ou': TEST_OU}))) - except ldap.LDAPError as e: - log.error('Failed to add entry (%s): error (%s)' % (test_dn, - e.message['desc'])) - assert False + ou = ous.create(properties={ + 'ou' : 'test_ou_%s' % i, + }) + test_list.append(ou)
log.info("Delete created entries") - for test_dn in test_list: - try: - master1.delete_s(test_dn) - except ldap.LDAPError as e: - log.error('Failed to delete entry (%s): error (%s)' % (test_dn, - e.message['desc'])) - assert False + for test_ou in test_list: + test_ou.delete()
def fin(): log.info("Clear the errors log in the end of the test case") @@ -106,18 +89,11 @@ def test_not_int_value(topology_m2): :expectedresults: 1. Invalid syntax error should be raised """ - - master1 = topology_m2.ms["master1"] - agmt = master1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn - - log.info("Try to set %s: wv1" % WAITFOR_ASYNC_ATTR) - try: - mod = [(ldap.MOD_REPLACE, WAITFOR_ASYNC_ATTR, "wv1")] - master1.modify_s(agmt, mod) - except ldap.LDAPError as e: - assert e.message['desc'] == 'Invalid syntax' + agmt = Agreements(master1).list()[0]
+ with pytest.raises(ldap.INVALID_SYNTAX): + agmt.set_wait_for_async_results("ws2")
def test_multi_value(topology_m2): """Tests multi value @@ -134,20 +110,11 @@ def test_multi_value(topology_m2): """
master1 = topology_m2.ms["master1"] - agmt = master1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn - - log.info("agmt: %s" % agmt) - - log.info("Try to set %s: 100 and 101 in the same time (multi value test)" % ( - WAITFOR_ASYNC_ATTR)) - try: - mod = [(ldap.MOD_ADD, WAITFOR_ASYNC_ATTR, "100")] - master1.modify_s(agmt, mod) - mod = [(ldap.MOD_ADD, WAITFOR_ASYNC_ATTR, "101")] - master1.modify_s(agmt, mod) - except ldap.LDAPError as e: - assert e.message['desc'] == 'Object class violation' + agmt = Agreements(master1).list()[0]
+ agmt.set_wait_for_async_results('100') + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + agmt.add('nsDS5ReplicaWaitForAsyncResults', '101')
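This rewrite is also the heart of the python 3 change in these tests: python-ldap exceptions no longer carry e.message['desc'], so instead of string-matching the error description the tests now assert on the concrete exception class. The pattern in brief, with agmt obtained from Agreements(master1).list()[0] as above (the invalid value is arbitrary):

    import ldap
    import pytest

    # python-2-only style that breaks under python 3:
    #     except ldap.LDAPError as e:
    #         assert e.message['desc'] == 'Invalid syntax'
    # Portable style:
    with pytest.raises(ldap.INVALID_SYNTAX):
        agmt.set_wait_for_async_results("not-a-number")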
def test_value_check(topology_m2, waitfor_async_attr): """Checks that value has been set correctly @@ -166,23 +133,11 @@ def test_value_check(topology_m2, waitfor_async_attr):
attr_value = waitfor_async_attr[0]
- for num in range(1, 3): - master = topology_m2.ms["master{}".format(num)] - agmt = master.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn - - log.info("Check attr %s on %s" % (WAITFOR_ASYNC_ATTR, master.serverid)) - try: - if attr_value: - entry = master.search_s(agmt, ldap.SCOPE_BASE, "%s=%s" % ( - WAITFOR_ASYNC_ATTR, attr_value)) - assert entry - else: - entry = master.search_s(agmt, ldap.SCOPE_BASE, "%s=*" % WAITFOR_ASYNC_ATTR) - assert not entry - except ldap.LDAPError as e: - log.fatal('Search failed, error: ' + e.message['desc']) - assert False + for master in topology_m2.ms.values(): + agmt = Agreements(master).list()[0]
+ server_value = agmt.get_wait_for_async_results_utf8() + assert server_value == attr_value
def test_behavior_with_value(topology_m2, waitfor_async_attr, entries): """Tests replication behavior with valid @@ -212,11 +167,6 @@ def test_behavior_with_value(topology_m2, waitfor_async_attr, entries): master1.setLogLevel(LOG_REPLICA) master2.setLogLevel(LOG_REPLICA)
- master1.modify_s("cn=config", [(ldap.MOD_REPLACE, - 'nsslapd-logging-hr-timestamps-enabled', "off")]) - master2.modify_s("cn=config", [(ldap.MOD_REPLACE, - 'nsslapd-logging-hr-timestamps-enabled', "off")]) - sync_dict = Counter() min_ap = waitfor_async_attr[1][0] max_ap = waitfor_async_attr[1][1] @@ -230,12 +180,24 @@ def test_behavior_with_value(topology_m2, waitfor_async_attr, entries): # Watch only over unsuccessful sync attempts for line in errlog_filtered: if line.split()[3] != line.split()[4]: - timestamp = line.split(']')[0] + # A timestamp looks like: + # [03/Jan/2018:14:35:15.806396035 +1000] LOGMESSAGE HERE + # We want to assert a range of "seconds", so we need to reduce + # this to a reasonable amount. IE: + # [03/Jan/2018:14:35:15 + # So to achieve this we split on ] and . IE. + # [03/Jan/2018:14:35:15.806396035 +1000] LOGMESSAGE HERE + # ^ split here first + # ^ now split here + # [03/Jan/2018:14:35:15 + # ^ final result + timestamp = line.split(']')[0].split('.')[0] sync_dict[timestamp] += 1
log.info("Take the most common timestamp and assert it has appeared " \ "in the range from %s to %s times" % (min_ap, max_ap)) most_common_val = sync_dict.most_common(1)[0][1] + log.debug("%s <= %s <= %s" % (min_ap, most_common_val, max_ap)) assert min_ap <= most_common_val <= max_ap
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in index a034325..41a6a7c 100644 --- a/ldap/ldif/template-dse.ldif.in +++ b/ldap/ldif/template-dse.ldif.in @@ -988,6 +988,13 @@ cn: nsUniqueId nssystemindex: true nsindextype: eq
+dn: cn=nsCertSubjectDN,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config +objectclass: top +objectclass: nsIndex +cn: nsCertSubjectDN +nssystemindex: true +nsindextype: eq + dn: cn=numsubordinates,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config objectclass: top objectclass: nsIndex diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif index ab124c8..2eccc0a 100644 --- a/ldap/schema/01core389.ldif +++ b/ldap/schema/01core389.ldif @@ -112,6 +112,7 @@ attributeTypes: ( ServerKeyExtractFile-oid NAME 'ServerKeyExtractFile' DESC 'Net attributeTypes: ( ServerCertExtractFile-oid NAME 'ServerCertExtractFile' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' ) attributeTypes: ( 2.16.840.1.113730.3.1.2091 NAME 'nsslapd-suffix' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape' ) attributeTypes: ( 2.16.840.1.113730.3.1.2092 NAME 'nsslapd-ldapiautodnsuffix' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2093 NAME 'nsslapd-changelogsuffix' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape' ) attributeTypes: ( 2.16.840.1.113730.3.1.2095 NAME 'connection' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' ) attributeTypes: ( 2.16.840.1.113730.3.1.2096 NAME 'entryusn' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'Netscape' ) attributeTypes: ( 2.16.840.1.113730.3.1.2113 NAME 'internalModifiersName' DESC 'plugin dn' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN '389 Directory Server' ) @@ -304,6 +305,10 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2332 NAME 'allowWeakDHParam' DESC 'Netsc attributeTypes: ( 2.16.840.1.113730.3.1.2333 NAME 'nsds5ReplicaReleaseTimeout' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 2.16.840.1.113730.3.1.2335 NAME 'nsds5ReplicaIgnoreMissingChange' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 2.16.840.1.113730.3.1.2336 NAME 'nsDS5ReplicaBindDnGroupCheckInterval' DESC 'Replication configuration setting for controlling the bind dn group check interval' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2338 NAME 'nsDS5ReplicaBindDNGroup' DESC 'Group whose members are treated as replication managers' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2339 NAME 'nsslapd-changelogdir' DESC 'The changelog5 directory storage location' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2340 NAME 'nsslapd-changelogmaxage' DESC 'The changelog5 time for which an entry will be retained' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2341 NAME 'nsslapd-changelogmaxentries' DESC 'The changelog5 max entries limit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) # # objectclasses # @@ -313,7 
+318,7 @@ objectClasses: ( 2.16.840.1.113730.3.2.44 NAME 'nsIndex' DESC 'Netscape defined objectClasses: ( 2.16.840.1.113730.3.2.109 NAME 'nsBackendInstance' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' ) objectClasses: ( 2.16.840.1.113730.3.2.110 NAME 'nsMappingTree' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' ) objectClasses: ( 2.16.840.1.113730.3.2.104 NAME 'nsContainer' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' ) -objectClasses: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Replication configuration objectclass' SUP top MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY (cn $ nsds5ReplicaPreciseTombstonePurging $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ nsds5ReplicaChangeCount $ [...] +objectClasses: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Replication configuration objectclass' SUP top MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY (cn $ nsds5ReplicaPreciseTombstonePurging $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsDS5ReplicaBindDNGroup $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ [...] objectClasses: ( 2.16.840.1.113730.3.2.113 NAME 'nsTombstone' DESC 'Netscape defined objectclass' SUP top MAY ( nstombstonecsn $ nsParentUniqueId $ nscpEntryDN ) X-ORIGIN 'Netscape Directory Server' ) objectClasses: ( 2.16.840.1.113730.3.2.103 NAME 'nsDS5ReplicationAgreement' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsds5ReplicaCleanRUVNotified $ nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicatedAttributeListTotal $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastM [...] 
objectClasses: ( 2.16.840.1.113730.3.2.39 NAME 'nsslapdConfig' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' ) @@ -323,4 +328,4 @@ objectClasses: ( nsEncryptionConfig-oid NAME 'nsEncryptionConfig' DESC 'Netscape objectClasses: ( nsEncryptionModule-oid NAME 'nsEncryptionModule' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsSSLToken $ nsSSLPersonalityssl $ nsSSLActivation $ ServerKeyExtractFile $ ServerCertExtractFile ) X-ORIGIN 'Netscape' ) objectClasses: ( 2.16.840.1.113730.3.2.327 NAME 'rootDNPluginConfig' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( rootdn-open-time $ rootdn-close-time $ rootdn-days-allowed $ rootdn-allow-host $ rootdn-deny-host $ rootdn-allow-ip $ rootdn-deny-ip ) X-ORIGIN 'Netscape' ) objectClasses: ( 2.16.840.1.113730.3.2.328 NAME 'nsSchemaPolicy' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ schemaUpdateObjectclassAccept $ schemaUpdateObjectclassReject $ schemaUpdateAttributeAccept $ schemaUpdateAttributeReject) X-ORIGIN 'Netscape Directory Server' ) - +objectClasses: ( 2.16.840.1.113730.3.2.332 NAME 'nsChangelogConfig' DESC 'Configuration of the changelog5 object' SUP top MUST ( cn $ nsslapd-changelogdir ) MAY ( nsslapd-changelogmaxage $ nsslapd-changelogtrim-interval $ nsslapd-changelogmaxentries $ nsslapd-changelogsuffix $ nsslapd-changelogcompactdb-interval ) X-ORIGIN '389 Directory Server' ) diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif index 70d64c0..0f8a5f5 100644 --- a/ldap/schema/02common.ldif +++ b/ldap/schema/02common.ldif @@ -127,7 +127,6 @@ attributeTypes: ( 1.3.6.1.1.4 NAME 'vendorName' EQUALITY 1.3.6.1.4.1.1466.109.11 attributeTypes: ( 1.3.6.1.1.5 NAME 'vendorVersion' EQUALITY 1.3.6.1.4.1.1466.109.114.1 SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation X-ORIGIN 'RFC 3045' ) attributeTypes: ( 2.16.840.1.113730.3.1.3023 NAME 'nsViewFilter' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 2.16.840.1.113730.3.1.2063 NAME 'nsEncryptionAlgorithm' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) -attributeTypes: ( 2.16.840.1.113730.3.1.2093 NAME 'nsslapd-changelogsuffix' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape' ) attributeTypes: ( 2.16.840.1.113730.3.1.2094 NAME 'nsslapd-parent-suffix' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape' ) attributeTypes: ( 2.16.840.1.113730.3.1.2401 NAME 'ConflictCSN' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 USAGE directoryOperation X-ORIGIN 'Netscape Directory Server' ) # diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index b636ebb..0514fb7 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -52,6 +52,7 @@ import signal import errno import pwd import grp +import uuid from shutil import copy2 try: # There are too many issues with this on EL7 @@ -307,8 +308,7 @@ class DirSrv(SimpleLDAPObject, object): from lib389.suffix import Suffix from lib389.replica import ReplicaLegacy as Replica from lib389.replica import Replicas - from lib389.changelog import Changelog - from lib389.agreement import Agreement + from lib389.agreement import AgreementLegacy as Agreement from lib389.schema import SchemaLegacy as Schema from lib389.plugins 
import Plugins from lib389.tasks import Tasks @@ -319,7 +319,6 @@ class DirSrv(SimpleLDAPObject, object): # Need updating self.agreement = Agreement(self) self.replica = Replica(self) - self.changelog = Changelog(self) self.backend = Backend(self) self.config = Config(self) self.index = Index(self) @@ -364,8 +363,9 @@ class DirSrv(SimpleLDAPObject, object): """
self.state = DIRSRV_STATE_INIT + self.uuid = str(uuid.uuid4()) self.verbose = verbose - + # If we have an external logger, use it! self.log = logger if external_log is None: @@ -381,25 +381,12 @@ class DirSrv(SimpleLDAPObject, object): self.ds_paths = Paths(instance=self)
# Reset the args (py.test reuses the args_instance for each test case) - args_instance[SER_DEPLOYED_DIR] = os.environ.get('PREFIX', self.ds_paths.prefix) - args_instance[SER_BACKUP_INST_DIR] = os.environ.get('BACKUPDIR', DEFAULT_BACKUPDIR) - args_instance[SER_ROOT_DN] = DN_DM - args_instance[SER_ROOT_PW] = PW_DM - args_instance[SER_HOST] = LOCALHOST - args_instance[SER_PORT] = DEFAULT_PORT - args_instance[SER_SECURE_PORT] = None - args_instance[SER_SERVERID_PROP] = None # "template" - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_instance[SER_USER_ID] = None - args_instance[SER_GROUP_ID] = None - args_instance[SER_INST_SCRIPTS_ENABLED] = None - # We allocate a "default" prefix here which allows an un-allocated or # un-instantiated DirSrv # instance to be able to do an instance discovery. For example: # ds = lib389.DirSrv() # ds.list(all=True) - self.prefix = args_instance[SER_DEPLOYED_DIR] + # self.ds_paths.prefix = args_instance[SER_DEPLOYED_DIR] self.containerised = False
self.__wrapmethods() @@ -437,6 +424,11 @@ class DirSrv(SimpleLDAPObject, object):
self.ldapuri = ldapuri
+ # We must also alloc host and ports for some manipulation tasks + self.host = socket.gethostname() + # self.port ... + # self.sslport ... + self.binddn = binddn self.bindpw = password self.state = DIRSRV_STATE_ALLOCATED @@ -562,17 +554,9 @@ class DirSrv(SimpleLDAPObject, object):
self.groupid = args.get(SER_GROUP_ID, self.userid) self.backupdir = args.get(SER_BACKUP_INST_DIR, DEFAULT_BACKUPDIR) - # Allocate from the args, or use our env, or use / - if args.get(SER_DEPLOYED_DIR, self.prefix) is not None: - self.prefix = args.get(SER_DEPLOYED_DIR, self.prefix) # This will be externally populated in topologies. self.realm = None
- # Those variables needs to be revisited (sroot for 64 bits) - # self.sroot = os.path.join(self.prefix, "lib/dirsrv") - # self.errlog = os.path.join(self.prefix, - # "var/log/dirsrv/slapd-%s/errors" % self.serverid) - # additional settings self.suffixes = {} self.agmt = {} @@ -659,7 +643,6 @@ class DirSrv(SimpleLDAPObject, object): prop = {} prop[CONF_SERVER_ID] = serverid prop[SER_SERVERID_PROP] = serverid - prop[SER_DEPLOYED_DIR] = self.prefix myfile = open(filename, 'r') for line in myfile: # retrieve the value in line:: @@ -778,7 +761,6 @@ class DirSrv(SimpleLDAPObject, object): #
# Don't need a default value now since it's set in init. - prefix = self.prefix if serverid is None and hasattr(self, 'serverid'): serverid = self.serverid
@@ -889,7 +871,6 @@ class DirSrv(SimpleLDAPObject, object): SER_USER_ID: self.userid, SER_SERVERID_PROP: self.serverid, SER_GROUP_ID: self.groupid, - SER_DEPLOYED_DIR: self.prefix, SER_BACKUP_INST_DIR: self.backupdir, SER_STRICT_HOSTNAME_CHECKING: self.strict_hostname}
@@ -897,8 +878,7 @@ class DirSrv(SimpleLDAPObject, object): args[SER_INST_SCRIPTS_ENABLED] = self.inst_scripts
content = formatInfData(args) - result = DirSrvTools.runInfProg(prog, content, self.verbose, - prefix=self.prefix) + result = DirSrvTools.runInfProg(prog, content, self.verbose, prefix=self.ds_paths.prefix) if result != 0: raise Exception('Failed to run setup-ds.pl')
@@ -943,7 +923,6 @@ class DirSrv(SimpleLDAPObject, object): # Go! sds.create_from_args(general, slapd, backends, None)
- def create(self, pyinstall=False, version=INSTALL_LATEST_CONFIG): """ Creates an instance with the parameters sets in dirsrv @@ -1011,7 +990,7 @@ class DirSrv(SimpleLDAPObject, object):
# Now time to remove the instance prog = os.path.join(self.ds_paths.sbin_dir, CMD_PATH_REMOVE_DS) - if (not self.prefix or self.prefix == '/') and os.geteuid() != 0: + if (not self.ds_paths.prefix or self.ds_paths.prefix == '/') and os.geteuid() != 0: raise ValueError("Error: without prefix deployment it is required to be root user") cmd = "%s -i %s%s" % (prog, DEFAULT_INST_HEAD, self.serverid) self.log.debug("running: %s " % cmd) @@ -1442,9 +1421,9 @@ class DirSrv(SimpleLDAPObject, object): # goes under the directory where the DS is deployed listFilesToBackup = [] here = os.getcwd() - if self.prefix: - os.chdir("%s/" % self.prefix) - prefix_pattern = "%s/" % self.prefix + if self.ds_paths.prefix: + os.chdir("%s/" % self.ds_paths.prefix) + prefix_pattern = "%s/" % self.ds_paths.prefix else: os.chdir("/") prefix_pattern = None @@ -1475,14 +1454,14 @@ class DirSrv(SimpleLDAPObject, object): for b_dir in dirs: name = os.path.join(root, b_dir) self.log.debug("backupFS b_dir = %s (%s) [name=%s]" % - (b_dir, self.prefix, name)) + (b_dir, self.ds_paths.prefix, name)) if prefix_pattern: name = re.sub(prefix_pattern, '', name)
if os.path.isdir(name): listFilesToBackup.append(name) self.log.debug("backupFS add = %s (%s)" % - (name, self.prefix)) + (name, self.ds_paths.prefix))
for file in files: name = os.path.join(root, file) @@ -1492,7 +1471,7 @@ class DirSrv(SimpleLDAPObject, object): if os.path.isfile(name): listFilesToBackup.append(name) self.log.debug("backupFS add = %s (%s)" % - (name, self.prefix)) + (name, self.ds_paths.prefix))
# create the archive name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S")) @@ -1559,8 +1538,8 @@ class DirSrv(SimpleLDAPObject, object):
# Then restore from the directory where DS was deployed here = os.getcwd() - if self.prefix: - prefix_pattern = "%s/" % self.prefix + if self.ds_paths.prefix: + prefix_pattern = "%s/" % self.ds_paths.prefix os.chdir(prefix_pattern) else: prefix_pattern = "/" @@ -1643,14 +1622,13 @@ class DirSrv(SimpleLDAPObject, object): :type post_open: bool """ # If it doesn't exist, create a cadb. - ssca_path = os.path.join(self.get_sysconf_dir(), 'dirsrv/ssca/') - ssca = NssSsl(dbpath=ssca_path) + ssca = NssSsl(dbpath=self.get_ssca_dir()) if not ssca._db_exists(): ssca.reinit() ssca.create_rsa_ca()
# Create certificate database. - tlsdb = NssSsl(dbpath=self.get_cert_dir()) + tlsdb = NssSsl(dirsrv=self) # Remember, DS breaks the db, so force reinit it. tlsdb.reinit() csr = tlsdb.create_rsa_key_and_csr() @@ -1707,10 +1685,32 @@ class DirSrv(SimpleLDAPObject, object): """Return the server instance ldif directory.""" return self.ds_paths.backup_dir
+ def get_data_dir(self): + """Return the server data path + + :returns: The string path of the data location. + """ + return self.ds_paths.data_dir + def get_local_state_dir(self): + """Return the server local state path + + :returns: The string path of the local state location. + """ return self.ds_paths.local_state_dir
+ def get_changelog_dir(self): + """Return the server changelog path + + :returns: The string path of the changelog location. + """ + return os.path.abspath(os.path.join(self.ds_paths.db_dir, '../changelogdb')) + def get_config_dir(self): + """Return the server config directory + + :returns: The string path of the config location. + """ return self.ds_paths.config_dir
def get_cert_dir(self): @@ -1750,12 +1750,27 @@ class DirSrv(SimpleLDAPObject, object): def get_group_gid(self): return grp.getgrnam(self.ds_paths.group).gr_gid
+ def get_uuid(self): + """Get the python dirsrv unique id. + + :returns: String of the object uuid + """ + return self.uuid + def has_asan(self): return self.ds_paths.asan_enabled
def with_systemd(self): return self.ds_paths.with_systemd
+ def get_server_tls_subject(self): + """ Get the server's TLS subject line for enrollment purposes. + + :returns: String of the Server-Cert subject line. + """ + tlsdb = NssSsl(dirsrv=self) + return tlsdb.get_server_cert_subject() + # # Get entries # @@ -2704,7 +2719,7 @@ class DirSrv(SimpleLDAPObject, object): online = True else: online = False - DirSrvTools.runUpgrade(self.prefix, online) + DirSrvTools.runUpgrade(self.ds_paths.prefix, online)
# # The following are the functions to perform offline scripts(when the diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py index 64ccd56..23ed0f8 100644 --- a/src/lib389/lib389/_mapped_object.py +++ b/src/lib389/lib389/_mapped_object.py @@ -107,6 +107,7 @@ class DSLdapObject(DSLogging): self._lint_functions = None self._server_controls = None self._client_controls = None + self._object_filter = '(objectClass=*)'
def __unicode__(self): val = self._dn @@ -124,7 +125,7 @@ class DSLdapObject(DSLogging): :returns: Entry object """
- return self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, attrlist=["*"], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] + return self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=["*"], serverctrls=self._server_controls, clientctrls=self._client_controls)[0]
def exists(self): """Check if the entry exists @@ -133,7 +134,7 @@ class DSLdapObject(DSLogging): """
try: - self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, attrsonly=1, serverctrls=self._server_controls, clientctrls=self._client_controls) + self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1, serverctrls=self._server_controls, clientctrls=self._client_controls) except ldap.NO_SUCH_OBJECT: return False
@@ -145,7 +146,7 @@ class DSLdapObject(DSLogging): :returns: LDIF formatted string """
- e = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, attrlist=["*"], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] + e = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=["*"], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] return e.__repr__()
def display_attr(self, attr): @@ -221,7 +222,7 @@ class DSLdapObject(DSLogging): raise ValueError("Invalid state. Cannot get presence on instance that is not ONLINE") self._log.debug("%s present(%r) %s" % (self._dn, attr, value))
- e = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, attrlist=[attr, ], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] + e = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] values = self.get_attr_vals_bytes(attr) self._log.debug("%s contains %s" % (self._dn, values))
@@ -297,6 +298,18 @@ class DSLdapObject(DSLogging): except ldap.NO_SUCH_ATTRIBUTE: pass
+ def ensure_present(self, attr, value): + """Ensure that an attribute and value are present, + or add them if missing. + + :param attr: an attribute name + :type attr: str + :param value: an attribute value + :type value: str + """ + if not self.present(attr, value): + self.add(attr, value) + # maybe this could be renamed? def set(self, key, value, action=ldap.MOD_REPLACE): """Perform a specified action on a key with value @@ -429,7 +442,7 @@ class DSLdapObject(DSLogging): raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE") else: # retrieving real(*) and operational attributes(+) - attrs_entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, attrlist=["*", "+"], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] + attrs_entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=["*", "+"], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] # getting dict from 'entry' object attrs_dict = attrs_entry.data return attrs_dict @@ -439,7 +452,7 @@ class DSLdapObject(DSLogging): if self._instance.state != DIRSRV_STATE_ONLINE: raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE") else: - entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, attrlist=keys, serverctrls=self._server_controls, clientctrls=self._client_controls)[0] + entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=keys, serverctrls=self._server_controls, clientctrls=self._client_controls)[0] return entry.getValuesSet(keys)
def get_attr_vals(self, key): @@ -452,7 +465,7 @@ class DSLdapObject(DSLogging): else: # It would be good to prevent the entry code intercepting this .... # We have to do this in this method, because else we ignore the scope base. - entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, attrlist=[key], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] + entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[key], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] return entry.getValues(key)
def get_attr_val(self, key): @@ -463,7 +476,7 @@ class DSLdapObject(DSLogging): # In the future, I plan to add a mode where if local == true, we # can use get on dse.ldif to get values offline. else: - entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, attrlist=[key], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] + entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[key], serverctrls=self._server_controls, clientctrls=self._client_controls)[0] return entry.getValue(key)
def get_attr_val_bytes(self, key): @@ -682,6 +695,51 @@ class DSLdapObject(DSLogging): # Do we need to do extra dn validation here? return (tdn, str_props)
+ def _create(self, rdn=None, properties=None, basedn=None, ensure=False): + """Internal implementation of create. This is used by ensure + and create, to prevent code duplication. You should *never* call + this method directly. + """ + assert(len(self._create_objectclasses) > 0) + basedn = ensure_str(basedn) + self._log.debug('Checking "%s" under %s : %s' % (rdn, basedn, properties)) + # Add the objectClasses to the properties + (dn, valid_props) = self._validate(rdn, properties, basedn) + # Check if the entry exists or not? .add_s is going to error anyway ... + self._log.debug('Validated dn %s : valid_props %s' % (dn, valid_props)) + + exists = False + + try: + self._instance.search_ext_s(dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1, serverctrls=self._server_controls, clientctrls=self._client_controls) + exists = True + except ldap.NO_SUCH_OBJECT: + pass + + if exists and ensure: + # update properties + self._log.debug('Exists %s' % dn) + self._dn = dn + # Now use replace_many to setup our values + mods = [] + for k,v in valid_props.items(): + mods.append( (ldap.MOD_REPLACE, k, v)) + self._instance.modify_ext_s(self._dn, mods, serverctrls=self._server_controls, clientctrls=self._client_controls) + elif exists and not ensure: + # raise "already exists." + raise ldap.ALREADY_EXISTS("Entry %s already exists" % dn) + if not exists: + self._log.debug('Creating %s' % dn) + e = Entry(dn) + e.update({'objectclass': ensure_list_bytes(self._create_objectclasses)}) + e.update(valid_props) + # We rely on exceptions here to indicate failure to the parent. + self._log.debug('Creating entry %s : %s' % (dn, e)) + self._instance.add_ext_s(e, serverctrls=self._server_controls, clientctrls=self._client_controls) + # If it worked, we need to fix our instance dn + self._dn = dn + return self + def create(self, rdn=None, properties=None, basedn=None): """Add a new entry
@@ -694,24 +752,22 @@ class DSLdapObject(DSLogging):
:returns: DSLdapObject of the created entry """ + return self._create(rdn, properties, basedn, ensure=False)
- assert(len(self._create_objectclasses) > 0) - basedn = ensure_str(basedn) - self._log.debug('Creating "%s" under %s : %s' % (rdn, basedn, properties)) - # Add the objectClasses to the properties - (dn, valid_props) = self._validate(rdn, properties, basedn) - # Check if the entry exists or not? .add_s is going to error anyway ... - self._log.debug('Validated dn %s : valid_props %s' % (dn, valid_props)) + def ensure_state(self, rdn=None, properties=None, basedn=None): + """Ensure an entry exists with the following state, created + if necessary.
- e = Entry(dn) - e.update({'objectclass': ensure_list_bytes(self._create_objectclasses)}) - e.update(valid_props) - # We rely on exceptions here to indicate failure to the parent. - self._log.debug('Creating entry %s : %s' % (dn, e)) - self._instance.add_ext_s(e, serverctrls=self._server_controls, clientctrls=self._client_controls) - # If it worked, we need to fix our instance dn - self._dn = dn - return self + :param rdn: RDN of the new entry + :type rdn: str + :param properties: Attributes for the new entry + :type properties: dict + :param basedn: Base DN of the new entry + :type basedn: str + + :returns: DSLdapObject of the created entry + """ + return self._create(rdn, properties, basedn, ensure=True)
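ensure_state() thus gives callers an idempotent alternative to create(): an existing entry has its attributes converged via MOD_REPLACE, a missing one is added. A short usage sketch with the collection-level wrapper defined further below, assuming inst is a connected DirSrv instance:

    from lib389._constants import DEFAULT_SUFFIX
    from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES

    users = UserAccounts(inst, DEFAULT_SUFFIX)
    # Safe to call repeatedly: creates the user on the first run and
    # converges its attributes on later runs instead of raising
    # ldap.ALREADY_EXISTS.
    user = users.ensure_state(properties=TEST_USER_PROPERTIES)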
def lint(self): """Override this to create a linter for a type. This means that we can detect @@ -872,19 +928,6 @@ class DSLdapObjects(DSLogging): if type(properties) != dict: raise ldap.UNWILLING_TO_PERFORM("properties must be a dictionary")
- # Get the rdn out of the properties if it's unset??? - # if rdn is None and self._rdn_attribute in properties: - # # First see if we can get it from the properties. - # trdn = properties.get(self._rdn_attribute) - # if type(trdn) == str: - # rdn = "%s=%s" % (self._rdn_attribute, trdn) - # elif type(trdn) == list and len(trdn) != 1: - # raise ldap.UNWILLING_TO_PERFORM("Cannot determine rdn %s from properties. Too many choices" % (self._rdn_attribute)) - # elif type(trdn) == list: - # rdn = "%s=%s" % (self._rdn_attribute, trdn[0]) - # else: - # raise ldap.UNWILLING_TO_PERFORM("Cannot determine rdn %s from properties, Invalid type" % type(trdn)) - return (rdn, properties)
def create(self, rdn=None, properties=None): @@ -907,3 +950,27 @@ class DSLdapObjects(DSLogging): (rdn, properties) = self._validate(rdn, properties) # Now actually commit the creation req return co.create(rdn, properties, self._basedn) + + def ensure_state(self, rdn=None, properties=None): + """Create an object under base DN of our entry, or + assert it exists and update its properties. + + :param rdn: RDN of the new entry + :type rdn: str + :param properties: Attributes for the new entry + :type properties: dict + + :returns: DSLdapObject of the created entry + """ + + # Should we inject the rdn to properties? + # This may not work in all cases, especially when we consider plugins. + # + co = self._entry_to_instance(dn=None, entry=None) + # Make the rdn naming attr available + self._rdn_attribute = co._rdn_attribute + (rdn, properties) = self._validate(rdn, properties) + # Now actually commit the creation req + return co.ensure_state(rdn, properties, self._basedn) + + diff --git a/src/lib389/lib389/agreement.py b/src/lib389/lib389/agreement.py index b0ab881..d7ca0a2 100644 --- a/src/lib389/lib389/agreement.py +++ b/src/lib389/lib389/agreement.py @@ -17,8 +17,157 @@ from lib389._entry import FormatDict from lib389.utils import normalizeDN, ensure_bytes, ensure_str, ensure_dict_str from lib389 import Entry, DirSrv, NoSuchEntryError, InvalidArgumentError
+from lib389._mapped_object import DSLdapObject, DSLdapObjects
-class Agreement(object): + +class Agreement(DSLdapObject): + """A replication agreement from this server instance to + another instance of directory server. + + - must attributes: [ 'cn' ] + - RDN attribute: 'cn' + + :param instance: An instance + :type instance: lib389.DirSrv + :param dn: Entry DN + :type dn: str + """ + + def __init__(self, instance, dn=None): + super(Agreement, self).__init__(instance, dn) + self._rdn_attribute = 'cn' + self._must_attributes = [ + 'cn', + ] + self._create_objectclasses = [ + 'top', + 'nsds5replicationagreement', + ] + self._protected = False + + def begin_reinit(self): + """Begin a total reinit of the consumer. This will send + our data to the server we are replicating to. + """ + self.set('nsds5BeginReplicaRefresh', 'start') + + def check_reinit(self): + """Check the status of a reinit. Returns done and error. A correct + reinit will return (True, False). + + :returns: tuple(done, error), where done, error are bool. + """ + done = False + error = False + status = self.get_attr_val_utf8('nsds5ReplicaLastInitStatus') + self._log.debug('agreement tot_init status: %s' % status) + if not status: + pass + elif 'replica busy' in status: + error = True + elif 'Total update succeeded' in status: + done = True + elif 'Replication error' in status: + error = True + + return (done, error) + + def wait_reinit(self, timeout=300): + """Wait for a reinit to complete. Returns done and error. A correct + reinit will return (True, False). + + :returns: tuple(done, error), where done, error are bool. + """ + done = False + error = False + count = 0 + while done is False and error is False: + (done, error) = self.check_reinit() + if count > timeout and not done: + error = True + count = count + 2 + time.sleep(2) + return (done, error) + + def pause(self): + """Pause outgoing changes from this server to consumer. Note + that this does not pause the consumer, only that changes will + not be sent from this master to consumer: the consumer may still + receive changes from other replication paths! + """ + self.set('nsds5ReplicaEnabled', 'off') + + def resume(self): + """Resume sending updates from this master to consumer directly. + """ + self.set('nsds5ReplicaEnabled', 'on') + + def set_wait_for_async_results(self, value): + """Set nsDS5ReplicaWaitForAsyncResults to value. + + :param value: Time in milliseconds. + :type value: str + """ + self.replace('nsDS5ReplicaWaitForAsyncResults', value) + + def remove_wait_for_async_results(self): + """Reset nsDS5ReplicaWaitForAsyncResults to default. + """ + self.remove_all('nsDS5ReplicaWaitForAsyncResults') + + def get_wait_for_async_results_utf8(self): + """Get the current value of nsDS5ReplicaWaitForAsyncResults. + + :returns: str + """ + return self.get_attr_val_utf8('nsDS5ReplicaWaitForAsyncResults') + +class Agreements(DSLdapObjects): + """Represents the set of agreements configured on this instance. + There are two possible ways to use this interface. + + The first is as the set of agreements on the server for all + replicated suffixes, i.e.: + + agmts = Agreements(inst). + + However, this will NOT allow new agreements to be created, as + agreements must be related to a replica. + + The second is Agreements related to a replica. For this method + you must use: + + replica = Replicas(inst).get(<suffix>) + agmts = Agreements(inst, replica.dn) + + + :param instance: An instance + :type instance: lib389.DirSrv + :param basedn: The base dn to search. + :type basedn: str + :param rdn: The rdn relative to cn=mapping tree to search.
+ :type rdn: str + """ + + def __init__(self, instance, basedn=DN_MAPPING_TREE, rdn=None): + super(Agreements, self).__init__(instance) + self._childobject = Agreement + self._objectclasses = [ 'nsds5replicationagreement' ] + self._filterattrs = [ 'cn', 'nsDS5ReplicaRoot' ] + if rdn is None: + self._basedn = basedn + else: + self._basedn = "%s,%s" % (rdn, basedn) + + def _validate(self, rdn, properties): + """ An internal implementation detail of create verification. You should + never call this directly. + """ + if self._basedn == DN_MAPPING_TREE: + raise ldap.UNWILLING_TO_PERFORM("Refusing to create agreement in %s" % DN_MAPPING_TREE) + return super(Agreements, self)._validate(rdn, properties) + +class AgreementLegacy(object): """An object that helps to work with agreement entry
:param conn: An instance diff --git a/src/lib389/lib389/changelog.py b/src/lib389/lib389/changelog.py index 22fe01a..bed42a0 100644 --- a/src/lib389/lib389/changelog.py +++ b/src/lib389/lib389/changelog.py @@ -13,8 +13,58 @@ from lib389._constants import * from lib389.properties import * from lib389 import DirSrv, Entry, InvalidArgumentError
+from lib389._mapped_object import DSLdapObject +from lib389.utils import ds_is_older
-class Changelog(object): +class Changelog5(DSLdapObject): + """Represents the Directory Server changelog. This is used for + replication. Only one changelog is needed for every server. + + :param instance: An instance + :type instance: lib389.DirSrv + """ + + def __init__(self, instance, dn='cn=changelog5,cn=config'): + super(Changelog5,self).__init__(instance, dn) + self._rdn_attribute = 'cn' + self._must_attributes = [ 'cn', 'nsslapd-changelogdir' ] + self._create_objectclasses = [ + 'top', + 'nsChangelogConfig', + ] + if ds_is_older('1.4.0'): + self._create_objectclasses = [ + 'top', + 'extensibleobject', + ] + self._protected = True + + def set_max_entries(self, value): + """Configure the max entries the changelog can hold. + + :param value: the number of entries. + :type value: str + """ + self.replace('nsslapd-changelogmaxentries', value) + + def set_trim_interval(self, value): + """The time between changelog trims in seconds. + + :param value: The time in seconds + :type value: str + """ + self.replace('nsslapd-changelogtrim-interval', value) + + def set_max_age(self, value): + """The maximum age of entries in the changelog. + + :param value: The age with a time modifier of s, m, h, d, w. + :type value: str + """ + self.replace('nsslapd-changelogmaxage', value) + + +class ChangelogLegacy(object): """An object that helps to work with changelog entry
:param conn: An instance diff --git a/src/lib389/lib389/idm/domain.py b/src/lib389/lib389/idm/domain.py index aff9639..4108a7a 100644 --- a/src/lib389/lib389/idm/domain.py +++ b/src/lib389/lib389/idm/domain.py @@ -19,7 +19,7 @@ class Domain(DSLdapObject): :type dn: str """
- def __init__(self, instance, dn=None): + def __init__(self, instance, dn): super(Domain, self).__init__(instance, dn) self._rdn_attribute = 'dc' self._must_attributes = ['dc'] diff --git a/src/lib389/lib389/idm/group.py b/src/lib389/lib389/idm/group.py index 0447726..d1716a6 100644 --- a/src/lib389/lib389/idm/group.py +++ b/src/lib389/lib389/idm/group.py @@ -63,6 +63,14 @@ class Group(DSLdapObject):
self.remove('member', dn)
+ def ensure_member(self, dn): + """Ensure DN is a member + + :param dn: Entry DN + :type dn: str + """ + + self.ensure_present('member', dn)
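A short sketch of the idempotent behaviour this adds (instance, group name and DNs are placeholders; the existing add_member counterpart, assumed here, raises on duplicate values):

    group = Groups(inst, basedn='dc=example,dc=com').get('demo')
    group.ensure_member('uid=alice,ou=People,dc=example,dc=com')
    group.ensure_member('uid=alice,ou=People,dc=example,dc=com')  # no error on repeat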
class Groups(DSLdapObjects): """DSLdapObjects that represents Groups entry @@ -81,7 +89,10 @@ class Groups(DSLdapObjects): ] self._filterattrs = [RDN] self._childobject = Group - self._basedn = '{},{}'.format(ensure_str(rdn), ensure_str(basedn)) + if rdn: + self._basedn = '{},{}'.format(ensure_str(rdn), ensure_str(basedn)) + else: + self._basedn = ensure_str(basedn)
class UniqueGroup(DSLdapObject): @@ -120,7 +131,10 @@ class UniqueGroups(DSLdapObjects): ] self._filterattrs = [RDN] self._childobject = UniqueGroup - self._basedn = '{},{}'.format(rdn, basedn) + if rdn: + self._basedn = '{},{}'.format(ensure_str(rdn), ensure_str(basedn)) + else: + self._basedn = ensure_str(basedn)
diff --git a/src/lib389/lib389/idm/services.py b/src/lib389/lib389/idm/services.py index 90b0ba9..865fd09 100644 --- a/src/lib389/lib389/idm/services.py +++ b/src/lib389/lib389/idm/services.py @@ -9,6 +9,8 @@ from lib389._mapped_object import DSLdapObjects from lib389.idm.account import Account
+from lib389.utils import ds_is_older + RDN = 'cn' MUST_ATTRIBUTES = [ 'cn', @@ -31,6 +33,15 @@ class ServiceAccount(Account): 'top', 'netscapeServer', ] + if ds_is_older('1.4.0'): + # This is a HORRIBLE HACK for older versions that DON'T have + # correct updated schema! + # + # I feel physically ill having written this line of code. :( + self._create_objectclasses.append('extensibleobject') + else: + self._create_objectclasses.append('nsMemberOf') + self._create_objectclasses.append('nsAccount') self._protected = False
class ServiceAccounts(DSLdapObjects): diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py index fce1d2d..c30fa17 100644 --- a/src/lib389/lib389/instance/remove.py +++ b/src/lib389/lib389/instance/remove.py @@ -27,7 +27,7 @@ def remove_ds_instance(dirsrv): ### WARNING: The changelogdb isn't removed. we assume it's in: # db_dir ../changelogdb. So remove that too! # abspath will resolve the ".." down. - remove_paths['changelogdb_dir'] = os.path.abspath(os.path.join(dirsrv.ds_paths.db_dir, '../changelogdb')) + remove_paths['changelogdb_dir'] = dirsrv.get_changelog_dir() remove_paths['ldif_dir'] = dirsrv.ds_paths.ldif_dir remove_paths['lock_dir'] = dirsrv.ds_paths.lock_dir remove_paths['log_dir'] = dirsrv.ds_paths.log_dir diff --git a/src/lib389/lib389/nss_ssl.py b/src/lib389/lib389/nss_ssl.py index a37c11f..1cb22ef 100644 --- a/src/lib389/lib389/nss_ssl.py +++ b/src/lib389/lib389/nss_ssl.py @@ -23,6 +23,7 @@ from subprocess import check_call, check_output from lib389.passwd import password_generate
from lib389.utils import ensure_str, ensure_bytes +import uuid
KEYBITS = 4096 CA_NAME = 'Self-Signed-CA' @@ -32,7 +33,8 @@ PIN_TXT = 'pin.txt' PWD_TXT = 'pwdfile.txt' CERT_SUFFIX = 'O=testing,L=389ds,ST=Queensland,C=AU' ISSUER = 'CN=ssca.389ds.example.com,%s' % CERT_SUFFIX -SELF_ISSUER = 'CN={HOSTNAME},%s' % CERT_SUFFIX +SELF_ISSUER = 'CN={HOSTNAME},givenName={GIVENNAME},%s' % CERT_SUFFIX +USER_ISSUER = 'CN={HOSTNAME},%s' % CERT_SUFFIX VALID = 2
# My logger @@ -52,6 +54,46 @@ class NssSsl(object): else: self.dbpassword = dbpassword
+ def detect_alt_names(self, alt_names=None): + """Attempt to determine appropriate subject alternate names for a host. + Returns the list of names we derive. + + :param alt_names: A list of alternate names. + :type alt_names: list[str] + :returns: list[str] + """ + # Work on a copy so we never mutate the caller's list (or a shared default). + alt_names = list(alt_names) if alt_names is not None else [] + if self.dirsrv and self.dirsrv.host not in alt_names: + alt_names.append(self.dirsrv.host) + if len(alt_names) == 0: + alt_names.append(socket.gethostname()) + return alt_names + + def generate_cert_subject(self, alt_names=[]): + """Return the cert subject we would generate for this host + from the lib389 self signed process. This is *not* the subject + of the actual cert, which could be different. + + :param alt_names: Alternative names you want to configure. + :type alt_names: [str, ] + :returns: String of the subject DN. + """ + + if self.dirsrv and len(alt_names) > 0: + return SELF_ISSUER.format(GIVENNAME=self.dirsrv.get_uuid(), HOSTNAME=alt_names[0]) + elif len(alt_names) > 0: + return SELF_ISSUER.format(GIVENNAME=uuid.uuid4(), HOSTNAME=alt_names[0]) + else: + return SELF_ISSUER.format(GIVENNAME=uuid.uuid4(), HOSTNAME='lib389host.localdomain') + + def get_server_cert_subject(self, alt_names=[]): + """Get the server db subject. For now, this uses generate, but later + we can make this determined from other factors like x509 parsing. + + :returns: str + """ + alt_names = self.detect_alt_names(alt_names) + return self.generate_cert_subject(alt_names) + def _generate_noise(self, fpath): noise = password_generate(256) with open(fpath, 'w') as f: @@ -140,6 +182,7 @@ class NssSsl(object): '-f', '%s/%s' % (self._certdb, PWD_TXT), ] + self.log.debug("nss cmd: %s" % cmd) result = ensure_str(check_output(cmd)) self.log.debug("nss output: %s" % result) # Now extract the CAcert to a well known place. @@ -153,6 +196,7 @@ self._certdb, '-a', ] + self.log.debug("nss cmd: %s" % cmd) certdetails = check_output(cmd) with open('%s/ca.crt' % self._certdb, 'w') as f: f.write(ensure_str(certdetails)) @@ -266,10 +310,8 @@ extra names to take. """
- if len(alt_names) == 0: - alt_names.append(socket.gethostname()) - if self.dirsrv and self.dirsrv.host not in alt_names: - alt_names.append(self.dirsrv.host) + alt_names = self.detect_alt_names(alt_names) + subject = self.generate_cert_subject(alt_names)
# Wait a second to avoid an NSS bug with serial ids based on time. time.sleep(1) @@ -281,7 +323,7 @@ '-n', CERT_NAME, '-s', - SELF_ISSUER.format(HOSTNAME=alt_names[0]), + subject, # We MUST issue with SANs else ldap won't verify the name. '-8', ','.join(alt_names), '-c', @@ -300,6 +342,7 @@ '%s/%s' % (self._certdb, PWD_TXT), ]
+ self.log.debug("nss cmd: %s" % cmd) result = ensure_str(check_output(cmd)) self.log.debug("nss output: %s" % result) return True @@ -311,21 +354,26 @@ class NssSsl(object): """ csr_path = os.path.join(self._certdb, '%s.csr' % CERT_NAME)
- if len(alt_names) == 0: - alt_names.append(socket.gethostname()) - if self.dirsrv and self.dirsrv.host not in alt_names: - alt_names.append(self.dirsrv.host) + alt_names = self.detect_alt_names(alt_names) + subject = self.generate_cert_subject(alt_names)
# Wait a second to avoid an NSS bug with serial ids based on time. time.sleep(1) # Create noise. self._generate_noise('%s/noise.txt' % self._certdb)
- check_call([ + cmd = [ '/usr/bin/certutil', '-R', + # We want a dual-purpose client and server cert + '--keyUsage', + 'digitalSignature,nonRepudiation,keyEncipherment,dataEncipherment', + '--nsCertType', + 'sslClient,sslServer', + '--extKeyUsage', + 'clientAuth,serverAuth', '-s', - SELF_ISSUER.format(HOSTNAME=alt_names[0]), + subject, # We MUST issue with SANs else ldap won't verify the name. '-8', ','.join(alt_names), '-g', @@ -340,7 +388,11 @@ '%s/%s' % (self._certdb, PWD_TXT), '-a', '-o', csr_path, - ]) + ] + + self.log.debug("nss cmd: %s" % cmd) + check_call(cmd) + return csr_path
def rsa_ca_sign_csr(self, csr_path): @@ -398,7 +450,7 @@ class NssSsl(object): '-V', '-d', self._certdb, '-n', CERT_NAME, - '-u', 'V' + '-u', 'YCV' ])
def create_rsa_user(self, name): @@ -407,8 +459,9 @@ class NssSsl(object):
Name is the uid of the account, and will become the CN of the cert. """ + subject = USER_ISSUER.format(HOSTNAME=name) if self._rsa_user_exists(name): - return True + return subject
# Wait a second to avoid an NSS bug with serial ids based on time. time.sleep(1) @@ -418,7 +471,7 @@ class NssSsl(object): '-n', '%s%s' % (USER_PREFIX, name), '-s', - SELF_ISSUER.format(HOSTNAME=name), + subject, '--keyUsage', 'digitalSignature,nonRepudiation,keyEncipherment,dataEncipherment', '--nsCertType', @@ -485,7 +538,7 @@ class NssSsl(object): '-out', '%s/%s%s.der' % (self._certdb, USER_PREFIX, name), ])
- return True + return subject
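Since create_rsa_user() now returns the generated subject instead of True, callers can feed it straight into certmap or bind-identity setup. A hedged sketch (the NssSsl constructor argument is an assumption inferred from the __init__ shown above):

    ssl = NssSsl(dirsrv=inst)
    subject = ssl.create_rsa_user('alice')
    # subject is 'CN=alice,O=testing,L=389ds,ST=Queensland,C=AU' per USER_ISSUER above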
def get_rsa_user(self, name): """ diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py index 429d583..0e72310 100644 --- a/src/lib389/lib389/replica.py +++ b/src/lib389/lib389/replica.py @@ -10,15 +10,30 @@ import ldap import os import decimal import time +import logging +import uuid + +from itertools import permutations from lib389._constants import * from lib389.properties import * -from lib389.utils import normalizeDN, escapeDNValue, ensure_bytes +from lib389.utils import normalizeDN, escapeDNValue, ensure_bytes, ensure_str, ensure_list_str, ds_is_older from lib389._replication import RUV from lib389.repltools import ReplTools from lib389 import DirSrv, Entry, NoSuchEntryError, InvalidArgumentError from lib389._mapped_object import DSLdapObjects, DSLdapObject +from lib389.passwd import password_generate +from lib389.mappingTree import MappingTrees +from lib389.agreement import Agreements +from lib389.changelog import Changelog5 + from lib389.idm.domain import Domain
+from lib389.idm.group import Groups +from lib389.idm.services import ServiceAccounts +from lib389.idm.organisationalunit import OrganisationalUnits +
class ReplicaLegacy(object): proxied_methods = 'search_s getEntry'.split() @@ -791,6 +806,81 @@ class ReplicaLegacy(object): raise ValueError('Failed to update replica: ' + str(e))
+class RUV(object): + """Represents the server in memory RUV object. The RUV contains each + update vector the server knows of, along with knowledge of the CSN state of + the replicas we have sent data to. + + :param ruvs: A list of nsds50ruv values. + :type ruvs: list[str] + :param logger: A logging interface. + :type logger: logging object + """ + + def __init__(self, ruvs, logger=None): + if logger is not None: + self._log = logger + else: + self._log = logging.getLogger(__name__) + self._rids = [] + self._rid_csn = {} + self._rid_url = {} + self._data_generation = None + # Process the array of data + for r in ruvs: + pr = r.replace('{', '').replace('}', '').split(' ') + if pr[0] == 'replicageneration': + # replicageneration 5a2ffd0f000000010000 + self._data_generation = pr[1] + elif pr[0] == 'replica': + # replica 1 ldap://ldapkdc.example.com:39001 5a2ffd0f000100010000 5a2ffd0f000200010000 + # A rid may have no csn yet (no writes); fall back to a zero csn below. + rid = pr[1] + self._rids.append(rid) + self._rid_url[rid] = pr[2] + try: + self._rid_csn[rid] = pr[4] + except IndexError: + self._rid_csn[rid] = '00000000000000000000' + + def alloc_rid(self): + """Based on the RUV, determine an available RID for the replication + topology that is unique. + + :returns: str + """ + self._log.debug("Allocated rids: %s" % self._rids) + for i in range(1, 65534): + self._log.debug("Testing ... %s" % i) + if str(i) not in self._rids: + return str(i) + raise Exception("Unable to alloc rid!") + + def is_synced(self, other_ruv): + """Compare two server ruvs to determine if they are synced. This does not + mean that replication is in sync (due to things like fractional repl), but + in some cases can show that "at least some known point" has been achieved in + the replication process. + + :param other_ruv: The other ruv object + :type other_ruv: RUV object + :returns: bool + """ + self._log.debug("RUV: Comparing dg %s %s" % (self._data_generation, other_ruv._data_generation)) + if self._data_generation != other_ruv._data_generation: + self._log.debug("RUV: Incorrect datageneration") + return False + if set(self._rids) != set(other_ruv._rids): + self._log.debug("RUV: Incorrect rid lists, is sync working?") + return False + for rid in self._rids: + my_csn = self._rid_csn.get(rid, '00000000000000000000') + other_csn = other_ruv._rid_csn.get(rid, '00000000000000000000') + self._log.debug("RUV: Comparing csn %s %s %s" % (rid, my_csn, other_csn)) + if my_csn < other_csn: + return False + return True + class Replica(DSLdapObject): """Replica DSLdapObject with: - must attributes = ['cn', 'nsDS5ReplicaType', 'nsDS5ReplicaRoot', @@ -807,15 +897,37 @@ class Replicas(DSLdapObjects): def __init__(self, instance, dn=None): super(Replica, self).__init__(instance, dn) self._rdn_attribute = 'cn' - self._must_attributes = ['cn', REPL_TYPE, - REPL_ROOT, REPL_BINDDN, REPL_ID] - - self._create_objectclasses = ['top', 'extensibleObject', - REPLICA_OBJECTCLASS_VALUE] + self._must_attributes = [ + 'cn', + 'nsDS5ReplicaType', + 'nsDS5ReplicaRoot', + 'nsDS5ReplicaId', + ] + + self._create_objectclasses = [ + 'top', + 'nsds5Replica' + ] + if ds_is_older('1.4.0'): + self._create_objectclasses.append('extensibleobject') self._protected = False self._suffix = None
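To make the parsing above concrete, a small sketch with made-up CSN values, covering the two value formats the loop handles:

    ruv = RUV([
        'replicageneration 5a2ffd0f000000010000',
        'replica 1 ldap://m1.example.com:39001 5a2ffd0f000100010000 5a2ffd10000100010000',
        'replica 2 ldap://m2.example.com:39002',  # no writes yet, so it gets the zero CSN
    ])
    assert ruv.alloc_rid() == '3'  # the first rid not already in use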
- @staticmethod + def _validate(self, rdn, properties, basedn): + (tdn, str_props) = super(Replica, self)._validate(rdn, properties, basedn) + # We override the tdn here. We use the MT for the suffix. + mts = MappingTrees(self._instance) + s_suffix = ensure_str(str_props['nsDS5ReplicaRoot'][0]) + mt = mts.get(s_suffix) + tdn = 'cn=replica,%s' % mt.dn + return (tdn, str_props) + + def _populate_suffix(self): + """Some internal tasks need this populated. + """ + if self._suffix is None: + self._suffix = self.get_attr_val_utf8('nsDS5ReplicaRoot') + def _valid_role(role): """Return True if role is valid
@@ -831,7 +943,6 @@ class Replica(DSLdapObject): else: return True
- @staticmethod def _valid_rid(role, rid=None): """Return True if rid is valid for the replica role
@@ -866,49 +977,21 @@ class Replica(DSLdapObject): :raises: - InvalidArgumentError - if suffix is missing - ldap.LDAPError - for all other update failures """ - - # Get the suffix - suffix = self.get_attr_val(REPL_ROOT) - if not suffix: - self.log.fatal("disableReplication: suffix is not defined") - raise InvalidArgumentError("suffix missing") - # Delete the agreements - try: - self.deleteAgreements() - except ldap.LDAPError as e: - self.log.fatal('Failed to delete replica agreements!') - raise e - + self._delete_agreements() # Delete the replica - try: - super(Replica, self).delete() - except ldap.LDAPError as e: - self.log.fatal('Failed to delete replica configuration ' + - '(%s), error: %s' % (self._dn, str(e))) - raise e + return super(Replica, self).delete()
- def deleteAgreements(self): + def _delete_agreements(self): """Delete all the agreements for the suffix
:raises: LDAPError - If failing to delete or search for agreements """ - - # Delete the agreements - try: - suffix = self.get_attr_val(REPL_ROOT) - agmts = self._instance.agreement.list(suffix=suffix) - for agmt in agmts: - try: - self._instance.delete_s(agmt.dn) - except ldap.LDAPError as e: - self.log.fatal('Failed to delete replica agreement (%s),' + - ' error: %s' % (agmt.dn, str(e))) - raise e - except ldap.LDAPError as e: - self.log.fatal('Failed to search for replication agreements ' + - 'under (%s), error: %s' % (self._dn, str(e))) - raise e + # Get the suffix + self._populate_suffix() + agmts = self.get_agreements() + for agmt in agmts.list(): + agmt.delete()
def promote(self, newrole, binddn=None, rid=None): """Promote the replica to hub or master @@ -1038,127 +1121,7 @@ class Replica(DSLdapObject):
return replicarole
- def check_init(self, agmtdn): - """Check that a total update has completed - - :param agmtdn: The agreement DN - :type agmtdn: str - - :returns: A tuple - first element is done/not done, 2nd is no error/has error - - THIS SHOULD BE IN THE NEW AGREEMENT CLASS - """ - - done, hasError = False, 0 - attrlist = ['cn', - 'nsds5BeginReplicaRefresh', - 'nsds5replicaUpdateInProgress', - 'nsds5ReplicaLastInitStatus', - 'nsds5ReplicaLastInitStart', - 'nsds5ReplicaLastInitEnd'] - try: - entry = self._instance.getEntry( - agmtdn, ldap.SCOPE_BASE, "(objectclass=*)", attrlist) - except NoSuchEntryError: - self._log.exception("Error reading status from agreement {}".format(agmtdn)) - hasError = 1 - else: - refresh = entry.nsds5BeginReplicaRefresh - inprogress = entry.nsds5replicaUpdateInProgress - status = entry.nsds5ReplicaLastInitStatus - if not refresh: # done - check status - if not status: - print("No status yet") - elif status.find(b"replica busy") > -1: - print("Update failed - replica busy - status", status) - done = True - hasError = 2 - elif status.find(b"Total update succeeded") > -1: - print("Update succeeded: status ", status) - done = True - elif inprogress.lower() == 'true': - print("Update in progress yet not in progress: status ", - status) - else: - print("Update failed: status", status) - hasError = 1 - done = True - elif self.verbose: - print("Update in progress: status", status) - - return done, hasError - - def wait_init(self, agmtdn): - """Initialize replication and wait for completion. - - :param agmtdn: The agreement DN - :type agmtdn: str - - :returns: 0 if the initialization is complete - - THIS SHOULD BE IN THE NEW AGREEMENT CLASS - """ - - done = False - haserror = 0 - while not done and not haserror: - time.sleep(1) # give it a few seconds to get going - done, haserror = self.check_init(agmtdn) - return haserror - - def start_and_wait(self, agmtdn): - """Initialize an agreement and wait for it to complete - - :param agmtdn: The agreement DN - :type agmtdn: str - - :returns: 0 if the initialization is complete - - THIS SHOULD BE IN THE NEW AGREEMENT CLASS - """ - - rc = self.start_async(agmtdn) - if not rc: - rc = self.wait_init(agmtdn) - if rc == 2: # replica busy - retry - rc = self.start_and_wait(agmtdn) - return rc - - def start_async(self, agmtdn): - """Initialize replication without waiting - - :param agmtdn: The agreement DN - :type agmtdn: str - - :returns: None - - THIS SHOULD BE IN THE NEW AGREEMENT CLASS - """ - - self._log.info("Starting async replication %s" % agmtdn) - mod = [(ldap.MOD_ADD, 'nsds5BeginReplicaRefresh', b'start')] - self._instance.modify_s(agmtdn, mod) - - def get_ruv_entry(self): - """Return the database RUV entry - - :returns: The database RUV entry - :raises: ValeuError - If suffix is not setup for replication - LDAPError - If there is a problem trying to search for the RUV - """ - - try: - entry = self._instance.search_s(self._suffix, - ldap.SCOPE_SUBTREE, - REPLICA_RUV_FILTER) - if entry: - return entry[0] - else: - raise ValueError('Suffix (%s) is not setup for replication' % self._suffix) - except ldap.LDAPError as e: - raise e - - def test(self, *replica_dirsrvs): + def test_replication(self, replica_dirsrvs): """Make a "dummy" update on the replicated suffix, and check all the provided replicas to see if they received the update.
@@ -1204,6 +1167,42 @@ class Replica(DSLdapObject):
return True
+ def get_agreements(self): + """Return the set of agreements related to this suffix replica + + :returns: Agreements object + """ + return Agreements(self._instance, self.dn) + + def get_rid(self): + """Return the current replica's RID for this suffix + + :returns: str + """ + return self.get_attr_val_utf8('nsDS5ReplicaId') + + def get_ruv(self): + """Return the in memory ruv of this replica suffix. + + :returns: RUV object + """ + self._populate_suffix() + + ent = self._instance.search_ext_s( + base=self._suffix, + scope=ldap.SCOPE_SUBTREE, + filterstr='(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))', + attrlist=['nsds50ruv'], + serverctrls=self._server_controls, clientctrls=self._client_controls)[0] + + data = ensure_list_str(ent.getValues('nsds50ruv')) + + return RUV(data) + + def begin_task_cl2ldif(self): + """Begin the changelog to ldif task + """ + self.replace('nsds5task', 'cl2ldif')
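A brief sketch of the new per-replica helpers (instance and suffix are placeholders):

    replica = Replicas(inst).get('dc=example,dc=com')
    replica.get_rid()                 # e.g. '1'
    ruv = replica.get_ruv()           # RUV object read from the database RUV tombstone entry
    for agmt in replica.get_agreements().list():
        agmt.begin_reinit()           # total init each consumer of this replica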
class Replicas(DSLdapObjects): """Replica DSLdapObjects for all replicas @@ -1235,217 +1234,628 @@ class Replicas(DSLdapObjects): replica = super(Replicas, self).get(selector, dn) if replica: # Get and set the replica's suffix - replica._suffix = replica.get_attr_val(REPL_ROOT) + replica._populate_suffix() return replica
- def enable(self, suffix, role, replicaID=None, args=None): - """Enable replication for this suffix +class BootstrapReplicationManager(DSLdapObject): + """A Replication Manager credential for bootstrapping the repl process. + This is used by the replication manager object to coordinate the initial + init so that server creds are available.
- :param suffix: The suffix to enable replication for - :type suffix: str - :param role: MASTER, HUB and CONSUMER - :type role: ReplicaRole - :param replicaID: number that identify the supplier replica - (role=ReplicaRole.MASTER) in the topology. - For hub/consumer (role=ReplicaRole.HUB or - ReplicaRole.CONSUMER), rid value is not used. - This parameter is mandatory for supplier. - :type replicaID: int - :param args: A dictionary of additional replica properties - :type args: dict - - :returns: Replica DSLdapObject - :raises: - InvalidArgumentError - if missing mandatory arguments - - ValueError - argument with invalid value - - LDAPError - failed to add replica entry - """ + :param instance: An instance + :type instance: lib389.DirSrv + :param dn: The dn to create + :type dn: str + """ + def __init__(self, instance, dn='cn=replication manager,cn=config'): + super(BootstrapReplicationManager, self).__init__(instance, dn) + self._rdn_attribute = 'cn' + self._must_attributes = ['cn', 'userPassword'] + self._create_objectclasses = [ + 'top', + 'netscapeServer' + ] + self._protected = False + self.common_name = 'replication manager'
- # Normalize the suffix - suffix = normalizeDN(suffix)
- # Check validity of role - if not role: - self._log.fatal("Replica.create: replica role is not specified (ReplicaRole.*)") - raise InvalidArgumentError("role missing") +class ReplicationManager(object): + """The lib389 replication manager. This is used to coordinate + replicas and agreements between servers.
- if not Replica._valid_role(role): - self._log.fatal("enableReplication: replica role invalid (%s) " % role) - raise ValueError("invalid role: %s" % role) + Unlike the raw replicas / agreement types that manipulate the + server's configuration, this is a "high level" coordination type. + It's capable of taking multiple instances and joining them. It + consumes many lib389 types like Replicas, Agreements and more.
- # role is fine, set the replica type - if role == ReplicaRole.MASTER: - rtype = REPLICA_RDWR_TYPE - # check the validity of 'rid' - if not Replica._valid_rid(role, rid=replicaID): - self._log.fatal("Replica.create: replica role is master but " + - "'rid' is missing or invalid value") - raise InvalidArgumentError("rid missing or invalid value") - else: - rtype = REPLICA_RDONLY_TYPE + It is capable of creating the first master in a topology, joining + masters and consumers to that topology, populating per-server + replication credentials, dynamic rid allocation, and more.
- # Set the properties provided as mandatory parameter - properties = {'cn': 'replica', - REPL_ROOT: suffix, - REPL_ID: str(replicaID), - REPL_TYPE: str(rtype)} + Compared to managing agreements by hand, this takes much simpler + steps to create them. For example:
- # If the properties in args are valid add them to 'properties' - if args: - for prop in args: - if not inProperties(prop, REPLICA_PROPNAME_TO_ATTRNAME): - raise ValueError("unknown property: %s" % prop) - properties[prop] = args[prop] + repl = ReplicationManager(<suffix>) + repl.create_first_master(master1) + repl.join_master(master1, master2)
- # Set flags explicitly, so it will be more readable - if role == ReplicaRole.CONSUMER: - properties[REPL_FLAGS] = str(REPLICA_FLAGS_RDONLY) + In contrast to previous implementations of replication, which required + much more knowledge and many more parameters, this is able to securely add + masters. + + :param suffix: The suffix to replicate. + :type suffix: str + :param logger: A logging interface + :type logger: python logging + + """ + def __init__(self, suffix, logger=None): + self._suffix = suffix + if logger is not None: + self._log = logger else: - properties[REPL_FLAGS] = str(REPLICA_FLAGS_WRITE) + self._log = logging.getLogger(__name__) + self._alloc_rids = []
- # Check if replica entry is already in the mapping-tree + def _ensure_changelog(self, instance): + """Internally guarantee a changelog exists for + an instance. Internal only. + """ + cl = Changelog5(instance) try: - replica = self.get(suffix) - # Should we return an error, or just return the existing relica? - self._log.warn("Already setup replica for suffix %s" % suffix) - return replica + cl.create(properties={ + 'cn': 'changelog5', + 'nsslapd-changelogdir': instance.get_changelog_dir() + }) + except ldap.ALREADY_EXISTS: + pass + + def _inst_to_agreement_name(self, to_instance): + """From an instance, determine the agreement name that we + would use for it. Internal only. + """ + to_replicas = Replicas(to_instance) + to_r = to_replicas.get(self._suffix) + return to_r.get_rid() + + def create_first_master(self, instance): + """In a topology, this creates the "first" master that has the + database and content. A number of bootstrap tasks are performed + on this master, as well as creating its replica type. + + Once the first master is created, all other masters can be joined to + it via "join_master". + + :param instance: An instance + :type instance: lib389.DirSrv + """ + # This is a special wrapper to create. We know it's a master, + # and this is the "first" of the topology. + # So this can wrap it and make it easy. + self._log.debug("Creating first master on %s" % instance.ldapuri) + + self._ensure_changelog(instance) + + rgroup_dn = self._create_service_account(instance, instance) + + # Allocate the first rid, 1. + replicas = Replicas(instance) + replicas.create(properties={ + 'cn': 'replica', + 'nsDS5ReplicaRoot': self._suffix, + 'nsDS5ReplicaId': '1', + 'nsDS5Flags': '1', + 'nsDS5ReplicaType': '3', + 'nsDS5ReplicaBindDNGroup': rgroup_dn, + 'nsds5replicabinddngroupcheckinterval': '0' + }) + self._log.debug("SUCCESS: Created first master on %s" % instance.ldapuri) + + def _create_service_group(self, from_instance): + """Internally create the service group that contains replication managers. + This may become part of the default objects in the future. Internal only. + """ + groups = Groups(from_instance, basedn=self._suffix, rdn=None) + repl_group = groups.ensure_state(properties={ + 'cn': 'replication_managers', + }) + return repl_group + + def _create_service_account(self, from_instance, to_instance): + """Create the server replication service account, and + make it a member of the service group. Internal Only. + """ + repl_group = self._create_service_group(from_instance) + # Create our service account. + ous = OrganisationalUnits(from_instance, self._suffix) + ous.ensure_state(properties={ + 'ou': 'Services' + }) + + # Do we have TLS? + port = to_instance.sslport + + services = ServiceAccounts(from_instance, self._suffix) + # We don't have an agreement yet, so don't bother with the + # password yet ... + repl_service = services.ensure_state(properties={ + 'cn': '%s:%s' % (to_instance.host, port), + }) + + repl_group.ensure_member(repl_service.dn) + + return repl_group.dn + + def _bootstrap_replica(self, from_replica, to_replica, to_instance): + """In the master join process a chicken-and-egg issue arises: + we require the service account on the target master for + our agreement to be valid, but we can't send it that data without + our service account. + + Resolve that issue by "bootstrapping" the database. This creates a + bootstrap replication manager and conducts a one-way total init.
+ Once complete, the bootstrap agreement is removed, and the service + accounts now exist on both ends, allowing the join process to continue. + + Internal Only. + """ + repl_manager_password = password_generate() + # Create a repl manager on the replica + brm = BootstrapReplicationManager(to_instance) + brm.create(properties={ + 'cn': brm.common_name, + 'userPassword': repl_manager_password + }) + + to_replica.set('nsDS5ReplicaBindDN', brm.dn) + + agmt_name = self._inst_to_agreement_name(to_instance) + + # add a temp agreement from A -> B + from_agreements = from_replica.get_agreements() + temp_agmt = from_agreements.create(properties={ + 'cn': "temp_%s" % agmt_name, + 'nsDS5ReplicaRoot': self._suffix, + 'nsDS5ReplicaBindDN': brm.dn, + 'nsDS5ReplicaBindMethod': 'simple', + 'nsDS5ReplicaTransportInfo': 'LDAP', + 'nsds5replicaTimeout': '5', + 'description': "temp_%s" % agmt_name, + 'nsDS5ReplicaHost': to_instance.host, + 'nsDS5ReplicaPort': str(to_instance.port), + 'nsDS5ReplicaCredentials': repl_manager_password, + }) + # Do a replica refresh. + temp_agmt.begin_reinit() + (done, error) = temp_agmt.wait_reinit() + assert done is True + assert error is False + # Now remove the temp agmt between A -> B + temp_agmt.delete() + # Rm the binddn. + to_replica.remove_all('nsDS5ReplicaBindDN') + # Remove the repl manager. + brm.delete() + self._log.info("SUCCESS: bootstrap to %s completed" % to_instance.ldapuri) + + def join_master(self, from_instance, to_instance): + """Join a new master in MMR to this instance. This will complete + a total init of the data "from instance" to "to instance". + + This can be conducted from any master in the topology as "from" master. + + :param from_instance: An instance already in the topology. + :type from_instance: lib389.DirSrv + :param to_instance: An instance to join to the topology. + :type to_instance: lib389.DirSrv + """ + # Is the to_instance already a replica of the suffix? + to_replicas = Replicas(to_instance) + try: + to_r = to_replicas.get(self._suffix) + self._log.warning("to_instance is already a replica for this suffix") + return except ldap.NO_SUCH_OBJECT: pass
- # Create changelog - if (role == ReplicaRole.MASTER) or (role == ReplicaRole.HUB): - self._instance.changelog.create() + # Make sure we replicate this suffix too ... + fr_replicas = Replicas(from_instance) + fr_r = fr_replicas.get(self._suffix)
- # Create the default replica manager entry if it does not exist - if REPL_BINDDN not in properties: - properties[REPL_BINDDN] = defaultProperties[REPLICATION_BIND_DN] - if REPLICATION_BIND_PW not in properties: - repl_pw = defaultProperties[REPLICATION_BIND_PW] - else: - repl_pw = properties[REPLICATION_BIND_PW] - # Remove this property so we don't add it to the replica entry - del properties[REPLICATION_BIND_PW] + # Ensure we have a cl + self._ensure_changelog(to_instance)
- ReplTools.createReplManager(self._instance, - repl_manager_dn=properties[REPL_BINDDN], - repl_manager_pw=repl_pw) + # Create our credentials + repl_dn = self._create_service_account(from_instance, to_instance)
- # Now create the replica entry - mtents = self._instance.mappingtree.list(suffix=suffix) - self._basedn = mtents[0].dn - replica = self.create(RDN_REPLICA, properties) - replica._suffix = suffix + # Find the ruv on from_instance + ruv = fr_r.get_ruv()
- return replica + # Get a free rid + rid = ruv.alloc_rid() + assert rid not in self._alloc_rids + self._alloc_rids.append(rid)
- def disable(self, suffix): - """Disable replication on the suffix specified + self._log.debug("Allocating rid %s" % rid) + # Create replica on to_instance, with bootstrap details. + to_r = to_replicas.create(properties={ + 'cn': 'replica', + 'nsDS5ReplicaRoot': self._suffix, + 'nsDS5ReplicaId': rid, + 'nsDS5Flags': '1', + 'nsDS5ReplicaType': '3', + 'nsds5replicabinddngroupcheckinterval': '0' + })
- :param suffix: Replicated suffix to disable - :type suffix: str + # WARNING: You need to create passwords and agmts BEFORE you tot_init!
- :returns: None - :raises: ValueError is suffix is not being replicated + # Now put in an agreement from to -> from + # both ends. + self.ensure_agreement(from_instance, to_instance) + self.ensure_agreement(to_instance, from_instance, init=True) + + # perform the _bootstrap. This creates a temporary repl manager + # to allow the tot_init to occur. + self._bootstrap_replica(fr_r, to_r, to_instance) + + # Now fix our replica credentials from -> to + to_r.set('nsDS5ReplicaBindDNGroup', repl_dn) + + # Now finally test it ... + self.test_replication(from_instance, to_instance) + self.test_replication(to_instance, from_instance) + # Done! + self._log.info("SUCCESS: joined master from %s to %s" % (from_instance.ldapuri, to_instance.ldapuri)) + + def join_hub(self, from_instance, to_instance): + """Join a new hub to this instance. This will complete + a total init of the data "from instance" to "to instance". + + This can be conducted from any master or hub in the topology as "from" master. + + Not implemented yet. + + :param from_instance: An instance already in the topology. + :type from_instance: lib389.DirSrv + :param to_instance: An instance to join to the topology. + :type to_instance: lib389.DirSrv + """ + # Ensure we have a cl + self._ensure_changelog(to_instance) + raise NotImplementedError("join_hub is not yet implemented") + + def join_consumer(self, from_instance, to_instance): + """Join a new consumer to this instance. This will complete + a total init of the data "from instance" to "to instance". + + This can be conducted from any master or hub in the topology as "from" master.
+ + :param from_instance: An instance already in the topology. + :type from_instance: lib389.DirSrv + :param to_instance: An instance to join to the topology. + :type to_instance: lib389.DirSrv + """ + to_replicas = Replicas(to_instance) try: - replica = self.get(suffix) + to_r = to_replicas.get(self._suffix) + self._log.warning("to_instance is already a replica for this suffix") + return except ldap.NO_SUCH_OBJECT: - raise ValueError('Suffix (%s) is not setup for replication' % suffix) + pass
- role = replica.get_role() - if role in (ReplicaRole.MASTER, ReplicaRole.HUB): - self._instance.changelog.delete() + # Make sure we replicate this suffix too ... + fr_replicas = Replicas(from_instance) + fr_r = fr_replicas.get(self._suffix) + + # Create replica on to_instance, with bootstrap details. + to_r = to_replicas.create(properties={ + 'cn': 'replica', + 'nsDS5ReplicaRoot': self._suffix, + 'nsDS5ReplicaId': '65535', + 'nsDS5Flags': '0', + 'nsDS5ReplicaType': '2', + 'nsds5replicabinddngroupcheckinterval': '0' + }) + + # WARNING: You need to create passwords and agmts BEFORE you tot_init! + repl_group = self._create_service_group(from_instance) + + # Now put in an agreement from to -> from + # both ends. + self.ensure_agreement(from_instance, to_instance) + + # perform the _bootstrap. This creates a temporary repl manager + # to allow the tot_init to occur. + self._bootstrap_replica(fr_r, to_r, to_instance) + + # Now fix our replica credentials from -> to + to_r.set('nsDS5ReplicaBindDNGroup', repl_group.dn) + + # Now finally test it ... + self.test_replication(from_instance, to_instance) + # Done! + self._log.info("SUCCESS: joined consumer from %s to %s" % (from_instance.ldapuri, to_instance.ldapuri)) + + def _get_replica_creds(self, from_instance, write_instance): + """For the master "from_instance" create or derive the credentials + needed for its replication service account. If the credentials are + created, they are written to "write_instance" as a new + service account userPassword. + + This function signature exists for bootstrapping: We need to + link masters A and B, but they have not yet replicated. So we generate + credentials for B, and write them to A's instance, where they will + then be replicated back to B. If this wasn't the case, we would generate + the credentials on B, write them to B, but B has no way to authenticate + to A because the service account doesn't have credentials there yet. + + Internal Only. + """ + # We write all our changes to "write_instance", but we read data + # from the "from" instance. + + dn = None + creds = None + + fr_replicas = Replicas(from_instance) + fr_r = fr_replicas.get(self._suffix) + from_agmts = fr_r.get_agreements() + # see if any exist already .... + agmts = from_agmts.list() + if len(agmts) > 0: + # okay, re-use the creds + agmt = agmts[0] + dn = agmt.get_attr_val_utf8('nsDS5ReplicaBindDN') + creds = agmt.get_attr_val_utf8('nsDS5ReplicaCredentials') + else: + # Create them ... + # Get the service account. + services = ServiceAccounts(write_instance, self._suffix) + sa = services.get('%s:%s' % (from_instance.host, from_instance.sslport)) + creds = password_generate() + # Gen a password + sa.set('userPassword', creds) + dn = sa.dn + + return (dn, creds) + + def ensure_agreement(self, from_instance, to_instance, init=False): + """Guarantee that a replication agreement exists so that 'from_instance' + sends data to 'to_instance'. This can be for *any* instance, master, hub, or + consumer. + + Both instances must have been added to the topology with + create_first_master, join_master or join_consumer. + + :param from_instance: An instance already in the topology. + :type from_instance: lib389.DirSrv + :param to_instance: An instance to replicate to. + :type to_instance: lib389.DirSrv + """ + # Make sure that an agreement from -> to exists. + # At the moment we assert this by checking host and port + # details.
- try: - replica.delete() - except ldap.LDAPError as e: - raise ValueError('Failed to disable replication for suffix ' + - '(%s) LDAP error (%s)' % (suffix, str(e))) + # init = True means to create credentials on the "to" master, because + # we are initialising in reverse.
- def promote(self, suffix, newrole, binddn=None, rid=None): - """Promote the replica to hub or master + # init = False (default) means creds *might* exist, and we create them + # on the "from" master.
- :param newrole: The new replication role for the replica: MASTER and HUB - :type newrole: ReplicaRole - :param binddn: The replication bind dn - only applied to master - :type binddn: str - :param rid: The replication ID, applies only to promotions to "master" - :type rid: int + fr_replicas = Replicas(from_instance) + fr_r = fr_replicas.get(self._suffix)
- :returns: None - :raises: ValueError - If replica is not promoted - """ + from_agmts = fr_r.get_agreements() + + agmt_name = self._inst_to_agreement_name(to_instance)
- replica = self.get(suffix) try: - replica = self.get(suffix) + agmt = from_agmts.get(agmt_name) + self._log.info("SUCCESS: Agreement from %s to %s already exists" % (from_instance.ldapuri, to_instance.ldapuri)) + return except ldap.NO_SUCH_OBJECT: - raise ValueError('Suffix (%s) is not setup for replication' % suffix) - replica.promote(newrole, binddn, rid) + # Okay, it doesn't exist, let's go ahead! + pass
- def demote(self, suffix, newrole): - """Demote a replica to a hub or consumer + if init is True: + (dn, creds) = self._get_replica_creds(from_instance, to_instance) + else: + (dn, creds) = self._get_replica_creds(from_instance, from_instance) + + assert dn is not None + assert creds is not None + + agmt = from_agmts.create(properties={ + 'cn': agmt_name, + 'nsDS5ReplicaRoot': self._suffix, + 'nsDS5ReplicaBindDN': dn, + 'nsDS5ReplicaBindMethod': 'simple', + 'nsDS5ReplicaTransportInfo': 'LDAP', + 'nsds5replicaTimeout': '5', + 'description': agmt_name, + 'nsDS5ReplicaHost': to_instance.host, + 'nsDS5ReplicaPort': str(to_instance.port), + 'nsDS5ReplicaCredentials': creds, + }) + # Done! + self._log.info("SUCCESS: Agreement from %s to %s was created" % (from_instance.ldapuri, to_instance.ldapuri)) + return agmt + + def remove_master(self, instance, remaining_instances=[], purge_sa=True): + """Remove an instance from the replication topology. + + If purge_sa is true, remove the instance's service account. + + The purge_sa *must* be conducted on a remaining master to guarantee + the result. + + We recommend that remaining_instances contains *all* masters that have an + agreement to instance, to ensure no dangling agreements exist. Masters + with no agreement are skipped. + + :param instance: An instance to remove from the topology. + :type instance: lib389.DirSrv + :param remaining_instances: The remaining masters of the topology. + :type remaining_instances: list[lib389.DirSrv] + :param purge_sa: Purge the service account for instance + :type purge_sa: bool + """ + if purge_sa and len(remaining_instances) > 0: + services = ServiceAccounts(remaining_instances[0], self._suffix) + try: + sa = services.get('%s:%s' % (instance.host, instance.sslport)) + sa.delete() + except ldap.NO_SUCH_OBJECT: + # It's already gone ... + pass
- :param newrole: The new replication role for the replica: CONSUMER and HUB - :type newrole: ReplicaRole + agmt_name = self._inst_to_agreement_name(instance) + for r_inst in remaining_instances: + agmts = Agreements(r_inst) + try: + agmt = agmts.get(agmt_name) + agmt.delete() + except ldap.NO_SUCH_OBJECT: + # No agreement, that's good! + pass
- :returns: None - :raises: ValueError - If replica is not demoted + fr_replicas = Replicas(instance) + fr_r = fr_replicas.get(self._suffix) + # This should delete the agreements .... + fr_r.delete() + + def disable_to_master(self, to_instance, from_instances=[]): + """For all masters "from" disable all agreements "to" instance. + + :param to_instance: The instance to stop receiving data. + :type to_instance: lib389.DirSrv + :param from_instances: The instances to stop sending data. + :type from_instances: list[lib389.DirSrv] + """ + agmt_name = self._inst_to_agreement_name(to_instance) + for r_inst in from_instances: + agmts = Agreements(r_inst) + agmt = agmts.get(agmt_name) + agmt.pause() + + def enable_to_master(self, to_instance, from_instances=[]): + """For all masters "from" enable all agreements "to" instance. + + :param to_instance: The instance to start receiving data. + :type to_instance: lib389.DirSrv + :param from_instances: The instances to start sending data. + :type from_instances: list[lib389.DirSrv] + """ + agmt_name = self._inst_to_agreement_name(to_instance) + for r_inst in from_instances: + agmts = Agreements(r_inst) + agmt = agmts.get(agmt_name) + agmt.resume() + + def wait_for_ruv(self, from_instance, to_instance, timeout=20): + """Wait until the current in-memory RUV of 'from_instance' has been + advanced past on 'to_instance'. Note this does not mean the RUVs are + "exact matches", only that some set of CSN states has been advanced past. + Topics like fractional replication may or may not interfere with this process. + + In essence this is a rough check that to_instance is at least + at the replication state of from_instance. You should consider using + wait_for_replication instead for a guarantee. + + :param from_instance: The instance whose state we want to check from + :type from_instance: lib389.DirSrv + :param to_instance: The instance whose state we expect to match the from state. + :type to_instance: lib389.DirSrv
- replica = self.get(suffix) - try: - replica = self.get(suffix) - except ldap.NO_SUCH_OBJECT: - raise ValueError('Suffix (%s) is not setup for replication' % suffix) - replica.demote(newrole) + """ + fr_replicas = Replicas(from_instance) + fr_r = fr_replicas.get(self._suffix) + + to_replicas = Replicas(to_instance) + to_r = to_replicas.get(self._suffix) + + from_ruv = fr_r.get_ruv() + + for i in range(0, timeout): + to_ruv = to_r.get_ruv() + if to_ruv.is_synced(from_ruv): + self._log.info("SUCCESS: RUV from %s to %s is in sync" % (from_instance.ldapuri, to_instance.ldapuri)) + return True + time.sleep(1) + raise Exception("RUV did not sync in time!")
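As a sketch of how the two checks defined here differ in strength (m1 and m2 are placeholder DirSrv instances; wait_for_replication is defined just below):

    repl = ReplicationManager('dc=example,dc=com')
    repl.wait_for_ruv(m1, m2)          # rough: m2 has advanced past m1's current RUV
    repl.wait_for_replication(m1, m2)  # stronger: a real write from m1 was observed on m2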
- def get_dn(self, suffix): - """Return the DN of the replica from cn=config, this is also - known as the mapping tree entry + def wait_for_replication(self, from_instance, to_instance, timeout=20): + """Wait for a replication event to occur from instance to instance. This + shows some point of synchronisation has occurred.
- :param suffix: The replication suffix to get the mapping tree DN - :type suffix: str + :param from_instance: The instance whose state we want to check from + :type from_instance: lib389.DirSrv + :param to_instance: The instance whose state we expect to match the from state. + :type to_instance: lib389.DirSrv + :param timeout: Fail after timeout seconds. + :type timeout: int
- :returns: The DN of the replication entry from cn=config """ + # Touch something then wait_for_replication. + from_groups = Groups(from_instance, basedn=self._suffix, rdn=None) + to_groups = Groups(to_instance, basedn=self._suffix, rdn=None) + from_group = from_groups.get('replication_managers') + to_group = to_groups.get('replication_managers')
- try: - replica = self.get(suffix) - except ldap.NO_SUCH_OBJECT: - raise ValueError('Suffix (%s) is not setup for replication' % suffix) - return replica._dn + change = str(uuid.uuid4()) + + from_group.replace('description', change) + + for i in range(0, timeout): + desc = to_group.get_attr_val_utf8('description') + if change == desc: + self._log.info("SUCCESS: Replication from %s to %s is working" % (from_instance.ldapuri, to_instance.ldapuri)) + return True + time.sleep(1) + raise Exception("Replication did not sync in time!")
- def get_ruv_entry(self, suffix): - """Return the database RUV entry for the provided suffix + + + def test_replication(self, from_instance, to_instance, timeout=20): + """Wait for a replication event to occur from instance to instance. This + shows some point of synchronisation has occurred. + + :param from_instance: The instance whose state we want to check from + :type from_instance: lib389.DirSrv + :param to_instance: The instance whose state we expect to match the from state. + :type to_instance: lib389.DirSrv + :param timeout: Fail after timeout seconds. + :type timeout: int
- :returns: The database RUV entry - :raises: - ValeuError - If suffix is not setup for replication - - LDAPError - If there is a problem trying to search for the RUV """ + # It's the same operation as wait_for_replication .... + self.wait_for_replication(from_instance, to_instance, timeout)
- try: - replica = self.get(suffix) - except ldap.NO_SUCH_OBJECT: - raise ValueError('Suffix (%s) is not setup for replication' % suffix) - return replica.get_ruv_entry() + def test_replication_topology(self, instances, timeout=20): + """Confirm replication works between all permutations of masters + in the topology.
- def test(self, suffix, *replica_dirsrvs): - """Make a "dummy" update on the the replicated suffix, and check - all the provided replicas to see if they received the update. + :param instances: The masters. + :type instances: list[lib389.DirSrv] + :param timeout: Fail after timeout seconds. + :type timeout: int
- :param suffix: The replicated suffix we want to check - :type suffix: str - :param *replica_dirsrvs: DirSrv instance, DirSrv instance, ... - :type *replica_dirsrvs: list of DirSrv + """ + for p in permutations(instances, 2): + a, b = p + self.test_replication(a, b, timeout)
- :returns: True - if all servers have received the update by this - replica, otherwise return False - :raises: LDAPError - when failing to update/search database + def get_rid(self, instance): + """For a given master, retrieve its RID for this suffix. + + :param instance: The instance + :type instance: lib389.DirSrv + :returns: str + """ + replicas = Replicas(instance) + replica = replicas.get(self._suffix) + return replica.get_rid() +
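Putting the ReplicationManager pieces together, a hedged end-to-end sketch (m1, m2 and c1 are placeholder DirSrv instances):

    repl = ReplicationManager('dc=example,dc=com')
    repl.create_first_master(m1)             # rid 1, changelog, service group
    repl.join_master(m1, m2)                 # total init m1 -> m2, agreements both ways
    repl.join_consumer(m1, c1)               # read-only replica with rid 65535
    repl.test_replication_topology([m1, m2])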
- try: - replica = self.get(suffix) - except ldap.NO_SUCH_OBJECT: - raise ValueError('Suffix (%s) is not setup for replication' % suffix) - return replica.test(*replica_dirsrvs) diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py index adb3cec..7fbf50a 100644 --- a/src/lib389/lib389/tasks.py +++ b/src/lib389/lib389/tasks.py @@ -51,21 +51,23 @@ class Task(DSLdapObject): return None return None
- def wait(self): + def wait(self, timeout=120): """Wait until task is complete.""" - while True: + count = 0 + while count < timeout: if self.is_complete(): break + # timeout is in seconds; we poll every 2 seconds. - time.sleep(1) + count = count + 2 + time.sleep(2)
def create(self, rdn=None, properties=None, basedn=None): + # Avoid a shared mutable default dict; build a fresh one per call. + if properties is None: + properties = {} properties['cn'] = self.cn return super(Task, self).create(rdn, properties, basedn)
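A sketch of the reworked task polling (MemberOfFixupTask appears earlier in this file; the property names here are assumptions for illustration, not taken from this patch):

    task = MemberOfFixupTask(inst).create(properties={
        'basedn': 'dc=example,dc=com',
        'filter': '(objectclass=*)',
    })
    task.wait(timeout=60)      # polls is_complete() every 2 seconds
    assert task.is_complete()  # wait() returns either way, so re-check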
@staticmethod def _get_task_date(): """Return a timestamp to use in naming new task entries.""" - return datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + return datetime.now().isoformat()
class MemberOfFixupTask(Task): @@ -97,6 +99,66 @@ class SchemaReloadTask(Task):
super(SchemaReloadTask, self).__init__(instance, dn)
+class AbortCleanAllRUVTask(Task): + """Abort the Clean All Ruv task on all masters. You should + call this from "CleanAllRUVTask.abort()" instead to provide + proper linking of the task information. + + :param instance: The instance + :type instance: lib389.DirSrv + """ + def __init__(self, instance, dn=None): + self.cn = 'abortcleanallruv_' + Task._get_task_date() + dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS + + super(AbortCleanAllRUVTask, self).__init__(instance, dn) + +class CleanAllRUVTask(Task): + """Create the clean all ruv task. This will be replicated through + a topology to remove non-present ruvs. Note that if a ruv is NOT + able to be removed, this indicates a dangling replication agreement + on *some* master in the topology. + + :param instance: The instance + :type instance: lib389.DirSrv + """ + def __init__(self, instance, dn=None): + self.cn = 'cleanallruv_' + Task._get_task_date() + dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS + self._properties = None + + super(CleanAllRUVTask, self).__init__(instance, dn) + + def create(self, rdn=None, properties=None, basedn=None): + """Create the cleanallruvtask. + + :param rdn: RDN of the new entry. + :type rdn: str + :param properties: Attributes for the new entry + :type properties: dict + :param basedn: Basedn to create the entry. Do not change this. + :type basedn: str + """ + # Stash the props for abort + self._properties = properties + return super(CleanAllRUVTask, self).create(rdn, properties, basedn) + + def abort(self, certify=False): + """Abort the current cleanallruvtask. + + :param certify: certify abort on all masters + :type certify: bool + :returns: AbortCleanAllRUVTask + """ + if certify is True: + self._properties['replica-certify-all'] = 'yes' + else: + self._properties['replica-certify-all'] = 'no' + # Then create the abort. + abort_task = AbortCleanAllRUVTask(self._instance) + abort_task.create(properties=self._properties) + return abort_task + class Tasks(object): proxied_methods = 'search_s getEntry'.split()
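A hedged usage sketch for the new task types (the replica-* attribute names are the server's cleanAllRUV task attributes, assumed here rather than shown in this patch):

    task = CleanAllRUVTask(inst)
    task.create(properties={
        'replica-id': '4',
        'replica-base-dn': 'dc=example,dc=com',
        'replica-force-cleaning': 'no',
    })
    task.wait()
    # If a master is unreachable and the clean hangs, abort across the topology:
    abort_task = task.abort(certify=True)
    abort_task.wait()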
diff --git a/src/lib389/lib389/tombstone.py b/src/lib389/lib389/tombstone.py
new file mode 100644
index 0000000..154fa1a
--- /dev/null
+++ b/src/lib389/lib389/tombstone.py
@@ -0,0 +1,90 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2017 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+
+import ldap
+
+from lib389._entry import Entry
+
+from lib389._mapped_object import DSLdapObject, DSLdapObjects, _gen_and, _gen_filter, _term_gen
+
+class Tombstone(DSLdapObject):
+    """A tombstone is created during a conflict or a delete in a
+    replicated environment. It can be useful to access these to see
+    conflicts, or to restore deleted entries in some cases.
+
+    :param instance: An instance
+    :type instance: lib389.DirSrv
+    :param dn: The DN of the tombstone
+    :type dn: str
+    """
+    def __init__(self, instance, dn=None):
+        super(Tombstone, self).__init__(instance, dn)
+        self._rdn_attribute = 'nsUniqueId'
+        self._create_objectclasses = ['nsTombStone']
+        self._protected = True
+        # We need to always add this filter, else we won't see the tombstone
+        self._object_filter = '(objectclass=nsTombStone)'
+
+    def revive(self):
+        """Revive this object within the tree, duplicating as much as
+        possible and excluding some internal attributes.
+        """
+        orig_dn = self.get_attr_val_utf8('nscpEntryDN')
+        self._log.info("Reviving %s -> %s" % (self.dn, orig_dn))
+        # Get all our attributes
+        properties = self.get_all_attrs()
+        properties.pop('nsuniqueid', None)
+        properties.pop('modifiersname', None)
+        properties.pop('createtimestamp', None)
+        properties.pop('creatorsname', None)
+        properties.pop('modifytimestamp', None)
+        properties.pop('entryid', None)
+        properties.pop('entrydn', None)
+        properties.pop('parentid', None)
+        properties.pop('nsparentuniqueid', None)
+        properties.pop('nstombstonecsn', None)
+        properties.pop('nscpentrydn', None)
+        properties['objectclass'].remove(b'nsTombstone')
+
+        e = Entry(orig_dn)
+        e.update(properties)
+        self._instance.add_ext_s(e, serverctrls=self._server_controls, clientctrls=self._client_controls)
+
+class Tombstones(DSLdapObjects):
+    """Represents the set of tombstone objects that may exist on this
+    replica. Tombstones are locally generated, so they are unique to
+    individual masters and may or may not correlate to tombstones on
+    other masters.
+
+    :param instance: An instance
+    :type instance: lib389.DirSrv
+    :param basedn: Tree to search for tombstones in
+    :type basedn: str
+    """
+    def __init__(self, instance, basedn):
+        super(Tombstones, self).__init__(instance)
+        self._objectclasses = ['nstombstone']
+        # Try some common ones ....
+        self._filterattrs = ['nsUniqueId', 'cn', 'uid', 'ou']
+        self._childobject = Tombstone
+        self._basedn = basedn
+
+    # This gives us the ruv exclusion.
+    def _get_objectclass_filter(self):
+        """An internal function to help find tombstones. They require
+        special additions to filters, and this is part of the
+        DSLdapObjects framework that lets us emit these for inclusion
+        in our searches.
+
+        Internal only.
+ """ + return _gen_and( + _gen_filter(_term_gen('objectclass'), self._objectclasses, extra='(!(nsUniqueId=ffffffff-ffffffff-ffffffff-ffffffff))') + ) + diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py index 7e7bc0c..ab9b2bc 100644 --- a/src/lib389/lib389/topologies.py +++ b/src/lib389/lib389/topologies.py @@ -18,9 +18,9 @@ import pytest from lib389 import DirSrv from lib389.nss_ssl import NssSsl from lib389.utils import generate_ds_params -from lib389.replica import Replicas from lib389.mit_krb5 import MitKrb5 from lib389.saslmap import SaslMappings +from lib389.replica import ReplicationManager, Replicas
diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py
index 7e7bc0c..ab9b2bc 100644
--- a/src/lib389/lib389/topologies.py
+++ b/src/lib389/lib389/topologies.py
@@ -18,9 +18,9 @@ import pytest
 from lib389 import DirSrv
 from lib389.nss_ssl import NssSsl
 from lib389.utils import generate_ds_params
-from lib389.replica import Replicas
 from lib389.mit_krb5 import MitKrb5
 from lib389.saslmap import SaslMappings
+from lib389.replica import ReplicationManager, Replicas
 from lib389._constants import (SER_HOST, SER_PORT, SER_SERVERID_PROP,
                                SER_CREATION_SUFFIX, SER_SECURE_PORT,
                                ReplicaRole, DEFAULT_SUFFIX, REPLICA_ID,
@@ -74,8 +74,8 @@ def create_topology(topo_dict):
         args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP]
         args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-        args_copied = args_instance.copy()
-        instance.allocate(args_copied)
+        instance.allocate(args_instance)
+
         instance_exists = instance.exists()
         if instance_exists:
             instance.delete()
@@ -93,46 +93,46 @@ def create_topology(topo_dict):
         if role == ReplicaRole.CONSUMER:
             cs[instance.serverid] = instance
             instances.update(cs)
-        log.info("Instance with parameters {} was created.".format(args_copied))
-
-        # Set up replication
-        if role in (ReplicaRole.MASTER, ReplicaRole.CONSUMER):
-            replicas = Replicas(instance)
-            replica = replicas.enable(DEFAULT_SUFFIX, role, instance_data[REPLICA_ID])
-            replica_dict[replica] = instance
-
-    for role_from in topo_dict.keys():
-        # Do not create agreements on consumer
-        if role_from == ReplicaRole.CONSUMER:
+        log.info("Instance with parameters {} was created.".format(args_instance))
+
+    # Start with a single master, and create it "first".
+    first_master = None
+    try:
+        first_master = list(ms.values())[0]
+        log.info("Creating replication topology.")
+        # Now get the first master ready.
+        repl = ReplicationManager(DEFAULT_SUFFIX)
+        repl.create_first_master(first_master)
+    except IndexError:
+        pass
+
+    # Now init the other masters from this.
+    # This will reinit m, and put a bi-directional agreement
+    # in place.
+    for m in ms.values():
+        # Skip the first master.
+        if m is first_master:
             continue
-
-        # Create agreements: master -> masters, consumers
-        for inst_num_from in range(1, topo_dict[role_from]+1):
-            roles_to = [ReplicaRole.MASTER, ReplicaRole.CONSUMER]
-
-            for role_to in [role for role in topo_dict if role in roles_to]:
-                for inst_num_to in range(1, topo_dict[role_to]+1):
-                    # Exclude the instance we created it from
-                    if role_from != role_to or inst_num_from != inst_num_to:
-                        inst_from_id = "{}{}".format(role_from.name.lower(), inst_num_from)
-                        inst_to_id = "{}{}".format(role_to.name.lower(), inst_num_to)
-                        inst_from = instances[inst_from_id]
-                        inst_to = instances[inst_to_id]
-                        inst_from.agreement.create(suffix=DEFAULT_SUFFIX,
-                                                   host=inst_to.host,
-                                                   port=inst_to.port)
-
-    # Allow the replicas to get situated with the new agreements
-    if replica_dict:
-        time.sleep(5)
-
-    # Initialize all agreements of one master (consumers)
-    for replica_from, inst_from in replica_dict.items():
-        if replica_from.get_role() == ReplicaRole.MASTER:
-            agmts = inst_from.agreement.list(DEFAULT_SUFFIX)
-            for r in map(lambda agmt: replica_from.start_and_wait(agmt.dn), agmts):
-                assert r == 0
-            break
+        log.info("Joining master %s to %s ..." % (m.serverid, first_master.serverid))
+        repl.join_master(first_master, m)
+
+    # Mesh the master agreements.
+    for mo in ms.values():
+        for mi in ms.values():
+            if mo is mi:
+                continue
+            log.info("Ensuring master %s to %s ..." % (mo.serverid, mi.serverid))
+            repl.ensure_agreement(mo, mi)
+
+    # Add master -> consumer agreements.
+    for c in cs.values():
+        log.info("Joining consumer %s to %s ..." % (c.serverid, first_master.serverid))
+        repl.join_consumer(first_master, c)
+
+    for m in ms.values():
+        for c in cs.values():
+            log.info("Ensuring consumer %s from %s ..." % (c.serverid, m.serverid))
+            repl.ensure_agreement(m, c)
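Distilled, the new create_topology() flow replaces the nested per-agreement
loops with a handful of ReplicationManager calls. The same pattern can be
used outside the fixtures; a sketch, assuming m1, m2 and c1 are connected
DirSrv instances:

    from lib389.replica import ReplicationManager
    from lib389._constants import DEFAULT_SUFFIX

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(m1)    # seed master for the topology
    repl.join_master(m1, m2)        # reinit m2, bi-directional agreement
    repl.ensure_agreement(m2, m1)   # idempotent; meshes remaining links
    repl.join_consumer(m1, c1)      # master -> consumer
    repl.ensure_agreement(m2, c1)   # every master feeds every consumer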
     # Clear out the tmp dir
     for instance in instances.values():
@@ -172,15 +172,19 @@ class TopologyMain(object):
         """Pause all agreements in the class instance"""
         for inst in self.all_insts.values():
-            for agreement in inst.agreement.list(suffix=DEFAULT_SUFFIX):
-                inst.agreement.pause(agreement.dn)
+            replicas = Replicas(inst)
+            replica = replicas.get(DEFAULT_SUFFIX)
+            for agreement in replica.get_agreements().list():
+                agreement.pause()
     def resume_all_replicas(self):
         """Resume all agreements in the class instance"""
         for inst in self.all_insts.values():
-            for agreement in inst.agreement.list(suffix=DEFAULT_SUFFIX):
-                inst.agreement.resume(agreement.dn)
+            replicas = Replicas(inst)
+            replica = replicas.get(DEFAULT_SUFFIX)
+            for agreement in replica.get_agreements().list():
+                agreement.resume()
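Both helpers now follow the DSLdapObjects pattern: resolve the Replica for
the suffix, then drive its agreement objects directly instead of passing
DNs through inst.agreement. The same idiom works in ad-hoc test code; a
sketch, with 'inst' standing in for any instance of the topology:

    from lib389.replica import Replicas
    from lib389._constants import DEFAULT_SUFFIX

    replica = Replicas(inst).get(DEFAULT_SUFFIX)
    for agmt in replica.get_agreements().list():
        agmt.pause()    # quiesce replication on this agreement
    # ... exercise the server while replication is paused ...
    for agmt in replica.get_agreements().list():
        agmt.resume()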
     def all_get_dsldapobject(self, dn, otype):
         result = []
@@ -302,6 +306,20 @@ def topology_i3(request):
return topology
+@pytest.fixture(scope="module") +def topology_m1(request): + """Create Replication Deployment with one master and one consumer""" + + topology = create_topology({ReplicaRole.MASTER: 1}) + + def fin(): + if DEBUGGING: + map(lambda inst: inst.stop(), topology.all_insts.values()) + else: + map(lambda inst: inst.delete(), topology.all_insts.values()) + request.addfinalizer(fin) + + return topology
@pytest.fixture(scope="module") def topology_m1c1(request): @@ -309,8 +327,6 @@ def topology_m1c1(request):
     topology = create_topology({ReplicaRole.MASTER: 1,
                                 ReplicaRole.CONSUMER: 1})
-    replicas = Replicas(topology.ms["master1"])
-    assert replicas.test(DEFAULT_SUFFIX, topology.cs["consumer1"])
     def fin():
         if DEBUGGING:
@@ -327,8 +343,6 @@ def topology_m2(request):
     """Create Replication Deployment with two masters"""
     topology = create_topology({ReplicaRole.MASTER: 2})
-    replicas = Replicas(topology.ms["master1"])
-    assert replicas.test(DEFAULT_SUFFIX, topology.ms["master2"])
     def fin():
         if DEBUGGING:
@@ -345,8 +359,6 @@ def topology_m3(request):
     """Create Replication Deployment with three masters"""
     topology = create_topology({ReplicaRole.MASTER: 3})
-    replicas = Replicas(topology.ms["master1"])
-    assert replicas.test(DEFAULT_SUFFIX, topology.ms["master3"])
     def fin():
         if DEBUGGING:
@@ -363,8 +375,6 @@ def topology_m4(request):
     """Create Replication Deployment with four masters"""
     topology = create_topology({ReplicaRole.MASTER: 4})
-    replicas = Replicas(topology.ms["master1"])
-    assert replicas.test(DEFAULT_SUFFIX, topology.ms["master4"])
     def fin():
         if DEBUGGING:
@@ -382,8 +392,6 @@ def topology_m2c2(request):
     topology = create_topology({ReplicaRole.MASTER: 2,
                                 ReplicaRole.CONSUMER: 2})
-    replicas = Replicas(topology.ms["master1"])
-    assert replicas.test(DEFAULT_SUFFIX, topology.cs["consumer1"])
     def fin():
         if DEBUGGING:
@@ -411,19 +419,17 @@ def topology_m1h1c1(request):
         else:
             instance = DirSrv(verbose=False)
         args_instance = {}
-        # args_instance[SER_HOST] = instance_data[SER_HOST]
         args_instance[SER_PORT] = instance_data[SER_PORT]
         args_instance[SER_SECURE_PORT] = instance_data[SER_SECURE_PORT]
         args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP]
         args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-        args_copied = args_instance.copy()
-        instance.allocate(args_copied)
+        instance.allocate(args_instance)
         instance_exists = instance.exists()
         if instance_exists:
             instance.delete()
         instance.create()
         instance.open()
-        log.info("Instance with parameters {} was created.".format(args_copied))
+        log.info("Instance with parameters {} was created.".format(args_instance))
         # Set up replication
         replicas = Replicas(instance)