| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211 |
- # --- BEGIN COPYRIGHT BLOCK ---
- # Copyright (C) 2015 Red Hat, Inc.
- # All rights reserved.
- #
- # License: GPL (version 3 or any later version).
- # See LICENSE for details.
- # --- END COPYRIGHT BLOCK ---
- #
- import os
- import sys
- import time
- import ldap
- import logging
- import pytest
- import threading
- from lib389 import DirSrv, Entry, tools, tasks
- from lib389.tools import DirSrvTools
- from lib389.repltools import ReplTools
- from lib389._constants import *
- from lib389.properties import *
- from lib389.tasks import *
- from lib389.utils import *
# Configure this module's logger: emit everything at DEBUG and above.
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

# Optional deployment prefix for the instances (set externally before the
# topology fixture runs); None means use the default deployment location.
installation1_prefix = None
class AddUsers(threading.Thread):
    """Background worker that adds a batch of test users to an instance.

    Used to generate replication traffic while CleanAllRUV tasks are running.
    """

    def __init__(self, inst, num_users):
        """
        :param inst: DirSrv instance the users are added to
        :param num_users: number of user entries to add
        """
        threading.Thread.__init__(self)
        self.daemon = True
        self.inst = inst
        self.num_users = num_users

    def openConnection(self, inst):
        """Open and return a new connection to our LDAP server."""
        server = DirSrv(verbose=False)
        args_instance[SER_HOST] = inst.host
        args_instance[SER_PORT] = inst.port
        args_instance[SER_SERVERID_PROP] = inst.serverid
        args_standalone = args_instance.copy()
        server.allocate(args_standalone)
        server.open()
        return server

    def run(self):
        """Add self.num_users entries, tolerating expected replication errors."""
        conn = self.openConnection(self.inst)
        idx = 0
        while idx < self.num_users:
            USER_DN = 'uid=' + self.inst.serverid + '_' + str(idx) + ',' + DEFAULT_SUFFIX
            try:
                conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(),
                                            'uid': 'user' + str(idx)})))
            except ldap.UNWILLING_TO_PERFORM:
                # One of the masters was probably put into read only mode - just break out
                break
            except ldap.ALREADY_EXISTS:
                # A retry after partial replication - the entry is already there
                pass
            except ldap.LDAPError as e:
                # Bug fix: 'e.message' is Python 2 only and fails on the
                # error path under Python 3 - use str(e) instead
                log.error('AddUsers: failed to add (' + USER_DN + ') error: ' + str(e))
                assert False
            idx += 1
        conn.close()
def remove_master4_agmts(msg, topology):
    """Remove all the repl agmts to master 4.

    :param msg: label used to prefix the log messages
    :param topology: TopologyReplication fixture object
    """
    log.info('%s: remove all the agreements to master 4...' % msg)
    # Same deletion against masters 1-3; the original code repeated this
    # block three times and the m3 copy's format string was missing its
    # '%s' conversions, raising TypeError on the error path.
    for num, master in ((1, topology.master1),
                        (2, topology.master2),
                        (3, topology.master3)):
        try:
            master.agreement.delete(DEFAULT_SUFFIX,
                                    topology.master4.host,
                                    topology.master4.port)
        except ldap.LDAPError as e:
            log.fatal('%s: Failed to delete agmt(m%d -> m4), error: %s' %
                      (msg, num, str(e)))
            assert False
def check_ruvs(msg, topology):
    """Check masters 1-3 for master 4's rid.

    Polls up to 10 times (sleeping 5 seconds between attempts) until the
    'replica 4' RUV element is gone from all three remaining masters.

    :param msg: label used to prefix the log messages
    :param topology: TopologyReplication fixture object
    :returns: True when all three masters are cleaned, False on timeout
    """
    masters = ((1, topology.master1), (2, topology.master2), (3, topology.master3))
    clean = False
    count = 0
    while not clean and count < 10:
        clean = True
        for num, master in masters:
            try:
                entry = master.search_s(DEFAULT_SUFFIX,
                                        ldap.SCOPE_SUBTREE,
                                        REPLICA_RUV_FILTER)
                if not entry:
                    # Bug fix: the original called the undefined
                    # repl_fail(replica_inst) here, which would raise
                    # NameError instead of failing the test cleanly
                    log.error('%s: Failed to find db tombstone entry from master %d' %
                              (msg, num))
                    assert False
                for ruv in entry[0].getValues('nsds50ruv'):
                    if 'replica 4' in ruv:
                        # Master still carries master 4's RUV element - not cleaned
                        log.error('%s: Master %d not cleaned!' % (msg, num))
                        clean = False
                if clean:
                    log.info('%s: Master %d is cleaned.' % (msg, num))
            except ldap.LDAPError as e:
                log.fatal('%s: Unable to search master %d for db tombstone: %s' %
                          (msg, num, str(e)))
        # Sleep a bit and give the task a chance to clean up...
        time.sleep(5)
        count += 1
    return clean
def task_done(topology, task_dn, timeout=60):
    """Poll the task entry until it completes or *timeout* seconds elapse.

    Completion is signalled by the task entry disappearing, by the search
    returning nothing, or by nsTaskExitCode being set on the entry.

    :param topology: TopologyReplication fixture object (master1 is queried)
    :param task_dn: DN of the task entry to watch
    :param timeout: maximum number of one-second polls
    :returns: True when the task finished, False on timeout or search error
    """
    watched_attrs = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
                     'nsTaskCurrentItem', 'nsTaskTotalItems']
    for _ in range(timeout):
        try:
            task_entry = topology.master1.getEntry(task_dn, attrlist=watched_attrs)
        except ldap.NO_SUCH_OBJECT:
            # The task entry was removed - the task is done
            return True
        except ldap.LDAPError:
            # Unexpected search failure - report the task as not done
            return False
        if not task_entry or task_entry.nsTaskExitCode:
            # No entry, or an exit code has been set: the task completed
            return True
        time.sleep(1)
    return False
class TopologyReplication(object):
    """Holds the four opened master instances plus the DNs of the
    master1-sourced replication agreements (kept for re-initialization)."""

    def __init__(self, master1, master2, master3, master4,
                 m1_m2_agmt, m1_m3_agmt, m1_m4_agmt):
        # Open a connection to each master and keep a reference to it
        for attr_name, master in (('master1', master1), ('master2', master2),
                                  ('master3', master3), ('master4', master4)):
            master.open()
            setattr(self, attr_name, master)
        # Store the agreement dn's for future initializations
        self.m1_m2_agmt = m1_m2_agmt
        self.m1_m3_agmt = m1_m3_agmt
        self.m1_m4_agmt = m1_m4_agmt
@pytest.fixture(scope="module")
def topology(request):
    """Create and return a four-master replication topology.

    Builds masters 1-4, creates a full mesh of replication agreements
    (12 in total), initializes replication from master 1, verifies
    replication works, and registers a finalizer that deletes all four
    instances at teardown.

    The original body repeated the master-creation and agreement-creation
    code 4 and 12 times respectively; it is factored into two helpers with
    the same ordering and side effects.
    """
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    def _create_master(host, port, serverid, replica_id):
        # Allocate a DirSrv instance (removing any stale one), create it,
        # open a connection and enable multi-master replication on it.
        master = DirSrv(verbose=False)
        args_instance[SER_HOST] = host
        args_instance[SER_PORT] = port
        args_instance[SER_SERVERID_PROP] = serverid
        args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
        args_master = args_instance.copy()
        master.allocate(args_master)
        if master.exists():
            master.delete()
        master.create()
        master.open()
        master.replica.enableReplication(suffix=SUFFIX,
                                         role=REPLICAROLE_MASTER,
                                         replicaId=replica_id)
        return master

    def _create_agmt(src, dst):
        # Create one replication agreement from src to dst; exit on failure.
        properties = {RA_NAME: 'meTo_%s:%s' % (dst.host, dst.port),
                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        agmt = src.agreement.create(suffix=SUFFIX, host=dst.host,
                                    port=dst.port, properties=properties)
        if not agmt:
            log.fatal("Fail to create a master -> master replica agreement")
            sys.exit(1)
        log.debug("%s created" % agmt)
        return agmt

    # Creating masters 1 through 4...
    master1 = _create_master(HOST_MASTER_1, PORT_MASTER_1,
                             SERVERID_MASTER_1, REPLICAID_MASTER_1)
    master1.log = log
    master2 = _create_master(HOST_MASTER_2, PORT_MASTER_2,
                             SERVERID_MASTER_2, REPLICAID_MASTER_2)
    master3 = _create_master(HOST_MASTER_3, PORT_MASTER_3,
                             SERVERID_MASTER_3, REPLICAID_MASTER_3)
    master4 = _create_master(HOST_MASTER_4, PORT_MASTER_4,
                             SERVERID_MASTER_4, REPLICAID_MASTER_4)

    #
    # Create all the agreements (full mesh); only the master1-sourced
    # agreement DNs are kept for later re-initializations
    #
    m1_m2_agmt = _create_agmt(master1, master2)
    m1_m3_agmt = _create_agmt(master1, master3)
    m1_m4_agmt = _create_agmt(master1, master4)
    _create_agmt(master2, master1)
    _create_agmt(master2, master3)
    _create_agmt(master2, master4)
    _create_agmt(master3, master1)
    _create_agmt(master3, master2)
    _create_agmt(master3, master4)
    _create_agmt(master4, master1)
    _create_agmt(master4, master2)
    _create_agmt(master4, master3)

    # Allow the replicas to get situated with the new agreements
    time.sleep(5)

    #
    # Initialize all the agreements from master 1
    #
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)
    master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
    master1.waitForReplInit(m1_m3_agmt)
    master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4)
    master1.waitForReplInit(m1_m4_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    def fin():
        # Teardown: remove all four instances
        master1.delete()
        master2.delete()
        master3.delete()
        master4.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master1, master2, master3, master4,
                               m1_m2_agmt, m1_m3_agmt, m1_m4_agmt)
def restore_master4(topology):
    """Restore master 4 after a test removed it.

    Our tests will always be removing master 4, so this common helper
    re-enables replication on it, recreates all the agreements to/from it,
    restarts the servers (which frees master 4's rid for reuse),
    re-initializes replication from master 1, and verifies replication
    works in each critical direction.
    """
    log.info('Restoring master 4...')

    # Enable replication on master 4
    topology.master4.replica.enableReplication(suffix=SUFFIX,
                                               role=REPLICAROLE_MASTER,
                                               replicaId=REPLICAID_MASTER_4)

    def _create_agmt(src, dst):
        # Create one replication agreement from src to dst; exit on failure.
        properties = {RA_NAME: 'meTo_%s:%s' % (dst.host, dst.port),
                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        agmt = src.agreement.create(suffix=SUFFIX, host=dst.host,
                                    port=dst.port, properties=properties)
        if not agmt:
            log.fatal("Fail to create a master -> master replica agreement")
            sys.exit(1)
        log.debug("%s created" % agmt)
        return agmt

    # Create agreements from master 4 -> m1, m2, m3
    _create_agmt(topology.master4, topology.master1)
    _create_agmt(topology.master4, topology.master2)
    _create_agmt(topology.master4, topology.master3)

    # Create agreements from m1, m2, m3 -> master 4
    _create_agmt(topology.master1, topology.master4)
    _create_agmt(topology.master2, topology.master4)
    _create_agmt(topology.master3, topology.master4)

    #
    # Stop the servers - this allows the rid(for master4) to be used again
    #
    topology.master1.stop(timeout=30)
    topology.master2.stop(timeout=30)
    topology.master3.stop(timeout=30)
    topology.master4.stop(timeout=30)

    #
    # Initialize the agreements
    #
    # m1 -> m2
    topology.master1.start(timeout=30)
    topology.master2.start(timeout=30)
    time.sleep(5)
    topology.master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    topology.master1.waitForReplInit(topology.m1_m2_agmt)
    # m1 -> m3
    topology.master3.start(timeout=30)
    time.sleep(5)
    topology.master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
    topology.master1.waitForReplInit(topology.m1_m3_agmt)
    # m1 -> m4
    time.sleep(5)
    topology.master4.start(timeout=30)
    topology.master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4)
    topology.master1.waitForReplInit(topology.m1_m4_agmt)

    #
    # Test replication is working in each critical direction.
    # Bug fix: the original m4 -> m1 failure message said "m4 -> 1".
    #
    replication_checks = ((topology.master1, topology.master2, 'm1 -> m2'),
                          (topology.master1, topology.master4, 'm1 -> m4'),
                          (topology.master4, topology.master1, 'm4 -> m1'))
    for src, dst, label in replication_checks:
        if src.testReplication(DEFAULT_SUFFIX, dst):
            log.info('Replication is working %s.' % label)
        else:
            log.fatal('restore_master4: Replication is not working from %s.' % label)
            assert False

    log.info('Master 4 has been successfully restored.')
def test_cleanallruv_init(topology):
    """
    Make updates on each master to make sure we have the all master RUVs on
    each master, checking replication in every direction between the four
    masters.
    """
    log.info('Initializing cleanAllRUV test suite...')
    masters = ((1, topology.master1), (2, topology.master2),
               (3, topology.master3), (4, topology.master4))
    # Bug fix: the original copy-pasted checks for masters 3 and 4 always
    # reported "master 2 and master N" in their failure messages
    for src_id, src in masters:
        for dst_id, dst in masters:
            if src_id == dst_id:
                continue
            if not src.testReplication(DEFAULT_SUFFIX, dst):
                log.fatal('test_cleanallruv_init: Replication is not working '
                          'between master %d and master %d.' % (src_id, dst_id))
                assert False
    log.info('Initialized cleanAllRUV test suite.')
def test_cleanallruv_clean(topology):
    """
    Disable a master, remove agreements to that master, and clean the RUVs on
    the remaining replicas.
    """
    log.info('Running test_cleanallruv_clean...')

    # Disable master 4
    log.info('test_cleanallruv_clean: disable master 4...')
    try:
        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
    except Exception as e:
        # Bug fix: was a bare "except:" that logged no detail about the failure
        log.fatal('test_cleanallruv_clean: failed to disable replication: %s' %
                  str(e))
        assert False

    # Remove the agreements from the other masters that point to master 4
    remove_master4_agmts("test_cleanallruv_clean", topology)

    # Run the task
    log.info('test_cleanallruv_clean: run the cleanAllRUV task...')
    try:
        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
                                           args={TASK_WAIT: True})
    except ValueError as e:
        # Bug fix: "e.message('desc')" is invalid - message is not callable
        # and is Python 2 only; use str(e)
        log.fatal('test_cleanallruv_clean: Problem running cleanAllRuv task: ' +
                  str(e))
        assert False

    # Check the other masters' RUVs for 'replica 4'
    log.info('test_cleanallruv_clean: check all the masters have been cleaned...')
    clean = check_ruvs("test_cleanallruv_clean", topology)
    if not clean:
        log.fatal('test_cleanallruv_clean: Failed to clean replicas')
        assert False
    log.info('test_cleanallruv_clean PASSED, restoring master 4...')

    #
    # Cleanup - restore master 4
    #
    restore_master4(topology)
def test_cleanallruv_clean_restart(topology):
    """
    Test that if a master is stopped during the clean process, the task
    resumes and finishes when the master is started again.
    """
    log.info('Running test_cleanallruv_clean_restart...')

    # Disable master 4
    log.info('test_cleanallruv_clean_restart: disable master 4...')
    try:
        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
    except Exception as e:
        # Bug fix: was a bare "except:" that logged no detail about the failure
        log.fatal('test_cleanallruv_clean_restart: failed to disable '
                  'replication: %s' % str(e))
        assert False

    # Remove the agreements from the other masters that point to master 4
    # (bug fix: the original message used the wrong test-name prefix)
    log.info('test_cleanallruv_clean_restart: remove all the agreements to master 4...')
    remove_master4_agmts("test_cleanallruv_clean restart", topology)

    # Stop master 3 to keep the task running, so we can stop master 1...
    topology.master3.stop(timeout=30)

    # Run the task
    log.info('test_cleanallruv_clean_restart: run the cleanAllRUV task...')
    try:
        (task_dn, rc) = topology.master1.tasks.cleanAllRUV(
            suffix=DEFAULT_SUFFIX, replicaid='4', args={TASK_WAIT: False})
    except ValueError as e:
        # Bug fix: "e.message('desc')" is invalid - message is not callable
        # and is Python 2 only; use str(e)
        log.fatal('test_cleanallruv_clean_restart: Problem running cleanAllRuv task: ' +
                  str(e))
        assert False

    # Sleep a bit, then stop master 1
    time.sleep(5)
    topology.master1.stop(timeout=30)

    # Now start master 3 & 1, and make sure we didn't crash
    topology.master3.start(timeout=30)
    if topology.master3.detectDisorderlyShutdown():
        log.fatal('test_cleanallruv_clean_restart: Master 3 previously crashed!')
        assert False
    topology.master1.start(timeout=30)
    if topology.master1.detectDisorderlyShutdown():
        log.fatal('test_cleanallruv_clean_restart: Master 1 previously crashed!')
        assert False

    # Wait a little for agmts/cleanallruv to wake up and finish
    if not task_done(topology, task_dn):
        log.fatal('test_cleanallruv_clean_restart: cleanAllRUV task did not finish')
        assert False

    # Check the other masters' RUVs for 'replica 4'
    log.info('test_cleanallruv_clean_restart: check all the masters have been cleaned...')
    clean = check_ruvs("test_cleanallruv_clean_restart", topology)
    if not clean:
        log.fatal('Failed to clean replicas')
        assert False
    log.info('test_cleanallruv_clean_restart PASSED, restoring master 4...')

    #
    # Cleanup - restore master 4
    #
    restore_master4(topology)
def test_cleanallruv_clean_force(topology):
    '''
    Disable a master, remove agreements to that master, and clean the RUVs on
    the remaining replicas.

    Master 3 is deliberately stopped while master 4 takes updates, so it ends
    up behind the others and the clean task must be run with "force".
    '''
    log.info('Running test_cleanallruv_clean_force...')

    # Stop master 3, while we update master 4, so that 3 is behind the other masters
    topology.master3.stop(timeout=10)

    # Add a bunch of updates to master 4
    m4_add_users = AddUsers(topology.master4, 1500)
    m4_add_users.start()
    m4_add_users.join()

    # Disable master 4
    log.info('test_cleanallruv_clean_force: disable master 4...')
    try:
        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
    except Exception as e:
        # Was a bare "except:" (would also swallow SystemExit/KeyboardInterrupt)
        # with an uninformative message; log the actual failure reason.
        log.fatal('test_cleanallruv_clean_force: failed to disable replication: %s' % str(e))
        assert False

    # Start master 3, it should be out of sync with the other replicas...
    topology.master3.start(timeout=30)

    # Remove the agreements from the other masters that point to master 4
    remove_master4_agmts("test_cleanallruv_clean_force", topology)

    # Run the task, use "force" because master 3 is not in sync with the other
    # replicas in regards to the replica 4 RUV
    log.info('test_cleanallruv_clean_force: run the cleanAllRUV task...')
    try:
        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
                                           force=True, args={TASK_WAIT: True})
    except ValueError as e:
        # str(e), not e.message('desc'): "message" is a string, not callable,
        # so the original error path raised TypeError instead of logging.
        log.fatal('test_cleanallruv_clean_force: Problem running cleanAllRuv task: ' + str(e))
        assert False

    # Check the other masters' RUVs for 'replica 4'
    log.info('test_cleanallruv_clean_force: check all the masters have been cleaned...')
    clean = check_ruvs("test_cleanallruv_clean_force", topology)
    if not clean:
        log.fatal('test_cleanallruv_clean_force: Failed to clean replicas')
        assert False

    log.info('test_cleanallruv_clean_force PASSED, restoring master 4...')

    #
    # Cleanup - restore master 4
    #
    restore_master4(topology)
def test_cleanallruv_abort(topology):
    '''
    Test the abort task.

    Disable master 4, stop master 2 so that it can not be cleaned, run the
    clean task, wait a bit, abort the task, and verify the task was aborted.
    '''
    log.info('Running test_cleanallruv_abort...')

    # Disable master 4
    log.info('test_cleanallruv_abort: disable replication on master 4...')
    try:
        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
    except Exception as e:
        # Narrowed from a bare "except:"; include the failure reason.
        log.fatal('test_cleanallruv_abort: failed to disable replication: %s' % str(e))
        assert False

    # Remove the agreements from the other masters that point to master 4
    remove_master4_agmts("test_cleanallruv_abort", topology)

    # Stop master 2
    log.info('test_cleanallruv_abort: stop master 2 to freeze the cleanAllRUV task...')
    topology.master2.stop(timeout=30)

    # Run the task (no wait - master 2 is down, so it can not complete)
    log.info('test_cleanallruv_abort: add the cleanAllRUV task...')
    try:
        (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(
            suffix=DEFAULT_SUFFIX, replicaid='4', args={TASK_WAIT: False})
    except ValueError as e:
        # str(e), not e.message('desc'): "message" is not callable, so the
        # original error path raised TypeError instead of logging.
        log.fatal('test_cleanallruv_abort: Problem running cleanAllRuv task: ' + str(e))
        assert False

    # Wait a bit so the clean task gets going before we abort it
    time.sleep(5)

    # Abort the task
    log.info('test_cleanallruv_abort: abort the cleanAllRUV task...')
    try:
        topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
                                                args={TASK_WAIT: True})
    except ValueError as e:
        log.fatal('test_cleanallruv_abort: Problem running abortCleanAllRuv task: ' + str(e))
        assert False

    # Check master 1 does not have the clean task running
    log.info('test_cleanallruv_abort: check master 1 no longer has a cleanAllRUV task...')
    if not task_done(topology, clean_task_dn):
        log.fatal('test_cleanallruv_abort: CleanAllRUV task was not aborted')
        assert False

    # Start master 2
    log.info('test_cleanallruv_abort: start master 2 to begin the restore process...')
    topology.master2.start(timeout=30)

    #
    # Now run the clean task again so we can properly restore master 4
    #
    log.info('test_cleanallruv_abort: run cleanAllRUV task so we can properly restore master 4...')
    try:
        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
                                           replicaid='4', args={TASK_WAIT: True})
    except ValueError as e:
        log.fatal('test_cleanallruv_abort: Problem running cleanAllRuv task: ' + str(e))
        assert False

    log.info('test_cleanallruv_abort PASSED, restoring master 4...')

    #
    # Cleanup - Restore master 4
    #
    restore_master4(topology)
def test_cleanallruv_abort_restart(topology):
    '''
    Test that the abort task can handle a restart, and then resume.
    '''
    log.info('Running test_cleanallruv_abort_restart...')

    # Disable master 4
    log.info('test_cleanallruv_abort_restart: disable replication on master 4...')
    try:
        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
    except Exception as e:
        # Narrowed from a bare "except:"; include the failure reason.
        log.fatal('test_cleanallruv_abort_restart: failed to disable replication: %s' % str(e))
        assert False

    # Remove the agreements from the other masters that point to master 4
    log.info('test_cleanallruv_abort_restart: remove all the agreements to master 4...')
    remove_master4_agmts("test_cleanallruv_abort_restart", topology)

    # Stop master 3
    log.info('test_cleanallruv_abort_restart: stop master 3 to freeze the cleanAllRUV task...')
    topology.master3.stop()

    # Run the task (no wait - master 3 is down, so it can not complete)
    log.info('test_cleanallruv_abort_restart: add the cleanAllRUV task...')
    try:
        (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(
            suffix=DEFAULT_SUFFIX, replicaid='4', args={TASK_WAIT: False})
    except ValueError as e:
        # str(e), not e.message('desc'): "message" is not callable, so the
        # original error path raised TypeError instead of logging.
        log.fatal('test_cleanallruv_abort_restart: Problem running cleanAllRuv task: ' + str(e))
        assert False

    # Wait a bit so the clean task gets going before we abort it
    time.sleep(5)

    # Abort the task with "certify" so the abort must reach every replica
    log.info('test_cleanallruv_abort_restart: abort the cleanAllRUV task...')
    try:
        topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
                                                certify=True, args={TASK_WAIT: False})
    except ValueError as e:
        log.fatal('test_cleanallruv_abort_restart: Problem running abortCleanAllRuv task: ' + str(e))
        assert False

    # Allow the abort task to run for a bit
    time.sleep(5)

    # Check master 1 does not have the clean task running
    log.info('test_cleanallruv_abort_restart: check master 1 no longer has a cleanAllRUV task...')
    if not task_done(topology, clean_task_dn):
        log.fatal('test_cleanallruv_abort_restart: CleanAllRUV task was not aborted')
        assert False

    # Now restart master 1, and make sure the abort process completes
    topology.master1.restart()
    if topology.master1.detectDisorderlyShutdown():
        log.fatal('test_cleanallruv_abort_restart: Master 1 previously crashed!')
        assert False

    # Start master 3
    topology.master3.start()

    # Check master 1 tried to run the abort task. We expect the abort task to be aborted.
    if not topology.master1.searchErrorsLog('Aborting abort task'):
        log.fatal('test_cleanallruv_abort_restart: Abort task did not restart')
        assert False

    #
    # Now run the clean task again so we can properly restore master 4
    #
    log.info('test_cleanallruv_abort_restart: run cleanAllRUV task so we can properly restore master 4...')
    try:
        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
                                           replicaid='4', args={TASK_WAIT: True})
    except ValueError as e:
        log.fatal('test_cleanallruv_abort_restart: Problem running cleanAllRuv task: ' + str(e))
        assert False

    log.info('test_cleanallruv_abort_restart PASSED, restoring master 4...')

    #
    # Cleanup - Restore master 4
    #
    restore_master4(topology)
def test_cleanallruv_abort_certify(topology):
    '''
    Test the abort task with "certify" (abort must be acknowledged by every
    replica).

    Disable master 4, stop master 2 so that it can not be cleaned, run the
    clean task, wait a bit, abort the task, verify the abort only completes
    once master 2 is back up.
    '''
    log.info('Running test_cleanallruv_abort_certify...')

    # Disable master 4
    log.info('test_cleanallruv_abort_certify: disable replication on master 4...')
    try:
        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
    except Exception as e:
        # Narrowed from a bare "except:"; include the failure reason.
        log.fatal('test_cleanallruv_abort_certify: failed to disable replication: %s' % str(e))
        assert False

    # Remove the agreements from the other masters that point to master 4
    remove_master4_agmts("test_cleanallruv_abort_certify", topology)

    # Stop master 2
    log.info('test_cleanallruv_abort_certify: stop master 2 to freeze the cleanAllRUV task...')
    topology.master2.stop()

    # Run the task (no wait - master 2 is down, so it can not complete)
    log.info('test_cleanallruv_abort_certify: add the cleanAllRUV task...')
    try:
        (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(
            suffix=DEFAULT_SUFFIX, replicaid='4', args={TASK_WAIT: False})
    except ValueError as e:
        # str(e), not e.message('desc'): "message" is not callable, so the
        # original error path raised TypeError instead of logging.
        log.fatal('test_cleanallruv_abort_certify: Problem running cleanAllRuv task: ' + str(e))
        assert False

    # Allow the clean task to get started...
    time.sleep(5)

    # Abort the task
    log.info('test_cleanallruv_abort_certify: abort the cleanAllRUV task...')
    try:
        (abort_task_dn, rc) = topology.master1.tasks.abortCleanAllRUV(
            suffix=DEFAULT_SUFFIX, replicaid='4', certify=True, args={TASK_WAIT: False})
    except ValueError as e:
        log.fatal('test_cleanallruv_abort_certify: Problem running abortCleanAllRuv task: ' + str(e))
        assert False

    # Wait a while and make sure the abort task is still running, since with
    # "certify" it can not finish until master 2 acknowledges it.
    log.info('test_cleanallruv_abort_certify: sleep for 5 seconds')
    time.sleep(5)
    if task_done(topology, abort_task_dn, 60):
        log.fatal('test_cleanallruv_abort_certify: abort task incorrectly finished')
        assert False

    # Now start master 2 so the abort can complete
    log.info('test_cleanallruv_abort_certify: start master 2 to allow the abort task to finish...')
    topology.master2.start()

    # Wait for the abort task to stop
    if not task_done(topology, abort_task_dn, 60):
        log.fatal('test_cleanallruv_abort_certify: The abort CleanAllRUV task was not aborted')
        assert False

    # Check master 1 does not have the clean task running
    log.info('test_cleanallruv_abort_certify: check master 1 no longer has a cleanAllRUV task...')
    if not task_done(topology, clean_task_dn):
        log.fatal('test_cleanallruv_abort_certify: CleanAllRUV task was not aborted')
        assert False

    # Start master 2
    # NOTE(review): master 2 was already started above; this second start looks
    # redundant but is kept to preserve the original flow - confirm it is a no-op.
    log.info('test_cleanallruv_abort_certify: start master 2 to begin the restore process...')
    topology.master2.start()

    #
    # Now run the clean task again so we can properly restore master 4
    #
    log.info('test_cleanallruv_abort_certify: run cleanAllRUV task so we can properly restore master 4...')
    try:
        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
                                           replicaid='4', args={TASK_WAIT: True})
    except ValueError as e:
        log.fatal('test_cleanallruv_abort_certify: Problem running cleanAllRuv task: ' + str(e))
        assert False

    log.info('test_cleanallruv_abort_certify PASSED, restoring master 4...')

    #
    # Cleanup - Restore master 4
    #
    restore_master4(topology)
def test_cleanallruv_stress_clean(topology):
    '''
    Put each server (m1 - m4) under stress, and perform the entire clean
    process while updates are flowing.
    '''
    log.info('Running test_cleanallruv_stress_clean...')
    log.info('test_cleanallruv_stress_clean: put all the masters under load...')

    # Put all the masters under load
    m1_add_users = AddUsers(topology.master1, 2000)
    m1_add_users.start()
    m2_add_users = AddUsers(topology.master2, 2000)
    m2_add_users.start()
    m3_add_users = AddUsers(topology.master3, 2000)
    m3_add_users.start()
    m4_add_users = AddUsers(topology.master4, 2000)
    m4_add_users.start()

    # Allow some time to get replication flowing in all directions
    log.info('test_cleanallruv_stress_clean: allow some time for replication to get flowing...')
    time.sleep(5)

    # Put master 4 into read only mode
    log.info('test_cleanallruv_stress_clean: put master 4 into read-only mode...')
    try:
        topology.master4.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'on')])
    except ldap.LDAPError as e:
        log.fatal('test_cleanallruv_stress_clean: Failed to put master 4 into read-only mode: error ' +
                  str(e))
        assert False

    # We need to wait for master 4 to push its changes out
    log.info('test_cleanallruv_stress_clean: allow some time for master 4 to push changes out (60 seconds)...')
    time.sleep(60)

    # Disable master 4
    log.info('test_cleanallruv_stress_clean: disable replication on master 4...')
    try:
        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
    except Exception as e:
        # Narrowed from a bare "except:"; also fixes the "diable" typo and
        # logs the failure reason.
        log.fatal('test_cleanallruv_stress_clean: failed to disable replication: %s' % str(e))
        assert False

    # Remove the agreements from the other masters that point to master 4
    remove_master4_agmts("test_cleanallruv_stress_clean", topology)

    # Run the task
    log.info('test_cleanallruv_stress_clean: Run the cleanAllRUV task...')
    try:
        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
                                           args={TASK_WAIT: True})
    except ValueError as e:
        # str(e), not e.message('desc'): "message" is not callable, so the
        # original error path raised TypeError instead of logging.
        log.fatal('test_cleanallruv_stress_clean: Problem running cleanAllRuv task: ' + str(e))
        assert False

    # Wait for the updates to finish
    log.info('test_cleanallruv_stress_clean: wait for all the updates to finish...')
    m1_add_users.join()
    m2_add_users.join()
    m3_add_users.join()
    m4_add_users.join()

    # Check the other masters' RUVs for 'replica 4'
    log.info('test_cleanallruv_stress_clean: check if all the replicas have been cleaned...')
    clean = check_ruvs("test_cleanallruv_stress_clean", topology)
    if not clean:
        log.fatal('test_cleanallruv_stress_clean: Failed to clean replicas')
        assert False

    log.info('test_cleanallruv_stress_clean: PASSED, restoring master 4...')

    #
    # Cleanup - restore master 4
    #
    # Sleep for a bit to let replication complete
    log.info("Sleep for 120 seconds to allow replication to complete...")
    time.sleep(120)

    # Turn off readonly mode
    try:
        topology.master4.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'off')])
    except ldap.LDAPError as e:
        # Original message wrongly said "into read-only mode" here.
        log.fatal('test_cleanallruv_stress_clean: Failed to take master 4 out of read-only mode: error ' +
                  str(e))
        assert False

    restore_master4(topology)
if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # Pass args as a list: the string form of pytest.main() was deprecated
    # and removed in modern pytest; the list form works on all versions.
    pytest.main(["-s", CURRENT_FILE])
|