@@ -8,11 +8,7 @@ import sys
 import time
 import ldap
 import logging
-import socket
-import time
-import logging
 import pytest
-import re
 import tarfile
 import stat
 import shutil
@@ -21,8 +17,7 @@ from lib389 import DirSrv, Entry, tools
 from lib389.tools import DirSrvTools
 from lib389._constants import *
 from lib389.properties import *
-from lib389._constants import *
-from constants import *
+
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
@@ -48,7 +43,7 @@ BIND_PW = 'password'
 ENTRY_NAME = 'test_entry'
 ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
 ENTRY_OC = "top person %s" % OC_NAME
-
+
 def _oc_definition(oid_ext, name, must=None, may=None):
     oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
     desc = 'To test ticket 47490'
@@ -57,14 +52,14 @@ def _oc_definition(oid_ext, name, must=None, may=None):
         must = MUST
     if not may:
         may = MAY
-
+
     new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
     return new_oc
 
 class TopologyMaster1Master2(object):
     def __init__(self, master1, master2):
         master1.open()
         self.master1 = master1
-
+
         master2.open()
         self.master2 = master2
@@ -74,50 +69,29 @@ def topology(request):
     '''
         This fixture is used to create a replicated topology for the 'module'.
         The replicated topology is MASTER1 <-> Master2.
-        At the beginning, It may exists a master2 instance and/or a master2 instance.
-        It may also exists a backup for the master1 and/or the master2.
-
-        Principle:
-            If master1 instance exists:
-                restart it
-            If master2 instance exists:
-                restart it
-            If backup of master1 AND backup of master2 exists:
-                create or rebind to master1
-                create or rebind to master2
-
-                restore master1 from backup
-                restore master2 from backup
-            else:
-                Cleanup everything
-                    remove instances
-                    remove backups
-                Create instances
-                Initialize replication
-                Create backups
     '''
     global installation1_prefix
     global installation2_prefix
 
     #os.environ['USE_VALGRIND'] = '1'
-
+
     # allocate master1 on a given deployement
-    master1 = DirSrv(verbose=False)
+    master1 = DirSrv(verbose=False)
     if installation1_prefix:
         args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
+
     # Args for the master1 instance
     args_instance[SER_HOST] = HOST_MASTER_1
     args_instance[SER_PORT] = PORT_MASTER_1
     args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
     args_master = args_instance.copy()
     master1.allocate(args_master)
-
+
     # allocate master1 on a given deployement
     master2 = DirSrv(verbose=False)
     if installation2_prefix:
         args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
+
     # Args for the consumer instance
     args_instance[SER_HOST] = HOST_MASTER_2
     args_instance[SER_PORT] = PORT_MASTER_2
@@ -125,162 +99,104 @@ def topology(request):
     args_master = args_instance.copy()
     master2.allocate(args_master)
 
-
-    # Get the status of the backups
-    backup_master1 = master1.checkBackupFS()
-    backup_master2 = master2.checkBackupFS()
-
     # Get the status of the instance and restart it if it exists
-    instance_master1 = master1.exists()
-    if instance_master1:
-        master1.stop(timeout=10)
-        master1.start(timeout=10)
-
+    instance_master1 = master1.exists()
     instance_master2 = master2.exists()
+
+    # Remove all the instances
+    if instance_master1:
+        master1.delete()
     if instance_master2:
-        master2.stop(timeout=10)
-        master2.start(timeout=10)
-
-    if backup_master1 and backup_master2:
-        # The backups exist, assuming they are correct
-        # we just re-init the instances with them
-        if not instance_master1:
-            master1.create()
-            # Used to retrieve configuration information (dbdir, confdir...)
-            master1.open()
-
-        if not instance_master2:
-            master2.create()
-            # Used to retrieve configuration information (dbdir, confdir...)
-            master2.open()
-
-        # restore master1 from backup
-        master1.stop(timeout=10)
-        master1.restoreFS(backup_master1)
-        master1.start(timeout=10)
-
-        # restore master2 from backup
-        master2.stop(timeout=10)
-        master2.restoreFS(backup_master2)
-        master2.start(timeout=10)
-    else:
-        # We should be here only in two conditions
-        #      - This is the first time a test involve master-consumer
-        #        so we need to create everything
-        #      - Something weird happened (instance/backup destroyed)
-        #        so we discard everything and recreate all
-
-        # Remove all the backups. So even if we have a specific backup file
-        # (e.g backup_master) we clear all backups that an instance my have created
-        if backup_master1:
-            master1.clearBackupFS()
-        if backup_master2:
-            master2.clearBackupFS()
-
-        # Remove all the instances
-        if instance_master1:
-            master1.delete()
-        if instance_master2:
-            master2.delete()
-
-        # Create the instances
-        master1.create()
-        master1.open()
-        master2.create()
-        master2.open()
-
-        #
-        # Now prepare the Master-Consumer topology
-        #
-        # First Enable replication
-        master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-        master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-        # Initialize the supplier->consumer
-
-        properties = {RA_NAME: r'meTo_$host:$port',
-                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
-                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
-                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
-                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-        repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
-        if not repl_agreement:
-            log.fatal("Fail to create a replica agreement")
-            sys.exit(1)
-
-        log.debug("%s created" % repl_agreement)
-
-        properties = {RA_NAME: r'meTo_$host:$port',
-                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
-                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
-                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
-                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-        master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
-        master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-        master1.waitForReplInit(repl_agreement)
-
-        # Check replication is working fine
-        master1.add_s(Entry((TEST_REPL_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn': 'test_repl',
-                                            'cn': 'test_repl'})))
-        loop = 0
-        while loop <= 10:
-            try:
-                ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
-                break
-            except ldap.NO_SUCH_OBJECT:
-                time.sleep(1)
-                loop += 1
-
-        # Time to create the backups
-        master1.stop(timeout=10)
-        master1.backupfile = master1.backupFS()
-        master1.start(timeout=10)
-
-        master2.stop(timeout=10)
-        master2.backupfile = master2.backupFS()
-        master2.start(timeout=10)
-
-    #
+        master2.delete()
+
+    # Create the instances
+    master1.create()
+    master1.open()
+    master2.create()
+    master2.open()
+
+    #
+    # Now prepare the Master-Consumer topology
+    #
+    # First Enable replication
+    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+    # Initialize the supplier->consumer
+
+    properties = {RA_NAME: r'meTo_$host:$port',
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+
+    if not repl_agreement:
+        log.fatal("Fail to create a replica agreement")
+        sys.exit(1)
+
+    log.debug("%s created" % repl_agreement)
+
+    properties = {RA_NAME: r'meTo_$host:$port',
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+
+    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+    master1.waitForReplInit(repl_agreement)
+
+    # Check replication is working fine
+    master1.add_s(Entry((TEST_REPL_DN, {
+                                        'objectclass': "top person".split(),
+                                        'sn': 'test_repl',
+                                        'cn': 'test_repl'})))
+    loop = 0
+    ent = None
+    while loop <= 10:
+        try:
+            ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+            break
+        except ldap.NO_SUCH_OBJECT:
+            time.sleep(1)
+            loop += 1
+    if ent is None:
+        assert False
+
     # Here we have two instances master and consumer
-    # with replication working. Either coming from a backup recovery
-    # or from a fresh (re)init
-    # Time to return the topology
     return TopologyMaster1Master2(master1, master2)
 
+
 def _header(topology, label):
     topology.master1.log.info("\n\n###############################################")
     topology.master1.log.info("#######")
     topology.master1.log.info("####### %s" % label)
     topology.master1.log.info("#######")
     topology.master1.log.info("###################################################")
-
+
+
 def _install_schema(server, tarFile):
     server.stop(timeout=10)
-
-    here = os.getcwd()
-
+
     tmpSchema = '/tmp/schema_47988'
     if not os.path.isdir(tmpSchema):
         os.mkdir(tmpSchema)
-
+
     for the_file in os.listdir(tmpSchema):
         file_path = os.path.join(tmpSchema, the_file)
         if os.path.isfile(file_path):
             os.unlink(file_path)
-
+
     os.chdir(tmpSchema)
     tar = tarfile.open(tarFile, 'r:gz')
     for member in tar.getmembers():
         tar.extract(member.name)
 
     tar.close()
-
+
     st = os.stat(server.schemadir)
-    os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR )
+    os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR)
     for the_file in os.listdir(tmpSchema):
         schemaFile = os.path.join(server.schemadir, the_file)
         if os.path.isfile(schemaFile):
@@ -289,13 +205,13 @@ def _install_schema(server, tarFile):
             os.chmod(schemaFile, stat.S_IWUSR | stat.S_IRUSR)
             server.log.info("replace %s" % schemaFile)
             shutil.copy(the_file, schemaFile)
-
+
         else:
             server.log.info("add %s" % schemaFile)
             shutil.copy(the_file, schemaFile)
             os.chmod(schemaFile, stat.S_IRUSR | stat.S_IRGRP)
     os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP)
-
+
 
 def test_ticket47988_init(topology):
     """
@@ -303,20 +219,20 @@ def test_ticket47988_init(topology):
            - Objectclass with MAY 'member'
            - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation
         It deletes the anonymous aci
-
+
     """
-
+
     _header(topology, 'test_ticket47988_init')
-
+
     # enable acl error logging
-    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL
+    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))]  # REPL
     topology.master1.modify_s(DN_CONFIG, mod)
     topology.master2.modify_s(DN_CONFIG, mod)
 
-    mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260))] # Internal op
+    mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260))]  # Internal op
     topology.master1.modify_s(DN_CONFIG, mod)
     topology.master2.modify_s(DN_CONFIG, mod)
-
+
     # add dummy entries
     for cpt in range(MAX_OTHERS):
         name = "%s%d" % (OTHER_NAME, cpt)
@@ -324,8 +240,8 @@ def test_ticket47988_init(topology):
                                       'objectclass': "top person".split(),
                                       'sn': name,
                                       'cn': name})))
-
-    # check that entry 0 is replicated before
+
+    # check that entry 0 is replicated before
     loop = 0
     entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
     while loop <= 10:
@@ -336,10 +252,10 @@ def test_ticket47988_init(topology):
             time.sleep(1)
             loop += 1
     assert (loop <= 10)
-
+
     topology.master1.stop(timeout=10)
     topology.master2.stop(timeout=10)
-
+
     #install the specific schema M1: ipa3.3, M2: ipa4.1
     schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz")
     _install_schema(topology.master1, schema_file)
@@ -349,17 +265,19 @@ def test_ticket47988_init(topology):
     topology.master1.start(timeout=10)
     topology.master2.start(timeout=10)
 
+
 def _do_update_schema(server, range=3999):
     '''
     Update the schema of the M2 (IPA4.1). to generate a nsSchemaCSN
     '''
-    postfix = str(randint(range, range+1000))
+    postfix = str(randint(range, range + 1000))
     OID = '2.16.840.1.113730.3.8.12.%s' % postfix
     NAME = 'thierry%s' % postfix
     value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (OID, NAME)
     mod = [(ldap.MOD_ADD, 'objectclasses', value)]
     server.modify_s('cn=schema', mod)
 
+
 def _do_update_entry(supplier=None, consumer=None, attempts=10):
     '''
     This is doing an update on M2 (IPA4.1) and checks the update has been
@@ -368,10 +286,10 @@ def _do_update_entry(supplier=None, consumer=None, attempts=10):
     assert(supplier)
     assert(consumer)
     entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
-    value = str(randint(100,200))
+    value = str(randint(100, 200))
     mod = [(ldap.MOD_REPLACE, 'telephonenumber', value)]
     supplier.modify_s(entryDN, mod)
-
+
     loop = 0
     while loop <= attempts:
         ent = consumer.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
@@ -383,7 +301,8 @@ def _do_update_entry(supplier=None, consumer=None, attempts=10):
             loop += 1
     supplier.log.debug("test_do_update: receive %s (expected %s)" % (read_val, value))
     assert (loop <= attempts)
-
+
+
 def _pause_M2_to_M1(topology):
     topology.master1.log.info("\n\n######################### Pause RA M2->M1 ######################\n")
     ents = topology.master2.agreement.list(suffix=SUFFIX)
@@ -397,6 +316,7 @@ def _resume_M1_to_M2(topology):
     assert len(ents) == 1
     topology.master1.agreement.resume(ents[0].dn)
 
+
 def _pause_M1_to_M2(topology):
     topology.master1.log.info("\n\n######################### Pause RA M1->M2 ######################\n")
     ents = topology.master1.agreement.list(suffix=SUFFIX)
@@ -409,31 +329,33 @@ def _resume_M2_to_M1(topology):
     ents = topology.master2.agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
     topology.master2.agreement.resume(ents[0].dn)
-
+
+
 def test_ticket47988_1(topology):
     '''
     Check that replication is working and pause replication M2->M1
     '''
     _header(topology, 'test_ticket47988_1')
-
+
     topology.master1.log.debug("\n\nCheck that replication is working and pause replication M2->M1\n")
     _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
     _pause_M2_to_M1(topology)
-
+
+
 def test_ticket47988_2(topology):
     '''
     Update M1 schema and trigger update M1->M2
     So M1 should learn new/extended definitions that are in M2 schema
     '''
     _header(topology, 'test_ticket47988_2')
-
+
     topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n")
     master1_schema_csn = topology.master1.schema.get_schema_csn()
     master2_schema_csn = topology.master2.schema.get_schema_csn()
     topology.master1.log.debug("\nBefore updating the schema on M1\n")
     topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
     topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
-
+
     # Here M1 should no, should check M2 schema and learn
     _do_update_schema(topology.master1)
     master1_schema_csn = topology.master1.schema.get_schema_csn()
@@ -442,11 +364,11 @@ def test_ticket47988_2(topology):
     topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
     topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
     assert (master1_schema_csn)
-
+
     # to avoid linger effect where a replication session is reused without checking the schema
     _pause_M1_to_M2(topology)
     _resume_M1_to_M2(topology)
-
+
     #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
     #time.sleep(60)
     _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=15)
@@ -458,23 +380,25 @@ def test_ticket47988_2(topology):
     assert (master1_schema_csn)
     assert (master2_schema_csn)
 
+
 def test_ticket47988_3(topology):
     '''
     Resume replication M2->M1 and check replication is still working
     '''
     _header(topology, 'test_ticket47988_3')
-
+
     _resume_M2_to_M1(topology)
     _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5)
     _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
 
+
 def test_ticket47988_4(topology):
     '''
     Check schemaCSN is identical on both server
     And save the nsschemaCSN to later check they do not change unexpectedly
     '''
     _header(topology, 'test_ticket47988_4')
-
+
     master1_schema_csn = topology.master1.schema.get_schema_csn()
     master2_schema_csn = topology.master2.schema.get_schema_csn()
     topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
@@ -482,16 +406,17 @@ def test_ticket47988_4(topology):
     assert (master1_schema_csn)
     assert (master2_schema_csn)
     assert (master1_schema_csn == master2_schema_csn)
-
+
     topology.master1.saved_schema_csn = master1_schema_csn
     topology.master2.saved_schema_csn = master2_schema_csn
-
+
+
 def test_ticket47988_5(topology):
     '''
     Check schemaCSN do not change unexpectedly
     '''
     _header(topology, 'test_ticket47988_5')
-
+
     _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5)
     _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
     master1_schema_csn = topology.master1.schema.get_schema_csn()
@@ -501,25 +426,26 @@ def test_ticket47988_5(topology):
     assert (master1_schema_csn)
     assert (master2_schema_csn)
     assert (master1_schema_csn == master2_schema_csn)
-
+
     assert (topology.master1.saved_schema_csn == master1_schema_csn)
     assert (topology.master2.saved_schema_csn == master2_schema_csn)
-
+
+
 def test_ticket47988_6(topology):
     '''
     Update M1 schema and trigger update M2->M1
     So M2 should learn new/extended definitions that are in M1 schema
     '''
-
+
     _header(topology, 'test_ticket47988_6')
-
+
     topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n")
     master1_schema_csn = topology.master1.schema.get_schema_csn()
     master2_schema_csn = topology.master2.schema.get_schema_csn()
     topology.master1.log.debug("\nBefore updating the schema on M1\n")
     topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
     topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
-
+
     # Here M1 should no, should check M2 schema and learn
     _do_update_schema(topology.master1, range=5999)
     master1_schema_csn = topology.master1.schema.get_schema_csn()
@@ -528,11 +454,11 @@ def test_ticket47988_6(topology):
     topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
     topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
     assert (master1_schema_csn)
-
+
     # to avoid linger effect where a replication session is reused without checking the schema
     _pause_M1_to_M2(topology)
     _resume_M1_to_M2(topology)
-
+
     #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
     #time.sleep(60)
     _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=15)
@@ -543,15 +469,18 @@ def test_ticket47988_6(topology):
     topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
     assert (master1_schema_csn)
     assert (master2_schema_csn)
-
+
+
 def test_ticket47988_final(topology):
-    topology.master1.delete()
-    topology.master2.delete()
+    topology.master1.delete()
+    topology.master2.delete()
+    log.info('Testcase PASSED')
+
 
 def run_isolated():
     '''
         run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
-        To run isolated without py.test, you need to
+        To run isolated without py.test, you need to
            - edit this file and comment '@pytest.fixture' line before 'topology' function.
            - set the installation prefix
            - run this program
@@ -560,7 +489,7 @@ def run_isolated():
     global installation2_prefix
     installation1_prefix = None
     installation2_prefix = None
-
+
     topo = topology(True)
     test_ticket47988_init(topo)
     test_ticket47988_1(topo)