ticket47787_test.py

'''
Created on April 14, 2014

@author: tbordaz
'''
import os
import sys
import time
import ldap
import logging
import socket
import pytest
import re
from lib389 import DirSrv, Entry, tools, NoSuchEntryError
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from constants import *
from lib389._constants import REPLICAROLE_MASTER

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

#
# important part. We can deploy Master1 and Master2 on different versions
#
installation1_prefix = None
installation2_prefix = None

# set this flag to False so that _status_entry_both_server will assert on failure
DEBUG_FLAG = False

TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX

STAGING_CN = "staged user"
PRODUCTION_CN = "accounts"
EXCEPT_CN = "excepts"

STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX)
PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN)

STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX)
BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX)

BIND_CN = "bind_entry"
BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX)
BIND_PW = "password"

NEW_ACCOUNT = "new_account"
MAX_ACCOUNTS = 20

CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci"
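
# The tests below build this layout under SUFFIX: a staging container
# (STAGING_DN), a production container (PRODUCTION_DN), and a dedicated
# bind entry (BIND_DN) used for non-Directory-Manager binds.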


class TopologyMaster1Master2(object):
    def __init__(self, master1, master2):
        master1.open()
        self.master1 = master1

        master2.open()
        self.master2 = master2


@pytest.fixture(scope="module")
def topology(request):
    '''
    This fixture is used to create a replicated topology for the 'module'.
    The replicated topology is MASTER1 <-> MASTER2.

    At the beginning, a master1 and/or a master2 instance may already exist.
    There may also be a backup for master1 and/or master2.

    Principle:
        If the master1 instance exists:
            restart it
        If the master2 instance exists:
            restart it
        If a backup of master1 AND a backup of master2 exist:
            create or rebind to master1
            create or rebind to master2

            restore master1 from backup
            restore master2 from backup
        else:
            Cleanup everything
                remove instances
                remove backups
            Create instances
            Initialize replication
            Create backups
    '''
    global installation1_prefix
    global installation2_prefix

    # allocate master1 on a given deployment
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Args for the master1 instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master1.allocate(args_master)

    # allocate master2 on a given deployment
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix

    # Args for the master2 instance
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_master = args_instance.copy()
    master2.allocate(args_master)
    # Get the status of the backups
    backup_master1 = master1.checkBackupFS()
    backup_master2 = master2.checkBackupFS()

    # Get the status of the instance and restart it if it exists
    instance_master1 = master1.exists()
    if instance_master1:
        master1.stop(timeout=10)
        master1.start(timeout=10)

    instance_master2 = master2.exists()
    if instance_master2:
        master2.stop(timeout=10)
        master2.start(timeout=10)

    if backup_master1 and backup_master2:
        # The backups exist, assuming they are correct
        # we just re-init the instances with them
        if not instance_master1:
            master1.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master1.open()

        if not instance_master2:
            master2.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master2.open()

        # restore master1 from backup
        master1.stop(timeout=10)
        master1.restoreFS(backup_master1)
        master1.start(timeout=10)

        # restore master2 from backup
        master2.stop(timeout=10)
        master2.restoreFS(backup_master2)
        master2.start(timeout=10)
    else:
        # We should be here only in two conditions
        #  - This is the first time a test involves master-consumer
        #    so we need to create everything
        #  - Something weird happened (instance/backup destroyed)
        #    so we discard everything and recreate all

        # Remove all the backups. So even if we have a specific backup file
        # (e.g. backup_master) we clear all backups that an instance may have created
        if backup_master1:
            master1.clearBackupFS()
        if backup_master2:
            master2.clearBackupFS()

        # Remove all the instances
        if instance_master1:
            master1.delete()
        if instance_master2:
            master2.delete()

        # Create the instances
        master1.create()
        master1.open()
        master2.create()
        master2.open()
        #
        # Now prepare the Master-Consumer topology
        #
        # First Enable replication
        master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
        master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

        # Initialize the supplier->consumer
        properties = {RA_NAME:           r'meTo_$host:$port',
                      RA_BINDDN:         defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW:         defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD:         defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)

        if not repl_agreement:
            log.fatal("Failed to create a replica agreement")
            sys.exit(1)
        log.debug("%s created" % repl_agreement)

        properties = {RA_NAME:           r'meTo_$host:$port',
                      RA_BINDDN:         defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW:         defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD:         defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)

        master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
        master1.waitForReplInit(repl_agreement)
        # Check replication is working fine
        master1.add_s(Entry((TEST_REPL_DN, {
            'objectclass': "top person".split(),
            'sn': 'test_repl',
            'cn': 'test_repl'})))
        loop = 0
        while loop <= 10:
            try:
                ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
                break
            except ldap.NO_SUCH_OBJECT:
                time.sleep(1)
                loop += 1
        assert loop <= 10  # fail the fixture if replication never converged

        # Time to create the backups
        master1.stop(timeout=10)
        master1.backupfile = master1.backupFS()
        master1.start(timeout=10)

        master2.stop(timeout=10)
        master2.backupfile = master2.backupFS()
        master2.start(timeout=10)

    # clear the tmp directory
    master1.clearTmpDir(__file__)

    #
    # Here we have two master instances with replication working,
    # either coming from a backup recovery or from a fresh (re)init.
    # Time to return the topology
    return TopologyMaster1Master2(master1, master2)


def _bind_manager(server):
    server.log.info("Bind as %s " % DN_DM)
    server.simple_bind_s(DN_DM, PASSWORD)


def _bind_normal(server):
    server.log.info("Bind as %s " % BIND_DN)
    server.simple_bind_s(BIND_DN, BIND_PW)
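

# Note: these bind helpers re-authenticate an already-open connection
# (simple_bind_s rebinds in place), so they switch the identity used by
# every subsequent operation on that server object.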


def _header(topology, label):
    topology.master1.log.info("\n\n###############################################")
    topology.master1.log.info("#######")
    topology.master1.log.info("####### %s" % label)
    topology.master1.log.info("#######")
    topology.master1.log.info("###############################################")


def _status_entry_both_server(topology, name=None, desc=None, debug=True):
    if not name:
        return

    topology.master1.log.info("\n\n######################### Tombstone on M1 ######################\n")
    attr = 'description'
    found = False
    attempt = 0
    while not found and attempt < 10:
        ent_m1 = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
        if attr in ent_m1.getAttrs():
            found = True
        else:
            time.sleep(1)
            attempt = attempt + 1
    assert ent_m1

    topology.master1.log.info("\n\n######################### Tombstone on M2 ######################\n")
    ent_m2 = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
    assert ent_m2

    topology.master1.log.info("\n\n######################### Description ######################\n%s\n" % desc)

    topology.master1.log.info("M1 only\n")
    for attr in ent_m1.getAttrs():
        if not debug:
            assert attr in ent_m2.getAttrs()
        if attr not in ent_m2.getAttrs():
            topology.master1.log.info("    %s" % attr)
            for val in ent_m1.getValues(attr):
                topology.master1.log.info("        %s" % val)

    topology.master1.log.info("M2 only\n")
    for attr in ent_m2.getAttrs():
        if not debug:
            assert attr in ent_m1.getAttrs()
        if attr not in ent_m1.getAttrs():
            topology.master1.log.info("    %s" % attr)
            for val in ent_m2.getValues(attr):
                topology.master1.log.info("        %s" % val)

    topology.master1.log.info("M1 differs from M2\n")
    if not debug:
        assert ent_m1.dn == ent_m2.dn
    if ent_m1.dn != ent_m2.dn:
        topology.master1.log.info("    M1[dn] = %s\n    M2[dn] = %s" % (ent_m1.dn, ent_m2.dn))

    for attr1 in ent_m1.getAttrs():
        if attr1 in ent_m2.getAttrs():
            for val1 in ent_m1.getValues(attr1):
                found = False
                for val2 in ent_m2.getValues(attr1):
                    if val1 == val2:
                        found = True
                        break
                if not debug:
                    assert found
                if not found:
                    topology.master1.log.info("    M1[%s] = %s" % (attr1, val1))

    for attr2 in ent_m2.getAttrs():
        if attr2 in ent_m1.getAttrs():
            for val2 in ent_m2.getValues(attr2):
                found = False
                for val1 in ent_m1.getValues(attr2):
                    if val2 == val1:
                        found = True
                        break
                if not debug:
                    assert found
                if not found:
                    topology.master1.log.info("    M2[%s] = %s" % (attr2, val2))


def _pause_RAs(topology):
    topology.master1.log.info("\n\n######################### Pause RA M1<->M2 ######################\n")
    ents = topology.master1.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master1.agreement.pause(ents[0].dn)

    ents = topology.master2.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master2.agreement.pause(ents[0].dn)


def _resume_RAs(topology):
    topology.master1.log.info("\n\n######################### resume RA M1<->M2 ######################\n")
    ents = topology.master1.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master1.agreement.resume(ents[0].dn)

    ents = topology.master2.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master2.agreement.resume(ents[0].dn)


def _find_tombstone(instance, base, attr, value):
    #
    # we can not use a filter like (&(objectclass=nsTombstone)(sn=name)) because
    # tombstones are not indexed on 'sn', so 'sn=name' would return NULL,
    # and even if tombstones were indexed for objectclass the '&' would set
    # the candidate list to NULL
    #
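    # For example, a search such as the following (hypothetical name) would
    # come back empty even when the tombstone exists:
    #   (&(objectclass=nsTombstone)(sn=new_account1))
    # so we fetch all tombstones and match the attribute client-side.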
    filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE
    ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt)
    for ent in ents:
        if ent.hasAttr(attr):
            for val in ent.getValues(attr):
                if val == value:
                    instance.log.debug("tombstone found: %r" % ent)
                    return ent
    return None


def _delete_entry(instance, entry_dn, name):
    instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name)

    # delete the entry
    instance.delete_s(entry_dn)
    assert _find_tombstone(instance, SUFFIX, 'sn', name) is not None
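    # In a replicated 389-DS suffix a DELETE does not discard the entry:
    # it is turned into an nsTombstone entry (kept for replication conflict
    # resolution), which is why the tombstone lookup above must succeed.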


def _mod_entry(instance, entry_dn, attr, value):
    instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn)
    mod = [(ldap.MOD_REPLACE, attr, value)]
    instance.modify_s(entry_dn, mod)


def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
    assert instance is not None
    assert entry_dn is not None

    if not new_rdn:
        # non-greedy group so we capture only the RDN value, not the
        # remainder of the DN up to the last comma
        pattern = 'cn=(.*?),(.*)'
        rdnre = re.compile(pattern)
        match = rdnre.match(entry_dn)
        old_value = match.group(1)
        new_rdn_val = "%s_modrdn" % old_value
        new_rdn = "cn=%s" % new_rdn_val

    instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn)
    if new_superior:
        instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
    else:
        instance.rename_s(entry_dn, new_rdn, delold=del_old)
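

# A minimal usage sketch for the helper above (the DN is hypothetical):
#   _modrdn_entry(master2, entry_dn="cn=new_account1,%s" % STAGING_DN)
# renames the entry to "cn=new_account1_modrdn" under the same superior,
# keeping the old RDN value (del_old=0).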


def _check_entry_exists(instance, entry_dn):
    loop = 0
    while loop <= 10:
        try:
            ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)")
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
            loop += 1
    assert loop <= 10


def _check_mod_received(instance, base, filt, attr, value):
    instance.log.info("\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid)
    loop = 0
    while loop <= 10:
        ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt)
        if ent.hasAttr(attr) and ent.getValue(attr) == value:
            break
        time.sleep(1)
        loop += 1
    assert loop <= 10


def _check_replication(topology, entry_dn):
    # prepare the filter to retrieve the entry
    filt = entry_dn.split(',')[0]

    topology.master1.log.info("\n######################### Check replication M1->M2 ######################\n")
    loop = 0
    while loop <= 10:
        attr = 'description'
        value = 'test_value_%d' % loop
        mod = [(ldap.MOD_REPLACE, attr, value)]
        topology.master1.modify_s(entry_dn, mod)
        _check_mod_received(topology.master2, SUFFIX, filt, attr, value)
        loop += 1

    topology.master1.log.info("\n######################### Check replication M2->M1 ######################\n")
    loop = 0
    while loop <= 10:
        attr = 'description'
        value = 'test_value_%d' % loop
        mod = [(ldap.MOD_REPLACE, attr, value)]
        topology.master2.modify_s(entry_dn, mod)
        _check_mod_received(topology.master1, SUFFIX, filt, attr, value)
        loop += 1


def test_ticket47787_init(topology):
    """
    Creates
        - a staging DIT
        - a production DIT
        - accounts in the staging DIT
    """
    topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n")

    # entry used to bind with
    topology.master1.log.info("Add %s" % BIND_DN)
    topology.master1.add_s(Entry((BIND_DN, {
        'objectclass': "top person".split(),
        'sn': BIND_CN,
        'cn': BIND_CN,
        'userpassword': BIND_PW})))

    # DIT for staging
    topology.master1.log.info("Add %s" % STAGING_DN)
    topology.master1.add_s(Entry((STAGING_DN, {
        'objectclass': "top organizationalRole".split(),
        'cn': STAGING_CN,
        'description': "staging DIT"})))

    # DIT for production
    topology.master1.log.info("Add %s" % PRODUCTION_DN)
    topology.master1.add_s(Entry((PRODUCTION_DN, {
        'objectclass': "top organizationalRole".split(),
        'cn': PRODUCTION_CN,
        'description': "production DIT"})))

    # enable replication error logging
    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192')]
    topology.master1.modify_s(DN_CONFIG, mod)
    topology.master2.modify_s(DN_CONFIG, mod)

    # add dummy entries in the staging DIT
    for cpt in range(MAX_ACCOUNTS):
        name = "%s%d" % (NEW_ACCOUNT, cpt)
        topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
            'objectclass': "top person".split(),
            'sn': name,
            'cn': name})))


def test_ticket47787_2(topology):
    '''
    Disable replication so that updates are not replicated.
    Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior).
    Update a test entry on M2.
    Re-enable the RAs.
    Check that the entry was deleted on M2 (with the modified RDN).
    Check that the test entry was replicated to M1 (replication M2->M1 not broken by the modrdn).
    '''
    _header(topology, "test_ticket47787_2")

    _bind_manager(topology.master1)
    _bind_manager(topology.master2)

    # entry to test that replication is still working
    name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1)
    test_rdn = "cn=%s" % (name)
    testentry_dn = "%s,%s" % (test_rdn, STAGING_DN)

    name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2)
    test2_rdn = "cn=%s" % (name)
    testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN)

    # value of updates to test the replication both ways
    attr = 'description'
    value = 'test_ticket47787_2'

    # entry for the modrdn
    name = "%s%d" % (NEW_ACCOUNT, 1)
    rdn = "cn=%s" % (name)
    entry_dn = "%s,%s" % (rdn, STAGING_DN)

    # created on M1, wait until the entry exists on M2
    _check_entry_exists(topology.master2, entry_dn)
    _check_entry_exists(topology.master2, testentry_dn)
    _pause_RAs(topology)

    # Delete 'entry_dn' on M1.
    # The dummy update is only to have a first CSN before the DEL,
    # else the DEL will be in the min_csn RUV and make diagnostics a bit more complex
    _mod_entry(topology.master1, testentry2_dn, attr, 'dummy')
    _delete_entry(topology.master1, entry_dn, name)
    _mod_entry(topology.master1, testentry2_dn, attr, value)

    time.sleep(1)  # important to have MOD.csn != DEL.csn

    # MOD 'entry_dn' on M2.
    # The dummy update is only to have a first CSN before the MOD of entry_dn,
    # else the MOD will be in the min_csn RUV and make diagnostics a bit more complex
    _mod_entry(topology.master2, testentry_dn, attr, 'dummy')
    _mod_entry(topology.master2, entry_dn, attr, value)
    _mod_entry(topology.master2, testentry_dn, attr, value)

    _resume_RAs(topology)
    topology.master1.log.info("\n\n######################### Check DEL replicated on M2 ######################\n")
    loop = 0
    while loop <= 10:
        ent = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
        if ent:
            break
        time.sleep(1)
        loop += 1
    assert loop <= 10
    assert ent

    # the following checks are not necessary
    # as this bug is only about a failing replicated MOD (entry_dn) on M1
    #_check_mod_received(topology.master1, SUFFIX, "(%s)" % (test_rdn), attr, value)
    #_check_mod_received(topology.master2, SUFFIX, "(%s)" % (test2_rdn), attr, value)
    #
    #_check_replication(topology, testentry_dn)

    _status_entry_both_server(topology, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG)

    topology.master1.log.info("\n\n######################### Check MOD replicated on M1 ######################\n")
    loop = 0
    while loop <= 10:
        ent = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
        if ent:
            break
        time.sleep(1)
        loop += 1
    assert loop <= 10
    assert ent
    assert ent.hasAttr(attr)
    assert ent.getValue(attr) == value
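    # At this point the DEL from M1 and the MOD from M2 have both been
    # replicated: the entry exists only as a tombstone on both masters, and
    # the tombstone on M1 must carry the description value applied on M2.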


def test_ticket47787_final(topology):
    topology.master1.stop(timeout=10)
    topology.master2.stop(timeout=10)


def run_isolated():
    '''
    run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
    To run them isolated without py.test, you need to
        - edit this file and comment out the '@pytest.fixture' line before the 'topology' function
        - set the installation prefix
        - run this program
    '''
    global installation1_prefix
    global installation2_prefix

    installation1_prefix = None
    installation2_prefix = None

    topo = topology(True)
    topo.master1.log.info("\n\n######################### Ticket 47787 ######################\n")
    test_ticket47787_init(topo)
    test_ticket47787_2(topo)

    test_ticket47787_final(topo)


if __name__ == '__main__':
    run_isolated()