# ticket47787_test.py
  1. '''
  2. Created on April 14, 2014
  3. @author: tbordaz
  4. '''
  5. import os
  6. import sys
  7. import time
  8. import ldap
  9. import logging
  10. import socket
  11. import time
  12. import logging
  13. import pytest
  14. import re
  15. from lib389 import DirSrv, Entry, tools, NoSuchEntryError
  16. from lib389.tools import DirSrvTools
  17. from lib389._constants import *
  18. from lib389.properties import *
  19. from constants import *
  20. from lib389._constants import REPLICAROLE_MASTER
  21. logging.getLogger(__name__).setLevel(logging.DEBUG)
  22. log = logging.getLogger(__name__)
  23. #
  24. # important part. We can deploy Master1 and Master2 on different versions
  25. #
  26. installation1_prefix = None
  27. installation2_prefix = None
  28. # set this flag to False so that it will assert on failure _status_entry_both_server
  29. DEBUG_FLAG = False
  30. TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
  31. STAGING_CN = "staged user"
  32. PRODUCTION_CN = "accounts"
  33. EXCEPT_CN = "excepts"
  34. STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
  35. PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX)
  36. PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN)
  37. STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
  38. PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX)
  39. BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
  40. BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX)
  41. BIND_CN = "bind_entry"
  42. BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX)
  43. BIND_PW = "password"
  44. NEW_ACCOUNT = "new_account"
  45. MAX_ACCOUNTS = 20
  46. CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci"
  47. class TopologyMaster1Master2(object):
  48. def __init__(self, master1, master2):
  49. master1.open()
  50. self.master1 = master1
  51. master2.open()
  52. self.master2 = master2
@pytest.fixture(scope="module")
def topology(request):
    '''
        This fixture is used to create a replicated topology for the 'module'.
        The replicated topology is MASTER1 <-> Master2.
        At the beginning, It may exists a master1 instance and/or a master2 instance.
        It may also exists a backup for the master1 and/or the master2.

        Principle:
            If master1 instance exists:
                restart it
            If master2 instance exists:
                restart it
            If backup of master1 AND backup of master2 exists:
                create or rebind to master1
                create or rebind to master2

                restore master1 from backup
                restore master2 from backup
            else:
                Cleanup everything
                    remove instances
                    remove backups
                Create instances
                Initialize replication
                Create backups
    '''
    global installation1_prefix
    global installation2_prefix

    # allocate master1 on a given deployement
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Args for the master1 instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master1.allocate(args_master)

    # allocate master2 on a given deployement
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix

    # Args for the master2 instance
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_master = args_instance.copy()
    master2.allocate(args_master)

    # Get the status of the backups
    backup_master1 = master1.checkBackupFS()
    backup_master2 = master2.checkBackupFS()

    # Get the status of the instance and restart it if it exists
    instance_master1 = master1.exists()
    if instance_master1:
        master1.stop(timeout=10)
        master1.start(timeout=10)

    instance_master2 = master2.exists()
    if instance_master2:
        master2.stop(timeout=10)
        master2.start(timeout=10)

    if backup_master1 and backup_master2:
        # The backups exist, assuming they are correct
        # we just re-init the instances with them
        if not instance_master1:
            master1.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master1.open()

        if not instance_master2:
            master2.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master2.open()

        # restore master1 from backup
        master1.stop(timeout=10)
        master1.restoreFS(backup_master1)
        master1.start(timeout=10)

        # restore master2 from backup
        master2.stop(timeout=10)
        master2.restoreFS(backup_master2)
        master2.start(timeout=10)
    else:
        # We should be here only in two conditions
        #   - This is the first time a test involve master-consumer
        #     so we need to create everything
        #   - Something weird happened (instance/backup destroyed)
        #     so we discard everything and recreate all

        # Remove all the backups. So even if we have a specific backup file
        # (e.g backup_master) we clear all backups that an instance may have created
        if backup_master1:
            master1.clearBackupFS()
        if backup_master2:
            master2.clearBackupFS()

        # Remove all the instances
        if instance_master1:
            master1.delete()
        if instance_master2:
            master2.delete()

        # Create the instances
        master1.create()
        master1.open()
        master2.create()
        master2.open()

        #
        # Now prepare the Master-Consumer topology
        #
        # First Enable replication
        master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
        master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

        # Initialize the supplier->consumer agreement M1 -> M2
        properties = {RA_NAME: r'meTo_$host:$port',
                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)

        if not repl_agreement:
            log.fatal("Fail to create a replica agreement")
            sys.exit(1)
        log.debug("%s created" % repl_agreement)

        # And the reverse agreement M2 -> M1
        properties = {RA_NAME: r'meTo_$host:$port',
                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)

        # Total init of M2 from M1, then wait for it to finish
        master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
        master1.waitForReplInit(repl_agreement)

        # Check replication is working fine: add on M1, poll for it on M2
        master1.add_s(Entry((TEST_REPL_DN, {
            'objectclass': "top person".split(),
            'sn': 'test_repl',
            'cn': 'test_repl'})))
        loop = 0
        while loop <= 10:
            try:
                ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
                break
            except ldap.NO_SUCH_OBJECT:
                time.sleep(1)
                loop += 1

        # Time to create the backups
        master1.stop(timeout=10)
        master1.backupfile = master1.backupFS()
        master1.start(timeout=10)

        master2.stop(timeout=10)
        master2.backupfile = master2.backupFS()
        master2.start(timeout=10)

    # clear the tmp directory
    master1.clearTmpDir(__file__)

    #
    # Here we have two instances master and consumer
    # with replication working. Either coming from a backup recovery
    # or from a fresh (re)init
    # Time to return the topology
    return TopologyMaster1Master2(master1, master2)
  206. def _bind_manager(server):
  207. server.log.info("Bind as %s " % DN_DM)
  208. server.simple_bind_s(DN_DM, PASSWORD)
  209. def _bind_normal(server):
  210. server.log.info("Bind as %s " % BIND_DN)
  211. server.simple_bind_s(BIND_DN, BIND_PW)
  212. def _header(topology, label):
  213. topology.master1.log.info("\n\n###############################################")
  214. topology.master1.log.info("#######")
  215. topology.master1.log.info("####### %s" % label)
  216. topology.master1.log.info("#######")
  217. topology.master1.log.info("###############################################")
def _status_entry_both_server(topology, name=None, desc=None, debug=True):
    """Compare the tombstone named *name* (by 'sn') on both masters.

    Logs the attributes/values present on only one master and the values
    that differ between the two copies. With debug=False the comparison
    asserts on the first divergence instead of merely logging it.
    """
    if not name:
        return
    topology.master1.log.info("\n\n######################### Tombstone on M1 ######################\n")
    ent_m1 = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
    assert ent_m1
    topology.master1.log.info("\n\n######################### Tombstone on M2 ######################\n")
    ent_m2 = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
    assert ent_m2
    topology.master1.log.info("\n\n######################### Description ######################\n%s\n" % desc)
    # attributes present on M1 but missing on M2
    topology.master1.log.info("M1 only\n")
    for attr in ent_m1.getAttrs():
        if not debug:
            assert attr in ent_m2.getAttrs()
        if not attr in ent_m2.getAttrs():
            topology.master1.log.info(" %s" % attr)
            for val in ent_m1.getValues(attr):
                topology.master1.log.info(" %s" % val)
    # attributes present on M2 but missing on M1
    topology.master1.log.info("M2 only\n")
    for attr in ent_m2.getAttrs():
        if not debug:
            assert attr in ent_m1.getAttrs()
        if not attr in ent_m1.getAttrs():
            topology.master1.log.info(" %s" % attr)
            for val in ent_m2.getValues(attr):
                topology.master1.log.info(" %s" % val)
    # values that differ between the two copies (attribute exists on both)
    topology.master1.log.info("M1 differs M2\n")
    if not debug:
        assert ent_m1.dn == ent_m2.dn
    if ent_m1.dn != ent_m2.dn:
        topology.master1.log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn))
    # values on M1 not found on M2
    for attr1 in ent_m1.getAttrs():
        if attr1 in ent_m2.getAttrs():
            for val1 in ent_m1.getValues(attr1):
                found = False
                for val2 in ent_m2.getValues(attr1):
                    if val1 == val2:
                        found = True
                        break
                if not debug:
                    assert found
                if not found:
                    topology.master1.log.info(" M1[%s] = %s" % (attr1, val1))
    # values on M2 not found on M1
    for attr2 in ent_m2.getAttrs():
        if attr2 in ent_m1.getAttrs():
            for val2 in ent_m2.getValues(attr2):
                found = False
                for val1 in ent_m1.getValues(attr2):
                    if val2 == val1:
                        found = True
                        break
                if not debug:
                    assert found
                if not found:
                    topology.master1.log.info(" M2[%s] = %s" % (attr2, val2))
  273. def _pause_RAs(topology):
  274. topology.master1.log.info("\n\n######################### Pause RA M1<->M2 ######################\n")
  275. ents = topology.master1.agreement.list(suffix=SUFFIX)
  276. assert len(ents) == 1
  277. topology.master1.agreement.pause(ents[0].dn)
  278. ents = topology.master2.agreement.list(suffix=SUFFIX)
  279. assert len(ents) == 1
  280. topology.master2.agreement.pause(ents[0].dn)
  281. def _resume_RAs(topology):
  282. topology.master1.log.info("\n\n######################### resume RA M1<->M2 ######################\n")
  283. ents = topology.master1.agreement.list(suffix=SUFFIX)
  284. assert len(ents) == 1
  285. topology.master1.agreement.resume(ents[0].dn)
  286. ents = topology.master2.agreement.list(suffix=SUFFIX)
  287. assert len(ents) == 1
  288. topology.master2.agreement.resume(ents[0].dn)
  289. def _find_tombstone(instance, base, attr, value):
  290. #
  291. # we can not use a filter with a (&(objeclass=nsTombstone)(sn=name)) because
  292. # tombstone are not index in 'sn' so 'sn=name' will return NULL
  293. # and even if tombstone are indexed for objectclass the '&' will set
  294. # the candidate list to NULL
  295. #
  296. filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE
  297. ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt)
  298. found = False
  299. for ent in ents:
  300. if ent.hasAttr(attr):
  301. for val in ent.getValues(attr):
  302. if val == value:
  303. instance.log.debug("tombstone found: %r" % ent)
  304. return ent
  305. return None
  306. def _delete_entry(instance, entry_dn, name):
  307. instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name)
  308. # delete the entry
  309. instance.delete_s(entry_dn)
  310. assert _find_tombstone(instance, SUFFIX, 'sn', name) != None
  311. def _mod_entry(instance, entry_dn, attr, value):
  312. instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn)
  313. mod = [(ldap.MOD_REPLACE, attr, value)]
  314. instance.modify_s(entry_dn, mod)
  315. def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
  316. assert instance != None
  317. assert entry_dn != None
  318. if not new_rdn:
  319. pattern = 'cn=(.*),(.*)'
  320. rdnre = re.compile(pattern)
  321. match = rdnre.match(entry_dn)
  322. old_value = match.group(1)
  323. new_rdn_val = "%s_modrdn" % old_value
  324. new_rdn = "cn=%s" % new_rdn_val
  325. instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn)
  326. if new_superior:
  327. instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
  328. else:
  329. instance.rename_s(entry_dn, new_rdn, delold=del_old)
  330. def _check_entry_exists(instance, entry_dn):
  331. loop = 0
  332. while loop <= 10:
  333. try:
  334. ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)")
  335. break
  336. except ldap.NO_SUCH_OBJECT:
  337. time.sleep(1)
  338. loop += 1
  339. assert loop <= 10
  340. def _check_mod_received(instance, base, filt, attr, value):
  341. instance.log.info("\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid)
  342. loop = 0
  343. while loop <= 10:
  344. ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt)
  345. if ent.hasAttr(attr) and ent.getValue(attr) == value:
  346. break
  347. time.sleep(1)
  348. loop += 1
  349. assert loop <= 10
  350. def _check_replication(topology, entry_dn):
  351. # prepare the filter to retrieve the entry
  352. filt = entry_dn.split(',')[0]
  353. topology.master1.log.info("\n######################### Check replicat M1->M2 ######################\n")
  354. loop = 0
  355. while loop <= 10:
  356. attr = 'description'
  357. value = 'test_value_%d' % loop
  358. mod = [(ldap.MOD_REPLACE, attr, value)]
  359. topology.master1.modify_s(entry_dn, mod)
  360. _check_mod_received(topology.master2, SUFFIX, filt, attr, value)
  361. loop += 1
  362. topology.master1.log.info("\n######################### Check replicat M2->M1 ######################\n")
  363. loop = 0
  364. while loop <= 10:
  365. attr = 'description'
  366. value = 'test_value_%d' % loop
  367. mod = [(ldap.MOD_REPLACE, attr, value)]
  368. topology.master2.modify_s(entry_dn, mod)
  369. _check_mod_received(topology.master1, SUFFIX, filt, attr, value)
  370. loop += 1
  371. def test_ticket47787_init(topology):
  372. """
  373. Creates
  374. - a staging DIT
  375. - a production DIT
  376. - add accounts in staging DIT
  377. """
  378. topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n")
  379. # entry used to bind with
  380. topology.master1.log.info("Add %s" % BIND_DN)
  381. topology.master1.add_s(Entry((BIND_DN, {
  382. 'objectclass': "top person".split(),
  383. 'sn': BIND_CN,
  384. 'cn': BIND_CN,
  385. 'userpassword': BIND_PW})))
  386. # DIT for staging
  387. topology.master1.log.info("Add %s" % STAGING_DN)
  388. topology.master1.add_s(Entry((STAGING_DN, {
  389. 'objectclass': "top organizationalRole".split(),
  390. 'cn': STAGING_CN,
  391. 'description': "staging DIT"})))
  392. # DIT for production
  393. topology.master1.log.info("Add %s" % PRODUCTION_DN)
  394. topology.master1.add_s(Entry((PRODUCTION_DN, {
  395. 'objectclass': "top organizationalRole".split(),
  396. 'cn': PRODUCTION_CN,
  397. 'description': "production DIT"})))
  398. # enable replication error logging
  399. mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192')]
  400. topology.master1.modify_s(DN_CONFIG, mod)
  401. topology.master2.modify_s(DN_CONFIG, mod)
  402. # add dummy entries in the staging DIT
  403. for cpt in range(MAX_ACCOUNTS):
  404. name = "%s%d" % (NEW_ACCOUNT, cpt)
  405. topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
  406. 'objectclass': "top person".split(),
  407. 'sn': name,
  408. 'cn': name})))
def test_ticket47787_2(topology):
    '''
    Disable replication so that updates are not replicated
    Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior).
    update a test entry on M2
    Reenable the RA.
    checks that entry was deleted on M2 (with the modified RDN)
    checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn)
    '''
    _header(topology, "test_ticket47787_2")

    _bind_manager(topology.master1)
    _bind_manager(topology.master2)

    # entry to test the replication is still working
    name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1)
    test_rdn = "cn=%s" % (name)
    testentry_dn = "%s,%s" % (test_rdn, STAGING_DN)

    name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2)
    test2_rdn = "cn=%s" % (name)
    testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN)

    # value of updates to test the replication both ways
    attr = 'description'
    value = 'test_ticket47787_2'

    # entry for the modrdn
    name = "%s%d" % (NEW_ACCOUNT, 1)
    rdn = "cn=%s" % (name)
    entry_dn = "%s,%s" % (rdn, STAGING_DN)

    # created on M1, wait the entry exists on M2
    _check_entry_exists(topology.master2, entry_dn)
    _check_entry_exists(topology.master2, testentry_dn)

    _pause_RAs(topology)

    # Delete 'entry_dn' on M1.
    # dummy update is only to have a first CSN before the DEL
    # else the DEL will be in min_csn RUV and make diagnostic a bit more complex
    _mod_entry(topology.master1, testentry2_dn, attr, 'dummy')
    _delete_entry(topology.master1, entry_dn, name)
    _mod_entry(topology.master1, testentry2_dn, attr, value)

    time.sleep(1)  # important to have MOD.csn != DEL.csn

    # MOD 'entry_dn' on M2.
    # dummy update is only to have a first CSN before the MOD entry_dn
    # else the DEL will be in min_csn RUV and make diagnostic a bit more complex
    _mod_entry(topology.master2, testentry_dn, attr, 'dummy')
    _mod_entry(topology.master2, entry_dn, attr, value)
    _mod_entry(topology.master2, testentry_dn, attr, value)

    _resume_RAs(topology)

    # The DEL done on M1 must have reached M2: poll for the tombstone there
    topology.master1.log.info("\n\n######################### Check DEL replicated on M2 ######################\n")
    loop = 0
    while loop <= 10:
        ent = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
        if ent:
            break
        time.sleep(1)
        loop += 1
    assert loop <= 10
    assert ent

    # the following checks are not necessary
    # as this bug is only for failing replicated MOD (entry_dn) on M1
    #_check_mod_received(topology.master1, SUFFIX, "(%s)" % (test_rdn), attr, value)
    #_check_mod_received(topology.master2, SUFFIX, "(%s)" % (test2_rdn), attr, value)
    #
    #_check_replication(topology, testentry_dn)

    _status_entry_both_server(topology, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG)

    # The MOD done on M2 must have reached M1 and landed on the tombstone
    topology.master1.log.info("\n\n######################### Check MOD replicated on M1 ######################\n")
    loop = 0
    while loop <= 10:
        ent = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
        if ent:
            break
        time.sleep(1)
        loop += 1
    assert loop <= 10
    assert ent
    assert ent.hasAttr(attr)
    assert ent.getValue(attr) == value
  482. def test_ticket47787_final(topology):
  483. topology.master1.stop(timeout=10)
  484. topology.master2.stop(timeout=10)
  485. def run_isolated():
  486. '''
  487. run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
  488. To run isolated without py.test, you need to
  489. - edit this file and comment '@pytest.fixture' line before 'topology' function.
  490. - set the installation prefix
  491. - run this program
  492. '''
  493. global installation1_prefix
  494. global installation2_prefix
  495. installation1_prefix = None
  496. installation2_prefix = None
  497. topo = topology(True)
  498. topo.master1.log.info("\n\n######################### Ticket 47787 ######################\n")
  499. test_ticket47787_init(topo)
  500. test_ticket47787_2(topo)
  501. test_ticket47787_final(topo)
if __name__ == '__main__':
    # Allow running this module directly, outside of py.test.
    run_isolated()