'''
Created on Nov 7, 2013

@author: tbordaz
'''
import os
import sys
import time
import ldap
import logging
import pytest
import tarfile
import stat
import shutil
from random import randint
from lib389 import DirSrv, Entry, tools
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

#
# Important part: Master1 and Master2 can be deployed on different versions
#
installation1_prefix = None
installation2_prefix = None

TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
OC_NAME = 'OCticket47988'
MUST = "(postalAddress $ postalCode)"
MAY = "(member $ street)"

OTHER_NAME = 'other_entry'
MAX_OTHERS = 10

BIND_NAME = 'bind_entry'
BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
BIND_PW = 'password'

ENTRY_NAME = 'test_entry'
ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
ENTRY_OC = "top person %s" % OC_NAME


def _oc_definition(oid_ext, name, must=None, may=None):
    oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
    desc = 'To test ticket 47988'
    sup = 'person'
    if not must:
        must = MUST
    if not may:
        may = MAY
    new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
    return new_oc
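
# Illustrative example of the definition string produced above, e.g. for
# _oc_definition(2, OC_NAME):
#   ( 1.2.3.4.5.6.7.8.9.10.2 NAME 'OCticket47988' DESC 'To test ticket 47988'
#     SUP person AUXILIARY MUST (postalAddress $ postalCode) MAY (member $ street) )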


class TopologyMaster1Master2(object):
    def __init__(self, master1, master2):
        master1.open()
        self.master1 = master1

        master2.open()
        self.master2 = master2


@pytest.fixture(scope="module")
def topology(request):
    '''
    This fixture is used to create a replicated topology for the 'module'.
    The replicated topology is MASTER1 <-> MASTER2.
    '''
    global installation1_prefix
    global installation2_prefix

    #os.environ['USE_VALGRIND'] = '1'

    # Allocate master1 on a given deployment
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Args for the master1 instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master1.allocate(args_master)

    # Allocate master2 on a given deployment
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix

    # Args for the master2 instance
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_master = args_instance.copy()
    master2.allocate(args_master)

    # Get the status of the instances
    instance_master1 = master1.exists()
    instance_master2 = master2.exists()

    # Remove any existing instances
    if instance_master1:
        master1.delete()
    if instance_master2:
        master2.delete()

    # Create the instances
    master1.create()
    master1.open()
    master2.create()
    master2.open()

    #
    # Now prepare the Master-Master topology
    #
    # First enable replication
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

    # Initialize the supplier->consumer agreements
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
    if not repl_agreement:
        log.fatal("Failed to create a replica agreement")
        sys.exit(1)
    log.debug("%s created" % repl_agreement)

    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)

    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(repl_agreement)

    # Check that replication is working fine
    master1.add_s(Entry((TEST_REPL_DN, {
        'objectclass': "top person".split(),
        'sn': 'test_repl',
        'cn': 'test_repl'})))
    loop = 0
    ent = None
    while loop <= 10:
        try:
            ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
            loop += 1
    if ent is None:
        assert False

    # Here we have two master instances
    return TopologyMaster1Master2(master1, master2)


def _header(topology, label):
    topology.master1.log.info("\n\n###############################################")
    topology.master1.log.info("#######")
    topology.master1.log.info("####### %s" % label)
    topology.master1.log.info("#######")
    topology.master1.log.info("###################################################")


def _install_schema(server, tarFile):
    '''
    Stop the server, extract the schema tarball into /tmp/schema_47988 and copy
    its files into the instance schema directory (only 99user.ldif is replaced),
    then restore read-only permissions. The caller restarts the server.
    '''
    server.stop(timeout=10)

    tmpSchema = '/tmp/schema_47988'
    if not os.path.isdir(tmpSchema):
        os.mkdir(tmpSchema)

    for the_file in os.listdir(tmpSchema):
        file_path = os.path.join(tmpSchema, the_file)
        if os.path.isfile(file_path):
            os.unlink(file_path)

    os.chdir(tmpSchema)
    tar = tarfile.open(tarFile, 'r:gz')
    for member in tar.getmembers():
        tar.extract(member.name)
    tar.close()

    st = os.stat(server.schemadir)
    os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR)
    for the_file in os.listdir(tmpSchema):
        schemaFile = os.path.join(server.schemadir, the_file)
        if os.path.isfile(schemaFile):
            if the_file.startswith('99user.ldif'):
                # only replace 99user.ldif, the other standard definitions are kept
                os.chmod(schemaFile, stat.S_IWUSR | stat.S_IRUSR)
                server.log.info("replace %s" % schemaFile)
                shutil.copy(the_file, schemaFile)
            else:
                server.log.info("add %s" % schemaFile)
                shutil.copy(the_file, schemaFile)
                os.chmod(schemaFile, stat.S_IRUSR | stat.S_IRGRP)
    os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP)


def test_ticket47988_init(topology):
    """
    Set up the test:
      - enable replication/internal-op logging on both masters
      - add MAX_OTHERS dummy entries and check they are replicated
      - stop both masters, install the specific schema (M1: ipa3.3, M2: ipa4.1),
        then restart them
    """
    _header(topology, 'test_ticket47988_init')

    # enable replication error logging
    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))]  # REPL
    topology.master1.modify_s(DN_CONFIG, mod)
    topology.master2.modify_s(DN_CONFIG, mod)

    mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260))]  # Internal op
    topology.master1.modify_s(DN_CONFIG, mod)
    topology.master2.modify_s(DN_CONFIG, mod)

    # add dummy entries
    for cpt in range(MAX_OTHERS):
        name = "%s%d" % (OTHER_NAME, cpt)
        topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
            'objectclass': "top person".split(),
            'sn': name,
            'cn': name})))

    # check that entry 0 is replicated before going further
    loop = 0
    entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
    while loop <= 10:
        try:
            ent = topology.master2.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
            loop += 1
    assert (loop <= 10)

    topology.master1.stop(timeout=10)
    topology.master2.stop(timeout=10)

    # install the specific schema: M1 gets ipa3.3, M2 gets ipa4.1
    schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz")
    _install_schema(topology.master1, schema_file)
    schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz")
    _install_schema(topology.master2, schema_file)

    topology.master1.start(timeout=10)
    topology.master2.start(timeout=10)


def _do_update_schema(server, range=3999):
    '''
    Update the schema of the given server to generate a new nsSchemaCSN.
    '''
    postfix = str(randint(range, range + 1000))
    OID = '2.16.840.1.113730.3.8.12.%s' % postfix
    NAME = 'thierry%s' % postfix
    value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (OID, NAME)
    mod = [(ldap.MOD_ADD, 'objectclasses', value)]
    server.modify_s('cn=schema', mod)
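
# Illustrative example: _do_update_schema(master1) adds an objectclass such as
#   ( 2.16.840.1.113730.3.8.12.4123 NAME 'thierry4123' DESC 'Override for Group Attributes'
#     STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( 'IPA v4.1.2' 'user defined' ) )
# to cn=schema, which is enough to move the server's nsSchemaCSN.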


def _do_update_entry(supplier=None, consumer=None, attempts=10):
    '''
    Perform an update on the supplier and check that it has been
    propagated to the consumer.
    '''
    assert supplier
    assert consumer

    entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
    value = str(randint(100, 200))
    mod = [(ldap.MOD_REPLACE, 'telephonenumber', value)]
    supplier.modify_s(entryDN, mod)

    loop = 0
    while loop <= attempts:
        ent = consumer.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
        read_val = ent.telephonenumber or "0"
        if read_val == value:
            break
        # the expected value is not yet replicated, try again
        time.sleep(5)
        loop += 1
        supplier.log.debug("test_do_update: received %s (expected %s)" % (read_val, value))
    assert (loop <= attempts)
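
# Illustrative usage (as in the tests below): push a change from M2 and wait for
# it to appear on M1, polling up to 'attempts' times:
#   _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)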


def _pause_M2_to_M1(topology):
    topology.master1.log.info("\n\n######################### Pause RA M2->M1 ######################\n")
    ents = topology.master2.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master2.agreement.pause(ents[0].dn)


def _resume_M1_to_M2(topology):
    topology.master1.log.info("\n\n######################### Resume RA M1->M2 ######################\n")
    ents = topology.master1.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master1.agreement.resume(ents[0].dn)


def _pause_M1_to_M2(topology):
    topology.master1.log.info("\n\n######################### Pause RA M1->M2 ######################\n")
    ents = topology.master1.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master1.agreement.pause(ents[0].dn)


def _resume_M2_to_M1(topology):
    topology.master1.log.info("\n\n######################### Resume RA M2->M1 ######################\n")
    ents = topology.master2.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master2.agreement.resume(ents[0].dn)


def test_ticket47988_1(topology):
    '''
    Check that replication is working, then pause replication M2->M1.
    '''
    _header(topology, 'test_ticket47988_1')

    topology.master1.log.debug("\n\nCheck that replication is working and pause replication M2->M1\n")
    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
    _pause_M2_to_M1(topology)


def test_ticket47988_2(topology):
    '''
    Update the M1 schema and trigger an update M1->M2,
    so that M1 learns the new/extended definitions that are in the M2 schema.
    '''
    _header(topology, 'test_ticket47988_2')

    topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n")
    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nBefore updating the schema on M1\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)

    # Update the schema on M1; during the next replication session M1 should
    # check the M2 schema and learn its new/extended definitions
    _do_update_schema(topology.master1)

    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nAfter updating the schema on M1\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)

    # Pause/resume to avoid a lingering replication session being reused
    # without re-checking the schema
    _pause_M1_to_M2(topology)
    _resume_M1_to_M2(topology)

    #topology.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
    #time.sleep(60)
    _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=15)

    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nAfter a full replication session\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)
    assert (master2_schema_csn)


def test_ticket47988_3(topology):
    '''
    Resume replication M2->M1 and check replication is still working.
    '''
    _header(topology, 'test_ticket47988_3')

    _resume_M2_to_M1(topology)
    _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5)
    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)


def test_ticket47988_4(topology):
    '''
    Check that the schemaCSN is identical on both servers
    and save the nsschemaCSN values to later check they do not change unexpectedly.
    '''
    _header(topology, 'test_ticket47988_4')

    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)
    assert (master2_schema_csn)
    assert (master1_schema_csn == master2_schema_csn)

    topology.master1.saved_schema_csn = master1_schema_csn
    topology.master2.saved_schema_csn = master2_schema_csn


def test_ticket47988_5(topology):
    '''
    Check the schemaCSNs do not change unexpectedly.
    '''
    _header(topology, 'test_ticket47988_5')

    _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5)
    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)
    assert (master2_schema_csn)
    assert (master1_schema_csn == master2_schema_csn)

    assert (topology.master1.saved_schema_csn == master1_schema_csn)
    assert (topology.master2.saved_schema_csn == master2_schema_csn)


def test_ticket47988_6(topology):
    '''
    Update the M1 schema and trigger an update M2->M1,
    so that M2 learns the new/extended definitions that are in the M1 schema.
    '''
    _header(topology, 'test_ticket47988_6')

    topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M2\n")
    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nBefore updating the schema on M1\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)

    # Update the schema on M1; during the next replication session M2 should
    # check the M1 schema and learn its new/extended definitions
    _do_update_schema(topology.master1, range=5999)

    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nAfter updating the schema on M1\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)

    # Pause/resume to avoid a lingering replication session being reused
    # without re-checking the schema
    _pause_M1_to_M2(topology)
    _resume_M1_to_M2(topology)

    #topology.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
    #time.sleep(60)
    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=15)

    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nAfter a full replication session\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)
    assert (master2_schema_csn)


def test_ticket47988_final(topology):
    topology.master1.delete()
    topology.master2.delete()
    log.info('Testcase PASSED')


def run_isolated():
    '''
    run_isolated is used to run these test cases independently of a test
    scheduler (xunit, py.test..).
    To run without py.test, you need to:
      - edit this file and comment out the '@pytest.fixture' line before the 'topology' function
      - set the installation prefix
      - run this program
    '''
    global installation1_prefix
    global installation2_prefix
    installation1_prefix = None
    installation2_prefix = None

    topo = topology(True)
    test_ticket47988_init(topo)
    test_ticket47988_1(topo)
    test_ticket47988_2(topo)
    test_ticket47988_3(topo)
    test_ticket47988_4(topo)
    test_ticket47988_5(topo)
    test_ticket47988_6(topo)
    test_ticket47988_final(topo)
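
# To run in isolation (see run_isolated() above): comment out the
# @pytest.fixture decorator on topology(), set the installation prefixes as
# needed, then invoke this file directly, e.g.:
#   python ticket47988_test.py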


if __name__ == '__main__':
    run_isolated()