# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2015 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
'''
Created on Nov 7, 2013
@author: tbordaz
'''
import os
import sys
import time
import ldap
import logging
import pytest
import tarfile
import stat
import shutil
from random import randint
from lib389 import DirSrv, Entry, tools
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

#
# important part. We can deploy Master1 and Master2 on different versions
#
installation1_prefix = None
installation2_prefix = None

TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
OC_NAME = 'OCticket47988'
MUST = "(postalAddress $ postalCode)"
MAY = "(member $ street)"

OTHER_NAME = 'other_entry'
MAX_OTHERS = 10

BIND_NAME = 'bind_entry'
BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
BIND_PW = 'password'

ENTRY_NAME = 'test_entry'
ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
ENTRY_OC = "top person %s" % OC_NAME
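
# For reference only (assuming SUFFIX resolves to dc=example,dc=com, as in the
# stock lib389 constants), BIND_DN comes out as 'cn=bind_entry, dc=example,dc=com'
# and ENTRY_DN as 'cn=test_entry, dc=example,dc=com'.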


def _oc_definition(oid_ext, name, must=None, may=None):
    oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
    desc = 'To test ticket 47988'
    sup = 'person'
    if not must:
        must = MUST
    if not may:
        may = MAY

    new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
    return new_oc
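
# Illustration only (this helper is not invoked by the tests in this file):
# _oc_definition(2, OC_NAME) would return
#   ( 1.2.3.4.5.6.7.8.9.10.2 NAME 'OCticket47988' DESC 'To test ticket 47988'
#     SUP person AUXILIARY MUST (postalAddress $ postalCode) MAY (member $ street) )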


class TopologyMaster1Master2(object):
    def __init__(self, master1, master2):
        master1.open()
        self.master1 = master1

        master2.open()
        self.master2 = master2


@pytest.fixture(scope="module")
def topology(request):
    '''
    This fixture is used to create a replicated topology for the 'module'.
    The replicated topology is MASTER1 <-> MASTER2.
    '''
    global installation1_prefix
    global installation2_prefix

    #os.environ['USE_VALGRIND'] = '1'

    # Allocate master1 on a given deployment
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Args for the master1 instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master1.allocate(args_master)

    # Allocate master2 on a given deployment
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix

    # Args for the master2 instance
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_master = args_instance.copy()
    master2.allocate(args_master)

    # Get the status of the instances
    instance_master1 = master1.exists()
    instance_master2 = master2.exists()

    # Remove existing instances
    if instance_master1:
        master1.delete()
    if instance_master2:
        master2.delete()

    # Create the instances
    master1.create()
    master1.open()
    master2.create()
    master2.open()

    def fin():
        master1.delete()
        master2.delete()
    request.addfinalizer(fin)

    #
    # Now prepare the Master-Master topology
    #
    # First enable replication
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

    # Create the M1 -> M2 agreement
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)

    if not repl_agreement:
        log.fatal("Failed to create the M1 -> M2 replica agreement")
        sys.exit(1)
    log.debug("%s created" % repl_agreement)

    # Create the M2 -> M1 agreement
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)

    # Initialize M2 from M1
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(repl_agreement)

    # Check that replication is working
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Here we have two master instances
    return TopologyMaster1Master2(master1, master2)


def _header(topology, label):
    topology.master1.log.info("\n\n###############################################")
    topology.master1.log.info("#######")
    topology.master1.log.info("####### %s" % label)
    topology.master1.log.info("#######")
    topology.master1.log.info("###################################################")


def _install_schema(server, tarFile):
    '''
    Stop the server, extract the schema files from tarFile into a temporary
    directory, then install them into the server's schema directory:
    99user.ldif is replaced, files that do not exist yet are added, and the
    other standard definitions are kept.
    '''
    server.stop(timeout=10)

    tmpSchema = '/tmp/schema_47988'
    if not os.path.isdir(tmpSchema):
        os.mkdir(tmpSchema)

    for the_file in os.listdir(tmpSchema):
        file_path = os.path.join(tmpSchema, the_file)
        if os.path.isfile(file_path):
            os.unlink(file_path)

    os.chdir(tmpSchema)
    tar = tarfile.open(tarFile, 'r:gz')
    for member in tar.getmembers():
        tar.extract(member.name)
    tar.close()

    st = os.stat(server.schemadir)
    os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR)
    for the_file in os.listdir(tmpSchema):
        schemaFile = os.path.join(server.schemadir, the_file)
        if os.path.isfile(schemaFile):
            if the_file.startswith('99user.ldif'):
                # only replace 99user.ldif; the other standard definitions are kept
                os.chmod(schemaFile, stat.S_IWUSR | stat.S_IRUSR)
                server.log.info("replace %s" % schemaFile)
                shutil.copy(the_file, schemaFile)
        else:
            server.log.info("add %s" % schemaFile)
            shutil.copy(the_file, schemaFile)
            os.chmod(schemaFile, stat.S_IRUSR | stat.S_IRGRP)
    os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP)


def test_ticket47988_init(topology):
    """
    Initialize the test:
       - enable replication and internal operation logging
       - add dummy entries and check they are replicated
       - install the specific schema files (M1: ipa3.3, M2: ipa4.1)
    """
    _header(topology, 'test_ticket47988_init')

    # enable replication error logging
    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))]  # REPL
    topology.master1.modify_s(DN_CONFIG, mod)
    topology.master2.modify_s(DN_CONFIG, mod)

    mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260))]  # Internal op
    topology.master1.modify_s(DN_CONFIG, mod)
    topology.master2.modify_s(DN_CONFIG, mod)

    # add dummy entries
    for cpt in range(MAX_OTHERS):
        name = "%s%d" % (OTHER_NAME, cpt)
        topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
                                      'objectclass': "top person".split(),
                                      'sn': name,
                                      'cn': name})))

    # check that entry 0 is replicated before going further
    loop = 0
    entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
    while loop <= 10:
        try:
            ent = topology.master2.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
            loop += 1
    assert (loop <= 10)

    topology.master1.stop(timeout=10)
    topology.master2.stop(timeout=10)

    # install the specific schema: M1 gets ipa3.3, M2 gets ipa4.1
    schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz")
    _install_schema(topology.master1, schema_file)
    schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz")
    _install_schema(topology.master2, schema_file)

    topology.master1.start(timeout=10)
    topology.master2.start(timeout=10)


def _do_update_schema(server, range=3999):
    '''
    Update the schema of the given server (add a dummy objectclass) so that it
    generates a new nsSchemaCSN.
    '''
    postfix = str(randint(range, range + 1000))
    OID = '2.16.840.1.113730.3.8.12.%s' % postfix
    NAME = 'thierry%s' % postfix
    value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (OID, NAME)
    mod = [(ldap.MOD_ADD, 'objectclasses', value)]
    server.modify_s('cn=schema', mod)
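
# For illustration, with a postfix of 4242 the modify above would add a definition
# of the form:
#   ( 2.16.840.1.113730.3.8.12.4242 NAME 'thierry4242' DESC 'Override for Group Attributes'
#     STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( 'IPA v4.1.2' 'user defined' ) )
# The random postfix presumably keeps repeated runs from colliding with
# definitions added by earlier runs.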


def _do_update_entry(supplier=None, consumer=None, attempts=10):
    '''
    Update an entry on the supplier and check that the update has been
    propagated to the consumer.
    '''
    assert (supplier)
    assert (consumer)
    entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
    value = str(randint(100, 200))
    mod = [(ldap.MOD_REPLACE, 'telephonenumber', value)]
    supplier.modify_s(entryDN, mod)

    loop = 0
    while loop <= attempts:
        ent = consumer.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
        read_val = ent.telephonenumber or "0"
        if read_val == value:
            break

        # the expected value is not yet replicated. try again
        time.sleep(5)
        loop += 1
        supplier.log.debug("test_do_update: received %s (expected %s)" % (read_val, value))
    assert (loop <= attempts)


def _pause_M2_to_M1(topology):
    topology.master1.log.info("\n\n######################### Pause RA M2->M1 ######################\n")
    ents = topology.master2.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master2.agreement.pause(ents[0].dn)


def _resume_M1_to_M2(topology):
    topology.master1.log.info("\n\n######################### Resume RA M1->M2 ######################\n")
    ents = topology.master1.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master1.agreement.resume(ents[0].dn)


def _pause_M1_to_M2(topology):
    topology.master1.log.info("\n\n######################### Pause RA M1->M2 ######################\n")
    ents = topology.master1.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master1.agreement.pause(ents[0].dn)


def _resume_M2_to_M1(topology):
    topology.master1.log.info("\n\n######################### Resume RA M2->M1 ######################\n")
    ents = topology.master2.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master2.agreement.resume(ents[0].dn)


def test_ticket47988_1(topology):
    '''
    Check that replication is working and pause replication M2->M1
    '''
    _header(topology, 'test_ticket47988_1')

    topology.master1.log.debug("\n\nCheck that replication is working and pause replication M2->M1\n")
    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
    _pause_M2_to_M1(topology)


def test_ticket47988_2(topology):
    '''
    Update the M1 schema and trigger a replication session M1->M2,
    so that M1 learns the new/extended definitions that are in the M2 schema.
    '''
    _header(topology, 'test_ticket47988_2')

    topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n")
    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nBefore updating the schema on M1\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)

    # Here M1 should not simply push its schema; it should check the M2 schema
    # and learn the definitions it is missing
    _do_update_schema(topology.master1)

    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nAfter updating the schema on M1\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)

    # Pause/resume to avoid a lingering replication session being reused
    # without re-checking the schema
    _pause_M1_to_M2(topology)
    _resume_M1_to_M2(topology)

    #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
    #time.sleep(60)
    _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=15)

    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nAfter a full replication session\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)
    assert (master2_schema_csn)


def test_ticket47988_3(topology):
    '''
    Resume replication M2->M1 and check replication is still working
    '''
    _header(topology, 'test_ticket47988_3')

    _resume_M2_to_M1(topology)
    _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5)
    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)


def test_ticket47988_4(topology):
    '''
    Check that the schemaCSN is identical on both servers
    and save the nsSchemaCSN values to later check that they do not change
    unexpectedly.
    '''
    _header(topology, 'test_ticket47988_4')

    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)
    assert (master2_schema_csn)
    assert (master1_schema_csn == master2_schema_csn)

    topology.master1.saved_schema_csn = master1_schema_csn
    topology.master2.saved_schema_csn = master2_schema_csn


def test_ticket47988_5(topology):
    '''
    Check that the schemaCSN values do not change unexpectedly
    '''
    _header(topology, 'test_ticket47988_5')

    _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5)
    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)
    assert (master2_schema_csn)
    assert (master1_schema_csn == master2_schema_csn)

    assert (topology.master1.saved_schema_csn == master1_schema_csn)
    assert (topology.master2.saved_schema_csn == master2_schema_csn)


def test_ticket47988_6(topology):
    '''
    Update the M1 schema and trigger a replication session M2->M1,
    so that M2 learns the new/extended definitions that are in the M1 schema.
    '''
    _header(topology, 'test_ticket47988_6')

    topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n")
    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nBefore updating the schema on M1\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)

    # Here M2 should not simply push its schema; it should check the M1 schema
    # and learn the definitions it is missing
    _do_update_schema(topology.master1, range=5999)

    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nAfter updating the schema on M1\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)

    # Pause/resume to avoid a lingering replication session being reused
    # without re-checking the schema
    _pause_M1_to_M2(topology)
    _resume_M1_to_M2(topology)

    #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
    #time.sleep(60)
    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=15)

    master1_schema_csn = topology.master1.schema.get_schema_csn()
    master2_schema_csn = topology.master2.schema.get_schema_csn()
    topology.master1.log.debug("\nAfter a full replication session\n")
    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
    assert (master1_schema_csn)
    assert (master2_schema_csn)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)