# ticket47676_test.py
'''
Created on Nov 7, 2013
@author: tbordaz
'''
import os
import sys
import time
import ldap
import logging
import socket
import time      # NOTE(review): duplicate of the earlier `import time` — harmless, could be dropped
import logging   # NOTE(review): duplicate of the earlier `import logging` — harmless, could be dropped
import pytest
import re
from lib389 import DirSrv, Entry, tools
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from constants import *
from lib389._constants import REPLICAROLE_MASTER

# Module-level logger; DEBUG level so the test run shows replication details.
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
#
# important part. We can deploy Master1 and Master2 on different versions
#
# Deployment prefixes for the two masters; set by run_isolated() or left None
# to use the default installation.
installation1_prefix = None
installation2_prefix = None

SCHEMA_DN = "cn=schema"
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX

# First test objectclass (the one the update policy will reject/accept).
OC_NAME = 'OCticket47676'
OC_OID_EXT = 2
MUST = "(postalAddress $ postalCode)"
MAY = "(member $ street)"

# Second test objectclass, added later to bump the schema CSN on M1.
OC2_NAME = 'OC2ticket47676'
OC2_OID_EXT = 3
MUST_2 = "(postalAddress $ postalCode)"
MAY_2 = "(member $ street)"

# DNs of the schema replication update policies (consumer and supplier side).
REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config"
REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"

# Dummy entries used as 'member' values.
OTHER_NAME = 'other_entry'
MAX_OTHERS = 10

# Entry used to bind with during the tests.
BIND_NAME = 'bind_entry'
BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
BIND_PW = 'password'

# The entry carrying the test objectclass.
ENTRY_NAME = 'test_entry'
ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
ENTRY_OC = "top person %s" % OC_NAME

# Base OID the test objectclass OIDs are derived from.
BASE_OID = "1.2.3.4.5.6.7.8.9.10"
  49. def _oc_definition(oid_ext, name, must=None, may=None):
  50. oid = "%s.%d" % (BASE_OID, oid_ext)
  51. desc = 'To test ticket 47490'
  52. sup = 'person'
  53. if not must:
  54. must = MUST
  55. if not may:
  56. may = MAY
  57. new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
  58. return new_oc
  59. class TopologyMaster1Master2(object):
  60. def __init__(self, master1, master2):
  61. master1.open()
  62. self.master1 = master1
  63. master2.open()
  64. self.master2 = master2
@pytest.fixture(scope="module")
def topology(request):
    '''
        This fixture is used to create a replicated topology for the 'module'.
        The replicated topology is MASTER1 <-> Master2.
        At the beginning, It may exists a master2 instance and/or a master2 instance.
        It may also exists a backup for the master1 and/or the master2.

        Principle:
            If master1 instance exists:
                restart it
            If master2 instance exists:
                restart it
            If backup of master1 AND backup of master2 exists:
                create or rebind to master1
                create or rebind to master2

                restore master1 from backup
                restore master2 from backup
            else:
                Cleanup everything
                    remove instances
                    remove backups
                Create instances
                Initialize replication
                Create backups
    '''
    global installation1_prefix
    global installation2_prefix

    # allocate master1 on a given deployement
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Args for the master1 instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    # args_instance is shared, so copy before reusing it for master2 below.
    args_master = args_instance.copy()
    master1.allocate(args_master)

    # allocate master1 on a given deployement
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix

    # Args for the consumer instance
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_master = args_instance.copy()
    master2.allocate(args_master)

    # Get the status of the backups
    backup_master1 = master1.checkBackupFS()
    backup_master2 = master2.checkBackupFS()

    # Get the status of the instance and restart it if it exists
    instance_master1 = master1.exists()
    if instance_master1:
        master1.stop(timeout=10)
        master1.start(timeout=10)

    instance_master2 = master2.exists()
    if instance_master2:
        master2.stop(timeout=10)
        master2.start(timeout=10)

    if backup_master1 and backup_master2:
        # The backups exist, assuming they are correct
        # we just re-init the instances with them
        if not instance_master1:
            master1.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master1.open()

        if not instance_master2:
            master2.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master2.open()

        # restore master1 from backup (instance must be stopped for restore)
        master1.stop(timeout=10)
        master1.restoreFS(backup_master1)
        master1.start(timeout=10)

        # restore master2 from backup
        master2.stop(timeout=10)
        master2.restoreFS(backup_master2)
        master2.start(timeout=10)
    else:
        # We should be here only in two conditions
        #      - This is the first time a test involve master-consumer
        #        so we need to create everything
        #      - Something weird happened (instance/backup destroyed)
        #        so we discard everything and recreate all

        # Remove all the backups. So even if we have a specific backup file
        # (e.g backup_master) we clear all backups that an instance my have created
        if backup_master1:
            master1.clearBackupFS()
        if backup_master2:
            master2.clearBackupFS()

        # Remove all the instances
        if instance_master1:
            master1.delete()
        if instance_master2:
            master2.delete()

        # Create the instances
        master1.create()
        master1.open()
        master2.create()
        master2.open()

        #
        # Now prepare the Master-Consumer topology
        #
        # First Enable replication
        master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
        master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

        # Initialize the supplier->consumer
        properties = {RA_NAME:      r'meTo_$host:$port',
                      RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)

        if not repl_agreement:
            log.fatal("Fail to create a replica agreement")
            sys.exit(1)

        log.debug("%s created" % repl_agreement)

        # Reverse agreement (master2 -> master1) so the topology is MMR.
        properties = {RA_NAME:      r'meTo_$host:$port',
                      RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)

        master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
        master1.waitForReplInit(repl_agreement)

        # Check replication is working fine: add on M1, poll on M2
        # (up to ~11 seconds before giving up).
        master1.add_s(Entry((TEST_REPL_DN, {
                                            'objectclass': "top person".split(),
                                            'sn': 'test_repl',
                                            'cn': 'test_repl'})))
        loop = 0
        ent = None
        while loop <= 10:
            try:
                ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
                break
            except ldap.NO_SUCH_OBJECT:
                time.sleep(1)
                loop += 1
        if ent is None:
            assert False

        # Time to create the backups (instances must be stopped for backupFS)
        master1.stop(timeout=10)
        master1.backupfile = master1.backupFS()
        master1.start(timeout=10)

        master2.stop(timeout=10)
        master2.backupfile = master2.backupFS()
        master2.start(timeout=10)

    # clear the tmp directory
    master1.clearTmpDir(__file__)

    #
    # Here we have two instances master and consumer
    # with replication working. Either coming from a backup recovery
    # or from a fresh (re)init
    # Time to return the topology
    return TopologyMaster1Master2(master1, master2)
def test_ticket47676_init(topology):
    """
        It adds
           - Objectclass with MAY 'member'
           - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation
        It deletes the anonymous aci
    """
    # Add the test objectclass (MAY member) to M1's schema; schema replication
    # should later push it to M2.
    topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME)
    new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must = MUST, may = MAY)
    topology.master1.schema.add_schema('objectClasses', new_oc)

    # entry used to bind with
    topology.master1.log.info("Add %s" % BIND_DN)
    topology.master1.add_s(Entry((BIND_DN, {
                                            'objectclass': "top person".split(),
                                            'sn':           BIND_NAME,
                                            'cn':           BIND_NAME,
                                            'userpassword': BIND_PW})))

    # enable acl error logging
    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))]  # ACL + REPL
    topology.master1.modify_s(DN_CONFIG, mod)
    topology.master2.modify_s(DN_CONFIG, mod)

    # add dummy entries (later used as 'member' values of the test entry)
    for cpt in range(MAX_OTHERS):
        name = "%s%d" % (OTHER_NAME, cpt)
        topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
                                                                    'objectclass': "top person".split(),
                                                                    'sn': name,
                                                                    'cn': name})))
def test_ticket47676_skip_oc_at(topology):
    '''
        This test ADD an entry on MASTER1 where 47676 is fixed. Then it checks that entry is replicated
        on MASTER2 (even if on MASTER2 47676 is NOT fixed). Then update on MASTER2.
        If the schema has successfully been pushed, updating Master2 should succeed
    '''
    topology.master1.log.info("\n\n######################### ADD ######################\n")

    # bind as 'cn=Directory manager'
    topology.master1.log.info("Bind as %s and add the add the entry with specific oc" % DN_DM)
    topology.master1.simple_bind_s(DN_DM, PASSWORD)

    # Prepare the entry with multivalued members: the dummy entries created
    # in test_ticket47676_init plus the bind entry.
    entry = Entry(ENTRY_DN)
    entry.setValues('objectclass', 'top', 'person', 'OCticket47676')
    entry.setValues('sn', ENTRY_NAME)
    entry.setValues('cn', ENTRY_NAME)
    entry.setValues('postalAddress', 'here')
    entry.setValues('postalCode', '1234')
    members = []
    for cpt in range(MAX_OTHERS):
        name = "%s%d" % (OTHER_NAME, cpt)
        members.append("cn=%s,%s" % (name, SUFFIX))
    members.append(BIND_DN)
    entry.setValues('member', members)

    topology.master1.log.info("Try to add Add  %s should be successful" % ENTRY_DN)
    topology.master1.add_s(entry)

    #
    # Now check the entry as been replicated
    #
    topology.master2.simple_bind_s(DN_DM, PASSWORD)
    topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN)
    # Poll M2 for up to ~22s (11 attempts, 2s apart) until the entry shows up.
    loop = 0
    while loop <= 10:
        try:
            ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(2)
            loop += 1
    # loop > 10 means the entry never arrived on M2
    assert loop <= 10

    # Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2)
    topology.master1.log.info("Update  %s on M2" % ENTRY_DN)
    mod = [(ldap.MOD_REPLACE, 'description', 'test_add')]
    topology.master2.modify_s(ENTRY_DN, mod)

    # Poll M1 until the modification has replicated back.
    topology.master1.simple_bind_s(DN_DM, PASSWORD)
    loop = 0
    while loop <= 10:
        ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
        if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'):
            break
        time.sleep(1)
        loop += 1

    assert ent.getValue('description') == 'test_add'
def test_ticket47676_reject_action(topology):
    """Check the supplier schema-update policy: with a 'reject' rule on
    OC_NAME, M1 must keep replicating entry updates but must NOT push its
    schema to M2; once the rule is removed, the schema must be pushed.
    """
    topology.master1.log.info("\n\n######################### REJECT ACTION ######################\n")

    topology.master1.simple_bind_s(DN_DM, PASSWORD)
    topology.master2.simple_bind_s(DN_DM, PASSWORD)

    # make master1 to refuse to push the schema if OC_NAME is present in consumer schema
    mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))]  # ACL + REPL
    topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)

    # Restart is required to take into account that policy
    topology.master1.stop(timeout=10)
    topology.master1.start(timeout=10)

    # Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema
    topology.master1.log.info("Add %s on M1" % OC2_NAME)
    new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY)
    topology.master1.schema.add_schema('objectClasses', new_oc)

    # Safety checking that the schema has been updated on M1
    topology.master1.log.info("Check %s is in M1" % OC2_NAME)
    ent = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
    assert ent.hasAttr('objectclasses')
    found = False
    for objectclass in ent.getValues('objectclasses'):
        if str(objectclass).find(OC2_NAME) >= 0:
            found = True
            break
    assert found

    # Do an update of M1 so that M1 will try to push the schema
    topology.master1.log.info("Update  %s on M1" % ENTRY_DN)
    mod = [(ldap.MOD_REPLACE, 'description', 'test_reject')]
    topology.master1.modify_s(ENTRY_DN, mod)

    # Check the replication occured and so also M1 attempted to push the schema
    topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
    loop = 0
    while loop <= 10:
        ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
        if ent.hasAttr('description') and ent.getValue('description') == 'test_reject':
            # update was replicated
            break
        time.sleep(2)
        loop += 1
    assert loop <= 10

    # Check that the schema has not been pushed
    topology.master1.log.info("Check %s is not in M2" % OC2_NAME)
    ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
    assert ent.hasAttr('objectclasses')
    found = False
    for objectclass in ent.getValues('objectclasses'):
        if str(objectclass).find(OC2_NAME) >= 0:
            found = True
            break
    assert not found

    topology.master1.log.info("\n\n######################### NO MORE REJECT ACTION ######################\n")

    # make master1 to do no specific action on OC_NAME
    mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))]  # ACL + REPL
    topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)

    # Restart is required to take into account that policy
    topology.master1.stop(timeout=10)
    topology.master1.start(timeout=10)

    # Do an update of M1 so that M1 will try to push the schema
    topology.master1.log.info("Update  %s on M1" % ENTRY_DN)
    mod = [(ldap.MOD_REPLACE, 'description', 'test_no_more_reject')]
    topology.master1.modify_s(ENTRY_DN, mod)

    # Check the replication occured and so also M1 attempted to push the schema
    topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
    loop = 0
    while loop <= 10:
        ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
        if ent.hasAttr('description') and ent.getValue('description') == 'test_no_more_reject':
            # update was replicated
            break
        time.sleep(2)
        loop += 1
    assert loop <= 10

    # Check that the schema has been pushed
    topology.master1.log.info("Check %s is in M2" % OC2_NAME)
    ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
    assert ent.hasAttr('objectclasses')
    found = False
    for objectclass in ent.getValues('objectclasses'):
        if str(objectclass).find(OC2_NAME) >= 0:
            found = True
            break
    assert found
  382. def test_ticket47676_final(topology):
  383. topology.master1.delete()
  384. topology.master2.delete()
def run_isolated():
    '''
        run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
        To run isolated without py.test, you need to
            - edit this file and comment '@pytest.fixture' line before 'topology' function.
            - set the installation prefix
            - run this program
    '''
    global installation1_prefix
    global installation2_prefix
    installation1_prefix = None
    installation2_prefix = None

    # 'topology' is normally a pytest fixture; with the decorator commented
    # out (see docstring) it is a plain function — the argument replaces the
    # pytest 'request' parameter.
    topo = topology(True)
    topo.master1.log.info("\n\n######################### Ticket 47676 ######################\n")
    # Run the test cases in their required order: init, then the two
    # scenarios, then cleanup.
    test_ticket47676_init(topo)
    test_ticket47676_skip_oc_at(topo)
    test_ticket47676_reject_action(topo)
    test_ticket47676_final(topo)
# Script entry point: run the suite standalone (see run_isolated docstring).
if __name__ == '__main__':
    run_isolated()