ticket47676_test.py 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406
  1. # --- BEGIN COPYRIGHT BLOCK ---
  2. # Copyright (C) 2015 Red Hat, Inc.
  3. # All rights reserved.
  4. #
  5. # License: GPL (version 3 or any later version).
  6. # See LICENSE for details.
  7. # --- END COPYRIGHT BLOCK ---
  8. #
  9. '''
  10. Created on Nov 7, 2013
  11. @author: tbordaz
  12. '''
  13. import os
  14. import sys
  15. import time
  16. import ldap
  17. import logging
  18. import pytest
  19. from lib389 import DirSrv, Entry, tools
  20. from lib389.tools import DirSrvTools
  21. from lib389._constants import *
  22. from lib389.properties import *
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

#
# important part. We can deploy Master1 and Master2 on different versions
#
installation1_prefix = None
installation2_prefix = None

SCHEMA_DN = "cn=schema"
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX

# First objectclass, added before replication so both masters know it
OC_NAME = 'OCticket47676'
OC_OID_EXT = 2
MUST = "(postalAddress $ postalCode)"
MAY = "(member $ street)"

# Second objectclass, added on M1 only to bump the schema CSN and
# trigger a schema push attempt
OC2_NAME = 'OC2ticket47676'
OC2_OID_EXT = 3
MUST_2 = "(postalAddress $ postalCode)"
MAY_2 = "(member $ street)"

# Schema replication update-policy config entries (supplier/consumer side)
REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config"
REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"

# Dummy entries used as 'member' values
OTHER_NAME = 'other_entry'
MAX_OTHERS = 10

# Entry used for non-Directory-Manager binds
BIND_NAME = 'bind_entry'
BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
BIND_PW = 'password'

# Entry added/updated to exercise replication
ENTRY_NAME = 'test_entry'
ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
ENTRY_OC = "top person %s" % OC_NAME

# OID prefix used by _oc_definition to build each objectclass OID
BASE_OID = "1.2.3.4.5.6.7.8.9.10"
  51. def _oc_definition(oid_ext, name, must=None, may=None):
  52. oid = "%s.%d" % (BASE_OID, oid_ext)
  53. desc = 'To test ticket 47490'
  54. sup = 'person'
  55. if not must:
  56. must = MUST
  57. if not may:
  58. may = MAY
  59. new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
  60. return new_oc
  61. class TopologyMaster1Master2(object):
  62. def __init__(self, master1, master2):
  63. master1.open()
  64. self.master1 = master1
  65. master2.open()
  66. self.master2 = master2
@pytest.fixture(scope="module")
def topology(request):
    '''
    This fixture is used to create a replicated topology for the 'module'.
    The replicated topology is MASTER1 <-> Master2.

    Both instances are deleted (if they exist), recreated from scratch,
    replication is enabled in both directions, and a full online init
    of M2 from M1 is performed.  A finalizer removes both instances at
    the end of the module.
    '''
    global installation1_prefix
    global installation2_prefix

    # allocate master1 on a given deployement
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Args for the master1 instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master1.allocate(args_master)

    # allocate master2 on a given deployement
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix

    # Args for the master2 instance
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_master = args_instance.copy()
    master2.allocate(args_master)

    # Get the status of the instances
    instance_master1 = master1.exists()
    instance_master2 = master2.exists()

    # Remove any pre-existing instances so we start from a clean state
    if instance_master1:
        master1.delete()
    if instance_master2:
        master2.delete()

    # Create the instances
    master1.create()
    master1.open()
    master2.create()
    master2.open()

    #
    # Now prepare the Master-Master topology
    #
    # First Enable replication on both sides
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

    # Replication agreement M1 -> M2
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
    if not repl_agreement:
        log.fatal("Fail to create a replica agreement")
        sys.exit(1)
    log.debug("%s created" % repl_agreement)

    # Replication agreement M2 -> M1 (same properties)
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)

    # Online init of M2 from M1, then wait for it to complete
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(repl_agreement)

    # Check replication is working fine
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    def fin():
        # Module-scope teardown: remove both instances
        master1.delete()
        master2.delete()
    request.addfinalizer(fin)

    # Here we have two master instances with replication working.
    return TopologyMaster1Master2(master1, master2)
  146. def test_ticket47676_init(topology):
  147. """
  148. It adds
  149. - Objectclass with MAY 'member'
  150. - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation
  151. It deletes the anonymous aci
  152. """
  153. topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME)
  154. new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must = MUST, may = MAY)
  155. topology.master1.schema.add_schema('objectClasses', new_oc)
  156. # entry used to bind with
  157. topology.master1.log.info("Add %s" % BIND_DN)
  158. topology.master1.add_s(Entry((BIND_DN, {
  159. 'objectclass': "top person".split(),
  160. 'sn': BIND_NAME,
  161. 'cn': BIND_NAME,
  162. 'userpassword': BIND_PW})))
  163. # enable acl error logging
  164. mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))] # ACL + REPL
  165. topology.master1.modify_s(DN_CONFIG, mod)
  166. topology.master2.modify_s(DN_CONFIG, mod)
  167. # add dummy entries
  168. for cpt in range(MAX_OTHERS):
  169. name = "%s%d" % (OTHER_NAME, cpt)
  170. topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
  171. 'objectclass': "top person".split(),
  172. 'sn': name,
  173. 'cn': name})))
def test_ticket47676_skip_oc_at(topology):
    '''
    This test ADD an entry on MASTER1 where 47676 is fixed. Then it checks that entry is replicated
    on MASTER2 (even if on MASTER2 47676 is NOT fixed). Then update on MASTER2.
    If the schema has successfully been pushed, updating Master2 should succeed
    '''
    topology.master1.log.info("\n\n######################### ADD ######################\n")

    # bind as 'cn=Directory manager'
    topology.master1.log.info("Bind as %s and add the add the entry with specific oc" % DN_DM)
    topology.master1.simple_bind_s(DN_DM, PASSWORD)

    # Prepare the entry with multivalued members (the custom OC allows 'member')
    entry = Entry(ENTRY_DN)
    entry.setValues('objectclass', 'top', 'person', 'OCticket47676')
    entry.setValues('sn', ENTRY_NAME)
    entry.setValues('cn', ENTRY_NAME)
    entry.setValues('postalAddress', 'here')
    entry.setValues('postalCode', '1234')
    members = []
    for cpt in range(MAX_OTHERS):
        name = "%s%d" % (OTHER_NAME, cpt)
        members.append("cn=%s,%s" % (name, SUFFIX))
    members.append(BIND_DN)
    entry.setValues('member', members)

    topology.master1.log.info("Try to add Add  %s should be successful" % ENTRY_DN)
    topology.master1.add_s(entry)

    #
    # Now check the entry as been replicated
    #
    topology.master2.simple_bind_s(DN_DM, PASSWORD)
    topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN)
    # Poll up to ~20s (11 tries x 2s) for the ADD to replicate to M2
    loop = 0
    while loop <= 10:
        try:
            ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(2)
            loop += 1
    # loop == 11 here means the entry never appeared on M2
    assert loop <= 10

    # Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2)
    topology.master1.log.info("Update  %s on M2" % ENTRY_DN)
    mod = [(ldap.MOD_REPLACE, 'description', 'test_add')]
    topology.master2.modify_s(ENTRY_DN, mod)

    # Poll M1 until the MOD done on M2 is replicated back
    topology.master1.simple_bind_s(DN_DM, PASSWORD)
    loop = 0
    while loop <= 10:
        ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
        if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'):
            break
        time.sleep(1)
        loop += 1

    # Final check on the last-read entry; fails if the MOD never replicated
    assert ent.getValue('description') == 'test_add'
def test_ticket47676_reject_action(topology):
    """Check the supplier 'schemaUpdateObjectclassReject' policy.

    First phase: while OC_NAME is listed in the supplier reject policy,
    M1 must NOT push its schema to M2 even though updates replicate.
    Second phase: once the policy value is removed, a further update must
    trigger a schema push and OC2_NAME must appear on M2.
    """
    topology.master1.log.info("\n\n######################### REJECT ACTION ######################\n")

    topology.master1.simple_bind_s(DN_DM, PASSWORD)
    topology.master2.simple_bind_s(DN_DM, PASSWORD)

    # make master1 to refuse to push the schema if OC_NAME is present in consumer schema
    mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))]
    topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)

    # Restart is required to take into account that policy
    topology.master1.stop(timeout=10)
    topology.master1.start(timeout=10)

    # Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema
    topology.master1.log.info("Add %s on M1" % OC2_NAME)
    new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY)
    topology.master1.schema.add_schema('objectClasses', new_oc)

    # Safety checking that the schema has been updated on M1
    topology.master1.log.info("Check %s is in M1" % OC2_NAME)
    ent = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
    assert ent.hasAttr('objectclasses')
    found = False
    for objectclass in ent.getValues('objectclasses'):
        # substring match: the OC definition line contains the OC name
        if str(objectclass).find(OC2_NAME) >= 0:
            found = True
            break
    assert found

    # Do an update of M1 so that M1 will try to push the schema
    topology.master1.log.info("Update %s on M1" % ENTRY_DN)
    mod = [(ldap.MOD_REPLACE, 'description', 'test_reject')]
    topology.master1.modify_s(ENTRY_DN, mod)

    # Check the replication occured and so also M1 attempted to push the schema
    topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
    # Poll up to ~20s for the MOD to replicate to M2
    loop = 0
    while loop <= 10:
        ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
        if ent.hasAttr('description') and ent.getValue('description') == 'test_reject':
            # update was replicated
            break
        time.sleep(2)
        loop += 1
    assert loop <= 10

    # Check that the schema has NOT been pushed (reject policy in effect)
    topology.master1.log.info("Check %s is not in M2" % OC2_NAME)
    ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
    assert ent.hasAttr('objectclasses')
    found = False
    for objectclass in ent.getValues('objectclasses'):
        if str(objectclass).find(OC2_NAME) >= 0:
            found = True
            break
    assert not found

    topology.master1.log.info("\n\n######################### NO MORE REJECT ACTION ######################\n")

    # make master1 to do no specific action on OC_NAME
    mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))]
    topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)

    # Restart is required to take into account that policy
    topology.master1.stop(timeout=10)
    topology.master1.start(timeout=10)

    # Do an update of M1 so that M1 will try to push the schema
    topology.master1.log.info("Update %s on M1" % ENTRY_DN)
    mod = [(ldap.MOD_REPLACE, 'description', 'test_no_more_reject')]
    topology.master1.modify_s(ENTRY_DN, mod)

    # Check the replication occured and so also M1 attempted to push the schema
    topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
    loop = 0
    while loop <= 10:
        ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
        if ent.hasAttr('description') and ent.getValue('description') == 'test_no_more_reject':
            # update was replicated
            break
        time.sleep(2)
        loop += 1
    assert loop <= 10

    # Check that the schema HAS been pushed this time
    topology.master1.log.info("Check %s is in M2" % OC2_NAME)
    ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
    assert ent.hasAttr('objectclasses')
    found = False
    for objectclass in ent.getValues('objectclasses'):
        if str(objectclass).find(OC2_NAME) >= 0:
            found = True
            break
    assert found
def test_ticket47676_final(topology):
    """Marker test: reaching this point means the whole suite passed."""
    log.info('Testcase PASSED')
def run_isolated():
    '''
    run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
    To run isolated without py.test, you need to
        - edit this file and comment '@pytest.fixture' line before 'topology' function.
        - set the installation prefix
        - run this program
    '''
    global installation1_prefix
    global installation2_prefix
    installation1_prefix = None
    installation2_prefix = None

    # NOTE(review): topology() is called directly with a positional arg;
    # this only works once the @pytest.fixture decorator is commented out,
    # as described in the docstring above.
    topo = topology(True)
    topo.master1.log.info("\n\n######################### Ticket 47676 ######################\n")
    test_ticket47676_init(topo)
    test_ticket47676_skip_oc_at(topo)
    test_ticket47676_reject_action(topo)
    test_ticket47676_final(topo)
# Allow direct execution outside of pytest (see run_isolated docstring).
if __name__ == '__main__':
    run_isolated()