'''
ticket47676_test.py

Created on Nov 7, 2013

@author: tbordaz
'''
import os
import sys
import time
import ldap
import logging
import socket
import pytest
import re
from lib389 import DirSrv, Entry, tools
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from constants import *
from lib389._constants import REPLICAROLE_MASTER

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

#
# important part. We can deploy Master1 and Master2 on different versions
#
installation1_prefix = None
installation2_prefix = None

SCHEMA_DN = "cn=schema"
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX

OC_NAME = 'OCticket47676'
OC_OID_EXT = 2
MUST = "(postalAddress $ postalCode)"
MAY = "(member $ street)"

OC2_NAME = 'OC2ticket47676'
OC2_OID_EXT = 3
MUST_2 = "(postalAddress $ postalCode)"
MAY_2 = "(member $ street)"

REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config"
REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"

OTHER_NAME = 'other_entry'
MAX_OTHERS = 10

BIND_NAME = 'bind_entry'
BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
BIND_PW = 'password'

ENTRY_NAME = 'test_entry'
ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
ENTRY_OC = "top person %s" % OC_NAME

BASE_OID = "1.2.3.4.5.6.7.8.9.10"

def _oc_definition(oid_ext, name, must=None, may=None):
    oid = "%s.%d" % (BASE_OID, oid_ext)
    desc = 'To test ticket 47676'
    sup = 'person'
    if not must:
        must = MUST
    if not may:
        may = MAY

    new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
    return new_oc
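

# For reference, with the defaults above _oc_definition(OC_OID_EXT, OC_NAME) builds
# an objectclass definition of the form:
#   ( 1.2.3.4.5.6.7.8.9.10.2 NAME 'OCticket47676' DESC '<desc>' SUP person AUXILIARY
#     MUST (postalAddress $ postalCode) MAY (member $ street) )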


class TopologyMaster1Master2(object):
    def __init__(self, master1, master2):
        master1.open()
        self.master1 = master1

        master2.open()
        self.master2 = master2


@pytest.fixture(scope="module")
def topology(request):
    '''
    This fixture is used to create a replicated topology for the 'module'.
    The replicated topology is MASTER1 <-> MASTER2.

    At the beginning, a master1 instance and/or a master2 instance may already exist.
    There may also be a backup for master1 and/or master2.

    Principle:
        If the master1 instance exists:
            restart it
        If the master2 instance exists:
            restart it
        If a backup of master1 AND a backup of master2 exist:
            create or rebind to master1
            create or rebind to master2

            restore master1 from backup
            restore master2 from backup
        else:
            Cleanup everything
                remove instances
                remove backups
            Create instances
            Initialize replication
            Create backups
    '''
    global installation1_prefix
    global installation2_prefix

    # allocate master1 on a given deployment
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Args for the master1 instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master1.allocate(args_master)

    # allocate master2 on a given deployment
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix

    # Args for the master2 instance
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_master = args_instance.copy()
    master2.allocate(args_master)

    # Get the status of the backups
    backup_master1 = master1.checkBackupFS()
    backup_master2 = master2.checkBackupFS()

    # Get the status of the instances and restart them if they exist
    instance_master1 = master1.exists()
    if instance_master1:
        master1.stop(timeout=10)
        master1.start(timeout=10)

    instance_master2 = master2.exists()
    if instance_master2:
        master2.stop(timeout=10)
        master2.start(timeout=10)

    if backup_master1 and backup_master2:
        # The backups exist, assuming they are correct
        # we just re-init the instances with them
        if not instance_master1:
            master1.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master1.open()

        if not instance_master2:
            master2.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master2.open()

        # restore master1 from backup
        master1.stop(timeout=10)
        master1.restoreFS(backup_master1)
        master1.start(timeout=10)

        # restore master2 from backup
        master2.stop(timeout=10)
        master2.restoreFS(backup_master2)
        master2.start(timeout=10)
    else:
        # We should be here only in two conditions
        #  - This is the first time a test involves this master-master topology,
        #    so we need to create everything
        #  - Something weird happened (instance/backup destroyed),
        #    so we discard everything and recreate all

        # Remove all the backups. So even if we have a specific backup file
        # (e.g. backup_master) we clear all backups that an instance may have created
        if backup_master1:
            master1.clearBackupFS()
        if backup_master2:
            master2.clearBackupFS()

        # Remove all the instances
        if instance_master1:
            master1.delete()
        if instance_master2:
            master2.delete()

        # Create the instances
        master1.create()
        master1.open()
        master2.create()
        master2.open()

        #
        # Now prepare the Master-Master topology
        #
        # First Enable replication
        master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
        master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

        # Initialize the supplier->consumer agreement (master1 -> master2)
        properties = {RA_NAME:           r'meTo_$host:$port',
                      RA_BINDDN:         defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW:         defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD:         defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)

        if not repl_agreement:
            log.fatal("Failed to create a replica agreement")
            sys.exit(1)
        log.debug("%s created" % repl_agreement)

        properties = {RA_NAME:           r'meTo_$host:$port',
                      RA_BINDDN:         defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW:         defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD:         defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)

        master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
        master1.waitForReplInit(repl_agreement)
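
        # Note: agreement.init() above kicks off a total (online) initialization of
        # master2 over the newly created agreement, and waitForReplInit() polls that
        # agreement until the initialization is reported as finished.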

        # Check that replication is working fine
        master1.add_s(Entry((TEST_REPL_DN, {
                                            'objectclass': "top person".split(),
                                            'sn': 'test_repl',
                                            'cn': 'test_repl'})))
        loop = 0
        while loop <= 10:
            try:
                ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
                break
            except ldap.NO_SUCH_OBJECT:
                time.sleep(1)
                loop += 1

        # Time to create the backups
        master1.stop(timeout=10)
        master1.backupfile = master1.backupFS()
        master1.start(timeout=10)

        master2.stop(timeout=10)
        master2.backupfile = master2.backupFS()
        master2.start(timeout=10)

    #
    # Here we have two master instances with replication working,
    # either coming from a backup recovery or from a fresh (re)init.
    # Time to return the topology
    return TopologyMaster1Master2(master1, master2)
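

# The tests below repeatedly poll a replica until an entry or attribute value shows
# up. The helper below is only an illustrative sketch of that pattern; it is NOT used
# by the tests in this file, and its name and 'timeout' parameter are made up here:
def _wait_for_entry(instance, dn, timeout=10):
    """Return the entry once it is visible on 'instance', or None on timeout."""
    for _ in range(timeout):
        try:
            return instance.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
    return None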


def test_ticket47676_init(topology):
    """
    It adds
       - an objectclass with MAY 'member'
       - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation
    """
    topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME)
    new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must=MUST, may=MAY)
    topology.master1.schema.add_schema('objectClasses', new_oc)

    # entry used to bind with
    topology.master1.log.info("Add %s" % BIND_DN)
    topology.master1.add_s(Entry((BIND_DN, {
                                            'objectclass': "top person".split(),
                                            'sn': BIND_NAME,
                                            'cn': BIND_NAME,
                                            'userpassword': BIND_PW})))

    # enable acl error logging
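    # (in nsslapd-errorlog-level, 128 enables access-control logging and 8192 enables
    #  replication logging, per the 389 DS error-log level documentation)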
    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))]  # ACL + REPL
    topology.master1.modify_s(DN_CONFIG, mod)
    topology.master2.modify_s(DN_CONFIG, mod)

    # add dummy entries
    for cpt in range(MAX_OTHERS):
        name = "%s%d" % (OTHER_NAME, cpt)
        topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
                                                    'objectclass': "top person".split(),
                                                    'sn': name,
                                                    'cn': name})))


def test_ticket47676_skip_oc_at(topology):
    '''
    This test ADDs an entry on MASTER1, where 47676 is fixed. Then it checks that the
    entry is replicated to MASTER2 (even if 47676 is NOT fixed on MASTER2). Then it
    updates the entry on MASTER2. If the schema has successfully been pushed,
    updating MASTER2 should succeed.
    '''
    topology.master1.log.info("\n\n######################### ADD ######################\n")

    # bind as 'cn=Directory manager'
    topology.master1.log.info("Bind as %s and add the entry with the specific oc" % DN_DM)
    topology.master1.simple_bind_s(DN_DM, PASSWORD)

    # Prepare the entry with multivalued members
    entry = Entry(ENTRY_DN)
    entry.setValues('objectclass', 'top', 'person', 'OCticket47676')
    entry.setValues('sn', ENTRY_NAME)
    entry.setValues('cn', ENTRY_NAME)
    entry.setValues('postalAddress', 'here')
    entry.setValues('postalCode', '1234')
    members = []
    for cpt in range(MAX_OTHERS):
        name = "%s%d" % (OTHER_NAME, cpt)
        members.append("cn=%s,%s" % (name, SUFFIX))
    members.append(BIND_DN)
    entry.setValues('member', members)

    topology.master1.log.info("Adding %s should be successful" % ENTRY_DN)
    topology.master1.add_s(entry)

    #
    # Now check the entry has been replicated
    #
    topology.master2.simple_bind_s(DN_DM, PASSWORD)
    topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN)
    loop = 0
    while loop <= 10:
        try:
            ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(2)
            loop += 1
    assert loop <= 10

    # Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2)
    topology.master1.log.info("Update %s on M2" % ENTRY_DN)
    mod = [(ldap.MOD_REPLACE, 'description', 'test_add')]
    topology.master2.modify_s(ENTRY_DN, mod)

    topology.master1.simple_bind_s(DN_DM, PASSWORD)
    loop = 0
    while loop <= 10:
        ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
        if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'):
            break
        time.sleep(1)
        loop += 1
    assert ent.getValue('description') == 'test_add'
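

# test_ticket47676_reject_action below scans 'cn=schema' three times to see whether an
# objectclass name is present. That check could be factored into a helper such as this
# sketch (not used by the tests in this file; the name '_oc_in_schema' is made up here):
def _oc_in_schema(instance, oc_name):
    """Return True if 'oc_name' appears in the objectclasses of cn=schema."""
    ent = instance.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
    for objectclass in ent.getValues('objectclasses'):
        if str(objectclass).find(oc_name) >= 0:
            return True
    return False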


def test_ticket47676_reject_action(topology):
    topology.master1.log.info("\n\n######################### REJECT ACTION ######################\n")

    topology.master1.simple_bind_s(DN_DM, PASSWORD)
    topology.master2.simple_bind_s(DN_DM, PASSWORD)

    # make master1 refuse to push the schema if OC_NAME is present in the consumer schema
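    # (schemaUpdateObjectclassReject is an attribute of the supplier schema update
    #  policy entry, REPL_SCHEMA_POLICY_SUPPLIER. As exercised below: while OC_NAME is
    #  listed there, master1 does not push its schema to master2; once the value is
    #  deleted later in this test, the schema push resumes.)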
    mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))]
    topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)

    # Restart is required to take this policy into account
    topology.master1.stop(timeout=10)
    topology.master1.start(timeout=10)

    # Add a new OC on M1 so that the schema CSN will change and M1 will try to push the schema
    topology.master1.log.info("Add %s on M1" % OC2_NAME)
    new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY)
    topology.master1.schema.add_schema('objectClasses', new_oc)

    # Sanity check that the schema has been updated on M1
    topology.master1.log.info("Check %s is in M1" % OC2_NAME)
    ent = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
    assert ent.hasAttr('objectclasses')
    found = False
    for objectclass in ent.getValues('objectclasses'):
        if str(objectclass).find(OC2_NAME) >= 0:
            found = True
            break
    assert found

    # Do an update on M1 so that M1 will try to push the schema
    topology.master1.log.info("Update %s on M1" % ENTRY_DN)
    mod = [(ldap.MOD_REPLACE, 'description', 'test_reject')]
    topology.master1.modify_s(ENTRY_DN, mod)

    # Check the replication occurred and so M1 also attempted to push the schema
    topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
    loop = 0
    while loop <= 10:
        ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
        if ent.hasAttr('description') and ent.getValue('description') == 'test_reject':
            # update was replicated
            break
        time.sleep(2)
        loop += 1
    assert loop <= 10

    # Check that the schema has not been pushed
    topology.master1.log.info("Check %s is not in M2" % OC2_NAME)
    ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
    assert ent.hasAttr('objectclasses')
    found = False
    for objectclass in ent.getValues('objectclasses'):
        if str(objectclass).find(OC2_NAME) >= 0:
            found = True
            break
    assert not found
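
    # At this point the user update reached M2 but the new objectclass did not:
    # the reject policy kept master1 from pushing its schema while data replication
    # continued normally.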

    topology.master1.log.info("\n\n######################### NO MORE REJECT ACTION ######################\n")

    # make master1 apply no specific action for OC_NAME
    mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))]
    topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)

    # Restart is required to take this policy into account
    topology.master1.stop(timeout=10)
    topology.master1.start(timeout=10)

    # Do an update on M1 so that M1 will try to push the schema
    topology.master1.log.info("Update %s on M1" % ENTRY_DN)
    mod = [(ldap.MOD_REPLACE, 'description', 'test_no_more_reject')]
    topology.master1.modify_s(ENTRY_DN, mod)

    # Check the replication occurred and so M1 also attempted to push the schema
    topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
    loop = 0
    while loop <= 10:
        ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
        if ent.hasAttr('description') and ent.getValue('description') == 'test_no_more_reject':
            # update was replicated
            break
        time.sleep(2)
        loop += 1
    assert loop <= 10

    # Check that the schema has been pushed
    topology.master1.log.info("Check %s is in M2" % OC2_NAME)
    ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
    assert ent.hasAttr('objectclasses')
    found = False
    for objectclass in ent.getValues('objectclasses'):
        if str(objectclass).find(OC2_NAME) >= 0:
            found = True
            break
    assert found


def test_ticket47676_final(topology):
    topology.master1.stop(timeout=10)
    topology.master2.stop(timeout=10)


def run_isolated():
    '''
    run_isolated is used to run these test cases independently of a test scheduler
    (xunit, py.test..)

    To run them in isolation without py.test, you need to
        - edit this file and comment out the '@pytest.fixture' line before the
          'topology' function
        - set the installation prefix
        - run this program
    '''
    global installation1_prefix
    global installation2_prefix
    installation1_prefix = None
    installation2_prefix = None

    topo = topology(True)
    topo.master1.log.info("\n\n######################### Ticket 47676 ######################\n")
    test_ticket47676_init(topo)

    test_ticket47676_skip_oc_at(topo)
    test_ticket47676_reject_action(topo)

    test_ticket47676_final(topo)


if __name__ == '__main__':
    run_isolated()