# ticket47676_test.py
  1. '''
  2. Created on Nov 7, 2013
  3. @author: tbordaz
  4. '''
  5. import os
  6. import sys
  7. import time
  8. import ldap
  9. import logging
  10. import socket
  11. import time
  12. import logging
  13. import pytest
  14. import re
  15. from lib389 import DirSrv, Entry, tools
  16. from lib389.tools import DirSrvTools
  17. from lib389._constants import *
  18. from lib389.properties import *
  19. from constants import *
  20. from lib389._constants import REPLICAROLE_MASTER
  21. logging.getLogger(__name__).setLevel(logging.DEBUG)
  22. log = logging.getLogger(__name__)
  23. #
  24. # important part. We can deploy Master1 and Master2 on different versions
  25. #
  26. installation1_prefix = None
  27. installation2_prefix = None
  28. SCHEMA_DN = "cn=schema"
  29. TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
  30. OC_NAME = 'OCticket47676'
  31. OC_OID_EXT = 2
  32. MUST = "(postalAddress $ postalCode)"
  33. MAY = "(member $ street)"
  34. OC2_NAME = 'OC2ticket47676'
  35. OC2_OID_EXT = 3
  36. MUST_2 = "(postalAddress $ postalCode)"
  37. MAY_2 = "(member $ street)"
  38. REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config"
  39. REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
  40. OTHER_NAME = 'other_entry'
  41. MAX_OTHERS = 10
  42. BIND_NAME = 'bind_entry'
  43. BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
  44. BIND_PW = 'password'
  45. ENTRY_NAME = 'test_entry'
  46. ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
  47. ENTRY_OC = "top person %s" % OC_NAME
  48. BASE_OID = "1.2.3.4.5.6.7.8.9.10"
  49. def _oc_definition(oid_ext, name, must=None, may=None):
  50. oid = "%s.%d" % (BASE_OID, oid_ext)
  51. desc = 'To test ticket 47490'
  52. sup = 'person'
  53. if not must:
  54. must = MUST
  55. if not may:
  56. may = MAY
  57. new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
  58. return new_oc
  59. class TopologyMaster1Master2(object):
  60. def __init__(self, master1, master2):
  61. master1.open()
  62. self.master1 = master1
  63. master2.open()
  64. self.master2 = master2
  65. @pytest.fixture(scope="module")
  66. def topology(request):
  67. '''
  68. This fixture is used to create a replicated topology for the 'module'.
  69. The replicated topology is MASTER1 <-> Master2.
  70. At the beginning, It may exists a master2 instance and/or a master2 instance.
  71. It may also exists a backup for the master1 and/or the master2.
  72. Principle:
  73. If master1 instance exists:
  74. restart it
  75. If master2 instance exists:
  76. restart it
  77. If backup of master1 AND backup of master2 exists:
  78. create or rebind to master1
  79. create or rebind to master2
  80. restore master1 from backup
  81. restore master2 from backup
  82. else:
  83. Cleanup everything
  84. remove instances
  85. remove backups
  86. Create instances
  87. Initialize replication
  88. Create backups
  89. '''
  90. global installation1_prefix
  91. global installation2_prefix
  92. # allocate master1 on a given deployement
  93. master1 = DirSrv(verbose=False)
  94. if installation1_prefix:
  95. args_instance[SER_DEPLOYED_DIR] = installation1_prefix
  96. # Args for the master1 instance
  97. args_instance[SER_HOST] = HOST_MASTER_1
  98. args_instance[SER_PORT] = PORT_MASTER_1
  99. args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
  100. args_master = args_instance.copy()
  101. master1.allocate(args_master)
  102. # allocate master1 on a given deployement
  103. master2 = DirSrv(verbose=False)
  104. if installation2_prefix:
  105. args_instance[SER_DEPLOYED_DIR] = installation2_prefix
  106. # Args for the consumer instance
  107. args_instance[SER_HOST] = HOST_MASTER_2
  108. args_instance[SER_PORT] = PORT_MASTER_2
  109. args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
  110. args_master = args_instance.copy()
  111. master2.allocate(args_master)
  112. # Get the status of the backups
  113. backup_master1 = master1.checkBackupFS()
  114. backup_master2 = master2.checkBackupFS()
  115. # Get the status of the instance and restart it if it exists
  116. instance_master1 = master1.exists()
  117. if instance_master1:
  118. master1.stop(timeout=10)
  119. master1.start(timeout=10)
  120. instance_master2 = master2.exists()
  121. if instance_master2:
  122. master2.stop(timeout=10)
  123. master2.start(timeout=10)
  124. if backup_master1 and backup_master2:
  125. # The backups exist, assuming they are correct
  126. # we just re-init the instances with them
  127. if not instance_master1:
  128. master1.create()
  129. # Used to retrieve configuration information (dbdir, confdir...)
  130. master1.open()
  131. if not instance_master2:
  132. master2.create()
  133. # Used to retrieve configuration information (dbdir, confdir...)
  134. master2.open()
  135. # restore master1 from backup
  136. master1.stop(timeout=10)
  137. master1.restoreFS(backup_master1)
  138. master1.start(timeout=10)
  139. # restore master2 from backup
  140. master2.stop(timeout=10)
  141. master2.restoreFS(backup_master2)
  142. master2.start(timeout=10)
  143. else:
  144. # We should be here only in two conditions
  145. # - This is the first time a test involve master-consumer
  146. # so we need to create everything
  147. # - Something weird happened (instance/backup destroyed)
  148. # so we discard everything and recreate all
  149. # Remove all the backups. So even if we have a specific backup file
  150. # (e.g backup_master) we clear all backups that an instance my have created
  151. if backup_master1:
  152. master1.clearBackupFS()
  153. if backup_master2:
  154. master2.clearBackupFS()
  155. # Remove all the instances
  156. if instance_master1:
  157. master1.delete()
  158. if instance_master2:
  159. master2.delete()
  160. # Create the instances
  161. master1.create()
  162. master1.open()
  163. master2.create()
  164. master2.open()
  165. #
  166. # Now prepare the Master-Consumer topology
  167. #
  168. # First Enable replication
  169. master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
  170. master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
  171. # Initialize the supplier->consumer
  172. properties = {RA_NAME: r'meTo_$host:$port',
  173. RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
  174. RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
  175. RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
  176. RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
  177. repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
  178. if not repl_agreement:
  179. log.fatal("Fail to create a replica agreement")
  180. sys.exit(1)
  181. log.debug("%s created" % repl_agreement)
  182. properties = {RA_NAME: r'meTo_$host:$port',
  183. RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
  184. RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
  185. RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
  186. RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
  187. master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
  188. master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
  189. master1.waitForReplInit(repl_agreement)
  190. # Check replication is working fine
  191. master1.add_s(Entry((TEST_REPL_DN, {
  192. 'objectclass': "top person".split(),
  193. 'sn': 'test_repl',
  194. 'cn': 'test_repl'})))
  195. loop = 0
  196. while loop <= 10:
  197. try:
  198. ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
  199. break
  200. except ldap.NO_SUCH_OBJECT:
  201. time.sleep(1)
  202. loop += 1
  203. # Time to create the backups
  204. master1.stop(timeout=10)
  205. master1.backupfile = master1.backupFS()
  206. master1.start(timeout=10)
  207. master2.stop(timeout=10)
  208. master2.backupfile = master2.backupFS()
  209. master2.start(timeout=10)
  210. # clear the tmp directory
  211. master1.clearTmpDir(__file__)
  212. #
  213. # Here we have two instances master and consumer
  214. # with replication working. Either coming from a backup recovery
  215. # or from a fresh (re)init
  216. # Time to return the topology
  217. return TopologyMaster1Master2(master1, master2)
  218. def test_ticket47676_init(topology):
  219. """
  220. It adds
  221. - Objectclass with MAY 'member'
  222. - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation
  223. It deletes the anonymous aci
  224. """
  225. topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME)
  226. new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must = MUST, may = MAY)
  227. topology.master1.schema.add_schema('objectClasses', new_oc)
  228. # entry used to bind with
  229. topology.master1.log.info("Add %s" % BIND_DN)
  230. topology.master1.add_s(Entry((BIND_DN, {
  231. 'objectclass': "top person".split(),
  232. 'sn': BIND_NAME,
  233. 'cn': BIND_NAME,
  234. 'userpassword': BIND_PW})))
  235. # enable acl error logging
  236. mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128+8192))] # ACL + REPL
  237. topology.master1.modify_s(DN_CONFIG, mod)
  238. topology.master2.modify_s(DN_CONFIG, mod)
  239. # add dummy entries
  240. for cpt in range(MAX_OTHERS):
  241. name = "%s%d" % (OTHER_NAME, cpt)
  242. topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
  243. 'objectclass': "top person".split(),
  244. 'sn': name,
  245. 'cn': name})))
  246. def test_ticket47676_skip_oc_at(topology):
  247. '''
  248. This test ADD an entry on MASTER1 where 47676 is fixed. Then it checks that entry is replicated
  249. on MASTER2 (even if on MASTER2 47676 is NOT fixed). Then update on MASTER2.
  250. If the schema has successfully been pushed, updating Master2 should succeed
  251. '''
  252. topology.master1.log.info("\n\n######################### ADD ######################\n")
  253. # bind as 'cn=Directory manager'
  254. topology.master1.log.info("Bind as %s and add the add the entry with specific oc" % DN_DM)
  255. topology.master1.simple_bind_s(DN_DM, PASSWORD)
  256. # Prepare the entry with multivalued members
  257. entry = Entry(ENTRY_DN)
  258. entry.setValues('objectclass', 'top', 'person', 'OCticket47676')
  259. entry.setValues('sn', ENTRY_NAME)
  260. entry.setValues('cn', ENTRY_NAME)
  261. entry.setValues('postalAddress', 'here')
  262. entry.setValues('postalCode', '1234')
  263. members = []
  264. for cpt in range(MAX_OTHERS):
  265. name = "%s%d" % (OTHER_NAME, cpt)
  266. members.append("cn=%s,%s" % (name, SUFFIX))
  267. members.append(BIND_DN)
  268. entry.setValues('member', members)
  269. topology.master1.log.info("Try to add Add %s should be successful" % ENTRY_DN)
  270. topology.master1.add_s(entry)
  271. #
  272. # Now check the entry as been replicated
  273. #
  274. topology.master2.simple_bind_s(DN_DM, PASSWORD)
  275. topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN)
  276. loop = 0
  277. while loop <= 10:
  278. try:
  279. ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
  280. break
  281. except ldap.NO_SUCH_OBJECT:
  282. time.sleep(2)
  283. loop += 1
  284. assert loop <= 10
  285. # Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2)
  286. topology.master1.log.info("Update %s on M2" % ENTRY_DN)
  287. mod = [(ldap.MOD_REPLACE, 'description', 'test_add')]
  288. topology.master2.modify_s(ENTRY_DN, mod)
  289. topology.master1.simple_bind_s(DN_DM, PASSWORD)
  290. loop = 0
  291. while loop <= 10:
  292. ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
  293. if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'):
  294. break
  295. time.sleep(1)
  296. loop += 1
  297. assert ent.getValue('description') == 'test_add'
  298. def test_ticket47676_reject_action(topology):
  299. topology.master1.log.info("\n\n######################### REJECT ACTION ######################\n")
  300. topology.master1.simple_bind_s(DN_DM, PASSWORD)
  301. topology.master2.simple_bind_s(DN_DM, PASSWORD)
  302. # make master1 to refuse to push the schema if OC_NAME is present in consumer schema
  303. mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME) )] # ACL + REPL
  304. topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
  305. # Restart is required to take into account that policy
  306. topology.master1.stop(timeout=10)
  307. topology.master1.start(timeout=10)
  308. # Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema
  309. topology.master1.log.info("Add %s on M1" % OC2_NAME)
  310. new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must = MUST, may = MAY)
  311. topology.master1.schema.add_schema('objectClasses', new_oc)
  312. # Safety checking that the schema has been updated on M1
  313. topology.master1.log.info("Check %s is in M1" % OC2_NAME)
  314. ent = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
  315. assert ent.hasAttr('objectclasses')
  316. found = False
  317. for objectclass in ent.getValues('objectclasses'):
  318. if str(objectclass).find(OC2_NAME) >= 0:
  319. found = True
  320. break
  321. assert found
  322. # Do an update of M1 so that M1 will try to push the schema
  323. topology.master1.log.info("Update %s on M1" % ENTRY_DN)
  324. mod = [(ldap.MOD_REPLACE, 'description', 'test_reject')]
  325. topology.master1.modify_s(ENTRY_DN, mod)
  326. # Check the replication occured and so also M1 attempted to push the schema
  327. topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
  328. loop = 0
  329. while loop <= 10:
  330. ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
  331. if ent.hasAttr('description') and ent.getValue('description') == 'test_reject':
  332. # update was replicated
  333. break
  334. time.sleep(2)
  335. loop += 1
  336. assert loop <= 10
  337. # Check that the schema has not been pushed
  338. topology.master1.log.info("Check %s is not in M2" % OC2_NAME)
  339. ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
  340. assert ent.hasAttr('objectclasses')
  341. found = False
  342. for objectclass in ent.getValues('objectclasses'):
  343. if str(objectclass).find(OC2_NAME) >= 0:
  344. found = True
  345. break
  346. assert not found
  347. topology.master1.log.info("\n\n######################### NO MORE REJECT ACTION ######################\n")
  348. # make master1 to do no specific action on OC_NAME
  349. mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME) )] # ACL + REPL
  350. topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
  351. # Restart is required to take into account that policy
  352. topology.master1.stop(timeout=10)
  353. topology.master1.start(timeout=10)
  354. # Do an update of M1 so that M1 will try to push the schema
  355. topology.master1.log.info("Update %s on M1" % ENTRY_DN)
  356. mod = [(ldap.MOD_REPLACE, 'description', 'test_no_more_reject')]
  357. topology.master1.modify_s(ENTRY_DN, mod)
  358. # Check the replication occured and so also M1 attempted to push the schema
  359. topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
  360. loop = 0
  361. while loop <= 10:
  362. ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
  363. if ent.hasAttr('description') and ent.getValue('description') == 'test_no_more_reject':
  364. # update was replicated
  365. break
  366. time.sleep(2)
  367. loop += 1
  368. assert loop <= 10
  369. # Check that the schema has been pushed
  370. topology.master1.log.info("Check %s is in M2" % OC2_NAME)
  371. ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
  372. assert ent.hasAttr('objectclasses')
  373. found = False
  374. for objectclass in ent.getValues('objectclasses'):
  375. if str(objectclass).find(OC2_NAME) >= 0:
  376. found = True
  377. break
  378. assert found
  379. def test_ticket47676_final(topology):
  380. topology.master1.stop(timeout=10)
  381. topology.master2.stop(timeout=10)
  382. def run_isolated():
  383. '''
  384. run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
  385. To run isolated without py.test, you need to
  386. - edit this file and comment '@pytest.fixture' line before 'topology' function.
  387. - set the installation prefix
  388. - run this program
  389. '''
  390. global installation1_prefix
  391. global installation2_prefix
  392. installation1_prefix = None
  393. installation2_prefix = None
  394. topo = topology(True)
  395. topo.master1.log.info("\n\n######################### Ticket 47676 ######################\n")
  396. test_ticket47676_init(topo)
  397. test_ticket47676_skip_oc_at(topo)
  398. test_ticket47676_reject_action(topo)
  399. test_ticket47676_final(topo)
  400. if __name__ == '__main__':
  401. run_isolated()