# ticket47490_test.py
  1. '''
  2. Created on Nov 7, 2013
  3. @author: tbordaz
  4. '''
  5. import os
  6. import sys
  7. import time
  8. import ldap
  9. import logging
  10. import socket
  11. import time
  12. import logging
  13. import pytest
  14. import re
  15. from lib389 import DirSrv, Entry, tools
  16. from lib389.tools import DirSrvTools
  17. from lib389._constants import *
  18. from lib389.properties import *
  19. from constants import *
# Module-level logger shared by all helpers and test cases below.
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

# Set by run_isolated() when running outside a test scheduler.
installation_prefix = None

# DNs of the helper entries used to drive replication sessions.
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
ENTRY_DN = "cn=test_entry, %s" % SUFFIX

# Old/new MUST and MAY attribute lists for the test objectClass
# definitions (each *_NEW variant adds exactly one extra attribute).
MUST_OLD = "(postalAddress $ preferredLocale)"
MUST_NEW = "(postalAddress $ preferredLocale $ telexNumber)"
MAY_OLD = "(postalCode $ street)"
MAY_NEW = "(postalCode $ street $ postOfficeBox)"
  29. class TopologyMasterConsumer(object):
  30. def __init__(self, master, consumer):
  31. master.open()
  32. self.master = master
  33. consumer.open()
  34. self.consumer = consumer
def pattern_errorlog(file, log_pattern):
    """Scan *file* forward from the last remembered offset for *log_pattern*.

    The scan position is kept on the function object itself
    (``pattern_errorlog.last_pos``) so successive calls resume where the
    previous one stopped.  Returns the ``re`` match object for the first
    matching line, or None when EOF is reached without a match.
    """
    # First call starts at offset 0; later calls resume one byte past the
    # remembered position.  NOTE(review): the +1 skips one byte of the next
    # line — harmless for the unanchored patterns used here, but confirm
    # before reusing with anchored regexes.
    try:
        pattern_errorlog.last_pos += 1
    except AttributeError:
        pattern_errorlog.last_pos = 0
    found = None
    log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos)
    file.seek(pattern_errorlog.last_pos)
    # Use a while true iteration because 'for line in file: hit a
    # python bug that break file.tell()
    while True:
        line = file.readline()
        log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line))
        found = log_pattern.search(line)
        # Stop on EOF (empty string) or on the first matching line.
        if ((line == '') or (found)):
            break
    log.debug("_pattern_errorlog: end at offset %d" % file.tell())
    # Remember where we stopped so the next call continues from here.
    pattern_errorlog.last_pos = file.tell()
    return found
  54. def _oc_definition(oid_ext, name, must=None, may=None):
  55. oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
  56. desc = 'To test ticket 47490'
  57. sup = 'person'
  58. if not must:
  59. must = MUST_OLD
  60. if not may:
  61. may = MAY_OLD
  62. new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
  63. return new_oc
  64. def add_OC(instance, oid_ext, name):
  65. new_oc = _oc_definition(oid_ext, name)
  66. instance.schema.add_schema('objectClasses', new_oc)
  67. def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None):
  68. old_oc = _oc_definition(oid_ext, name, old_must, old_may)
  69. new_oc = _oc_definition(oid_ext, name, new_must, new_may)
  70. instance.schema.del_schema('objectClasses', old_oc)
  71. instance.schema.add_schema('objectClasses', new_oc)
  72. def trigger_schema_push(topology):
  73. """
  74. It triggers an update on the supplier. This will start a replication
  75. session and a schema push
  76. """
  77. try:
  78. trigger_schema_push.value += 1
  79. except AttributeError:
  80. trigger_schema_push.value = 1
  81. replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_schema_push.value))]
  82. topology.master.modify_s(ENTRY_DN, replace)
  83. # wait 10 seconds that the update is replicated
  84. loop = 0
  85. while loop <= 10:
  86. try:
  87. ent = topology.consumer.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
  88. val = ent.telephonenumber or "0"
  89. if int(val) == trigger_schema_push.value:
  90. return
  91. # the expected value is not yet replicated. try again
  92. time.sleep(1)
  93. loop += 1
  94. log.debug("trigger_schema_push: receive %s (expected %d)" % (val, trigger_schema_push.value))
  95. except ldap.NO_SUCH_OBJECT:
  96. time.sleep(1)
  97. loop += 1
@pytest.fixture(scope="module")
def topology(request):
    '''
    This fixture is used to create a replicated topology for the 'module'.
    The replicated topology is MASTER -> Consumer.
    At the beginning, It may exists a master instance and/or a consumer instance.
    It may also exists a backup for the master and/or the consumer.
    Principle:
        If master instance exists:
            restart it
        If consumer instance exists:
            restart it
        If backup of master AND backup of consumer exists:
            create or rebind to consumer
            create or rebind to master
            restore master from backup
            restore consumer from backup
        else:
            Cleanup everything
                remove instances
                remove backups
            Create instances
            Initialize replication
            Create backups
    '''
    global installation_prefix
    if installation_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation_prefix
    master = DirSrv(verbose=False)
    consumer = DirSrv(verbose=False)
    # Args for the master instance
    args_instance[SER_HOST] = HOST_MASTER
    args_instance[SER_PORT] = PORT_MASTER
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER
    args_master = args_instance.copy()
    master.allocate(args_master)
    # Args for the consumer instance
    args_instance[SER_HOST] = HOST_CONSUMER
    args_instance[SER_PORT] = PORT_CONSUMER
    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER
    args_consumer = args_instance.copy()
    consumer.allocate(args_consumer)
    # Get the status of the backups
    backup_master = master.checkBackupFS()
    backup_consumer = consumer.checkBackupFS()
    # Get the status of the instance and restart it if it exists
    instance_master = master.exists()
    if instance_master:
        master.stop(timeout=10)
        master.start(timeout=10)
    instance_consumer = consumer.exists()
    if instance_consumer:
        consumer.stop(timeout=10)
        consumer.start(timeout=10)
    if backup_master and backup_consumer:
        # The backups exist, assuming they are correct
        # we just re-init the instances with them
        if not instance_master:
            master.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master.open()
        if not instance_consumer:
            consumer.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            consumer.open()
        # restore master from backup
        master.stop(timeout=10)
        master.restoreFS(backup_master)
        master.start(timeout=10)
        # restore consumer from backup
        consumer.stop(timeout=10)
        consumer.restoreFS(backup_consumer)
        consumer.start(timeout=10)
    else:
        # We should be here only in two conditions
        #  - This is the first time a test involve master-consumer
        #    so we need to create everything
        #  - Something weird happened (instance/backup destroyed)
        #    so we discard everything and recreate all
        # Remove all the backups. So even if we have a specific backup file
        # (e.g backup_master) we clear all backups that an instance my have created
        if backup_master:
            master.clearBackupFS()
        if backup_consumer:
            consumer.clearBackupFS()
        # Remove all the instances
        if instance_master:
            master.delete()
        if instance_consumer:
            consumer.delete()
        # Create the instances
        master.create()
        master.open()
        consumer.create()
        consumer.open()
        #
        # Now prepare the Master-Consumer topology
        #
        # First Enable replication
        master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER)
        consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
        # Initialize the supplier->consumer
        properties = {RA_NAME: r'meTo_$host:$port',
                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
        if not repl_agreement:
            log.fatal("Fail to create a replica agreement")
            sys.exit(1)
        log.debug("%s created" % repl_agreement)
        master.agreement.init(SUFFIX, HOST_CONSUMER, PORT_CONSUMER)
        master.waitForReplInit(repl_agreement)
        # Check replication is working fine
        master.add_s(Entry((TEST_REPL_DN, {
                            'objectclass': "top person".split(),
                            'sn': 'test_repl',
                            'cn': 'test_repl'})))
        # Poll (up to ~10s) until the test entry shows up on the consumer.
        loop = 0
        while loop <= 10:
            try:
                ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
                break
            except ldap.NO_SUCH_OBJECT:
                time.sleep(1)
                loop += 1
        # Time to create the backups
        master.stop(timeout=10)
        master.backupfile = master.backupFS()
        master.start(timeout=10)
        consumer.stop(timeout=10)
        consumer.backupfile = consumer.backupFS()
        consumer.start(timeout=10)
    #
    # Here we have two instances master and consumer
    # with replication working. Either coming from a backup recovery
    # or from a fresh (re)init
    # Time to return the topology
    return TopologyMasterConsumer(master, consumer)
  238. def test_ticket47490_init(topology):
  239. """
  240. Initialize the test environment
  241. """
  242. log.debug("test_ticket47490_init topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer))
  243. # the test case will check if a warning message is logged in the
  244. # error log of the supplier
  245. topology.master.errorlog_file = open(topology.master.errlog, "r")
  246. # This entry will be used to trigger attempt of schema push
  247. topology.master.add_s(Entry((ENTRY_DN, {
  248. 'objectclass': "top person".split(),
  249. 'sn': 'test_entry',
  250. 'cn': 'test_entry'})))
  251. def test_ticket47490_one(topology):
  252. """
  253. Summary: Extra OC Schema is pushed - no error
  254. If supplier schema is a superset (one extra OC) of consumer schema, then
  255. schema is pushed and there is no message in the error log
  256. State at startup:
  257. - supplier default schema
  258. - consumer default schema
  259. Final state
  260. - supplier +masterNewOCA
  261. - consumer +masterNewOCA
  262. """
  263. log.debug("test_ticket47490_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer))
  264. # update the schema of the supplier so that it is a superset of
  265. # consumer. Schema should be pushed
  266. add_OC(topology.master, 2, 'masterNewOCA')
  267. trigger_schema_push(topology)
  268. master_schema_csn = topology.master.schema.get_schema_csn()
  269. consumer_schema_csn = topology.consumer.schema.get_schema_csn()
  270. # Check the schemaCSN was updated on the consumer
  271. log.debug("test_ticket47490_one master_schema_csn=%s", master_schema_csn)
  272. log.debug("ctest_ticket47490_one onsumer_schema_csn=%s", consumer_schema_csn)
  273. assert master_schema_csn == consumer_schema_csn
  274. # Check the error log of the supplier does not contain an error
  275. regex = re.compile("must not be overwritten \(set replication log for additional info\)")
  276. res = pattern_errorlog(topology.master.errorlog_file, regex)
  277. assert res == None
  278. def test_ticket47490_two(topology):
  279. """
  280. Summary: Extra OC Schema is NOT pushed - error
  281. If consumer schema is a superset (one extra OC) of supplier schema, then
  282. schema is not pushed and there is a message in the error log
  283. State at startup
  284. - supplier +masterNewOCA
  285. - consumer +masterNewOCA
  286. Final state
  287. - supplier +masterNewOCA +masterNewOCB
  288. - consumer +masterNewOCA +consumerNewOCA
  289. """
  290. # add this OC on consumer. Supplier will no push the schema
  291. add_OC(topology.consumer, 1, 'consumerNewOCA')
  292. # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
  293. time.sleep(2)
  294. add_OC(topology.master, 3, 'masterNewOCB')
  295. # now push the scheam
  296. trigger_schema_push(topology)
  297. master_schema_csn = topology.master.schema.get_schema_csn()
  298. consumer_schema_csn = topology.consumer.schema.get_schema_csn()
  299. # Check the schemaCSN was NOT updated on the consumer
  300. log.debug("test_ticket47490_two master_schema_csn=%s", master_schema_csn)
  301. log.debug("test_ticket47490_two consumer_schema_csn=%s", consumer_schema_csn)
  302. assert master_schema_csn != consumer_schema_csn
  303. # Check the error log of the supplier does not contain an error
  304. regex = re.compile("must not be overwritten \(set replication log for additional info\)")
  305. res = pattern_errorlog(topology.master.errorlog_file, regex)
  306. assert res
  307. def test_ticket47490_three(topology):
  308. """
  309. Summary: Extra OC Schema is pushed - no error
  310. If supplier schema is again a superset (one extra OC), then
  311. schema is pushed and there is no message in the error log
  312. State at startup
  313. - supplier +masterNewOCA +masterNewOCB
  314. - consumer +masterNewOCA +consumerNewOCA
  315. Final state
  316. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA
  317. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
  318. """
  319. # Do an upate to trigger the schema push attempt
  320. # add this OC on consumer. Supplier will no push the schema
  321. add_OC(topology.master, 1, 'consumerNewOCA')
  322. # now push the scheam
  323. trigger_schema_push(topology)
  324. master_schema_csn = topology.master.schema.get_schema_csn()
  325. consumer_schema_csn = topology.consumer.schema.get_schema_csn()
  326. # Check the schemaCSN was NOT updated on the consumer
  327. log.debug("test_ticket47490_three master_schema_csn=%s", master_schema_csn)
  328. log.debug("test_ticket47490_three consumer_schema_csn=%s", consumer_schema_csn)
  329. assert master_schema_csn == consumer_schema_csn
  330. # Check the error log of the supplier does not contain an error
  331. regex = re.compile("must not be overwritten \(set replication log for additional info\)")
  332. res = pattern_errorlog(topology.master.errorlog_file, regex)
  333. assert res == None
  334. def test_ticket47490_four(topology):
  335. """
  336. Summary: Same OC - extra MUST: Schema is pushed - no error
  337. If supplier schema is again a superset (OC with more MUST), then
  338. schema is pushed and there is no message in the error log
  339. State at startup
  340. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA
  341. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
  342. Final state
  343. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA
  344. +must=telexnumber
  345. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
  346. +must=telexnumber
  347. """
  348. mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD)
  349. trigger_schema_push(topology)
  350. master_schema_csn = topology.master.schema.get_schema_csn()
  351. consumer_schema_csn = topology.consumer.schema.get_schema_csn()
  352. # Check the schemaCSN was updated on the consumer
  353. log.debug("test_ticket47490_four master_schema_csn=%s", master_schema_csn)
  354. log.debug("ctest_ticket47490_four onsumer_schema_csn=%s", consumer_schema_csn)
  355. assert master_schema_csn == consumer_schema_csn
  356. # Check the error log of the supplier does not contain an error
  357. regex = re.compile("must not be overwritten \(set replication log for additional info\)")
  358. res = pattern_errorlog(topology.master.errorlog_file, regex)
  359. assert res == None
  360. def test_ticket47490_five(topology):
  361. """
  362. Summary: Same OC - extra MUST: Schema is NOT pushed - error
  363. If consumer schema is a superset (OC with more MUST), then
  364. schema is not pushed and there is a message in the error log
  365. State at startup
  366. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA
  367. +must=telexnumber
  368. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
  369. +must=telexnumber
  370. Final state
  371. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  372. +must=telexnumber
  373. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
  374. +must=telexnumber +must=telexnumber
  375. Note: replication log is enabled to get more details
  376. """
  377. # get more detail why it fails
  378. topology.master.enableReplLogging()
  379. # add telenumber to 'consumerNewOCA' on the consumer
  380. mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD)
  381. # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
  382. time.sleep(2)
  383. add_OC(topology.master, 4, 'masterNewOCC')
  384. trigger_schema_push(topology)
  385. master_schema_csn = topology.master.schema.get_schema_csn()
  386. consumer_schema_csn = topology.consumer.schema.get_schema_csn()
  387. # Check the schemaCSN was NOT updated on the consumer
  388. log.debug("test_ticket47490_five master_schema_csn=%s", master_schema_csn)
  389. log.debug("ctest_ticket47490_five onsumer_schema_csn=%s", consumer_schema_csn)
  390. assert master_schema_csn != consumer_schema_csn
  391. #Check that replication logging display additional message about 'telexNumber' not being
  392. # required in the master schema
  393. # This message appears before 'must not be overwritten' so it should be check first
  394. regex = re.compile("Attribute telexNumber is not required in 'consumerNewOCA' of the local supplier schema")
  395. res = pattern_errorlog(topology.master.errorlog_file, regex)
  396. assert res != None
  397. # Check the error log of the supplier does not contain an error
  398. regex = re.compile("must not be overwritten \(set replication log for additional info\)")
  399. res = pattern_errorlog(topology.master.errorlog_file, regex)
  400. assert res != None
  401. def test_ticket47490_six(topology):
  402. """
  403. Summary: Same OC - extra MUST: Schema is pushed - no error
  404. If supplier schema is again a superset (OC with more MUST), then
  405. schema is pushed and there is no message in the error log
  406. State at startup
  407. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  408. +must=telexnumber
  409. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
  410. +must=telexnumber +must=telexnumber
  411. Final state
  412. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  413. +must=telexnumber +must=telexnumber
  414. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  415. +must=telexnumber +must=telexnumber
  416. Note: replication log is enabled to get more details
  417. """
  418. # add telenumber to 'consumerNewOCA' on the consumer
  419. mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD)
  420. trigger_schema_push(topology)
  421. master_schema_csn = topology.master.schema.get_schema_csn()
  422. consumer_schema_csn = topology.consumer.schema.get_schema_csn()
  423. # Check the schemaCSN was NOT updated on the consumer
  424. log.debug("test_ticket47490_six master_schema_csn=%s", master_schema_csn)
  425. log.debug("ctest_ticket47490_six onsumer_schema_csn=%s", consumer_schema_csn)
  426. assert master_schema_csn == consumer_schema_csn
  427. # Check the error log of the supplier does not contain an error
  428. regex = re.compile("must not be overwritten \(set replication log for additional info\)")
  429. res = pattern_errorlog(topology.master.errorlog_file, regex)
  430. assert res == None
  431. def test_ticket47490_seven(topology):
  432. """
  433. Summary: Same OC - extra MAY: Schema is pushed - no error
  434. If supplier schema is again a superset (OC with more MAY), then
  435. schema is pushed and there is no message in the error log
  436. State at startup
  437. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  438. +must=telexnumber +must=telexnumber
  439. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  440. +must=telexnumber +must=telexnumber
  441. Final stat
  442. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  443. +must=telexnumber +must=telexnumber
  444. +may=postOfficeBox
  445. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  446. +must=telexnumber +must=telexnumber
  447. +may=postOfficeBox
  448. """
  449. mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
  450. trigger_schema_push(topology)
  451. master_schema_csn = topology.master.schema.get_schema_csn()
  452. consumer_schema_csn = topology.consumer.schema.get_schema_csn()
  453. # Check the schemaCSN was updated on the consumer
  454. log.debug("test_ticket47490_seven master_schema_csn=%s", master_schema_csn)
  455. log.debug("ctest_ticket47490_seven consumer_schema_csn=%s", consumer_schema_csn)
  456. assert master_schema_csn == consumer_schema_csn
  457. # Check the error log of the supplier does not contain an error
  458. regex = re.compile("must not be overwritten \(set replication log for additional info\)")
  459. res = pattern_errorlog(topology.master.errorlog_file, regex)
  460. assert res == None
  461. def test_ticket47490_eight(topology):
  462. """
  463. Summary: Same OC - extra MAY: Schema is NOT pushed - error
  464. If consumer schema is a superset (OC with more MAY), then
  465. schema is not pushed and there is message in the error log
  466. State at startup
  467. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  468. +must=telexnumber +must=telexnumber
  469. +may=postOfficeBox
  470. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  471. +must=telexnumber +must=telexnumber
  472. +may=postOfficeBox
  473. Final state
  474. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  475. +must=telexnumber +must=telexnumber
  476. +may=postOfficeBox +may=postOfficeBox
  477. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  478. +must=telexnumber +must=telexnumber
  479. +may=postOfficeBox +may=postOfficeBox
  480. """
  481. mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
  482. # modify OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
  483. time.sleep(2)
  484. mod_OC(topology.master, 4, 'masterNewOCC', old_must=MUST_OLD, new_must=MUST_OLD, old_may=MAY_OLD, new_may=MAY_NEW)
  485. trigger_schema_push(topology)
  486. master_schema_csn = topology.master.schema.get_schema_csn()
  487. consumer_schema_csn = topology.consumer.schema.get_schema_csn()
  488. # Check the schemaCSN was not updated on the consumer
  489. log.debug("test_ticket47490_eight master_schema_csn=%s", master_schema_csn)
  490. log.debug("ctest_ticket47490_eight onsumer_schema_csn=%s", consumer_schema_csn)
  491. assert master_schema_csn != consumer_schema_csn
  492. #Check that replication logging display additional message about 'postOfficeBox' not being
  493. # allowed in the master schema
  494. # This message appears before 'must not be overwritten' so it should be check first
  495. regex = re.compile("Attribute postOfficeBox is not allowed in 'consumerNewOCA' of the local supplier schema")
  496. res = pattern_errorlog(topology.master.errorlog_file, regex)
  497. assert res != None
  498. # Check the error log of the supplier does not contain an error
  499. regex = re.compile("must not be overwritten \(set replication log for additional info\)")
  500. res = pattern_errorlog(topology.master.errorlog_file, regex)
  501. assert res != None
  502. def test_ticket47490_nine(topology):
  503. """
  504. Summary: Same OC - extra MAY: Schema is pushed - no error
  505. If consumer schema is a superset (OC with more MAY), then
  506. schema is not pushed and there is message in the error log
  507. State at startup
  508. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  509. +must=telexnumber +must=telexnumber
  510. +may=postOfficeBox +may=postOfficeBox
  511. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  512. +must=telexnumber +must=telexnumber
  513. +may=postOfficeBox +may=postOfficeBox
  514. Final state
  515. - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  516. +must=telexnumber +must=telexnumber
  517. +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox
  518. - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
  519. +must=telexnumber +must=telexnumber
  520. +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox
  521. """
  522. mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
  523. trigger_schema_push(topology)
  524. master_schema_csn = topology.master.schema.get_schema_csn()
  525. consumer_schema_csn = topology.consumer.schema.get_schema_csn()
  526. # Check the schemaCSN was updated on the consumer
  527. log.debug("test_ticket47490_nine master_schema_csn=%s", master_schema_csn)
  528. log.debug("ctest_ticket47490_nine onsumer_schema_csn=%s", consumer_schema_csn)
  529. assert master_schema_csn == consumer_schema_csn
  530. # Check the error log of the supplier does not contain an error
  531. regex = re.compile("must not be overwritten \(set replication log for additional info\)")
  532. res = pattern_errorlog(topology.master.errorlog_file, regex)
  533. assert res == None
  534. def test_ticket47490_final(topology):
  535. topology.master.stop(timeout=10)
  536. topology.consumer.stop(timeout=10)
  537. def run_isolated():
  538. '''
  539. run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
  540. To run isolated without py.test, you need to
  541. - edit this file and comment '@pytest.fixture' line before 'topology' function.
  542. - set the installation prefix
  543. - run this program
  544. '''
  545. global installation_prefix
  546. installation_prefix = None
  547. topo = topology(True)
  548. test_ticket47490_init(topo)
  549. test_ticket47490_one(topo)
  550. test_ticket47490_two(topo)
  551. test_ticket47490_three(topo)
  552. test_ticket47490_four(topo)
  553. test_ticket47490_five(topo)
  554. test_ticket47490_six(topo)
  555. test_ticket47490_seven(topo)
  556. test_ticket47490_eight(topo)
  557. test_ticket47490_nine(topo)
  558. test_ticket47490_final(topo)
  559. if __name__ == '__main__':
  560. run_isolated()