# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2015 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
'''
Created on Dec 09, 2014

@author: mreynolds
'''
import os
import sys
import time
import ldap
import ldap.sasl
import logging
import pytest
import plugin_tests
import stress_tests
from lib389 import DirSrv, Entry, tools, tasks
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *

log = logging.getLogger(__name__)

installation_prefix = None

class TopologyStandalone(object):
    def __init__(self, standalone):
        standalone.open()
        self.standalone = standalone


def repl_fail(replica):
    # remove replica instance, and assert failure
    replica.delete()
    assert False

@pytest.fixture(scope="module")
def topology(request):
    '''
    This fixture is used to create a standalone topology for the 'module'.
    '''
    global installation_prefix

    if installation_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation_prefix

    standalone = DirSrv(verbose=False)

    # Args for the standalone instance
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)

    # Check whether an instance with this configuration already exists
    instance_standalone = standalone.exists()

    # Remove the instance if it exists
    if instance_standalone:
        standalone.delete()

    # Create the instance
    standalone.create()

    # Used to retrieve configuration information (dbdir, confdir...)
    standalone.open()

    # Here we have a standalone instance up and running
    return TopologyStandalone(standalone)

def test_dynamic_plugins(topology):
    """
    Test Dynamic Plugins - exercise each plugin and its main features, while
    changing the configuration without restarting the server.

    Need to test: functionality, stability, and stress.  These tests need to run
    with replication disabled, and with replication set up with a second instance.
    Then test that replication is working, and that we have the same entries on
    each side.

    Functionality - Make sure that as configuration changes are made they take
                    effect immediately.  Cross-plugin interaction (e.g. automember/memberOf)
                    needs to be tested, as well as plugin tasks.  Need to test plugin
                    config validation (dependencies, etc).

    Memory Corruption - Restart the plugins many times, and in different orders, and test
                        functionality and stability.  This will exercise the internal
                        plugin linked lists, dse callbacks, and task handlers.

    Stress - Put the server under load that will trigger multiple plugins (MO, RI, DNA, etc).
             Restart various plugins while these operations are going on.  Perform this test
             5 times (stress_max_runs).
    """

    REPLICA_PORT = 33334
    RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
    master_maxcsn = 0
    replica_maxcsn = 0
    msg = ' (no replication)'
    replication_run = False
    stress_max_runs = 5

    # First enable dynamic plugins
    try:
        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
    except ldap.LDAPError as e:
        log.fatal('Failed to enable dynamic plugins: ' + e.message['desc'])
        assert False
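
    # 'nsslapd-dynamic-plugins' is the cn=config switch that allows plugin
    # enable/disable changes to take effect without a server restart.  As a rough,
    # hypothetical manual check of the setting (host, port, and credentials vary
    # per instance), something like the following could be used:
    #
    #   ldapsearch -D "cn=Directory Manager" -w <password> -b cn=config -s base \
    #       nsslapd-dynamic-plugins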

    # Test that critical plugins can be updated even though the change might not be applied
    try:
        topology.standalone.modify_s(DN_LDBM, [(ldap.MOD_REPLACE, 'description', 'test')])
    except ldap.LDAPError as e:
        log.fatal('Failed to apply change to critical plugin: ' + e.message['desc'])
        assert False

    while True:
        #
        # First run the tests with replication disabled, then rerun them with replication set up
        #

        ############################################################################
        # Test plugin functionality
        ############################################################################

        log.info('####################################################################')
        log.info('Testing Dynamic Plugins Functionality' + msg + '...')
        log.info('####################################################################\n')

        plugin_tests.test_all_plugins(topology.standalone)

        log.info('####################################################################')
        log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.')
        log.info('####################################################################\n')

        ############################################################################
        # Test stability by exercising the internal lists, callbacks, and task handlers
        ############################################################################

        log.info('####################################################################')
        log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
        log.info('####################################################################\n')

        prev_plugin_test = None
        prev_prev_plugin_test = None
        for plugin_test in plugin_tests.func_tests:
            #
            # Restart the plugin several times (and prev plugins) - work that linked list
            #
            plugin_test(topology.standalone, "restart")

            if prev_prev_plugin_test:
                prev_prev_plugin_test(topology.standalone, "restart")

            plugin_test(topology.standalone, "restart")

            if prev_plugin_test:
                prev_plugin_test(topology.standalone, "restart")

            plugin_test(topology.standalone, "restart")

            # Now run the functional test
            plugin_test(topology.standalone)

            # Set the previous tests
            if prev_plugin_test:
                prev_prev_plugin_test = prev_plugin_test
            prev_plugin_test = plugin_test

        log.info('####################################################################')
        log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.')
        log.info('####################################################################\n')

        ############################################################################
        # Stress two plugins while restarting them, and while restarting other plugins.
        # The goal is to not crash, and to have the plugins work correctly after
        # stressing them.
        ############################################################################

        log.info('####################################################################')
        log.info('Stressing Dynamic Plugins' + msg + '...')
        log.info('####################################################################\n')

        stress_tests.configureMO(topology.standalone)
        stress_tests.configureRI(topology.standalone)

        stress_count = 0
        while stress_count < stress_max_runs:
            log.info('####################################################################')
            log.info('Running stress test' + msg + '.  Run (%d/%d)...' % (stress_count + 1, stress_max_runs))
            log.info('####################################################################\n')
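
            # AddUsers/DelUsers below come from the local stress_tests module and are
            # assumed to be threading.Thread-style workers (hence the start()/join()
            # calls); each adds or deletes a batch of users with the given uid prefix.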

            try:
                # Launch three new threads to add a bunch of users
                add_users = stress_tests.AddUsers(topology.standalone, 'employee', True)
                add_users.start()
                add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True)
                add_users2.start()
                add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True)
                add_users3.start()
                time.sleep(1)

                # While we are adding users restart the MO plugin and an idle plugin
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(2)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

                # Wait for the 'adding' threads to complete
                add_users.join()
                add_users2.join()
                add_users3.join()

                # Now launch three threads to delete the users
                del_users = stress_tests.DelUsers(topology.standalone, 'employee')
                del_users.start()
                del_users2 = stress_tests.DelUsers(topology.standalone, 'entry')
                del_users2.start()
                del_users3 = stress_tests.DelUsers(topology.standalone, 'person')
                del_users3.start()
                time.sleep(1)

                # Restart both the MO, RI plugins during these deletes, and an idle plugin
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(2)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)

                # Wait for the 'deleting' threads to complete
                del_users.join()
                del_users2.join()
                del_users3.join()

                # Now make sure both the MO and RI plugins still work correctly
                plugin_tests.func_tests[8](topology.standalone)  # RI plugin
                plugin_tests.func_tests[5](topology.standalone)  # MO plugin

                # Cleanup the stress tests
                stress_tests.cleanup(topology.standalone)
            except:
                log.info('Stress test failed!')
                if replication_run:
                    # The replica instance only exists on the replication pass
                    repl_fail(replica_inst)
                assert False

            stress_count += 1
            log.info('####################################################################')
            log.info('Successfully Stressed Dynamic Plugins' + msg +
                     '.  Completed (%d/%d)' % (stress_count, stress_max_runs))
            log.info('####################################################################\n')

        if replication_run:
            # We're done.
            break
        else:
            #
            # Enable replication and run everything one more time
            #
            log.info('Setting up replication, and rerunning the tests...\n')

            # Create replica instance
            replica_inst = DirSrv(verbose=False)
            args_instance[SER_HOST] = LOCALHOST
            args_instance[SER_PORT] = REPLICA_PORT
            args_instance[SER_SERVERID_PROP] = 'replica'
            args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
            args_replica_inst = args_instance.copy()
            replica_inst.allocate(args_replica_inst)
            replica_inst.create()
            replica_inst.open()
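
            # The standalone instance acts as the master (replica id 1) and the new
            # instance as a read-only consumer; 65535 is the conventional replica id
            # for read-only consumers in 389 DS.  The agreement credentials below come
            # from lib389's defaultProperties.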

            try:
                topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX,
                                                              role=REPLICAROLE_MASTER,
                                                              replicaId=1)
                replica_inst.replica.enableReplication(suffix=DEFAULT_SUFFIX,
                                                       role=REPLICAROLE_CONSUMER,
                                                       replicaId=65535)

                properties = {RA_NAME: r'to_replica',
                              RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                              RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                              RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                              RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}

                repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
                                                                      host=LOCALHOST,
                                                                      port=REPLICA_PORT,
                                                                      properties=properties)
                if not repl_agreement:
                    log.fatal("Failed to create a replica agreement")
                    repl_fail(replica_inst)

                topology.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT)
                topology.standalone.waitForReplInit(repl_agreement)
            except:
                log.info('Failed to set up replication!')
                repl_fail(replica_inst)

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check that replication is in sync and the data matches, then remove the instance
    ############################################################################

    log.info('Checking if replication is in sync...')
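
    # For reference, an 'nsds50ruv' element for a replica typically looks like
    # (illustrative value only):
    #   '{replica 1 ldap://localhost:389} 54e63b31000000010000 54e63b9e000000010000'
    # Splitting on whitespace yields 5 parts, with parts[4] holding that replica's
    # max CSN - the value compared between master and consumer below.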

    try:
        # Grab the master's max CSN
        entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
        if not entry:
            log.error('Failed to find db tombstone entry from master')
            repl_fail(replica_inst)
        elements = entry[0].getValues('nsds50ruv')
        for ruv in elements:
            if 'replica 1' in ruv:
                parts = ruv.split()
                if len(parts) == 5:
                    master_maxcsn = parts[4]
                    break
                else:
                    log.error('RUV is incomplete')
                    repl_fail(replica_inst)
        if master_maxcsn == 0:
            log.error('Failed to find maxcsn on master')
            repl_fail(replica_inst)
    except ldap.LDAPError as e:
        log.fatal('Unable to search master for db tombstone: ' + e.message['desc'])
        repl_fail(replica_inst)

    # Loop on the consumer - waiting for it to catch up
    count = 0
    insync = False
    while count < 10:
        try:
            # Grab the consumer's max CSN
            entry = replica_inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
            if not entry:
                log.error('Failed to find db tombstone entry on consumer')
                repl_fail(replica_inst)
            elements = entry[0].getValues('nsds50ruv')
            for ruv in elements:
                if 'replica 1' in ruv:
                    parts = ruv.split()
                    if len(parts) == 5:
                        replica_maxcsn = parts[4]
                        break
            if replica_maxcsn == 0:
                log.error('Failed to find maxcsn on consumer')
                repl_fail(replica_inst)
        except ldap.LDAPError as e:
            log.fatal('Unable to search for db tombstone on consumer: ' + e.message['desc'])
            repl_fail(replica_inst)

        if master_maxcsn == replica_maxcsn:
            insync = True
            log.info('Replication is in sync.\n')
            break
        count += 1
        time.sleep(1)

    # Report on replication status
    if not insync:
        log.error('Consumer not in sync with master!')
        repl_fail(replica_inst)

    #
    # Verify the databases are identical.  There should not be any "person, entry, employee" entries
    #
    log.info('Checking if the data is the same between the replicas...')

    # Check the master
    try:
        entries = topology.standalone.search_s(DEFAULT_SUFFIX,
                                               ldap.SCOPE_SUBTREE,
                                               "(|(uid=person*)(uid=entry*)(uid=employee*))")
        if len(entries) > 0:
            log.error('Master database has an incorrect data set!\n')
            repl_fail(replica_inst)
    except ldap.LDAPError as e:
        log.fatal('Unable to search db on master: ' + e.message['desc'])
        repl_fail(replica_inst)

    # Check the consumer
    try:
        entries = replica_inst.search_s(DEFAULT_SUFFIX,
                                        ldap.SCOPE_SUBTREE,
                                        "(|(uid=person*)(uid=entry*)(uid=employee*))")
        if len(entries) > 0:
            log.error('Consumer database is not consistent with the master database')
            repl_fail(replica_inst)
    except ldap.LDAPError as e:
        log.fatal('Unable to search db on consumer: ' + e.message['desc'])
        repl_fail(replica_inst)

    log.info('Data is consistent across the replicas.\n')

    log.info('####################################################################')
    log.info('Replication consistency test passed')
    log.info('####################################################################\n')

    # Remove the replica instance
    replica_inst.delete()

    ############################################################################
    # We made it to the end!
    ############################################################################
    log.info('#####################################################')
    log.info('#####################################################')
    log.info("Dynamic Plugins Testsuite: Completed Successfully!")
    log.info('#####################################################')
    log.info('#####################################################\n')

def test_dynamic_plugins_final(topology):
    topology.standalone.delete()


def run_isolated():
    '''
    run_isolated is used to run these test cases independently of a test scheduler
    (xunit, py.test, ...).

    To run isolated without py.test, you need to:

        - edit this file and comment out the '@pytest.fixture' line before the 'topology' function
        - set the installation prefix
        - run this program
    '''
    global installation_prefix
    installation_prefix = None

    topo = topology(True)
    test_dynamic_plugins(topo)
    test_dynamic_plugins_final(topo)


if __name__ == '__main__':
    run_isolated()
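
# A typical invocation, assuming plugin_tests.py and stress_tests.py are importable
# alongside this file (exact commands may vary by environment):
#
#   py.test -v test_dynamic_plugins.py
#
# or, with the '@pytest.fixture' line commented out as described in run_isolated():
#
#   python test_dynamic_plugins.py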