# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2015 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
'''
Created on Dec 09, 2014

@author: mreynolds
'''
import os
import sys
import time
import ldap
import ldap.sasl
import logging
import pytest
import plugin_tests
import stress_tests
from lib389 import DirSrv, Entry, tools, tasks
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *

log = logging.getLogger(__name__)

installation_prefix = None


class TopologyStandalone(object):
    def __init__(self, standalone):
        standalone.open()
        self.standalone = standalone


def repl_fail(replica):
    # remove replica instance, and assert failure
    replica.delete()
    assert False


@pytest.fixture(scope="module")
def topology(request):
    '''
        This fixture is used to create a standalone topology for the 'module'.
    '''
    global installation_prefix

    if installation_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation_prefix

    standalone = DirSrv(verbose=False)

    # Args for the standalone instance
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)

    # Get the status of the instance and restart it if it exists
    instance_standalone = standalone.exists()

    # Remove the instance
    if instance_standalone:
        standalone.delete()

    # Create the instance
    standalone.create()

    # Used to retrieve configuration information (dbdir, confdir...)
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    # Here we have a standalone instance up and running
    return TopologyStandalone(standalone)


def test_dynamic_plugins(topology):
    """
        Test Dynamic Plugins - exercise each plugin and its main features, while
        changing the configuration without restarting the server.

        Need to test: functionality, stability, and stress.  These tests need to run
        with replication disabled, and with replication set up with a second
        instance.  Then test that replication is working, and that we have the
        same entries on each side.

        Functionality - Make sure that as configuration changes are made they take
                        effect immediately.  Cross-plugin interaction (e.g. automember/memberOf)
                        needs to be tested, as well as plugin tasks.  Need to test plugin
                        config validation (dependencies, etc.).

        Memory Corruption - Restart the plugins many times, in different orders, and test
                            functionality and stability.  This exercises the internal
                            plugin linked lists, dse callbacks, and task handlers.

        Stress - Put the server under a load that triggers multiple plugins (MO, RI, DNA, etc.).
                 Restart various plugins while these operations are going on.  Perform this
                 test 5 times (stress_max_runs).
    """
    REPLICA_PORT = 33334
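    # RUV_FILTER matches the database RUV tombstone entry
    # (nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff); its 'nsds50ruv' values
    # are parsed later to compare the master and consumer max CSNs.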
    RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
    master_maxcsn = 0
    replica_maxcsn = 0
    msg = ' (no replication)'
    replication_run = False
    stress_max_runs = 5
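
    # 'nsslapd-dynamic-plugins' in cn=config allows plugin enable/disable and
    # configuration changes to take effect without a server restart, which is
    # the behavior exercised throughout this test.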
    # First enable dynamic plugins
    try:
        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
    except ldap.LDAPError as e:
        log.fatal('Failed to enable dynamic plugins!  Error: ' + e.message['desc'])
        assert False

    # Test that critical plugins can be updated even though the change might not be applied
    try:
        topology.standalone.modify_s(DN_LDBM, [(ldap.MOD_REPLACE, 'description', 'test')])
    except ldap.LDAPError as e:
        log.fatal('Failed to apply change to critical plugin.  Error: ' + e.message['desc'])
        assert False

    while 1:
        #
        # First run the tests with replication disabled, then rerun them with replication set up
        #

        ############################################################################
        # Test plugin functionality
        ############################################################################

        log.info('####################################################################')
        log.info('Testing Dynamic Plugins Functionality' + msg + '...')
        log.info('####################################################################\n')

        plugin_tests.test_all_plugins(topology.standalone)

        log.info('####################################################################')
        log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.')
        log.info('####################################################################\n')

        ############################################################################
        # Test the stability by exercising the internal lists, callbacks, and task handlers
        ############################################################################

        log.info('####################################################################')
        log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
        log.info('####################################################################\n')

        prev_plugin_test = None
        prev_prev_plugin_test = None
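
        # prev_plugin_test/prev_prev_plugin_test track the two previously run
        # tests so each iteration can restart earlier plugins interleaved with
        # the current one, shuffling the order of the server's plugin list.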
        for plugin_test in plugin_tests.func_tests:
            #
            # Restart the plugin several times (and prev plugins) - work that linked list
            #
            plugin_test(topology.standalone, "restart")

            if prev_prev_plugin_test:
                prev_prev_plugin_test(topology.standalone, "restart")

            plugin_test(topology.standalone, "restart")

            if prev_plugin_test:
                prev_plugin_test(topology.standalone, "restart")

            plugin_test(topology.standalone, "restart")

            # Now run the functional test
            plugin_test(topology.standalone)

            # Set the previous tests
            if prev_plugin_test:
                prev_prev_plugin_test = prev_plugin_test
            prev_plugin_test = plugin_test

        log.info('####################################################################')
        log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.')
        log.info('####################################################################\n')

        ############################################################################
        # Stress two plugins while restarting them, and while restarting other plugins.
        # The goal is to not crash, and to have the plugins work after stressing them.
        ############################################################################

        log.info('####################################################################')
        log.info('Stressing Dynamic Plugins' + msg + '...')
        log.info('####################################################################\n')

        stress_tests.configureMO(topology.standalone)
        stress_tests.configureRI(topology.standalone)
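        # configureMO()/configureRI() prepare the memberOf and referential
        # integrity plugin configuration used by the add/delete threads below
        # (see stress_tests.py).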

        stress_count = 0
        while stress_count < stress_max_runs:
            log.info('####################################################################')
            log.info('Running stress test' + msg + '. Run (%d/%d)...' % (stress_count + 1, stress_max_runs))
            log.info('####################################################################\n')
            try:
                # Launch three new threads to add a bunch of users
                add_users = stress_tests.AddUsers(topology.standalone, 'employee', True)
                add_users.start()
                add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True)
                add_users2.start()
                add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True)
                add_users3.start()
                time.sleep(1)

                # While we are adding users, restart the MO plugin and an idle plugin
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(2)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
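
                # The back-to-back disable/enable cycles above, separated by
                # short sleeps, are meant to land plugin restarts at different
                # points of the in-flight ADD operations.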

                # Wait for the 'adding' threads to complete
                add_users.join()
                add_users2.join()
                add_users3.join()

                # Now launch three threads to delete the users
                del_users = stress_tests.DelUsers(topology.standalone, 'employee')
                del_users.start()
                del_users2 = stress_tests.DelUsers(topology.standalone, 'entry')
                del_users2.start()
                del_users3 = stress_tests.DelUsers(topology.standalone, 'person')
                del_users3.start()
                time.sleep(1)

                # Restart both the MO and RI plugins during these deletes, plus an idle plugin
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(2)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)

                # Wait for the 'deleting' threads to complete
                del_users.join()
                del_users2.join()
                del_users3.join()

                # Now make sure both the MO and RI plugins still work correctly
                plugin_tests.func_tests[8](topology.standalone)  # RI plugin
                plugin_tests.func_tests[5](topology.standalone)  # MO plugin
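
                # Note: the indices into plugin_tests.func_tests are hard-coded;
                # they must be kept in sync with the ordering of that list.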

                # Cleanup the stress tests
                stress_tests.cleanup(topology.standalone)

            except:
                log.info('Stress test failed!')
                # The replica instance only exists on the replication pass
                if replication_run:
                    repl_fail(replica_inst)
                assert False

            stress_count += 1
            log.info('####################################################################')
            log.info('Successfully Stressed Dynamic Plugins' + msg +
                     '. Completed (%d/%d)' % (stress_count, stress_max_runs))
            log.info('####################################################################\n')

        if replication_run:
            # We're done.
            break
        else:
            #
            # Enable replication and run everything one more time
            #
            log.info('Setting up replication, and rerunning the tests...\n')

            # Create replica instance
            replica_inst = DirSrv(verbose=False)
            args_instance[SER_HOST] = LOCALHOST
            args_instance[SER_PORT] = REPLICA_PORT
            args_instance[SER_SERVERID_PROP] = 'replica'
            args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
            args_replica_inst = args_instance.copy()
            replica_inst.allocate(args_replica_inst)
            replica_inst.create()
            replica_inst.open()

            try:
                topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX,
                                                              role=REPLICAROLE_MASTER,
                                                              replicaId=1)
                replica_inst.replica.enableReplication(suffix=DEFAULT_SUFFIX,
                                                       role=REPLICAROLE_CONSUMER,
                                                       replicaId=65535)
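                # replicaId=1 identifies the master; 65535 is the conventional
                # read-only consumer replica ID in 389 DS.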

                properties = {RA_NAME: r'to_replica',
                              RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                              RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                              RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                              RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
                repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
                                                                      host=LOCALHOST,
                                                                      port=REPLICA_PORT,
                                                                      properties=properties)
                if not repl_agreement:
                    log.fatal("Failed to create a replica agreement")
                    repl_fail(replica_inst)
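
                # Kick off a total (online) initialization of the consumer and
                # wait for it to finish before continuing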
                topology.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT)
                topology.standalone.waitForReplInit(repl_agreement)
            except:
                log.info('Failed to setup replication!')
                repl_fail(replica_inst)

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check that replication is in sync and the data matches, then remove the replica instance
    ############################################################################

    log.info('Checking if replication is in sync...')

    try:
        # Grab the master's max CSN
        entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
        if not entry:
            log.error('Failed to find db tombstone entry from master')
            repl_fail(replica_inst)
        elements = entry[0].getValues('nsds50ruv')
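        # Each 'nsds50ruv' replica value looks like:
        #   {replica 1 ldap://host:port} <min CSN> <max CSN>
        # so a complete element splits into 5 whitespace-separated parts, with
        # the max CSN in parts[4].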
        for ruv in elements:
            if 'replica 1' in ruv:
                parts = ruv.split()
                if len(parts) == 5:
                    master_maxcsn = parts[4]
                    break
                else:
                    log.error('RUV is incomplete')
                    repl_fail(replica_inst)
        if master_maxcsn == 0:
            log.error('Failed to find maxcsn on master')
            repl_fail(replica_inst)
    except ldap.LDAPError as e:
        log.fatal('Unable to search master for db tombstone: ' + e.message['desc'])
        repl_fail(replica_inst)

    # Loop on the consumer - waiting for it to catch up
    count = 0
    insync = False
    while count < 60:
        try:
            # Grab the consumer's max CSN
            entry = replica_inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
            if not entry:
                log.error('Failed to find db tombstone entry on consumer')
                repl_fail(replica_inst)
            elements = entry[0].getValues('nsds50ruv')
            for ruv in elements:
                if 'replica 1' in ruv:
                    parts = ruv.split()
                    if len(parts) == 5:
                        replica_maxcsn = parts[4]
                        break
            if replica_maxcsn == 0:
                log.error('Failed to find maxcsn on consumer')
                repl_fail(replica_inst)
        except ldap.LDAPError as e:
            log.fatal('Unable to search for db tombstone on consumer: ' + e.message['desc'])
            repl_fail(replica_inst)

        if master_maxcsn == replica_maxcsn:
            insync = True
            log.info('Replication is in sync.\n')
            break
        count += 1
        time.sleep(1)

    # Report on replication status
    if not insync:
        log.error('Consumer not in sync with master!')
        repl_fail(replica_inst)

    #
    # Verify the databases are identical.  There should not be any "person", "entry", or "employee" entries
    #
    log.info('Checking if the data is the same between the replicas...')

    # Check the master
    try:
        entries = topology.standalone.search_s(DEFAULT_SUFFIX,
                                               ldap.SCOPE_SUBTREE,
                                               "(|(uid=person*)(uid=entry*)(uid=employee*))")
        if len(entries) > 0:
            log.error('Master database has incorrect data set!\n')
            repl_fail(replica_inst)
    except ldap.LDAPError as e:
        log.fatal('Unable to search db on master: ' + e.message['desc'])
        repl_fail(replica_inst)

    # Check the consumer
    try:
        entries = replica_inst.search_s(DEFAULT_SUFFIX,
                                        ldap.SCOPE_SUBTREE,
                                        "(|(uid=person*)(uid=entry*)(uid=employee*))")
        if len(entries) > 0:
            log.error('Consumer database is not consistent with the master database')
            repl_fail(replica_inst)
    except ldap.LDAPError as e:
        log.fatal('Unable to search db on consumer: ' + e.message['desc'])
        repl_fail(replica_inst)

    log.info('Data is consistent across the replicas.\n')

    log.info('####################################################################')
    log.info('Replication consistency test passed')
    log.info('####################################################################\n')

    # Remove the replica instance
    replica_inst.delete()

    ############################################################################
    # We made it to the end!
    ############################################################################

    log.info('#####################################################')
    log.info('#####################################################')
    log.info("Dynamic Plugins Testsuite: Completed Successfully!")
    log.info('#####################################################')
    log.info('#####################################################\n')


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)