- # --- BEGIN COPYRIGHT BLOCK ---
- # Copyright (C) 2016 Red Hat, Inc.
- # All rights reserved.
- #
- # License: GPL (version 3 or any later version).
- # See LICENSE for details.
- # --- END COPYRIGHT BLOCK ---
- #
- '''
- Created on Dec 09, 2014
- @author: mreynolds
- '''
- import logging
- import os
- import time
- import ldap.sasl
- import pytest
- from lib389.tasks import *
- from lib389.replica import ReplicationManager
- from lib389.config import LDBMConfig
- from lib389._constants import *
- from lib389.topologies import topology_m2
- from ..plugins import acceptance_test
- from . import stress_tests
- pytestmark = pytest.mark.tier1
- log = logging.getLogger(__name__)
- def check_replicas(topology_m2):
- """Check that replication is in sync and working"""
- m1 = topology_m2.ms["master1"]
- m2 = topology_m2.ms["master2"]
- log.info('Checking if replication is in sync...')
- repl = ReplicationManager(DEFAULT_SUFFIX)
- repl.test_replication_topology(topology_m2)
- #
- # Verify the databases are identical. There should not be any "person, entry, employee" entries
- #
- log.info('Checking if the data is the same between the replicas...')
- # Check the master
- try:
- entries = m1.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- "(|(uid=person*)(uid=entry*)(uid=employee*))")
- if len(entries) > 0:
- log.error('Master database has incorrect data set!\n')
- assert False
- except ldap.LDAPError as e:
- log.fatal('Unable to search db on master: ' + str(e))
- assert False
- # Check the consumer
- try:
- entries = m2.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- "(|(uid=person*)(uid=entry*)(uid=employee*))")
- if len(entries) > 0:
- log.error('Consumer database is not consistent with the master database')
- assert False
- except ldap.LDAPError as e:
- log.fatal('Unable to search db on consumer: ' + str(e))
- assert False
- log.info('Data is consistent across the replicas.\n')
- def test_acceptance(topology_m2):
- """Exercise each plugin and its main features, while
- changing the configuration without restarting the server.
- :id: 96136538-0151-4b09-9933-0e0cbf2c786c
- :setup: 2 Master Instances
- :steps:
- 1. Pause all replication
- 2. Set nsslapd-dynamic-plugins to on
- 3. Try to update LDBM config entry
- 4. Go through all plugin basic functionality
- 5. Resume replication
- 6. Go through all plugin basic functionality again
- 7. Check that data is in sync and replication is working
- :expectedresults:
- 1. Success
- 2. Success
- 3. Success
- 4. Success
- 5. Success
- 6. Success
- 7. Success
- """
- m1 = topology_m2.ms["master1"]
- msg = ' (no replication)'
- replication_run = False
- # First part of the test should be without replication
- topology_m2.pause_all_replicas()
- # First enable dynamic plugins
- m1.config.replace('nsslapd-dynamic-plugins', 'on')
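- # nsslapd-dynamic-plugins allows plugin enable/disable and configuration
- # changes to take effect without a server restart, which is what this suite exercises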
- # Test that critical plugins can be updated even though the change might not be applied
- ldbm_config = LDBMConfig(m1)
- ldbm_config.replace('description', 'test')
- while True:
- # First run the tests with replication disabled, then rerun them with replication set up
- ############################################################################
- # Test plugin functionality
- ############################################################################
- log.info('####################################################################')
- log.info('Testing Dynamic Plugins Functionality' + msg + '...')
- log.info('####################################################################\n')
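- # check_all_plugins() runs the basic functional test for every plugin
- # covered by the acceptance suite while the server keeps running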
- acceptance_test.check_all_plugins(topology_m2)
- log.info('####################################################################')
- log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.')
- log.info('####################################################################\n')
- if replication_run:
- # We're done.
- break
- else:
- log.info('Resume replication and run everything one more time')
- topology_m2.resume_all_replicas()
- replication_run = True
- msg = ' (replication enabled)'
- time.sleep(1)
- ############################################################################
- # Check replication, and data are in sync
- ############################################################################
- check_replicas(topology_m2)
- def test_memory_corruption(topology_m2):
- """Check the plugins for memory corruption issues while
- dynamic plugins option is enabled
- :id: 96136538-0151-4b09-9933-0e0cbf2c7862
- :setup: 2 Master Instances
- :steps:
- 1. Pause all replication
- 2. Set nsslapd-dynamic-plugins to on
- 3. Try to update LDBM config entry
- 4. Restart the plugin many times in a linked-list fashion,
- restarting the previous and pre-previous plugins in the list of all plugins
- 5. Run the functional test
- 6. Repeat 4 and 5 steps for all plugins
- 7. Resume replication
- 8. Go through 4-6 steps once more
- 9. Check that data is in sync and replication is working
- :expectedresults:
- 1. Success
- 2. Success
- 3. Success
- 4. Success
- 5. Success
- 6. Success
- 7. Success
- 8. Success
- 9. Success
- """
- m1 = topology_m2.ms["master1"]
- msg = ' (no replication)'
- replication_run = False
- # First part of the test should be without replication
- topology_m2.pause_all_replicas()
- # First enable dynamic plugins
- m1.config.replace('nsslapd-dynamic-plugins', 'on')
- # Test that critical plugins can be updated even though the change might not be applied
- ldbm_config = LDBMConfig(m1)
- ldbm_config.replace('description', 'test')
- while True:
- # First run the tests with replication disabled, then rerun them with replication set up
- ############################################################################
- # Test the stability by exercising the internal lists, callbacks, and task handlers
- ############################################################################
- log.info('####################################################################')
- log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
- log.info('####################################################################\n')
- prev_plugin_test = None
- prev_prev_plugin_test = None
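- # The server keeps its enabled plugins in an internal linked list. Re-enabling
- # the current plugin together with the one and two plugins toggled before it
- # inserts and removes entries at varying positions in that list, which is
- # where corruption bugs tend to surface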
- for plugin_test in acceptance_test.func_tests:
- #
- # Restart the plugin several times (and prev plugins) - work that linked list
- #
- plugin_test(topology_m2, "restart")
- if prev_prev_plugin_test:
- prev_prev_plugin_test(topology_m2, "restart")
- plugin_test(topology_m2, "restart")
- if prev_plugin_test:
- prev_plugin_test(topology_m2, "restart")
- plugin_test(topology_m2, "restart")
- # Now run the functional test
- plugin_test(topology_m2, "dynamic")
- # Set the previous tests
- if prev_plugin_test:
- prev_prev_plugin_test = prev_plugin_test
- prev_plugin_test = plugin_test
- log.info('####################################################################')
- log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.')
- log.info('####################################################################\n')
- if replication_run:
- # We're done.
- break
- else:
- log.info('Resume replication and run everything one more time')
- topology_m2.resume_all_replicas()
- replication_run = True
- msg = ' (replication enabled)'
- time.sleep(1)
- ############################################################################
- # Check replication, and data are in sync
- ############################################################################
- check_replicas(topology_m2)
- def test_stress(topology_m2):
- """Test plugins while under a big load. Perform the test 5 times
- :id: 96136538-0151-4b09-9933-0e0cbf2c7863
- :setup: 2 Master Instances
- :steps:
- 1. Pause all replication
- 2. Set nsslapd-dynamic-plugins to on
- 3. Try to update LDBM config entry
- 4. Do one run through all tests
- 5. Enable Referential integrity and MemberOf plugins
- 6. Launch three new threads to add a bunch of users
- 7. While we are adding users restart the MemberOf and
- Linked Attributes plugins many times
- 8. Wait for the 'adding' threads to complete
- 9. Now launch three threads to delete the users
- 10. Restart the MemberOf, Referential Integrity and
- Linked Attributes plugins during these deletes
- 11. Wait for the 'deleting' threads to complete
- 12. Now make sure both the MemberOf and Referential integrity plugins still work correctly
- 13. Cleanup the stress tests (delete the group entry)
- 14. Perform 4-13 steps five times
- 15. Resume replication
- 16. Go through 4-14 steps once more
- 17. Check that data is in sync and replication is working
- :expectedresults:
- 1. Success
- 2. Success
- 3. Success
- 4. Success
- 5. Success
- 6. Success
- 7. Success
- 8. Success
- 9. Success
- 10. Success
- 11. Success
- 12. Success
- 13. Success
- 14. Success
- 15. Success
- 16. Success
- 17. Success
- """
- m1 = topology_m2.ms["master1"]
- msg = ' (no replication)'
- replication_run = False
- stress_max_runs = 5
- # First part of the test should be without replication
- topology_m2.pause_all_replicas()
- # First enable dynamic plugins
- m1.config.replace('nsslapd-dynamic-plugins', 'on')
- # Test that critical plugins can be updated even though the change might not be applied
- ldbm_config = LDBMConfig(m1)
- ldbm_config.replace('description', 'test')
- while True:
- # First run the tests with replication disabled, then rerun them with replication set up
- log.info('Do one run through all tests' + msg + '...')
- acceptance_test.check_all_plugins(topology_m2)
- log.info('####################################################################')
- log.info('Stressing Dynamic Plugins' + msg + '...')
- log.info('####################################################################\n')
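- # Enable and configure the MemberOf (MO) and Referential Integrity (RI)
- # plugins that the add/delete threads below will exercise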
- stress_tests.configureMO(m1)
- stress_tests.configureRI(m1)
- stress_count = 0
- while stress_count < stress_max_runs:
- log.info('####################################################################')
- log.info('Running stress test' + msg + '. Run (%d/%d)...' % (stress_count + 1, stress_max_runs))
- log.info('####################################################################\n')
- # Launch three new threads to add a bunch of users
- add_users = stress_tests.AddUsers(m1, 'employee', True)
- add_users.start()
- add_users2 = stress_tests.AddUsers(m1, 'entry', True)
- add_users2.start()
- add_users3 = stress_tests.AddUsers(m1, 'person', True)
- add_users3.start()
- time.sleep(1)
- # While we are adding users restart the MO plugin and an idle plugin
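- # Linked Attributes serves as the "idle" plugin here (it is not configured
- # by the stress setup), so toggling it mainly exercises the plugin start/stop path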
- m1.plugins.disable(name=PLUGIN_MEMBER_OF)
- m1.plugins.enable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- m1.plugins.disable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- m1.plugins.enable(name=PLUGIN_MEMBER_OF)
- m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
- time.sleep(1)
- m1.plugins.disable(name=PLUGIN_MEMBER_OF)
- m1.plugins.enable(name=PLUGIN_MEMBER_OF)
- time.sleep(2)
- m1.plugins.disable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- m1.plugins.enable(name=PLUGIN_MEMBER_OF)
- m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
- m1.plugins.disable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- m1.plugins.enable(name=PLUGIN_MEMBER_OF)
- m1.plugins.disable(name=PLUGIN_MEMBER_OF)
- m1.plugins.enable(name=PLUGIN_MEMBER_OF)
- # Wait for the 'adding' threads to complete
- add_users.join()
- add_users2.join()
- add_users3.join()
- # Now launch three threads to delete the users
- del_users = stress_tests.DelUsers(m1, 'employee')
- del_users.start()
- del_users2 = stress_tests.DelUsers(m1, 'entry')
- del_users2.start()
- del_users3 = stress_tests.DelUsers(m1, 'person')
- del_users3.start()
- time.sleep(1)
- # Restart the MO and RI plugins during these deletes, plus an idle plugin
- m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- m1.plugins.disable(name=PLUGIN_MEMBER_OF)
- m1.plugins.enable(name=PLUGIN_MEMBER_OF)
- m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- time.sleep(1)
- m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- time.sleep(1)
- m1.plugins.disable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- m1.plugins.enable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
- m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- m1.plugins.disable(name=PLUGIN_MEMBER_OF)
- m1.plugins.enable(name=PLUGIN_MEMBER_OF)
- m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- time.sleep(2)
- m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- time.sleep(1)
- m1.plugins.disable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- m1.plugins.enable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
- # Wait for the 'deleting' threads to complete
- del_users.join()
- del_users2.join()
- del_users3.join()
- # Now make sure both the MO and RI plugins still work correctly
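- # func_tests is an ordered list in acceptance_test; index 8 is the
- # Referential Integrity test and index 5 is the MemberOf test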
- acceptance_test.func_tests[8](topology_m2, "dynamic") # RI plugin
- acceptance_test.func_tests[5](topology_m2, "dynamic") # MO plugin
- # Cleanup the stress tests (delete the group entry)
- stress_tests.cleanup(m1)
- stress_count += 1
- log.info('####################################################################')
- log.info('Successfully Stressed Dynamic Plugins' + msg +
- '. Completed (%d/%d)' % (stress_count, stress_max_runs))
- log.info('####################################################################\n')
- if replication_run:
- # We're done.
- break
- else:
- log.info('Resume replication and run everything one more time')
- topology_m2.resume_all_replicas()
- replication_run = True
- msg = ' (replication enabled)'
- time.sleep(1)
- ############################################################################
- # Check replication, and data are in sync
- ############################################################################
- check_replicas(topology_m2)
- if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)