Forráskód Böngészése

Ticket 47823 - attribute uniqueness enforced on all subtrees

Bug Description:
	Attribute uniqueness plugin enforces uniqueness on
	each defined subtree where the modified/added entry is located.

	We need the ability to check uniqueness across all the defined subtrees.

	It requires a new configuration attribute for the plugin.
	The name of the new configuration attribute is more explicit ('uniqueness-across-all-subtrees')
	than the old style: nsslapd-pluginarg0, nsslapd-pluginarg1,...

	The new attribute is only supported in new configuration style
		 * uniqueness-attribute-name: uid
		 * uniqueness-subtrees: dc=people,dc=example,dc=com
		 * uniqueness-subtrees: dc=sales, dc=example,dc=com
		 * uniqueness-across-all-subtrees: on

Fix Description:
	The fix supports the new configuration style but still supports the old one:
		 * nsslapd-pluginarg0: uid
		 * nsslapd-pluginarg1: dc=people,dc=example,dc=com
		 * nsslapd-pluginarg2: dc=sales, dc=example,dc=com

	A mix of configuration styles likely results in an invalid configuration, which
	prevents the plugin from starting -> prevents the server from starting

https://fedorahosted.org/389/ticket/47823

Reviewed by: Rich Megginson (thanks Rich for reviews and tips !!)

Platforms tested: F17/F20

Flag Day: no

Doc impact: yes
Thierry bordaz (tbordaz) 11 éve
szülő
commit
c66b5e9f83

+ 1046 - 0
dirsrvtests/tickets/ticket47823_test.py

@@ -0,0 +1,1046 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import socket
+import pytest
+import re
+import shutil
+from lib389 import DirSrv, Entry, tools
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from constants import *
+
+
# Module-level logger shared by all helpers in this test module.
log = logging.getLogger(__name__)

# Optional deployment prefix; may be set externally before the fixture runs.
installation_prefix = None

# Container layout used by the tests:
#   SUFFIX
#     cn=accounts (ACTIVE)            - subtree where uniqueness is enforced
#     cn=provisioning
#       cn=staged users (STAGE)       - second subtree for cross-subtree tests
#       cn=deleted users (DELETE)     - created but not exercised directly
PROVISIONING_CN = "provisioning"
PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX)

ACTIVE_CN = "accounts"
STAGE_CN  = "staged users"
DELETE_CN = "deleted users"
ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX)
STAGE_DN  = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
DELETE_DN  = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)

STAGE_USER_CN = "stage guy"
STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)

ACTIVE_USER_CN = "active guy"
ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN)

ACTIVE_USER_1_CN = "test_1"
ACTIVE_USER_1_DN = "cn=%s,%s" % (ACTIVE_USER_1_CN, ACTIVE_DN)
ACTIVE_USER_2_CN = "test_2"
ACTIVE_USER_2_DN = "cn=%s,%s" % (ACTIVE_USER_2_CN, ACTIVE_DN)

# Stage users deliberately reuse the active users' cn values so that
# cross-subtree uniqueness conflicts can be triggered.
STAGE_USER_1_CN = ACTIVE_USER_1_CN
STAGE_USER_1_DN = "cn=%s,%s" % (STAGE_USER_1_CN, STAGE_DN)
STAGE_USER_2_CN = ACTIVE_USER_2_CN
STAGE_USER_2_DN = "cn=%s,%s" % (STAGE_USER_2_CN, STAGE_DN)

# Union of old-style (nsslapd-pluginarg*) and new-style configuration
# attributes, used when reading back the plugin config entry.
ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-pluginarg2',
                    'uniqueness-attribute-name', 'uniqueness-subtrees', 'uniqueness-across-all-subtrees']
+
class TopologyStandalone(object):
    """Thin holder for the standalone DirSrv instance used by every test."""

    def __init__(self, standalone):
        # Ensure the connection is bound before handing the instance out.
        standalone.open()
        self.standalone = standalone
+
+
@pytest.fixture(scope="module")
def topology(request):
    '''
        This fixture is used to standalone topology for the 'module'.
        At the beginning, It may exists a standalone instance.
        It may also exists a backup for the standalone instance.

        Principle:
            If standalone instance exists:
                restart it
            If backup of standalone exists:
                create/rebind to standalone

                restore standalone instance from backup
            else:
                Cleanup everything
                    remove instance
                    remove backup
                Create instance
                Create backup
    '''
    global installation_prefix

    # Honor an externally-provided deployment prefix, if any.
    if installation_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation_prefix

    standalone = DirSrv(verbose=False)

    # Args for the standalone instance
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)

    # Get the status of the backups
    backup_standalone = standalone.checkBackupFS()

    # Get the status of the instance and restart it if it exists
    instance_standalone = standalone.exists()
    if instance_standalone:
        # assuming the instance is already stopped, just wait 5 sec max
        standalone.stop(timeout=5)
        try:
            standalone.start(timeout=10)
        except ldap.SERVER_DOWN:
            # a previous test may have left the server in a non-startable
            # state; the restore below brings it back
            pass

    if backup_standalone:
        # The backup exist, assuming it is correct
        # we just re-init the instance with it
        if not instance_standalone:
            standalone.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            standalone.open()

        # restore standalone instance from backup
        standalone.stop(timeout=10)
        standalone.restoreFS(backup_standalone)
        standalone.start(timeout=10)

    else:
        # We should be here only in two conditions
        #      - This is the first time a test involve standalone instance
        #      - Something weird happened (instance/backup destroyed)
        #        so we discard everything and recreate all

        # Remove the backup. So even if we have a specific backup file
        # (e.g backup_standalone) we clear backup that an instance may have created
        if backup_standalone:
            standalone.clearBackupFS()

        # Remove the instance
        if instance_standalone:
            standalone.delete()

        # Create the instance
        standalone.create()

        # Used to retrieve configuration information (dbdir, confdir...)
        standalone.open()

        # Time to create the backups
        standalone.stop(timeout=10)
        standalone.backupfile = standalone.backupFS()
        standalone.start(timeout=10)

    #
    # Here we have standalone instance up and running
    # Either coming from a backup recovery
    # or from a fresh (re)init
    # Time to return the topology
    return TopologyStandalone(standalone)
+
+def _header(topology, label):
+    topology.standalone.log.info("\n\n###############################################")
+    topology.standalone.log.info("#######")
+    topology.standalone.log.info("####### %s" % label)
+    topology.standalone.log.info("#######")
+    topology.standalone.log.info("###############################################")
+
def _uniqueness_config_entry(topology, name=None):
    """Return a template config entry for an attribute-uniqueness plugin.

    The entry is cloned from the installed 'attribute uniqueness' plugin
    definition, with its DN rewritten to 'cn=<name> uniqueness,cn=plugins,...'.
    It is NOT added to the server; callers set the config attributes and
    add it themselves. Returns None when no name is given.
    """
    if not name:
        return None

    # Read the stock plugin entry to inherit path/init function/type, etc.
    ent = topology.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE,
                                    "(objectclass=nsSlapdPlugin)",
                                    ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc',
                                     'nsslapd-pluginType', 'nsslapd-pluginEnabled', 'nsslapd-plugin-depends-on-type',
                                     'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor',
                                     'nsslapd-pluginDescription'])
    # Give the clone its own DN so it can be added alongside the original.
    ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN)
    return ent
+
def _build_config(topology, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old', across_subtrees=False):
    """Build (but do not add) an attribute-uniqueness plugin config entry.

    type_config selects the legacy 'nsslapd-pluginarg*' style or the new
    named-attribute style; across_subtrees is only meaningful for the new
    style and maps to 'uniqueness-across-all-subtrees: on'.
    """
    assert topology
    assert attr_name
    assert subtree_1

    # Prepare the template entry once; only the attributes differ per style.
    config = _uniqueness_config_entry(topology, attr_name)
    if type_config == 'old':
        # Legacy positional-argument style.
        config.setValue('nsslapd-pluginarg0', attr_name)
        config.setValue('nsslapd-pluginarg1', subtree_1)
        if subtree_2:
            config.setValue('nsslapd-pluginarg2', subtree_2)
    else:
        # New explicit-attribute style introduced by ticket 47823.
        config.setValue('uniqueness-attribute-name', attr_name)
        config.setValue('uniqueness-subtrees', subtree_1)
        if subtree_2:
            config.setValue('uniqueness-subtrees', subtree_2)
        if across_subtrees:
            config.setValue('uniqueness-across-all-subtrees', 'on')
    return config
+
def _active_container_invalid_cfg_add(topology):
    '''
    Check uniqueness is not enforced with ADD (invalid config)
    '''
    # Both ADDs must succeed: an invalid plugin config disables the check,
    # so the duplicated 'cn' value on the second entry is accepted.
    entries = [
        (ACTIVE_USER_1_DN, ACTIVE_USER_1_CN, ACTIVE_USER_1_CN),
        (ACTIVE_USER_2_DN, ACTIVE_USER_2_CN, [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]),
    ]
    for dn, sn, cn in entries:
        topology.standalone.add_s(Entry((dn, {'objectclass': "top person".split(),
                                              'sn': sn,
                                              'cn': cn})))

    # Clean up so later tests start from an empty container.
    topology.standalone.delete_s(ACTIVE_USER_1_DN)
    topology.standalone.delete_s(ACTIVE_USER_2_DN)
+
def _active_container_add(topology, type_config='old'):
    '''
    Check uniqueness in a single container (Active)
    Add an entry with a given 'cn', then check we can not add an entry with the same 'cn' value

    First verifies that WITHOUT the config entry both adds succeed, then
    verifies that WITH the config entry the duplicate add is rejected.
    '''
    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)

    # remove the 'cn' uniqueness entry
    try:
        topology.standalone.delete_s(config.dn)

    except ldap.NO_SUCH_OBJECT:
        pass
    # plugin config changes only take effect after a restart
    topology.standalone.restart(timeout=120)

    topology.standalone.log.info('Uniqueness not enforced: create the entries')

    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
                                            'objectclass': "top person".split(),
                                            'sn':           ACTIVE_USER_1_CN,
                                            'cn':           ACTIVE_USER_1_CN})))

    # second entry reuses ACTIVE_USER_1_CN: accepted because plugin is off
    topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
                                        'objectclass': "top person".split(),
                                        'sn':           ACTIVE_USER_2_CN,
                                        'cn':           [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))

    topology.standalone.delete_s(ACTIVE_USER_1_DN)
    topology.standalone.delete_s(ACTIVE_USER_2_DN)


    topology.standalone.log.info('Uniqueness enforced: checks second entry is rejected')

    # enable the 'cn' uniqueness on Active
    topology.standalone.add_s(config)
    topology.standalone.restart(timeout=120)
    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
                                            'objectclass': "top person".split(),
                                            'sn':           ACTIVE_USER_1_CN,
                                            'cn':           ACTIVE_USER_1_CN})))

    try:
        topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
                                        'objectclass': "top person".split(),
                                        'sn':           ACTIVE_USER_2_CN,
                                        'cn':           [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
    except ldap.CONSTRAINT_VIOLATION:
        # yes it is expected
        pass

    # cleanup the stuff now (USER_2 was rejected, only USER_1 exists)
    topology.standalone.delete_s(config.dn)
    topology.standalone.delete_s(ACTIVE_USER_1_DN)
+
+
+    
def _active_container_mod(topology, type_config='old'):
    '''
    Check uniqueness in a single container (active)
    Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value

    Covers both MOD_ADD and MOD_REPLACE of a duplicated 'cn'.
    '''

    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)

    # enable the 'cn' uniqueness on Active
    topology.standalone.add_s(config)
    # plugin config changes only take effect after a restart
    topology.standalone.restart(timeout=120)

    topology.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected')
    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
                                            'objectclass': "top person".split(),
                                            'sn':           ACTIVE_USER_1_CN,
                                            'cn':           ACTIVE_USER_1_CN})))

    topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
                                    'objectclass': "top person".split(),
                                    'sn':           ACTIVE_USER_2_CN,
                                    'cn':           ACTIVE_USER_2_CN})))

    try:
        # adding USER_1's cn to USER_2 must violate uniqueness
        topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ACTIVE_USER_1_CN)])
    except ldap.CONSTRAINT_VIOLATION:
        # yes it is expected
        pass

    topology.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected')
    try:
        topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_REPLACE, 'cn', [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN])])
    except ldap.CONSTRAINT_VIOLATION:
        # yes it is expected
        pass

    # cleanup the stuff now (both users were added successfully)
    topology.standalone.delete_s(config.dn)
    topology.standalone.delete_s(ACTIVE_USER_1_DN)
    topology.standalone.delete_s(ACTIVE_USER_2_DN)
+    
def _active_container_modrdn(topology, type_config='old'):
    '''
    Check uniqueness in a single container
    Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value

    '''
    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)

    # enable the 'cn' uniqueness on Active
    topology.standalone.add_s(config)
    # plugin config changes only take effect after a restart
    topology.standalone.restart(timeout=120)

    topology.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected')

    # USER_1 holds the value 'dummy' that the rename below would duplicate
    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
                                            'objectclass': "top person".split(),
                                            'sn':           ACTIVE_USER_1_CN,
                                            'cn':           [ACTIVE_USER_1_CN, 'dummy']})))

    topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
                                    'objectclass': "top person".split(),
                                    'sn':           ACTIVE_USER_2_CN,
                                    'cn':           ACTIVE_USER_2_CN})))

    try:
        # renaming USER_2 to cn=dummy duplicates USER_1's 'dummy' value
        topology.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0)
    except ldap.CONSTRAINT_VIOLATION:
        # yes it is expected
        pass


    # cleanup the stuff now (the rename was rejected, DNs are unchanged)
    topology.standalone.delete_s(config.dn)
    topology.standalone.delete_s(ACTIVE_USER_1_DN)
    topology.standalone.delete_s(ACTIVE_USER_2_DN)
+
def _active_stage_containers_add(topology, type_config='old', across_subtrees=False):
    '''
    Check uniqueness in several containers
    Add an entry on a container with a given 'cn'
    with across_subtrees=False check we CAN add an entry with the same 'cn' value on the other container
    with across_subtrees=True check we CAN NOT add an entry with the same 'cn' value on the other container

    '''
    # Bug fix: forward 'across_subtrees' instead of hard-coding False;
    # otherwise the across-all-subtrees configuration is never exercised.
    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN,
                           type_config=type_config, across_subtrees=across_subtrees)

    topology.standalone.add_s(config)
    # plugin config changes only take effect after a restart
    topology.standalone.restart(timeout=120)
    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
                                            'objectclass': "top person".split(),
                                            'sn':           ACTIVE_USER_1_CN,
                                            'cn':           ACTIVE_USER_1_CN})))
    stage_added = False
    try:
        # adding an entry on a separated container with the same 'cn'
        topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
                                    'objectclass': "top person".split(),
                                    'sn':           STAGE_USER_1_CN,
                                    'cn':           ACTIVE_USER_1_CN})))
        stage_added = True
    except ldap.CONSTRAINT_VIOLATION:
        # a rejection is only expected when uniqueness spans all subtrees
        assert across_subtrees

    # cleanup the stuff now
    topology.standalone.delete_s(config.dn)
    topology.standalone.delete_s(ACTIVE_USER_1_DN)
    if stage_added:
        # Bug fix: only delete the stage entry if its ADD was accepted,
        # otherwise the unconditional delete raised NO_SUCH_OBJECT.
        topology.standalone.delete_s(STAGE_USER_1_DN)
+    
def _active_stage_containers_mod(topology, type_config='old', across_subtrees=False):
    '''
    Check uniqueness in a several containers
    Add an entry on a container with a given 'cn', then check whether we can
    MOD (add/replace) an entry with the same 'cn' value on the other container:
    allowed when across_subtrees=False, rejected when across_subtrees=True.

    '''
    # Bug fix: forward 'across_subtrees' instead of hard-coding False;
    # otherwise the across-all-subtrees configuration is never exercised.
    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN,
                           type_config=type_config, across_subtrees=across_subtrees)

    topology.standalone.add_s(config)
    # plugin config changes only take effect after a restart
    topology.standalone.restart(timeout=120)
    # adding an entry on active with a different 'cn'
    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
                                            'objectclass': "top person".split(),
                                            'sn':           ACTIVE_USER_1_CN,
                                            'cn':           ACTIVE_USER_2_CN})))

    # adding an entry on a stage with a different 'cn'
    topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
                                    'objectclass': "top person".split(),
                                    'sn':           STAGE_USER_1_CN,
                                    'cn':           STAGE_USER_1_CN})))

    try:

        # modify add same value
        topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ACTIVE_USER_2_CN])])
    except ldap.CONSTRAINT_VIOLATION:
        assert across_subtrees

    topology.standalone.delete_s(STAGE_USER_1_DN)
    topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
                                    'objectclass': "top person".split(),
                                    'sn':           STAGE_USER_1_CN,
                                    'cn':           STAGE_USER_2_CN})))
    try:
        # modify replace same value
        topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_REPLACE, 'cn', [STAGE_USER_2_CN, ACTIVE_USER_1_CN])])
    except ldap.CONSTRAINT_VIOLATION:
        assert across_subtrees

    # cleanup the stuff now
    topology.standalone.delete_s(config.dn)
    topology.standalone.delete_s(ACTIVE_USER_1_DN)
    topology.standalone.delete_s(STAGE_USER_1_DN)
+    
def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False):
    '''
    Check uniqueness in a several containers
    Add and entry with a given 'cn', then check whether we can MODRDN an entry
    to the same 'cn' value on the other container: allowed when
    across_subtrees=False, rejected when across_subtrees=True.

    '''

    # Bug fix: forward 'across_subtrees' instead of hard-coding False;
    # otherwise the across-all-subtrees configuration is never exercised.
    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN,
                           type_config=type_config, across_subtrees=across_subtrees)

    # enable the 'cn' uniqueness on Active and Stage
    topology.standalone.add_s(config)
    topology.standalone.restart(timeout=120)
    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
                                            'objectclass': "top person".split(),
                                            'sn':           ACTIVE_USER_1_CN,
                                            'cn':           [ACTIVE_USER_1_CN, 'dummy']})))

    topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
                                    'objectclass': "top person".split(),
                                    'sn':           STAGE_USER_1_CN,
                                    'cn':           STAGE_USER_1_CN})))


    try:

        topology.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0)

        # check stage entry has 'cn=dummy'
        stage_ent = topology.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*", ['cn'])
        assert stage_ent.hasAttr('cn')
        found = False
        for value in stage_ent.getValues('cn'):
            if value == 'dummy':
                found = True
        assert found

        # check active entry has 'cn=dummy'
        active_ent = topology.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn'])
        assert active_ent.hasAttr('cn')
        found = False
        # Bug fix: iterate the ACTIVE entry's values; the original code
        # re-checked stage_ent here (copy-paste error).
        for value in active_ent.getValues('cn'):
            if value == 'dummy':
                found = True
        assert found

        topology.standalone.delete_s("cn=dummy,%s" % (STAGE_DN))
    except ldap.CONSTRAINT_VIOLATION:
        # a rejection is only expected when uniqueness spans all subtrees;
        # the stage entry keeps its original DN in that case
        assert across_subtrees
        topology.standalone.delete_s(STAGE_USER_1_DN)



    # cleanup the stuff now
    topology.standalone.delete_s(config.dn)
    topology.standalone.delete_s(ACTIVE_USER_1_DN)
+    
+def _config_file(topology, action='save'):
+    dse_ldif = topology.standalone.confdir + '/dse.ldif'
+    sav_file = topology.standalone.confdir + '/dse.ldif.ticket47823'
+    if action == 'save':
+        shutil.copy(dse_ldif, sav_file)
+    else:
+        shutil.copy(sav_file, dse_ldif)
+    
def _pattern_errorlog(file, log_pattern):
    """Search *file* (the server error log) line by line for *log_pattern*.

    The scan resumes where the previous call stopped (offset cached on the
    function object), so successive calls only examine newly appended lines.
    Returns the re match object, or None if the pattern was not found before
    end of file.
    """
    # Resume from the previously recorded offset (0 on the first call).
    # Bug fix: the old code advanced the saved offset by one byte before
    # seeking, which skipped the first character of the unread region and
    # could miss a match starting exactly at the resume point.
    last_pos = getattr(_pattern_errorlog, 'last_pos', 0)

    found = None
    log.debug("_pattern_errorlog: start at offset %d" % last_pos)
    file.seek(last_pos)

    # Use a while true iteration because 'for line in file: hit a
    # python bug that break file.tell()
    while True:
        line = file.readline()
        log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line))
        found = log_pattern.search(line)
        if ((line == '') or (found)):
            break

    log.debug("_pattern_errorlog: end at offset %d" % file.tell())
    _pattern_errorlog.last_pos = file.tell()
    return found
+    
def test_ticket47823_init(topology):
    """
    Enable the attribute-uniqueness plugin and create the container tree
    (provisioning / accounts / staged users / deleted users) used by all
    the following tests, then restart the server so the plugin is active.
    """

    # Enabled the plugins
    topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
    topology.standalone.restart(timeout=120)

    topology.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(),
                                                       'cn': PROVISIONING_CN})))
    topology.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(),
                                                 'cn': ACTIVE_CN})))
    topology.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(),
                                                'cn': STAGE_CN})))
    topology.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(),
                                                 'cn': DELETE_CN})))
    # keep a handle on the error log so _pattern_errorlog can scan it later
    topology.standalone.errorlog_file = open(topology.standalone.errlog, "r")

    # bounce the server to start from a clean error-log position
    topology.standalone.stop(timeout=120)
    time.sleep(1)
    topology.standalone.start(timeout=120)
    time.sleep(3)
+
+    
def test_ticket47823_one_container_add(topology):
    '''
    Check uniqueness in a single container
    Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value

    '''
    # Exercise the same ADD scenario with both configuration styles.
    for style, label in (('old', 'former'), ('new', 'new')):
        _header(topology, "With %s config (args), check attribute uniqueness with 'cn' (ADD) " % label)
        _active_container_add(topology, type_config=style)
+    
def test_ticket47823_one_container_mod(topology):
    '''
    Check uniqueness in a single container
    Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value

    '''
    # Exercise the same MOD scenario with both configuration styles.
    for style, label in (('old', 'former'), ('new', 'new')):
        _header(topology, "With %s config (args), check attribute uniqueness with 'cn' (MOD)" % label)
        _active_container_mod(topology, type_config=style)
+    
+        
+    
def test_ticket47823_one_container_modrdn(topology):
    '''
    Check uniqueness in a single container
    Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value

    '''
    _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")

    _active_container_modrdn(topology, type_config='old')

    # Bug fix: the banner said 'former' although the new config style is
    # tested here (matches the sibling ADD/MOD tests).
    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MODRDN)")

    _active_container_modrdn(topology, type_config='new')
+    
def test_ticket47823_multi_containers_add(topology):
    '''
    Check uniqueness in a several containers
    Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value

    '''
    # Exercise both configuration styles; uniqueness stays per-subtree.
    for style, label in (('old', 'former'), ('new', 'new')):
        _header(topology, "With %s config (args), check attribute uniqueness with 'cn' (ADD) " % label)
        _active_stage_containers_add(topology, type_config=style, across_subtrees=False)
+    
def test_ticket47823_multi_containers_mod(topology):
    '''
    Check uniqueness in a several containers
    Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container

    '''
    # Exercise both configuration styles; uniqueness stays per-subtree.
    for style, label in (('old', 'former'), ('new', 'new')):
        _header(topology, "With %s config (args), check attribute uniqueness with 'cn' (MOD) on separated container" % label)
        topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
        _active_stage_containers_mod(topology, type_config=style, across_subtrees=False)
+    
def test_ticket47823_multi_containers_modrdn(topology):
    '''
    Check uniqueness in a several containers
    Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container

    '''
    _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers")

    topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
    _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False)

    # Bug fix: the second run must exercise the NEW config style (the
    # original passed type_config='old' twice, a copy-paste error; all
    # sibling tests run 'old' then 'new'). Also pass across_subtrees
    # explicitly, and add the missing banner for the new-style run.
    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers")

    topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
    _active_stage_containers_modrdn(topology, type_config='new', across_subtrees=False)
+
def test_ticket47823_across_multi_containers_add(topology):
    '''
    Check uniqueness across several containers, uniquely with the new configuration
    Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value

    '''
    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers")

    # Bug fix: 'uniqueness-across-all-subtrees' only exists in the new
    # config style, so this test must use type_config='new' (the original
    # passed 'old', which cannot enforce across-subtree uniqueness).
    _active_stage_containers_add(topology, type_config='new', across_subtrees=True)
+    
def test_ticket47823_across_multi_containers_mod(topology):
    '''
    Check uniqueness across several containers, uniquely with the new configuration
    Add and entry with a given 'cn', then check we can not modifiy an entry with the same 'cn' value

    '''
    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers")

    # Bug fix: across-all-subtrees requires the new config style; the
    # original passed type_config='old'.
    _active_stage_containers_mod(topology, type_config='new', across_subtrees=True)
+
def test_ticket47823_across_multi_containers_modrdn(topology):
    '''
    Check uniqueness across several containers, uniquely with the new configuration
    Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value

    '''
    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers")

    # Bug fix: across-all-subtrees requires the new config style; the
    # original passed type_config='old'.
    _active_stage_containers_modrdn(topology, type_config='new', across_subtrees=True)
+    
def test_ticket47823_invalid_config_1(topology):
    '''
    Check that an invalid config is detected. No uniqueness enforced
    Using old config: arg0 (the attribute name) is missing
    '''
    _header(topology, "Invalid config (old): arg0 is missing")

    # keep a good copy of dse.ldif so the server can be repaired afterwards
    _config_file(topology, action='save')

    # create an invalid config without arg0
    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)

    del config.data['nsslapd-pluginarg0']
    # replace 'cn' uniqueness entry
    try:
        topology.standalone.delete_s(config.dn)

    except ldap.NO_SUCH_OBJECT:
        pass
    topology.standalone.add_s(config)

    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)

    # Check the server did not restart: an invalid plugin config must
    # prevent the server from starting (SERVER_DOWN is the expected outcome)
    try:
        topology.standalone.restart(timeout=5)
        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
        if ent:
            # be sure to restore a valid config before assert
            _config_file(topology, action='restore')
        assert not ent
    except ldap.SERVER_DOWN:
            pass

    # Check the expected error message
    regex = re.compile("Config info: attribute name not defined")
    res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
    if not res:
        # be sure to restore a valid config before assert
        _config_file(topology, action='restore')
    assert res

    # Check we can restart the server
    _config_file(topology, action='restore')
    topology.standalone.start(timeout=5)
    try:
        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
    except ldap.NO_SUCH_OBJECT:
        pass
+
def test_ticket47823_invalid_config_2(topology):
    '''
    Check that an invalid config is detected. No uniqueness enforced
    Using old config: arg1 (the subtree) is missing
    '''
    _header(topology, "Invalid config (old): arg1 is missing")

    # keep a good copy of dse.ldif so the server can be repaired afterwards
    _config_file(topology, action='save')

    # create an invalid config without arg1
    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)

    del config.data['nsslapd-pluginarg1']
    # replace 'cn' uniqueness entry
    try:
        topology.standalone.delete_s(config.dn)

    except ldap.NO_SUCH_OBJECT:
        pass
    topology.standalone.add_s(config)

    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)

    # Check the server did not restart: an invalid plugin config must
    # prevent the server from starting (SERVER_DOWN is the expected outcome)
    try:
        topology.standalone.restart(timeout=5)
        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
        if ent:
            # be sure to restore a valid config before assert
            _config_file(topology, action='restore')
        assert not ent
    except ldap.SERVER_DOWN:
            pass

    # Check the expected error message
    regex = re.compile("Config info: No valid subtree is defined")
    res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
    if not res:
        # be sure to restore a valid config before assert
        _config_file(topology, action='restore')
    assert res

    # Check we can restart the server
    _config_file(topology, action='restore')
    topology.standalone.start(timeout=5)
    try:
        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
    except ldap.NO_SUCH_OBJECT:
        pass
+
+def test_ticket47823_invalid_config_3(topology):
+    '''
+    Check that an invalid config is detected. No uniqueness enforced
+    Using old config: arg0 is missing (while new-style uniqueness-attribute-name is present)
+    '''
+    _header(topology, "Invalid config (old): arg0 is missing but new config attrname exists")
+    
+    _config_file(topology, action='save')
+    
+    # create an invalid config without arg0
+    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+    
+    del config.data['nsslapd-pluginarg0']
+    config.data['uniqueness-attribute-name'] = 'cn'
+    # replace 'cn' uniqueness entry
+    try:
+        topology.standalone.delete_s(config.dn)
+        
+    except ldap.NO_SUCH_OBJECT:
+        pass
+    topology.standalone.add_s(config)
+
+    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)  
+    
+    # Check the server did not restart
+    try:
+        topology.standalone.restart(timeout=5)
+        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        if ent:
+            # be sure to restore a valid config before assert
+            _config_file(topology, action='restore')
+        assert not ent
+    except ldap.SERVER_DOWN:
+            pass
+    
+    # Check the expected error message
+    regex = re.compile("Config info: objectclass for subtree entries is not defined")
+    res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+    if not res:
+        # be sure to restore a valid config before assert
+        _config_file(topology, action='restore')    
+    assert res
+    
+    # Check we can restart the server
+    _config_file(topology, action='restore')
+    topology.standalone.start(timeout=5)
+    try:
+        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    except ldap.NO_SUCH_OBJECT:
+        pass
+    
+def test_ticket47823_invalid_config_4(topology):
+    '''
+    Check that an invalid config is detected. No uniqueness enforced
+    Using old config: arg1 is missing (while new-style uniqueness-subtrees is present)
+    '''
+    _header(topology, "Invalid config (old): arg1 is missing but new config exist")
+    
+    _config_file(topology, action='save')
+    
+    # create an invalid old-style config with nsslapd-pluginarg1 removed (a new-style subtree attr is added below)
+    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+    
+    del config.data['nsslapd-pluginarg1']
+    config.data['uniqueness-subtrees'] = ACTIVE_DN
+    # replace 'cn' uniqueness entry
+    try:
+        topology.standalone.delete_s(config.dn)
+        
+    except ldap.NO_SUCH_OBJECT:
+        pass
+    topology.standalone.add_s(config)
+
+    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)  
+    
+    # Check the server did not restart
+    try:
+        topology.standalone.restart(timeout=5)
+        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        if ent:
+            # be sure to restore a valid config before assert
+            _config_file(topology, action='restore')
+        assert not ent
+    except ldap.SERVER_DOWN:
+            pass
+    
+    # Check the expected error message
+    regex = re.compile("Config info: No valid subtree is defined")
+    res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+    if not res:
+        # be sure to restore a valid config before assert
+        _config_file(topology, action='restore')    
+    assert res
+    
+    # Check we can restart the server
+    _config_file(topology, action='restore')
+    topology.standalone.start(timeout=5)
+    try:
+        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    except ldap.NO_SUCH_OBJECT:
+        pass
+    
+def test_ticket47823_invalid_config_5(topology):
+    '''
+    Check that an invalid config is detected. No uniqueness enforced
+    Using new config: uniqueness-attribute-name is missing
+    '''
+    _header(topology, "Invalid config (new): uniqueness-attribute-name is missing")
+    
+    _config_file(topology, action='save')
+    
+    # create a new-style config, then remove uniqueness-attribute-name to make it invalid
+    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False)
+    
+    del config.data['uniqueness-attribute-name']
+    # replace 'cn' uniqueness entry
+    try:
+        topology.standalone.delete_s(config.dn)
+        
+    except ldap.NO_SUCH_OBJECT:
+        pass
+    topology.standalone.add_s(config)
+
+    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)  
+    
+    # Check the server did not restart
+    try:
+        topology.standalone.restart(timeout=5)
+        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        if ent:
+            # be sure to restore a valid config before assert
+            _config_file(topology, action='restore')
+        assert not ent
+    except ldap.SERVER_DOWN:
+            pass
+    
+    # Check the expected error message
+    regex = re.compile("Config info: attribute name not defined")
+    res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+    if not res:
+        # be sure to restore a valid config before assert
+        _config_file(topology, action='restore')    
+    assert res
+    
+    # Check we can restart the server
+    _config_file(topology, action='restore')
+    topology.standalone.start(timeout=5)
+    try:
+        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    except ldap.NO_SUCH_OBJECT:
+        pass
+
+def test_ticket47823_invalid_config_6(topology):
+    '''
+    Check that an invalid config is detected. No uniqueness enforced
+    Using new config: uniqueness-subtrees is missing
+    '''
+    _header(topology, "Invalid config (new): uniqueness-subtrees is missing")
+    
+    _config_file(topology, action='save')
+    
+    # create a new-style config, then remove uniqueness-subtrees to make it invalid
+    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False)
+    
+    del config.data['uniqueness-subtrees']
+    # replace 'cn' uniqueness entry
+    try:
+        topology.standalone.delete_s(config.dn)
+        
+    except ldap.NO_SUCH_OBJECT:
+        pass
+    topology.standalone.add_s(config)
+
+    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)  
+    
+    # Check the server did not restart
+    try:
+        topology.standalone.restart(timeout=5)
+        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        if ent:
+            # be sure to restore a valid config before assert
+            _config_file(topology, action='restore')
+        assert not ent
+    except ldap.SERVER_DOWN:
+            pass
+    
+    # Check the expected error message
+    regex = re.compile("Config info: objectclass for subtree entries is not defined")
+    res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+    if not res:
+        # be sure to restore a valid config before assert
+        _config_file(topology, action='restore')    
+    assert res
+    
+    # Check we can restart the server
+    _config_file(topology, action='restore')
+    topology.standalone.start(timeout=5)
+    try:
+        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    except ldap.NO_SUCH_OBJECT:
+        pass
+    
+def test_ticket47823_invalid_config_7(topology):
+    '''
+    Check that an invalid config is detected. No uniqueness enforced
+    Using new config: uniqueness-subtrees values are invalid DNs
+    '''
+    _header(topology, "Invalid config (new): uniqueness-subtrees are invalid")
+    
+    _config_file(topology, action='save')
+    
+    # create a new-style config whose uniqueness-subtrees values are invalid DNs
+    config = _build_config(topology, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN", type_config='new', across_subtrees=False)
+    
+    # replace 'cn' uniqueness entry
+    try:
+        topology.standalone.delete_s(config.dn)
+        
+    except ldap.NO_SUCH_OBJECT:
+        pass
+    topology.standalone.add_s(config)
+
+    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)  
+    
+    # Check the server did not restart
+    try:
+        topology.standalone.restart(timeout=5)
+        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        if ent:
+            # be sure to restore a valid config before assert
+            _config_file(topology, action='restore')
+        assert not ent
+    except ldap.SERVER_DOWN:
+            pass
+    
+    # Check the expected error message
+    regex = re.compile("Config info: No valid subtree is defined")
+    res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+    if not res:
+        # be sure to restore a valid config before assert
+        _config_file(topology, action='restore')    
+    assert res
+    
+    # Check we can restart the server
+    _config_file(topology, action='restore')
+    topology.standalone.start(timeout=5)
+    try:
+        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    except ldap.NO_SUCH_OBJECT:
+        pass
+def test_ticket47823_final(topology):
+    topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+    '''
+        run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+        To run isolated without py.test, you need to
+            - edit this file and comment '@pytest.fixture' line before 'topology' function.
+            - set the installation prefix
+            - run this program
+    '''
+    global installation_prefix
+    installation_prefix = None
+
+    topo = topology(True)
+    test_ticket47823_init(topo)
+    
+    # run old/new config style that makes uniqueness checking on one subtree
+    test_ticket47823_one_container_add(topo)
+    test_ticket47823_one_container_mod(topo)
+    test_ticket47823_one_container_modrdn(topo)
+    
+    # run old config style that makes uniqueness checking on each defined subtrees
+    test_ticket47823_multi_containers_add(topo)
+    test_ticket47823_multi_containers_mod(topo)
+    test_ticket47823_multi_containers_modrdn(topo)
+    test_ticket47823_across_multi_containers_add(topo)
+    test_ticket47823_across_multi_containers_mod(topo)
+    test_ticket47823_across_multi_containers_modrdn(topo)
+    
+    test_ticket47823_invalid_config_1(topo)
+    test_ticket47823_invalid_config_2(topo)
+    test_ticket47823_invalid_config_3(topo)
+    test_ticket47823_invalid_config_4(topo)
+    test_ticket47823_invalid_config_5(topo)
+    test_ticket47823_invalid_config_6(topo)
+    test_ticket47823_invalid_config_7(topo)
+    
+    test_ticket47823_final(topo)
+    
+
+if __name__ == '__main__':
+    run_isolated()

+ 3 - 2
ldap/ldif/template-dse.ldif.in

@@ -626,8 +626,9 @@ nsslapd-pluginpath: libattr-unique-plugin
 nsslapd-plugininitfunc: NSUniqueAttr_Init
 nsslapd-plugintype: betxnpreoperation
 nsslapd-pluginenabled: off
-nsslapd-pluginarg0: uid
-nsslapd-pluginarg1: %ds_suffix%
+uniqueness-attribute-name: uid
+uniqueness-subtrees: %ds_suffix%
+uniqueness-across-all-subtrees: off
 nsslapd-plugin-depends-on-type: database
 
 dn: cn=7-bit check,cn=plugins,cn=config

+ 357 - 100
ldap/servers/plugins/uiduniq/uid.c

@@ -99,7 +99,23 @@ pluginDesc = {
 	"Enforce unique attribute values" 
 };
 static void* plugin_identity = NULL;
-
+typedef struct attr_uniqueness_config {
+        char *attr;
+        Slapi_DN **subtrees;
+        PRBool unique_in_all_subtrees;
+        char *top_entry_oc;
+        char *subtree_entries_oc;
+        struct attr_uniqueness_config *next;
+} attr_uniqueness_config_t;
+
+#define ATTR_UNIQUENESS_ATTRIBUTE_NAME      "uniqueness-attribute-name"
+#define ATTR_UNIQUENESS_SUBTREES            "uniqueness-subtrees"
+#define ATTR_UNIQUENESS_ACROSS_ALL_SUBTREES "uniqueness-across-all-subtrees"
+#define ATTR_UNIQUENESS_TOP_ENTRY_OC        "uniqueness-top-entry-oc"
+#define ATTR_UNIQUENESS_SUBTREE_ENTRIES_OC  "uniqueness-subtree-entries-oc"
+
+static int getArguments(Slapi_PBlock *pb, char **attrName, char **markerObjectClass, char **requiredObjectClass);
+static struct attr_uniqueness_config *uniqueness_entry_to_config(Slapi_PBlock *pb, Slapi_Entry *config_entry);
 
 /*
  * More information about constraint failure
@@ -107,6 +123,262 @@ static void* plugin_identity = NULL;
 static char *moreInfo =
   "Another entry with the same attribute value already exists (attribute: \"%s\")";
 
+static void
+free_uniqueness_config(struct attr_uniqueness_config *config)
+{
+        int i;
+        
+        slapi_ch_free_string((char **) &config->attr);
+        for (i = 0; config->subtrees && config->subtrees[i]; i++) {
+                slapi_sdn_free(&config->subtrees[i]);
+        }
+        slapi_ch_free((void **) &config->subtrees);
+        slapi_ch_free_string((char **) &config->top_entry_oc);
+        slapi_ch_free_string((char **) &config->subtree_entries_oc);       
+}
+
+/*
+ * New styles: 
+ * ----------
+ * 
+ * uniqueness-attribute-name: uid
+ * uniqueness-subtrees: dc=people,dc=example,dc=com
+ * uniqueness-subtrees: dc=sales, dc=example,dc=com
+ * uniqueness-across-all-subtrees: on
+ * 
+ * or
+ * 
+ * uniqueness-attribute-name: uid
+ * uniqueness-top-entry-oc: organizationalUnit
+ * uniqueness-subtree-entries-oc: person
+ * 
+ * If both are present:
+ *  - uniqueness-subtrees
+ *  - uniqueness-top-entry-oc/uniqueness-subtree-entries-oc
+ * Then uniqueness-subtrees has the priority
+ * 
+ * Old styles:
+ * ----------
+ * 
+ * nsslapd-pluginarg0: uid
+ * nsslapd-pluginarg1: dc=people,dc=example,dc=com
+ * nsslapd-pluginarg2: dc=sales, dc=example,dc=com
+ * 
+ * or
+ * 
+ * nsslapd-pluginarg0: attribute=uid
+ * nsslapd-pluginarg1: markerobjectclass=organizationalUnit
+ * nsslapd-pluginarg2: requiredobjectclass=person
+ *
+ * From a Slapi_Entry of the config entry, it creates a attr_uniqueness_config.
+ * It returns a (attr_uniqueness_config *) if the configuration is valid
+ * Else it returns NULL
+ */
+static struct attr_uniqueness_config *
+uniqueness_entry_to_config(Slapi_PBlock *pb, Slapi_Entry *config_entry) 
+{
+        attr_uniqueness_config_t *tmp_config = NULL;
+        char **values = NULL;
+        int argc;
+        char **argv = NULL;
+        int rc = SLAPI_PLUGIN_SUCCESS;
+        int i;
+        int nb_subtrees = 0;
+
+        if (config_entry == NULL) {
+                rc = SLAPI_PLUGIN_FAILURE;
+                goto done;
+        }
+
+        
+        /* We are going to fill tmp_config in a first phase */
+        if ((tmp_config = (attr_uniqueness_config_t *) slapi_ch_calloc(1, sizeof (attr_uniqueness_config_t))) == NULL) {
+                slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "uniqueness_entry_to_config failed to allocate configuration\n");
+                rc = SLAPI_PLUGIN_FAILURE;
+                goto done;
+        } else {
+                /* set these to -1 for config validation */
+
+        }
+        
+        /* Check if this is new/old config style */
+        slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc);
+        if (argc == 0) {
+                /* This is new config style
+                 * uniqueness-attribute-name: uid
+                 * uniqueness-subtrees: dc=people,dc=example,dc=com
+                 * uniqueness-subtrees: dc=sales, dc=example,dc=com
+                 * uniqueness-across-all-subtrees: on
+                 * 
+                 * or
+                 * 
+                 * uniqueness-attribute-name: uid
+                 * uniqueness-top-entry-oc: organizationalUnit
+                 * uniqueness-subtree-entries-oc: person
+                 */
+
+                /* Attribute name of the attribute we are going to check value uniqueness */
+                tmp_config->attr = slapi_entry_attr_get_charptr(config_entry, ATTR_UNIQUENESS_ATTRIBUTE_NAME);
+                
+                /* Subtrees where uniqueness is tested  */
+                values = slapi_entry_attr_get_charray(config_entry, ATTR_UNIQUENESS_SUBTREES);
+                if (values) {
+
+
+                        for (i = 0; values && values[i]; i++);
+                        if ((tmp_config->subtrees = (Slapi_DN **) slapi_ch_calloc(i + 1, sizeof (Slapi_DN *))) == NULL) {
+                                slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "Config info: Fail to allocate subtree array \n");
+                                rc = SLAPI_PLUGIN_FAILURE;
+                                goto done;
+                        }
+
+                        /* copy the valid subtree DN into the config */
+                        for (i = 0, nb_subtrees = 0; values && values[i]; i++) {
+                                if (slapi_dn_syntax_check(pb, values[i], 1)) { /* syntax check failed */
+                                        slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "Config info: Invalid DN (skipped): %s\n", values[i]);
+                                        continue;
+                                }
+                                tmp_config->subtrees[nb_subtrees] = slapi_sdn_new_dn_byval(values[i]);
+                                nb_subtrees++;
+
+                        }
+
+                        slapi_ch_array_free(values);
+                        values = NULL;
+                }
+
+                /* Uniqueness may be enforced across all the subtrees, by default it is not */
+                tmp_config->unique_in_all_subtrees = slapi_entry_attr_get_bool(config_entry, ATTR_UNIQUENESS_ACROSS_ALL_SUBTREES);
+                
+                /* enforce uniqueness only if the modified entry has this objectclass */
+                tmp_config->top_entry_oc = slapi_entry_attr_get_charptr(config_entry, ATTR_UNIQUENESS_TOP_ENTRY_OC);
+                
+                /* enforce uniqueness, in the modified entry subtree, only to entries having this objectclass */
+                tmp_config->subtree_entries_oc = slapi_entry_attr_get_charptr(config_entry, ATTR_UNIQUENESS_SUBTREE_ENTRIES_OC);
+                
+        } else {
+                int result;
+                char *attrName = NULL;
+                char *markerObjectClass = NULL;
+                char *requiredObjectClass = NULL;
+                
+                /* using the old style of configuration */
+                result = getArguments(pb, &attrName, &markerObjectClass, &requiredObjectClass);
+                if (LDAP_OPERATIONS_ERROR == result) {
+                        slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name, "Config fail: unable to parse old style\n");
+                        rc = SLAPI_PLUGIN_FAILURE;
+                        goto done;
+                
+                }
+                if (UNTAGGED_PARAMETER == result) {
+                        /* This is
+                         * nsslapd-pluginarg0: uid
+                         * nsslapd-pluginarg1: dc=people,dc=example,dc=com
+                         * nsslapd-pluginarg2: dc=sales, dc=example,dc=com
+                         * 
+                         * config attribute are in argc/argv
+                         * 
+                         * attrName is set
+                         * markerObjectClass/requiredObjectClass are NOT set
+                         */
+                        
+                        if (slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc) || slapi_pblock_get(pb, SLAPI_PLUGIN_ARGV, &argv)) {
+                                slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name, "Config fail: unable to retrieve plugin arguments\n");
+                                rc = SLAPI_PLUGIN_FAILURE;
+                                goto done;
+                        }
+                        
+                        /* Store attrName in the config */
+                        tmp_config->attr = slapi_ch_strdup(attrName);
+                        argc--;
+                        argv++; /* First argument was attribute name and remaining are subtrees */
+                        
+                        /* Store the subtrees */
+                        nb_subtrees = 0;
+                        if ((tmp_config->subtrees = (Slapi_DN **) slapi_ch_calloc(argc + 1, sizeof (Slapi_DN *))) == NULL) {
+                                slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "Config info: Fail to allocate subtree array\n");
+                                rc = SLAPI_PLUGIN_FAILURE;
+                                goto done;
+                        }
+                        
+
+                        for (; argc > 0; argc--, argv++) {
+                                if (slapi_dn_syntax_check(pb, *argv, 1)) { /* syntax check failed */
+                                        slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "Config info: Invalid DN  (skipped): %s\n", *argv);
+                                        continue;
+                                }
+                                tmp_config->subtrees[nb_subtrees] = slapi_sdn_new_dn_byval(*argv);
+                                nb_subtrees++;
+                        }
+                        
+                        /* this interface does not configure across-subtree uniqueness */
+                        tmp_config->unique_in_all_subtrees = PR_FALSE;
+                        
+                        /* Not really useful, but it clarifies the config */
+                        tmp_config->subtree_entries_oc = NULL;
+                        tmp_config->top_entry_oc = NULL;
+                } else {
+                        /* This is
+                         * nsslapd-pluginarg0: attribute=uid
+                         * nsslapd-pluginarg1: markerobjectclass=organizationalUnit
+                         * nsslapd-pluginarg2: requiredobjectclass=person
+                         * 
+                         * config attributes are in 
+                         *  - attrName 
+                         *  - markerObjectClass
+                         *  - requiredObjectClass 
+                         */
+                        /* Store attrName in the config */
+                        tmp_config->attr = slapi_ch_strdup(attrName);
+                        
+                        /* There is no subtrees */
+                        tmp_config->subtrees = NULL;
+                        
+                        /* this interface does not configure across-subtree uniqueness */
+                        tmp_config->unique_in_all_subtrees = PR_FALSE;
+                        
+                        /* set the objectclasses retrieved by getArgument */
+                        tmp_config->subtree_entries_oc = slapi_ch_strdup(requiredObjectClass);
+                        tmp_config->top_entry_oc = slapi_ch_strdup(markerObjectClass);
+                        
+                }
+                
+        }
+        
+        /* Time to check that the new configuration is valid */
+        if (tmp_config->attr == NULL) {
+                slapi_log_error( SLAPI_LOG_FATAL, plugin_name, "Config info: attribute name not defined \n");
+                rc = SLAPI_PLUGIN_FAILURE;
+                goto done;
+        }
+        
+        if (tmp_config->subtrees == NULL) {
+                /* Uniqueness is enforced on entries matching objectclass */
+                if (tmp_config->subtree_entries_oc == NULL) {
+                        slapi_log_error( SLAPI_LOG_FATAL, plugin_name, "Config info: objectclass for subtree entries is not defined\n");
+                        rc = SLAPI_PLUGIN_FAILURE;
+                        goto done;
+                }
+        } else if (tmp_config->subtrees[0] == NULL) {
+                /* Uniqueness is enforced on subtrees but none are defined */
+                slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "Config info: No valid subtree is defined \n");
+                rc = SLAPI_PLUGIN_FAILURE;
+                goto done;
+        }
+
+done:
+        if (rc != SLAPI_PLUGIN_SUCCESS) {
+                if (tmp_config) {
+                        free_uniqueness_config(tmp_config);
+                        slapi_ch_free((void **) &tmp_config);
+                }
+                return NULL;
+        } else {
+                
+                return tmp_config;
+        }
+}
+
 static void
 freePblock( Slapi_PBlock *spb ) {
   if ( spb )
@@ -390,29 +662,49 @@ search_one_berval(Slapi_DN *baseDN, const char *attrName,
  *   LDAP_OPERATIONS_ERROR - a server failure.
  */
 static int
-searchAllSubtrees(int argc, char *argv[], const char *attrName,
+searchAllSubtrees(Slapi_DN **subtrees, const char *attrName,
   Slapi_Attr *attr, struct berval **values, const char *requiredObjectClass,
-  Slapi_DN *dn)
+  Slapi_DN *dn, PRBool unique_in_all_subtrees)
 {
   int result = LDAP_SUCCESS;
+  int i;
 
+  if (unique_in_all_subtrees) {
+          PRBool in_a_subtree = PR_FALSE;
+          
+          /* we need to check that the added values of this attribute
+           * are unique in all the monitored subtrees
+           */
+          
+          /* First check that the target entry is inside one of
+           * the monitored subtrees; otherwise adding 'values'
+           * cannot violate the constraint
+           */
+          for (i = 0;subtrees && subtrees[i]; i++) {
+                  if (slapi_sdn_issuffix(dn, subtrees[i])) {
+                          in_a_subtree = PR_TRUE;
+                          break;
+                  }
+          }
+          if (! in_a_subtree) {
+                  return result;
+          }
+  }
+  
   /*
    * For each DN in the managed list, do uniqueness checking if
    * the target DN is a subnode in the tree.
    */
-  for(;argc > 0;argc--,argv++)
+  for(i = 0;subtrees && subtrees[i]; i++)
   {
-    Slapi_DN *sufdn = slapi_sdn_new_dn_byref(*argv);
+    Slapi_DN *sufdn = subtrees[i];
     /*
      * The DN should already be normalized, so we don't have to
      * worry about that here.
      */
-    if (slapi_sdn_issuffix(dn, sufdn)) {
+    if (unique_in_all_subtrees || slapi_sdn_issuffix(dn, sufdn)) {
       result = search(sufdn, attrName, attr, values, requiredObjectClass, dn);
-      slapi_sdn_free(&sufdn);
       if (result) break;
-    } else {
-      slapi_sdn_free(&sufdn);
     }
   }
   return result;
@@ -561,8 +853,7 @@ preop_add(Slapi_PBlock *pb)
     int isupdatedn;
     Slapi_Entry *e;
     Slapi_Attr *attr;
-    int argc;
-    char **argv = NULL;
+    struct attr_uniqueness_config *config = NULL;
 
         /*
          * If this is a replication update, just be a noop.
@@ -573,28 +864,19 @@ preop_add(Slapi_PBlock *pb)
         {
           break;
         }
-
-    /*
-     * Get the arguments
-     */
-        result = getArguments(pb, &attrName, &markerObjectClass,
-                                                  &requiredObjectClass);
-        if (UNTAGGED_PARAMETER == result)
-        {
-          slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name, 
-                          "ADD parameter untagged: %s\n", attrName);
-          result = LDAP_SUCCESS;
-          /* Statically defined subtrees to monitor */
-          err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc);
-          if (err) { result = uid_op_error(53); break; }
-
-          err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGV, &argv);
-          if (err) { result = uid_op_error(54); break; }
-          argc--; argv++; /* First argument was attribute name */
-        } else if (0 != result)
-        {
-          break;
-        }
+        slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &config);
+        if (config == NULL) {
+                slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "preop_add fail to retrieve the config\n");
+                result = LDAP_OPERATIONS_ERROR;
+                break;
+        }            
+        
+        /*
+         * Get the arguments
+         */
+        attrName = config->attr;
+        markerObjectClass = config->top_entry_oc;
+        requiredObjectClass = config->subtree_entries_oc;
 
     /*
      * Get the target DN for this add operation
@@ -642,8 +924,8 @@ preop_add(Slapi_PBlock *pb)
         } else
         {
           /* Subtrees listed on invocation line */
-          result = searchAllSubtrees(argc, argv, attrName, attr, NULL,
-                                     requiredObjectClass, sdn);
+          result = searchAllSubtrees(config->subtrees, attrName, attr, NULL,
+                                     requiredObjectClass, sdn, config->unique_in_all_subtrees);
         }
   END
 
@@ -696,6 +978,7 @@ preop_modify(Slapi_PBlock *pb)
   int checkmodsCapacity = 0;
   char *errtext = NULL;
   char *attrName = NULL;
+  struct attr_uniqueness_config *config = NULL;
 
 #ifdef DEBUG
     slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name,
@@ -712,8 +995,6 @@ preop_modify(Slapi_PBlock *pb)
     LDAPMod *mod;
     Slapi_DN *sdn = NULL;
     int isupdatedn;
-    int argc;
-    char **argv = NULL;
 
     /*
      * If this is a replication update, just be a noop.
@@ -725,27 +1006,20 @@ preop_modify(Slapi_PBlock *pb)
       break;
     }
 
+    slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &config);
+    if (config == NULL) {
+            slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "preop_modify fail to retrieve the config\n");
+            result = LDAP_OPERATIONS_ERROR;
+            break;
+    }    
     /*
      * Get the arguments
      */
-        result = getArguments(pb, &attrName, &markerObjectClass,
-                                                  &requiredObjectClass);
-        if (UNTAGGED_PARAMETER == result)
-        {
-          result = LDAP_SUCCESS;
-          /* Statically defined subtrees to monitor */
-          err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc);
-          if (err) { result = uid_op_error(53); break; }
-
-          err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGV, &argv);
-          if (err) { result = uid_op_error(54); break; }
-          argc--; /* First argument was attribute name */
-          argv++;
-        } else if (0 != result)
-        {
-          break;
-        }
+    attrName = config->attr;
+    markerObjectClass = config->top_entry_oc;
+    requiredObjectClass = config->subtree_entries_oc;
 
+        
     err = slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
     if (err) { result = uid_op_error(61); break; }
 
@@ -809,8 +1083,8 @@ preop_modify(Slapi_PBlock *pb)
         } else
         {
             /* Subtrees listed on invocation line */
-            result = searchAllSubtrees(argc, argv, attrName, NULL,
-                                       mod->mod_bvalues, requiredObjectClass, sdn);
+            result = searchAllSubtrees(config->subtrees, attrName, NULL,
+                                       mod->mod_bvalues, requiredObjectClass, sdn, config->unique_in_all_subtrees);
         }
     }
   END
@@ -852,6 +1126,7 @@ preop_modrdn(Slapi_PBlock *pb)
   Slapi_Value *sv_requiredObjectClass = NULL;
   char *errtext = NULL;
   char *attrName = NULL;
+  struct attr_uniqueness_config *config = NULL;
 
 #ifdef DEBUG
     slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name,
@@ -868,8 +1143,6 @@ preop_modrdn(Slapi_PBlock *pb)
     int deloldrdn = 0;
     int isupdatedn;
     Slapi_Attr *attr;
-    int argc;
-    char **argv = NULL;
 
         /*
          * If this is a replication update, just be a noop.
@@ -881,26 +1154,18 @@ preop_modrdn(Slapi_PBlock *pb)
           break;
         }
 
-    /*
-     * Get the arguments
-     */
-        result = getArguments(pb, &attrName, &markerObjectClass,
-                                                  &requiredObjectClass);
-        if (UNTAGGED_PARAMETER == result)
-        {
-          result = LDAP_SUCCESS;
-          /* Statically defined subtrees to monitor */
-          err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc);
-          if (err) { result = uid_op_error(53); break; }
-
-          err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGV, &argv);
-          if (err) { result = uid_op_error(54); break; }
-          argc--; /* First argument was attribute name */
-          argv++; 
-        } else if (0 != result)
-        {
-          break;
-        }
+       slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &config);
+       if (config == NULL) {
+               slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "preop_modrdn fail to retrieve the config\n");
+               result = LDAP_OPERATIONS_ERROR;
+               break;
+       }
+        /*
+         * Get the arguments
+         */
+        attrName = config->attr;
+        markerObjectClass = config->top_entry_oc;
+        requiredObjectClass = config->subtree_entries_oc;
 
     /* Create a Slapi_Value for the requiredObjectClass to use
      * for checking the entry. */
@@ -978,8 +1243,8 @@ preop_modrdn(Slapi_PBlock *pb)
         } else
         {
           /* Subtrees listed on invocation line */
-          result = searchAllSubtrees(argc, argv, attrName, attr, NULL,
-                                     requiredObjectClass, sdn);
+          result = searchAllSubtrees(config->subtrees, attrName, attr, NULL,
+                                     requiredObjectClass, sdn, config->unique_in_all_subtrees);
         }
   END
   /* Clean-up */
@@ -1021,16 +1286,15 @@ NSUniqueAttr_Init(Slapi_PBlock *pb)
   int preadd = SLAPI_PLUGIN_PRE_ADD_FN;
   int premod = SLAPI_PLUGIN_PRE_MODIFY_FN;
   int premdn = SLAPI_PLUGIN_PRE_MODRDN_FN;
+  struct attr_uniqueness_config *config = NULL;
 
   BEGIN
-    int argc;
-    char **argv;
 
     /* Declare plugin version */
     err = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION,
             SLAPI_PLUGIN_VERSION_01);
     if (err) break;
-
+    
     /*
      * Get plugin identity and store it for later use
      * Used for internal operations
@@ -1049,24 +1313,12 @@ NSUniqueAttr_Init(Slapi_PBlock *pb)
     }
     slapi_ch_free_string(&plugin_type);
 
-    /*
-     * Get and normalize arguments
-     */
-    err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc);
-    if (err) break;
- 
-    err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGV, &argv);
-    if (err) break;
-
-    /* First argument is the unique attribute name */
-    if (argc < 1) { err = -1; break; }
-    argv++; argc--;
-
-    for(;argc > 0;argc--, argv++) {
-        char *normdn = slapi_create_dn_string_case("%s", *argv);
-        slapi_ch_free_string(argv);
-        *argv = normdn;
+    /* load the config into the config list */
+    if ((config = uniqueness_entry_to_config(pb, plugin_entry)) == NULL) {
+            err = SLAPI_PLUGIN_FAILURE;
+            break;
     }
+    slapi_pblock_set(pb, SLAPI_PLUGIN_PRIVATE, (void*) config);
 
     /* Provide descriptive information */
     err = slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION,
@@ -1088,6 +1340,11 @@ NSUniqueAttr_Init(Slapi_PBlock *pb)
   if (err) {
     slapi_log_error(SLAPI_LOG_PLUGIN, "NSUniqueAttr_Init",
              "Error: %d\n", err);
+    if (config) {
+            slapi_pblock_set(pb, SLAPI_PLUGIN_PRIVATE, NULL);
+            free_uniqueness_config(config);
+            slapi_ch_free((void **) &config);
+    }
     err = -1;
   }
   else