Example #1
def topology(topology_st):
    if get_default_db_lib() == "mdb":
        handler = LMDB_LDBMConfig(topology_st.standalone)
        # Need at least 1500 dbis for 50 suffixes
        maxdbi = 5000
        log.info(f'Set lmdb map max dbi to {maxdbi}.')
        handler.replace('nsslapd-mdb-max-dbs', str(maxdbi))
        topology_st.standalone.restart()
    bes = Backends(topology_st.standalone)
    bes.delete_all_dangerous()
    mts = MappingTrees(topology_st.standalone)
    assert len(mts.list()) == 0
    return topology_st
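
A minimal sketch of how such a fixture is typically consumed; the nsslapd-mdb-max-dbs bump above only matters when a test goes on to create many suffixes. The fixture is assumed to be registered with @pytest.fixture in the original module, and the test name, backend names, and suffix pattern below are hypothetical:

from lib389.backend import Backends

def test_many_suffixes(topology):
    # Each backend consumes several LMDB dbis (id2entry plus its indexes),
    # which is why the fixture above raises nsslapd-mdb-max-dbs before restarting.
    bes = Backends(topology.standalone)
    for i in range(50):
        bes.create(properties={
            'cn': f'backend{i}',
            'nsslapd-suffix': f'dc=test{i},dc=example,dc=com',
        })
    assert len(bes.list()) == 50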
Example #2
def topology(topology_st):
    bes = Backends(topology_st.standalone)
    bes.delete_all_dangerous()
    mts = MappingTrees(topology_st.standalone)
    assert len(mts.list()) == 0
    return topology_st
Example #3
def test_chaining_paged_search(topology):
    """ Test paged search through the chaining db. This
    would cause a SIGSEGV with paged search which could
    be triggered by SSSD.

    :id: 7b29b1f5-26cf-49fa-9fe7-ee29a1408633
    :setup: Two standalones in chaining.
    :steps:
        1. Configure chaining between the nodes
        2. Do a chaining search (no page) to assert it works
        3. Do a paged search through chaining.

    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    st1 = topology.ins["standalone1"]
    st2 = topology.ins["standalone2"]

    ### Set up chaining so that st1 -> st2

    # Clear all the BE in st1
    bes1 = Backends(st1)
    for be in bes1.list():
        be.delete()

    # Setup st1 to chain to st2
    chain_plugin_1 = ChainingBackendPlugin(st1)
    chain_plugin_1.enable()

    chains = ChainingLinks(st1)
    chain = chains.create(
        properties={
            'cn': 'demochain',
            'nsslapd-suffix': DEFAULT_SUFFIX,
            'nsmultiplexorbinddn': '',
            'nsmultiplexorcredentials': '',
            'nsfarmserverurl': st2.toLDAPURL(),
        })

    mts = MappingTrees(st1)
    # Due to a bug in lib389, we need to delete and recreate the mt.
    for mt in mts.list():
        mt.delete()
    mts.ensure_state(
        properties={
            'cn': DEFAULT_SUFFIX,
            'nsslapd-state': 'backend',
            'nsslapd-backend': 'demochain',
        })
    # Restart to enable
    st1.restart()

    # Get an anonymous connection.
    anon = Account(st1, dn='')
    anon_conn = anon.bind(password='')

    # Now do a search from st1 -> st2
    accs_1 = Accounts(anon_conn, DEFAULT_SUFFIX)
    assert len(accs_1.list()) > 0

    # Allow time to attach lldb if needed.
    # import time
    # print("🔥🔥🔥")
    # time.sleep(45)

    # Now do a *paged* search from st1 -> st2
    assert len(accs_1.list(paged_search=2, paged_critical=False)) > 0
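
For context, a minimal sketch of the simple paged results search (RFC 2696) that the paged listing above exercises, written against python-ldap directly rather than lib389; the URL and suffix are placeholders, and paged_search/paged_critical are assumed to map to the control's page size and criticality:

import ldap
from ldap.controls import SimplePagedResultsControl

conn = ldap.initialize('ldap://localhost:389')  # placeholder URL
conn.simple_bind_s('', '')  # anonymous bind, as in the test above

# Page size 2, criticality off.
page_ctrl = SimplePagedResultsControl(criticality=False, size=2, cookie='')
entries = []
while True:
    msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                            '(objectClass=*)', serverctrls=[page_ctrl])
    _rtype, rdata, _rmsgid, rctrls = conn.result3(msgid)
    entries.extend(rdata)
    # Continue while the server returns a non-empty paging cookie.
    cookies = [c.cookie for c in rctrls
               if c.controlType == SimplePagedResultsControl.controlType]
    if not cookies or not cookies[0]:
        break
    page_ctrl.cookie = cookies[0]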
Example #4
class Backend(DSLdapObject):
    """Backend DSLdapObject with:
    - must attributes = ['cn', 'nsslapd-suffix']
    - RDN attribute is 'cn'

    :param instance: An instance
    :type instance: lib389.DirSrv
    :param dn: Entry DN
    :type dn: str
    """

    _must_attributes = ['nsslapd-suffix', 'cn']

    def __init__(self, instance, dn=None):
        super(Backend, self).__init__(instance, dn)
        self._rdn_attribute = 'cn'
        self._must_attributes = ['nsslapd-suffix', 'cn']
        self._create_objectclasses = [
            'top', 'extensibleObject', BACKEND_OBJECTCLASS_VALUE
        ]
        self._protected = False
        # Check if a mapping tree for this suffix exists.
        self._mts = MappingTrees(self._instance)

    def lint_uid(self):
        return self.get_attr_val_utf8_l('cn').lower()

    def _lint_virt_attrs(self):
        """Check if any virtual attribute are incorrectly indexed"""
        bename = self.lint_uid()
        indexes = self.get_indexes()
        suffix = self.get_attr_val_utf8('nsslapd-suffix')
        # First check nsrole
        try:
            indexes.get('nsrole')
            report = copy.deepcopy(DSVIRTLE0001)
            report['check'] = f'backends:{bename}:virt_attrs'
            report['detail'] = report['detail'].replace('ATTR', 'nsrole')
            report['fix'] = report['fix'].replace('ATTR', 'nsrole')
            report['fix'] = report['fix'].replace('SUFFIX', suffix)
            report['fix'] = report['fix'].replace('YOUR_INSTANCE',
                                                  self._instance.serverid)
            report['items'].append(suffix)
            report['items'].append('nsrole')
            yield report
        except:
            pass

        # Check COS next
        for cosDefType in [
                CosIndirectDefinitions, CosPointerDefinitions,
                CosClassicDefinitions
        ]:
            defs = cosDefType(self._instance, suffix).list()
            for cosDef in defs:
                attrs = cosDef.get_attr_val_utf8_l("cosAttribute").split()
                for attr in attrs:
                    if attr in [
                            "default", "override", "operational",
                            "operational-default", "merge-schemes"
                    ]:
                        # We are at the end, just break out
                        break
                    try:
                        indexes.get(attr)
                        # If we got here there is an index (bad)
                        report = copy.deepcopy(DSVIRTLE0001)
                        report['check'] = f'backends:{bename}:virt_attrs'
                        report['detail'] = report['detail'].replace(
                            'ATTR', attr)
                        report['fix'] = report['fix'].replace('ATTR', attr)
                        report['fix'] = report['fix'].replace('SUFFIX', suffix)
                        report['fix'] = report['fix'].replace(
                            'YOUR_INSTANCE', self._instance.serverid)
                        report['items'].append(suffix)
                        report['items'].append("Class Of Service (COS)")
                        report['items'].append("cosAttribute: " + attr)
                        yield report
                    except:
                        # this is what we hope for
                        pass

    def _lint_search(self):
        """Perform a search and make sure an entry is accessible
        """
        dn = self.get_attr_val_utf8('nsslapd-suffix')
        bename = self.lint_uid()
        suffix = DSLdapObject(self._instance, dn=dn)
        try:
            suffix.get_attr_val('objectclass')
        except ldap.NO_SUCH_OBJECT:
            # backend root entry not created yet
            DSBLE0003['items'] = [
                dn,
            ]
            DSBLE0003['check'] = f'backends:{bename}:search'
            yield DSBLE0003
        except ldap.LDAPError as e:
            # Some other error
            DSBLE0002['detail'] = DSBLE0002['detail'].replace('ERROR', str(e))
            DSBLE0002['check'] = f'backends:{bename}:search'
            DSBLE0002['items'] = [
                dn,
            ]
            yield DSBLE0002

    def _lint_mappingtree(self):
        """Backend lint

        This should check for:
        * missing mapping tree entries for the backend
        * missing indices if we are local and have log access?
        """
        # Check for the missing mapping tree.
        suffix = self.get_attr_val_utf8('nsslapd-suffix')
        bename = self.lint_uid()
        try:
            mt = self._mts.get(suffix)
            if mt.get_attr_val_utf8(
                    'nsslapd-backend') != bename and mt.get_attr_val_utf8(
                        'nsslapd-state') != 'backend':
                raise ldap.NO_SUCH_OBJECT(
                    "We have a matching suffix, but not a backend or correct database name."
                )
        except ldap.NO_SUCH_OBJECT:
            result = DSBLE0001
            result['check'] = f'backends:{bename}:mappingtree'
            result['items'] = [
                bename,
            ]
            yield result

    def _lint_cl_trimming(self):
        """Check that cl trimming is at least defined to prevent unbounded growth"""
        bename = self.lint_uid()
        suffix = self.get_attr_val_utf8('nsslapd-suffix')
        replicas = Replicas(self._instance)
        try:
            # Check if replication is enabled
            replicas.get(suffix)
            # Check the changelog
            cl = Changelog(self._instance, suffix=suffix)
            if cl.get_attr_val_utf8('nsslapd-changelogmaxentries') is None and \
               cl.get_attr_val_utf8('nsslapd-changelogmaxage') is None:
                report = copy.deepcopy(DSCLLE0001)
                report['fix'] = report['fix'].replace('YOUR_INSTANCE',
                                                      self._instance.serverid)
                report['check'] = f'backends:{bename}::cl_trimming'
                yield report
        except:
            # Suffix is not replicated
            self._log.debug(
                f"_lint_cl_trimming - backend ({suffix}) is not replicated")
            pass

    def create_sample_entries(self, version):
        """Creates sample entries under nsslapd-suffix value

        :param version: Sample entries version, i.e. 001003006
        :type version: str
        """

        self._log.debug('Requested sample entries at version %s....' % version)
        # Grab the correct sample entry config - remember this is a function ptr.
        centries = get_sample_entries(version)
        # apply it.
        basedn = self.get_attr_val('nsslapd-suffix')
        cent = centries(self._instance, basedn)
        # Now it's built, we can get the version for logging.
        self._log.debug('Creating sample entries at version %s' % cent.version)
        cent.apply()

    def _validate(self, rdn, properties, basedn):
        # We always need to call the super validate first. This way we can
        # guarantee that properties is a dictionary.
        # However, backend can take different properties. One is
        # based on the actual key, value of the object
        # one is the "python magic" types.
        # So we actually have to do the super validation later.
        if properties is None:
            raise ldap.UNWILLING_TO_PERFORM(
                'Invalid request to create. Properties cannot be None')
        if type(properties) != dict:
            raise ldap.UNWILLING_TO_PERFORM("properties must be a dictionary")

        # This is converting the BACKEND_ types to the DS nsslapd- attribute values
        nprops = {}
        for key, value in properties.items():
            try:
                nprops[BACKEND_PROPNAME_TO_ATTRNAME[key]] = [
                    value,
                ]
            except KeyError:
                # This means, it's not a mapped value, so continue
                nprops[key] = value

        (dn, valid_props) = super(Backend, self)._validate(rdn, nprops, basedn)

        try:
            self._mts.get(ensure_str(valid_props['nsslapd-suffix'][0]))
            raise ldap.UNWILLING_TO_PERFORM(
                "Mapping tree for this suffix exists!")
        except ldap.NO_SUCH_OBJECT:
            pass
        try:
            self._mts.get(ensure_str(valid_props['cn'][0]))
            raise ldap.UNWILLING_TO_PERFORM(
                "Mapping tree for this database exists!")
        except ldap.NO_SUCH_OBJECT:
            pass
        # We have to stash our valid props so that mapping tree can use them ...
        self._nprops_stash = valid_props

        return (dn, valid_props)

    def create(self,
               dn=None,
               properties=None,
               basedn=DN_LDBM,
               create_mapping_tree=True):
        """Add a new backend entry, create mapping tree,
         and, if requested, sample entries

        :param dn: DN of the new entry
        :type dn: str
        :param properties: Attributes and parameters for the new entry
        :type properties: dict
        :param basedn: Base DN of the new entry
        :type basedn: str
        :param create_mapping_tree: If a related mapping tree node should be created
        :type create_mapping_tree: bool

        :returns: DSLdapObject of the created entry
        """

        sample_entries = False
        parent_suffix = False

        # normalize suffix (remove spaces between comps)
        if dn is not None:
            dn_comps = ldap.dn.explode_dn(dn.lower())
            dn = ",".join(dn_comps)

        if properties is not None:
            suffix_dn = properties['nsslapd-suffix'].lower()
            dn_comps = ldap.dn.explode_dn(suffix_dn)
            ndn = ",".join(dn_comps)
            properties['nsslapd-suffix'] = ndn
            sample_entries = properties.pop(BACKEND_SAMPLE_ENTRIES, False)
            parent_suffix = properties.pop('parent', False)

        # Okay, now try to make the backend.
        super(Backend, self).create(dn, properties, basedn)

        # We check if the mapping tree exists in create, so do this *after*
        if create_mapping_tree is True:
            properties = {
                'cn': self._nprops_stash['nsslapd-suffix'],
                'nsslapd-state': 'backend',
                'nsslapd-backend': self._nprops_stash['cn'],
            }
            if parent_suffix:
                # This is a subsuffix, set the parent suffix
                properties['nsslapd-parent-suffix'] = parent_suffix
            self._mts.create(properties=properties)

        # We can't create the sample entries unless a mapping tree was installed.
        if sample_entries is not False and create_mapping_tree is True:
            self.create_sample_entries(sample_entries)
        return self

    def delete(self):
        """Deletes the backend, it's mapping tree and all related indices.
        This can be changed with the self._protected flag!

        :raises: - UnwillingToPerform - if backend is protected
                 - UnwillingToPerform - if nsslapd-state is not 'backend'
        """

        if self._protected:
            raise ldap.UNWILLING_TO_PERFORM("This is a protected backend!")
        # First check if the mapping tree has our suffix still.
        # suffix = self.get_attr_val('nsslapd-suffix')
        bename = self.get_attr_val_utf8('cn')
        try:
            mt = self._mts.get(selector=bename)
            # Assert the type is "backend"
            # Are these the right types....?
            if mt.get_attr_val_utf8('nsslapd-state').lower() != 'backend':
                raise ldap.UNWILLING_TO_PERFORM(
                    'Can not delete the mapping tree, not for a backend! You may need to delete this backend via cn=config .... ;_; '
                )

            # Delete replicas first
            try:
                Replicas(self._instance).get(
                    mt.get_attr_val_utf8('cn')).delete()
            except ldap.NO_SUCH_OBJECT:
                # No replica, no problem
                pass

            # Delete our mapping tree if it exists.
            mt.delete()
        except ldap.NO_SUCH_OBJECT:
            # Righto, it's already gone! Do nothing ...
            pass

        # Now remove our children, this is all ldbm config
        self._instance.delete_branch_s(self._dn, ldap.SCOPE_SUBTREE)

    def get_suffix(self):
        return self.get_attr_val_utf8_l('nsslapd-suffix')

    def disable(self):
        # Disable backend (mapping tree)
        suffix = self.get_attr_val_utf8_l('nsslapd-suffix')
        mt = self._mts.get(suffix)
        mt.set('nsslapd-nsstate', 'Disabled')

    def enable(self):
        # Enable Backend (mapping tree)
        suffix = self.get_attr_val_utf8_l('nsslapd-suffix')
        mt = self._mts.get(suffix)
        mt.set('nsslapd-nsstate', 'Backend')

    def get_mapping_tree(self):
        suffix = self.get_attr_val_utf8('nsslapd-suffix')
        return self._mts.get(suffix)

    def get_monitor(self):
        """Get a MonitorBackend(DSLdapObject) for the backend"""

        monitor = MonitorBackend(instance=self._instance,
                                 dn="cn=monitor,%s" % self._dn)
        return monitor

    def get_indexes(self):
        """Get an Indexes(DSLdapObject) for the backend"""

        indexes = Indexes(self._instance, basedn="cn=index,%s" % self._dn)
        return indexes

    def get_index(self, attr_name):
        for index in self.get_indexes().list():
            idx_name = index.get_attr_val_utf8_l('cn').lower()
            if idx_name == attr_name.lower():
                return index
        return None

    def del_index(self, attr_name):
        for index in self.get_indexes().list():
            idx_name = index.get_attr_val_utf8_l('cn').lower()
            if idx_name == attr_name.lower():
                index.delete()
                return
        raise ValueError("Can not delete index because it does not exist")

    def add_index(self, attr_name, types, matching_rules=None, reindex=False):
        """ Add an index.

        :param attr_name - name of the attribute to index
        :param types - a List of index types (eq, pres, sub, approx)
        :param matching_rules - a List of matching rules for the index
        :param reindex - If set to True then index the attribute after creating it.
        """
        new_index = Index(self._instance)
        props = {
            'cn': attr_name,
            'nsSystemIndex': 'False',
            'nsIndexType': types,
        }
        if matching_rules is not None:
            mrs = []
            for mr in matching_rules:
                mrs.append(mr)
            props['nsMatchingRule'] = mrs
        new_index.create(properties=props, basedn="cn=index," + self._dn)

        if reindex:
            self.reindex(attr_name)

    def reindex(self, attrs=None, wait=False):
        """Reindex the attributes for this backend
        :param attrs - an optional list of attributes to index
        :param wait - Set to true to wait for task to complete
        """
        args = None
        if wait:
            args = {TASK_WAIT: True}
        bename = ensure_str(self.get_attr_val_bytes('cn'))
        reindex_task = Tasks(self._instance)
        reindex_task.reindex(benamebase=bename, attrname=attrs, args=args)

    def get_encrypted_attrs(self, just_names=False):
        """Get a list of the excrypted attributes
        :param just_names - If True only the encrypted attribute names are returned (instead of the full attribute entry)
        :returns - a list of attributes
        """
        attrs = EncryptedAttrs(self._instance, basedn=self._dn).list()
        if just_names:
            results = []
            for attr in attrs:
                results.append(attr.get_attr_val_utf8_l('cn'))
            return results
        else:
            return attrs

    def add_encrypted_attr(self, attr_name):
        """Add an encrypted attribute
        :param attr_name - name of the new encrypted attribute
        """
        new_attr = EncryptedAttr(self._instance)
        new_attr.create(basedn="cn=encrypted attributes," + self._dn,
                        properties={
                            'cn': attr_name,
                            'nsEncryptionAlgorithm': 'AES'
                        })

    def del_encrypted_attr(self, attr_name):
        """Delete encrypted attribute
        :param attr_name - Name of the encrypted attribute to delete
        """
        enc_attrs = EncryptedAttrs(self._instance,
                                   basedn="cn=encrypted attributes," +
                                   self._dn).list()
        for enc_attr in enc_attrs:
            attr = enc_attr.get_attr_val_utf8_l('cn').lower()
            if attr_name.lower() == attr:
                enc_attr.delete()
                break

    def import_ldif(self,
                    ldifs,
                    chunk_size=None,
                    encrypted=False,
                    gen_uniq_id=None,
                    only_core=False,
                    include_suffixes=None,
                    exclude_suffixes=None):
        """Do an import of the suffix"""

        bs = Backends(self._instance)
        task = bs.import_ldif(self.rdn, ldifs, chunk_size, encrypted,
                              gen_uniq_id, only_core, include_suffixes,
                              exclude_suffixes)
        return task

    def export_ldif(self,
                    ldif=None,
                    use_id2entry=False,
                    encrypted=False,
                    min_base64=False,
                    no_uniq_id=False,
                    replication=False,
                    not_folded=False,
                    no_seq_num=False,
                    include_suffixes=None,
                    exclude_suffixes=None):
        """Do an export of the suffix"""

        bs = Backends(self._instance)
        task = bs.export_ldif(self.rdn, ldif, use_id2entry, encrypted,
                              min_base64, no_uniq_id, replication, not_folded,
                              no_seq_num, include_suffixes, exclude_suffixes)
        return task

    def get_vlv_searches(self, vlv_name=None):
        """Return the VLV seaches for this backend, or return a specific search
        :param vlv_name - name of a VLV search entry to return
        :returns - A list of VLV searches or a single VLV sarch entry
        """
        vlv_searches = VLVSearches(self._instance, basedn=self._dn).list()
        if vlv_name is None:
            return vlv_searches

        # return specific search
        for vlv in vlv_searches:
            search_name = vlv.get_attr_val_utf8_l('cn').lower()
            if search_name == vlv_name.lower():
                return vlv

        # No match
        raise ValueError("Failed to find VLV search entry")

    def add_vlv_search(self, vlvname, props, reindex=False):
        """Add a VLV search entry
        :param vlvname - Name of the new VLV search entry
        :param props - A dict of the attribute value pairs for the VLV search entry
        :param reindex - Set to True to index the new attribute right away
        """
        basedn = self._dn
        vlv = VLVSearch(instance=self._instance)
        vlv.create(rdn="cn=" + vlvname, properties=props, basedn=basedn)

    def get_sub_suffixes(self):
        """Return a list of Backend's
        returns: a List of subsuffix entries
        """
        subsuffixes = []
        top_be_suffix = self.get_attr_val_utf8_l('nsslapd-suffix').lower()
        mts = self._mts.list()
        for mt in mts:
            parent_suffix = mt.get_attr_val_utf8_l('nsslapd-parent-suffix')
            if parent_suffix is None:
                continue
            if parent_suffix.lower() == top_be_suffix:
                child_suffix = mt.get_attr_val_utf8_l('cn').lower()
                be_insts = Backends(self._instance).list()
                for be in be_insts:
                    be_suffix = ensure_str(
                        be.get_attr_val_utf8_l('nsslapd-suffix')).lower()
                    if child_suffix == be_suffix:
                        subsuffixes.append(be)
                        break
        return subsuffixes

    def get_cos_indirect_defs(self):
        return CosIndirectDefinitions(self._instance, self._dn).list()

    def get_cos_pointer_defs(self):
        return CosPointerDefinitions(self._instance, self._dn).list()

    def get_cos_classic_defs(self):
        return CosClassicDefinitions(self._instance, self._dn).list()

    def get_cos_templates(self):
        return CosTemplates(self._instance, self._dn).list()
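
A minimal usage sketch of the Backend API documented above, assuming an already-connected lib389 DirSrv instance named standalone; the backend name and suffix are placeholders, and the calls mirror the create(), add_index(), get_indexes(), get_suffix(), and delete() methods defined in this example:

from lib389.backend import Backends

# 'standalone' is assumed to be an open lib389 DirSrv instance.
bes = Backends(standalone)

# create() also adds the matching mapping tree entry unless
# create_mapping_tree=False is passed.
be = bes.create(properties={
    'cn': 'exampleRoot',                    # database name
    'nsslapd-suffix': 'dc=example,dc=com',  # suffix served by this backend
})

# Add an equality/presence index and reindex the attribute right away.
be.add_index('description', ['eq', 'pres'], reindex=True)

# Inspect the backend.
print(be.get_suffix())
print([idx.rdn for idx in be.get_indexes().list()])

# delete() removes the mapping tree, any replica configuration,
# and the backend's ldbm config subtree.
be.delete()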
Example #5
def test_chaining_paged_search(topology):
    """ Check that when the chaining target has anonymous access
    disabled that the ping still functions and allows the search
    to continue with an appropriate bind user.

    :id: 00bf31db-d93b-4224-8e70-86abb2d4cd17
    :setup: Two standalones in chaining.
    :steps:
        1. Configure chaining between the nodes
        2. Do a chaining search (with anonymous access allowed) to assert it works
        3. Disable anonymous access on st2
        4. Restart both
        5. Check search still works

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    st1 = topology.ins["standalone1"]
    st2 = topology.ins["standalone2"]

    ### Set up chaining so that st1 -> st2

    # Setup a chaining user on st2 to authenticate to.
    sa = ServiceAccounts(st2, DEFAULT_SUFFIX).create(properties = {
        'cn': 'sa',
        'userPassword': PW
    })

    # Add a proxy user.
    sproxy = ServiceAccounts(st2, DEFAULT_SUFFIX).create(properties = {
        'cn': 'proxy',
        'userPassword': PW
    })

    # Add the read and proxy ACI
    dc = Domain(st2, DEFAULT_SUFFIX)
    dc.add('aci',
        f"""(targetattr="objectClass || cn || uid")(version 3.0; acl "Enable sa read"; allow (read, search, compare)(userdn="ldap:///{sa.dn}");)"""
    )
    # Add the proxy ACI
    dc.add('aci',
        f"""(targetattr="*")(version 3.0; acl "Enable proxy access"; allow (proxy)(userdn="ldap:///{sproxy.dn}");)"""
    )

    # Clear all the BE in st1
    bes1 = Backends(st1)
    for be in bes1.list():
        be.delete()

    # Setup st1 to chain to st2
    chain_plugin_1 = ChainingBackendPlugin(st1)
    chain_plugin_1.enable()

    # Chain with the proxy user.
    chains = ChainingLinks(st1)
    chain = chains.create(properties={
        'cn': 'demochain',
        'nsfarmserverurl': st2.toLDAPURL(),
        'nsslapd-suffix': DEFAULT_SUFFIX,
        'nsmultiplexorbinddn': sproxy.dn,
        'nsmultiplexorcredentials': PW,
        'nsCheckLocalACI': 'on',
        'nsConnectionLife': '30',
    })

    mts = MappingTrees(st1)
    # Due to a bug in lib389, we need to delete and recreate the mt.
    for mt in mts.list():
        mt.delete()
    mts.ensure_state(properties={
        'cn': DEFAULT_SUFFIX,
        'nsslapd-state': 'backend',
        'nsslapd-backend': 'demochain',
        'nsslapd-distribution-plugin': 'libreplication-plugin',
        'nsslapd-distribution-funct': 'repl_chain_on_update',
    })

    # Enable pwpolicy (Not sure if part of the issue).
    st1.config.set('passwordIsGlobalPolicy', 'on')
    st2.config.set('passwordIsGlobalPolicy', 'on')

    # Restart to enable everything.
    st1.restart()

    # Get a proxy auth connection.
    sa1 = ServiceAccount(st1, sa.dn)
    sa1_conn = sa1.bind(password=PW)

    # Now do a search from st1 -> st2
    sa1_dc = Domain(sa1_conn, DEFAULT_SUFFIX)
    assert sa1_dc.exists()

    # Now on st2 disable anonymous access.
    st2.config.set('nsslapd-allow-anonymous-access', 'rootdse')

    # Stop st2 to force the connection to be dead.
    st2.stop()
    # Restart st1 - this means it must re-do the ping/keepalive.
    st1.restart()

    # Do a bind - this should fail and force the connection offline.
    with pytest.raises(ldap.OPERATIONS_ERROR):
        sa1.bind(password=PW)

    # Allow time to attach lldb if needed.
    # print("🔥🔥🔥")
    # time.sleep(45)

    # Bring st2 online.
    st2.start()

    # Wait a bit
    time.sleep(5)

    # Get a proxy auth connection (again)
    sa1_conn = sa1.bind(password=PW)
    # Now do a search from st1 -> st2
    sa1_dc = Domain(sa1_conn, DEFAULT_SUFFIX)
    assert sa1_dc.exists()