Code example #1
File: conf_backend.py Project: Firstyear/lib389
def test_backend_cli(topology):
    # List backends - none should exist yet.
    args = FakeArgs()
    backend_list(topology.standalone, None, topology.logcap.log, None)
    # Assert none.
    assert(topology.logcap.contains("No objects to display"))
    topology.logcap.flush()
    # Add a backend
    # We need to fake the args
    args.extra = ['dc=example,dc=com', 'userRoot']
    backend_create(topology.standalone, None, topology.logcap.log, args)
    # Assert one.
    backend_list(topology.standalone, None, topology.logcap.log, None)
    # Assert the new backend is listed.
    assert(topology.logcap.contains("userRoot"))
    topology.logcap.flush()
    # Assert we can get by name, suffix, dn
    args.selector = 'userRoot'
    backend_get(topology.standalone, None, topology.logcap.log, args)
    # Assert the backend was found.
    assert(topology.logcap.contains("userRoot"))
    topology.logcap.flush()
    # Assert we can get by name, suffix, dn
    args.dn = 'cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
    backend_get_dn(topology.standalone, None, topology.logcap.log, args)
    # Assert the backend was found by dn.
    assert(topology.logcap.contains("userRoot"))
    topology.logcap.flush()
    # delete it
    backend_delete(topology.standalone, None, topology.logcap.log, args, warn=False)
    backend_list(topology.standalone, None, topology.logcap.log, None)
    # Assert none.
    assert(topology.logcap.contains("No objects to display"))
    topology.logcap.flush()
Code example #2
File: conf_plugin.py Project: Firstyear/lib389
def test_plugin_cli(topology):
    args = FakeArgs()

    plugin_list(topology.standalone, None, topology.logcap.log, None)
    for p in plugins:
        assert(topology.logcap.contains(p))
    topology.logcap.flush()

    # print(topology.logcap.outputs)
    # Get a plugin by name and by dn, then disable and re-enable it.
    args.selector = 'USN'
    plugin_get(topology.standalone, None, topology.logcap.log, args)
    assert(topology.logcap.contains('USN'))
    topology.logcap.flush()

    args.dn = 'cn=USN,cn=plugins,cn=config'
    plugin_get_dn(topology.standalone, None, topology.logcap.log, args)
    assert(topology.logcap.contains('USN'))
    topology.logcap.flush()

    plugin_disable(topology.standalone, None, topology.logcap.log, args, warn=False)
    assert(topology.logcap.contains('Disabled'))
    topology.logcap.flush()

    plugin_enable(topology.standalone, None, topology.logcap.log, args)
    assert(topology.logcap.contains('Enabled'))
    topology.logcap.flush()
Code example #3
File: export_test.py Project: vashirov/389-ds-base
def run_db2ldif_and_clear_logs(topology,
                               instance,
                               backend,
                               ldif,
                               output_msg,
                               encrypt=False,
                               repl=False):
    args = FakeArgs()
    args.instance = instance.serverid
    args.backend = backend
    args.encrypted = encrypt
    args.replication = repl
    args.ldif = ldif

    dbtasks_db2ldif(instance, topology.logcap.log, args)

    log.info('checking output msg')
    if not topology.logcap.contains(output_msg):
        log.error('The output message is not the expected one')
        assert False

    log.info('Clear the log')
    topology.logcap.flush()
Code example #4
def test_dsidm_service_rename(topology_st, create_test_service):
    """ Test dsidm service rename option

    :id: 4a13ea64-51e1-11ec-b3ff-3497f624ea11
    :setup: Standalone instance
    :steps:
         1. Run dsidm service rename option on created service
         2. Check the service does not have another cn attribute with the old rdn
         3. Check the old service is deleted
    :expectedresults:
         1. Success
         2. Success
         3. Success
    """

    standalone = topology_st.standalone
    services = ServiceAccounts(standalone, DEFAULT_SUFFIX)
    test_service = services.get('test_service')

    args = FakeArgs()
    args.selector = test_service.rdn
    args.new_name = 'my_service'
    args.keep_old_rdn = False

    log.info('Test dsidm service rename')
    args.new_name = 'my_service'
    rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
    my_service = services.get(args.new_name)
    output = f'Successfully renamed to {my_service.dn}'
    check_value_in_log_and_reset(topology_st, check_value=output)

    log.info('New service should not have cn attribute with the old rdn')
    assert not my_service.present('cn', 'test_service')
    assert my_service.get_attr_val_utf8('cn') == 'my_service'
    assert my_service.get_attr_val_utf8('description') == 'Test Service'

    log.info('Old service dn should not exist.')
    assert not test_service.exists()

    log.info('Clean up')
    my_service.delete()
Code example #5
def test_dsidm_user_rename(topology_st, create_test_user):
    """ Test dsidm user rename option

    :id: fa569966-3954-465f-92b0-331a3a088b1b
    :setup: Standalone instance
    :steps:
         1. Run dsidm user rename option on created user
         2. Check the user does not have another uid attribute with the old rdn
         3. Check the old user is deleted
    :expectedresults:
         1. Success
         2. Success
         3. Success
    """

    standalone = topology_st.standalone
    users = nsUserAccounts(standalone, DEFAULT_SUFFIX)
    test_user = users.get('test_user_1000')

    args = FakeArgs()
    args.selector = test_user.rdn
    args.new_name = 'my_user'
    args.keep_old_rdn = False

    log.info('Test dsidm user rename')
    args.new_name = 'my_user'
    rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
    my_user = users.get(args.new_name)
    output = 'Successfully renamed to {}'.format(my_user.dn)
    check_value_in_log_and_reset(topology_st, check_value=output)

    log.info('New user should not have uid attribute with the old rdn')
    assert not my_user.present('uid', 'test_user_1000')
    assert my_user.get_attr_val_utf8('cn') == 'test_user_1000'
    assert my_user.get_attr_val_utf8('displayName') == 'test_user_1000'

    log.info('Old user dn should not exist.')
    assert not test_user.exists()

    log.info('Clean up')
    my_user.delete()
Code example #6
def test_set_update_delay(topology):
    args = FakeArgs()

    args.value = 60
    referint_cli.manage_update_delay(topology.standalone, None, topology.logcap.log, args)
    assert topology.logcap.contains('referint-update-delay set to "60"')
    topology.logcap.flush()

    args.value = None
    referint_cli.manage_update_delay(topology.standalone, None, topology.logcap.log, args)
    assert topology.logcap.contains("referint-update-delay: 60")
    topology.logcap.flush()

    args.value = 0
    referint_cli.manage_update_delay(topology.standalone, None, topology.logcap.log, args)
    assert topology.logcap.contains('referint-update-delay set to "0"')
    topology.logcap.flush()

    args.value = None
    referint_cli.manage_update_delay(topology.standalone, None, topology.logcap.log, args)
    assert topology.logcap.contains("referint-update-delay: 0")
    topology.logcap.flush()
Code example #7
def test_backend_cli(topology):
    # List backends - none should exist yet.
    args = FakeArgs()
    backend_list(topology.standalone, None, topology.logcap.log, None)
    # Assert none.
    assert (topology.logcap.contains("No objects to display"))
    topology.logcap.flush()
    # Add a backend
    # We need to fake the args
    args.cn = 'userRoot'
    args.nsslapd_suffix = 'dc=example,dc=com'
    backend_create(topology.standalone, None, topology.logcap.log, args)
    # Assert one.
    backend_list(topology.standalone, None, topology.logcap.log, None)
    # Assert the new backend is listed.
    assert (topology.logcap.contains("userRoot"))
    topology.logcap.flush()
    # Assert we can get by name, suffix, dn
    args.selector = 'userRoot'
    backend_get(topology.standalone, None, topology.logcap.log, args)
    # Assert the backend was found.
    assert (topology.logcap.contains("userRoot"))
    topology.logcap.flush()
    # Assert we can get by name, suffix, dn
    args.dn = 'cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
    backend_get_dn(topology.standalone, None, topology.logcap.log, args)
    # Assert the backend was found by dn.
    assert (topology.logcap.contains("userRoot"))
    topology.logcap.flush()
    # delete it
    backend_delete(topology.standalone,
                   None,
                   topology.logcap.log,
                   args,
                   warn=False)
    backend_list(topology.standalone, None, topology.logcap.log, None)
    # Assert none.
    assert (topology.logcap.contains("No objects to display"))
    topology.logcap.flush()
Code example #8
def test_chaining_cli(topology_st, create_backend):
    """Test creating, listing, getting, and deleting a backend (and subsuffix)

    :id: 800f432a-52ab-4661-ac66-a2bdd9b984d7
    :setup: Standalone instance
    :steps:
        1. Update config controls and components
        2. Verify update to config
        3. Set default config
        4. Verify update to default config
        5. Add DB Link
        6. Verify Link was created
        7. Edit Link
        8. Verify edit to link
        9. Test monitor
        10. Delete link
        11. Verify link was deleted
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
    """
    topology_st.logcap = LogCapture()
    sys.stdout = io.StringIO()
    args = FakeArgs()
    args.CHAIN_NAME = [LINK_NAME]
    args.suffix = LINK_SUFFIX
    args.json = False
    args.add_control = None
    args.del_control = None
    args.add_comp = None
    args.del_comp = None

    # Set config (add control)
    args.add_control = '1.1.1.1.1.1.1'
    config_set(topology_st.standalone, None, None, args)
    args.add_control = None
    check_output("updated chaining configuration")

    # Verify config change
    config_get(topology_st.standalone, None, None, args)
    check_output("1.1.1.1.1.1.1")

    # Set config (delete control)
    args.del_control = '1.1.1.1.1.1.1'
    config_set(topology_st.standalone, None, None, args)
    args.del_control = None
    check_output("updated chaining configuration")

    # Verify config change
    config_get(topology_st.standalone, None, None, args)
    check_output("1.1.1.1.1.1.1", missing=True)

    # Set config (add comp)
    args.add_comp = 'cn=test,cn=config'
    config_set(topology_st.standalone, None, None, args)
    args.add_comp = None
    check_output("updated chaining configuration")

    # Verify config change
    config_get(topology_st.standalone, None, None, args)
    check_output('cn=test,cn=config')

    # Set config (delete comp)
    args.del_comp = 'cn=test,cn=config'
    config_set(topology_st.standalone, None, None, args)
    args.del_comp = None
    check_output("updated chaining configuration")

    # Verify config change
    config_get(topology_st.standalone, None, None, args)
    check_output("cn=test,cn=config", missing=True)

    # Set default config
    args.time_limit = '5555'
    def_config_set(topology_st.standalone, None, None, args)
    check_output("updated chaining default instance creation configuration")

    # Verify default config change
    def_config_get(topology_st.standalone, None, None, args)
    check_output("nsslapd_timelimit: 5555")

    # Create database link
    args.server_url = "ldap://localhost.localdomain"
    args.bind_dn = "cn=link_admin," + SUFFIX
    args.bind_pw = "secret_157"
    args.bind_mech = "LDAP"
    create_link(topology_st.standalone, None, None, args)
    check_output("created database link")

    # Verify link was created
    list_links(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(LINK_NAME)

    # Edit link
    args.bind_dn = "uid=newuser,cn=config"
    args.suffix = None
    edit_link(topology_st.standalone, None, None, args)
    check_output("updated database chaining link")

    # Verify link was edited
    args.cn = LINK_NAME
    get_link(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("uid=newuser,cn=config")

    # Test monitor
    time.sleep(2)  # need time for link to start up and generate monitor
    monitor_link(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("nssearchonelevelcount: ")

    # Delete link
    delete_link(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("deleted database link")

    # Verify link was deleted
    list_links(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(LINK_NAME, missing=True)
Code example #9
def test_retrocl_exclude_attr_add(topology_st):
    """ Test exclude attribute feature of the retrocl plugin for add operation

    :id: 3481650f-2070-45ef-9600-2500cfc51559

    :setup: Standalone instance

    :steps:
        1. Enable dynamic plugins
        2. Configure retro changelog plugin
        3. Add an entry
        4. Ensure entry attrs are in the changelog
        5. Exclude an attr
        6. Add another entry
        7. Ensure excluded attr is not in the changelog

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """

    st = topology_st.standalone

    log.info('Configure retrocl plugin')
    rcl = RetroChangelogPlugin(st)
    rcl.disable()
    rcl.enable()
    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')

    log.info('Restarting instance')
    try:
        st.restart()
    except ldap.LDAPError as e:
        log.error('Failed to restart instance ' + e.args[0]['desc'])
        assert False

    users = UserAccounts(st, DEFAULT_SUFFIX)

    log.info('Adding user1')
    try:
        users.create(
            properties={
                'sn': '1',
                'cn': 'user 1',
                'uid': 'user1',
                'uidNumber': '11',
                'gidNumber': '111',
                'givenname': 'user1',
                'homePhone': '0861234567',
                'carLicense': '131D16674',
                'mail': '*****@*****.**',
                'homeDirectory': '/home/user1',
                'userpassword': USER_PW
            })
    except ldap.ALREADY_EXISTS:
        pass
    except ldap.LDAPError as e:
        log.error("Failed to add user1: " + str(e))

    log.info(
        'Verify homePhone and carLicense attrs are in the changelog changestring'
    )
    try:
        retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
        cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
    except ldap.LDAPError as e:
        log.fatal("Changelog search failed, error: " + str(e))
        assert False
    assert len(cllist) > 0
    if cllist[0].present('changes'):
        clstr = str(cllist[0].get_attr_vals_utf8('changes'))
        assert ATTR_HOMEPHONE in clstr
        assert ATTR_CARLICENSE in clstr

    log.info('Excluding attribute ' + ATTR_HOMEPHONE)
    args = FakeArgs()
    args.connections = [
        st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM
    ]
    args.instance = 'standalone1'
    args.basedn = None
    args.binddn = None
    args.starttls = False
    args.pwdfile = None
    args.bindpw = None
    args.prompt = False
    args.exclude_attrs = ATTR_HOMEPHONE
    args.func = retrochangelog_add
    dsrc_inst = dsrc_arg_concat(args, None)
    inst = connect_instance(dsrc_inst, False, args)
    result = args.func(inst, None, log, args)
    disconnect_instance(inst)
    assert result is None

    log.info('Restarting instance')
    try:
        st.restart()
    except ldap.LDAPError as e:
        log.error('Failed to restart instance ' + e.args[0]['desc'])
        assert False

    log.info('Adding user2')
    try:
        users.create(
            properties={
                'sn': '2',
                'cn': 'user 2',
                'uid': 'user2',
                'uidNumber': '22',
                'gidNumber': '222',
                'givenname': 'user2',
                'homePhone': '0879088363',
                'carLicense': '04WX11038',
                'mail': '*****@*****.**',
                'homeDirectory': '/home/user2',
                'userpassword': USER_PW
            })
    except ldap.ALREADY_EXISTS:
        pass
    except ldap.LDAPError as e:
        log.error("Failed to add user2: " + str(e))

    log.info('Verify homePhone attr is not in the changelog changestring')
    try:
        cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})')
        assert len(cllist) > 0
        if cllist[0].present('changes'):
            clstr = str(cllist[0].get_attr_vals_utf8('changes'))
            assert ATTR_HOMEPHONE not in clstr
            assert ATTR_CARLICENSE in clstr
    except ldap.LDAPError as e:
        log.fatal("Changelog search failed, error: " + str(e))
        assert False
Code example #10
def test_directory_manager(topology):
    # Change the Directory Manager's password via the CLI.
    args = FakeArgs()
    args.password = '******'
    password_change(topology.standalone, None, topology.logcap.log, args)
Code example #11
def test_import_export(topology_st):
    BE_NAME = 'userRoot'
    EXCLUDE_SUFFIX = "ou=Groups,dc=example,dc=com"
    LDIF_NAME = "test_import_export.ldif"
    LDIF_PATH = os.path.join(topology_st.standalone.ds_paths.ldif_dir,
                             LDIF_NAME)
    topology_st.logcap = LogCapture()
    args = FakeArgs()

    # Export the backend
    args.be_names = [BE_NAME]
    args.ldif = LDIF_NAME
    args.use_id2entry = None
    args.encrypted = None
    args.min_base64 = None
    args.no_dump_uniq_id = None
    args.replication = None
    args.not_folded = None
    args.no_seq_num = None
    args.include_suffixes = None
    args.exclude_suffixes = [EXCLUDE_SUFFIX]
    backend_export(topology_st.standalone, None, topology_st.logcap.log, args)

    # Verify export worked
    assert os.path.exists(LDIF_PATH)
    with open(LDIF_PATH, 'r') as ldif:
        for line in ldif:
            assert not line.endswith("%s\n" % EXCLUDE_SUFFIX)

    # Import the backend
    args.be_name = BE_NAME
    args.ldifs = [LDIF_NAME]
    args.chunks_size = None
    args.encrypted = None
    args.gen_uniq_id = None
    args.only_core = None
    args.include_suffixes = None
    args.exclude_suffixes = None
    backend_import(topology_st.standalone, None, topology_st.logcap.log, args)
    os.remove(LDIF_PATH)
Code example #12
def test_attr_encrypt(topology_st, create_backend):
    """Test adding/removing encrypted attrs
    :id: 800f432a-52ab-4661-ac66-a2bdd9b984d789
    :setup: Standalone instance
    :steps:
        1. Add encrypted attr
        2. Verify it succeeded
        3. Delete encrypted attr
        4. Verify it was removed
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success

    """
    sys.stdout = io.StringIO()
    args = FakeArgs()
    args.cn = BE_NAME
    args.be_name = BE_NAME
    args.suffix = False
    args.nsslapd_suffix = SUFFIX
    args.json = False
    args.just_names = False
    args.list = False
    args.add_attr = None
    args.del_attr = None

    # Add an encrypted attr
    args.add_attr = ['description']
    backend_attr_encrypt(topology_st.standalone, None, None, args)
    args.add_attr = None
    check_output("added encrypted attribute")

    # Verify it worked
    args.list = True
    backend_attr_encrypt(topology_st.standalone, None, None, args)
    args.list = False
    check_output("cn: description")

    # Delete encrypted attr
    args.del_attr = ['description']
    backend_attr_encrypt(topology_st.standalone, None, None, args)
    args.del_attr = None
    check_output("deleted encrypted attribute")

    # Verify it worked
    args.list = True
    backend_attr_encrypt(topology_st.standalone, None, None, args)
    args.list = False
    check_output("cn: description", missing=True)
Code example #13
def test_dsconf_replication_monitor(topology_m2, set_log_file):
    """Test replication monitor that was ported from legacy tools

    :id: ce48020d-7c30-41b7-8f68-144c9cd757f6
    :setup: 2 MM topology
    :steps:
         1. Create DS instance
         2. Run replication monitor with connections option
         3. Run replication monitor with aliases option
         4. Run replication monitor with --json option
         5. Run replication monitor with .dsrc file created
    :expectedresults:
         1. Success
         2. Success
         3. Success
         4. Success
         5. Success
    """

    m1 = topology_m2.ms["master1"]
    m2 = topology_m2.ms["master2"]

    alias_content = [
        'Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
        'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'
    ]

    connection_content = 'Supplier: ' + m1.host + ':' + str(m1.port)
    content_list = [
        'Replica Root: dc=example,dc=com', 'Replica ID: 1',
        'Replica Status: Available', 'Max CSN',
        'Status For Agreement: "002" (' + m2.host + ':' + str(m2.port) + ')',
        'Replica Enabled: on', 'Update In Progress: FALSE',
        'Last Update Start:', 'Last Update End:', 'Number Of Changes Sent:',
        'Number Of Changes Skipped: None',
        'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded',
        'Last Init Start:', 'Last Init End:', 'Last Init Status:',
        'Reap Active: 0', 'Replication Status: In Synchronization',
        'Replication Lag Time:', 'Supplier: ', m2.host + ':' + str(m2.port),
        'Replica Root: dc=example,dc=com', 'Replica ID: 2',
        'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port) + ')'
    ]

    json_list = [
        'type', 'list', 'items', 'name', m1.host + ':' + str(m1.port), 'data',
        '"replica_id": "1"', '"replica_root": "dc=example,dc=com"',
        '"replica_status": "Available"', 'maxcsn', 'agmts_status', 'agmt-name',
        '002', 'replica', m2.host + ':' + str(m2.port), 'replica-enabled',
        'update-in-progress', 'last-update-start', 'last-update-end',
        'number-changes-sent', 'number-changes-skipped', 'last-update-status',
        'Error (0) Replica acquired successfully: Incremental update succeeded',
        'last-init-start', 'last-init-end', 'last-init-status', 'reap-active',
        'replication-status', 'In Synchronization', 'replication-lag-time',
        '"replica_id": "2"', '001', m1.host + ':' + str(m1.port)
    ]

    dsrc_content = '[repl-monitor-connections]\n' \
                   'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
                   'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
                   '\n' \
                   '[repl-monitor-aliases]\n' \
                   'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
                   'M2 = ' + m2.host + ':' + str(m2.port)

    connections = [
        m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
        m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM
    ]

    aliases = [
        'M1=' + m1.host + ':' + str(m1.port),
        'M2=' + m2.host + ':' + str(m2.port)
    ]

    args = FakeArgs()
    args.connections = connections
    args.aliases = None
    args.json = False

    log.info('Run replication monitor with connections option')
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(content_list, connection_content)

    log.info('Run replication monitor with aliases option')
    args.aliases = aliases
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(content_list, alias_content)

    log.info('Run replication monitor with --json option')
    args.aliases = None
    args.json = True
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(json_list)

    with open(os.path.expanduser(DSRC_HOME), 'w+') as f:
        f.write(dsrc_content)

    args.connections = None
    args.aliases = None
    args.json = False

    log.info('Run replication monitor when .dsrc file is present with content')
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(content_list, alias_content)
Code example #14
def test_dsconf_replication_monitor(topology_m2, set_log_file):
    """Test replication monitor that was ported from legacy tools

    :id: ce48020d-7c30-41b7-8f68-144c9cd757f6
    :setup: 2 MM topology
    :steps:
         1. Create DS instance
         2. Run replication monitor with connections option
         3. Run replication monitor with aliases option
         4. Run replication monitor with --json option
         5. Run replication monitor with .dsrc file created
         6. Run replication monitor with connections option as if using dsconf CLI
    :expectedresults:
         1. Success
         2. Success
         3. Success
         4. Success
         5. Success
         6. Success
    """

    m1 = topology_m2.ms["master1"]
    m2 = topology_m2.ms["master2"]

    # Enable ldapi if not already done.
    for inst in [topology_m2.ms["master1"], topology_m2.ms["master2"]]:
        if not inst.can_autobind():
            # Update ns-slapd instance
            inst.config.set('nsslapd-ldapilisten', 'on')
            inst.config.set('nsslapd-ldapiautobind', 'on')
            inst.restart()
    # Ensure that updates have been sent both ways.
    replicas = Replicas(m1)
    replica = replicas.get(DEFAULT_SUFFIX)
    replica.test_replication([m2])
    replicas = Replicas(m2)
    replica = replicas.get(DEFAULT_SUFFIX)
    replica.test_replication([m1])

    alias_content = [
        'Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
        'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'
    ]

    connection_content = 'Supplier: ' + m1.host + ':' + str(m1.port)
    content_list = [
        'Replica Root: dc=example,dc=com', 'Replica ID: 1',
        'Replica Status: Available', 'Max CSN',
        'Status For Agreement: "002" (' + m2.host + ':' + str(m2.port) + ')',
        'Replica Enabled: on', 'Update In Progress: FALSE',
        'Last Update Start:', 'Last Update End:', 'Number Of Changes Sent:',
        'Number Of Changes Skipped: None',
        'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded',
        'Last Init Start:', 'Last Init End:', 'Last Init Status:',
        'Reap Active: 0', 'Replication Status: In Synchronization',
        'Replication Lag Time:', 'Supplier: ', m2.host + ':' + str(m2.port),
        'Replica Root: dc=example,dc=com', 'Replica ID: 2',
        'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port) + ')'
    ]

    error_list = [
        'consumer (Unavailable)',
        'Failed to retrieve database RUV entry from consumer'
    ]

    json_list = [
        'type', 'list', 'items', 'name', m1.host + ':' + str(m1.port), 'data',
        '"replica_id": "1"', '"replica_root": "dc=example,dc=com"',
        '"replica_status": "Available"', 'maxcsn', 'agmts_status', 'agmt-name',
        '002', 'replica', m2.host + ':' + str(m2.port), 'replica-enabled',
        'update-in-progress', 'last-update-start', 'last-update-end',
        'number-changes-sent', 'number-changes-skipped', 'last-update-status',
        'Error (0) Replica acquired successfully: Incremental update succeeded',
        'last-init-start', 'last-init-end', 'last-init-status', 'reap-active',
        'replication-status', 'In Synchronization', 'replication-lag-time',
        '"replica_id": "2"', '001', m1.host + ':' + str(m1.port)
    ]

    connections = [
        m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
        m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM
    ]

    args = FakeArgs()
    args.connections = connections
    args.aliases = None
    args.json = False

    log.info('Run replication monitor with connections option')
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
    check_value_in_log_and_reset(content_list,
                                 connection_content,
                                 error_list=error_list)

    # Prepare the data for next tests
    aliases = [
        'M1=' + host_m1 + ':' + str(m1.port),
        'M2=' + host_m2 + ':' + str(m2.port)
    ]

    alias_content = [
        'Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
        'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'
    ]

    dsrc_content = '[repl-monitor-connections]\n' \
                   'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
                   'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
                   '\n' \
                   '[repl-monitor-aliases]\n' \
                   'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
                   'M2 = ' + host_m2 + ':' + str(m2.port)

    log.info('Run replication monitor with aliases option')
    args.aliases = aliases
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(content_list, alias_content)

    log.info('Run replication monitor with --json option')
    args.aliases = None
    args.json = True
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(json_list)

    with open(os.path.expanduser(DSRC_HOME), 'w+') as f:
        f.write(dsrc_content)

    args.connections = None
    args.aliases = None
    args.json = False

    log.info('Run replication monitor when .dsrc file is present with content')
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(content_list, alias_content)
    os.remove(os.path.expanduser(DSRC_HOME))

    log.info(
        'Run replication monitor with connections option as if using dsconf CLI'
    )
    # Perform the same test as step 2, but without using the topology
    # instance directly - use an instance built the way the dsconf CLI builds one:
    # step 2 args
    args.connections = connections
    args.aliases = None
    args.json = False
    # args needed to generate an instance with dsrc_arg_concat
    args.instance = 'master1'
    args.basedn = None
    args.binddn = None
    args.bindpw = None
    args.pwdfile = None
    args.prompt = False
    args.starttls = False
    dsrc_inst = dsrc_arg_concat(args, None)
    inst = connect_instance(dsrc_inst, True, args)
    get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(content_list,
                                 connection_content,
                                 error_list=error_list)
Code example #15
def test_usandsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif):
    """Test ldifgen (formerly dbgen) tool to create mixed modification ldif

        :id: 4a2e0901-2b48-452e-a4a0-507735132c81
        :setup: Standalone instance
        :steps:
             1. Create DS instance
             2. Run ldifgen to generate modification ldif
             3. Import generated ldif to database
             4. Check it was properly imported
        :expectedresults:
             1. Success
             2. Success
             3. Success
             4. Success
        """

    standalone = topology_st.standalone

    args = FakeArgs()
    args.parent = DEFAULT_SUFFIX
    args.create_users = True
    args.delete_users = True
    args.create_parent = False
    args.num_users = "1000"
    args.add_users = "100"
    args.del_users = "999"
    args.modrdn_users = "100"
    args.mod_users = "10"
    args.mod_attrs = ['cn', 'uid', 'sn']
    args.randomize = False
    args.ldif_file = ldif_file

    content_list = [
        'Generating LDIF with the following options:',
        'create-users={}'.format(args.create_users), 'parent={}'.format(
            args.parent), 'create-parent={}'.format(args.create_parent),
        'delete-users={}'.format(args.delete_users), 'num-users={}'.format(
            args.num_users), 'add-users={}'.format(args.add_users),
        'del-users={}'.format(args.del_users), 'modrdn-users={}'.format(
            args.modrdn_users), 'mod-users={}'.format(args.mod_users),
        'mod-attrs={}'.format(args.mod_attrs),
        'randomize={}'.format(args.randomize),
        'ldif-file={}'.format(args.ldif_file), 'Writing LDIF',
        'Successfully created LDIF file: {}'.format(args.ldif_file)
    ]

    log.info('Run ldifgen to create modification ldif')
    dbgen_create_mods(standalone, log, args)

    log.info('Check if file exists')
    assert os.path.exists(ldif_file)

    check_value_in_log_and_reset(content_list)

    log.info('Get number of accounts before import')
    accounts = Accounts(standalone, DEFAULT_SUFFIX)
    count_account = len(accounts.filter('(uid=*)'))

    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
    # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0
    with pytest.raises(subprocess.CalledProcessError):
        run_ldapmodify_from_file(standalone, ldif_file)

    log.info('Check that some accounts are imported')
    assert len(accounts.filter('(uid=*)')) > count_account
Code example #16
def test_usandsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif):
    """Test ldifgen (formerly dbgen) tool to create a COS definition

        :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8f1
        :setup: Standalone instance
        :steps:
             1. Create DS instance
             2. Run ldifgen to generate ldif with classic COS definition
             3. Import generated ldif to database
             4. Check it was properly imported
        :expectedresults:
             1. Success
             2. Success
             3. Success
             4. Success
        """

    LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"'

    standalone = topology_st.standalone

    args = FakeArgs()
    args.type = 'classic'
    args.NAME = 'My_Postal_Def'
    args.parent = 'ou=cos definitions,dc=example,dc=com'
    args.create_parent = True
    args.cos_specifier = 'businessCategory'
    args.cos_attr = ['postalcode', 'telephonenumber']
    args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com'
    args.ldif_file = ldif_file

    content_list = [
        'Generating LDIF with the following options:',
        'NAME={}'.format(args.NAME), 'type={}'.format(args.type),
        'parent={}'.format(args.parent), 'create-parent={}'.format(
            args.create_parent), 'cos-specifier={}'.format(args.cos_specifier),
        'cos-template={}'.format(args.cos_template),
        'cos-attr={}'.format(args.cos_attr),
        'ldif-file={}'.format(args.ldif_file), 'Writing LDIF',
        'Successfully created LDIF file: {}'.format(args.ldif_file)
    ]

    log.info('Run ldifgen to create COS definition ldif')
    dbgen_create_cos_def(standalone, log, args)

    log.info('Check if file exists')
    assert os.path.exists(ldif_file)

    check_value_in_log_and_reset(content_list)

    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)

    log.info('Check that COS definition is imported')
    cos_def = CosClassicDefinitions(standalone, args.parent)
    assert cos_def.exists(args.NAME)
    new_cos = cos_def.get(args.NAME)
    assert new_cos.present('cosTemplateDN', args.cos_template)
    assert new_cos.present('cosSpecifier', args.cos_specifier)
    assert new_cos.present('cosAttribute', args.cos_attr[0])
    assert new_cos.present('cosAttribute', args.cos_attr[1])
Code example #17
def test_usandsconf_dbgen_groups(topology_st, set_log_file_and_ldif):
    """Test ldifgen (formerly dbgen) tool to create ldif with group

            :id: 97207413-9a93-4065-a5ec-63aa93801a31
            :setup: Standalone instance
            :steps:
                 1. Create DS instance
                 2. Run ldifgen to generate ldif with group
                 3. Import generated ldif to database
                 4. Check it was properly imported
            :expectedresults:
                 1. Success
                 2. Success
                 3. Success
                 4. Success
            """
    LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"'

    standalone = topology_st.standalone

    args = FakeArgs()
    args.NAME = 'myGroup'
    args.parent = 'ou=groups,dc=example,dc=com'
    args.suffix = DEFAULT_SUFFIX
    args.number = "1"
    args.num_members = "1000"
    args.create_members = True
    args.member_attr = 'uniquemember'
    args.member_parent = 'ou=people,dc=example,dc=com'
    args.ldif_file = ldif_file

    content_list = [
        'Generating LDIF with the following options:',
        'NAME={}'.format(args.NAME), 'number={}'.format(args.number),
        'suffix={}'.format(args.suffix), 'num-members={}'.format(
            args.num_members), 'create-members={}'.format(args.create_members),
        'member-parent={}'.format(args.member_parent),
        'member-attr={}'.format(args.member_attr),
        'ldif-file={}'.format(args.ldif_file), 'Writing LDIF',
        'Successfully created LDIF file: {}'.format(args.ldif_file)
    ]

    log.info('Run ldifgen to create group ldif')
    dbgen_create_groups(standalone, log, args)

    log.info('Check if file exists')
    assert os.path.exists(ldif_file)

    check_value_in_log_and_reset(content_list)

    log.info('Get number of accounts before import')
    accounts = Accounts(standalone, DEFAULT_SUFFIX)
    count_account = len(accounts.filter('(uid=*)'))

    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
    # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0
    with pytest.raises(subprocess.CalledProcessError):
        run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)

    log.info('Check that accounts are imported')
    assert len(accounts.filter('(uid=*)')) > count_account

    log.info('Check that group is imported')
    groups = Groups(standalone, DEFAULT_SUFFIX)
    assert groups.exists(args.NAME + '-1')
    new_group = groups.get(args.NAME + '-1')
    assert new_group.present('uniquemember',
                             'uid=group_entry1-0152,ou=people,dc=example,dc=com')
Code example #18
def test_user_modify(topology):
    be_args = FakeArgs()

    be_args.be_name = 'userRoot'
    be_args.suffix = DEFAULT_SUFFIX
    be_args.parent_suffix = None
    be_args.create_entries = False
    backend_create(topology.standalone, None, topology.logcap.log, be_args)

    # And add the skeleton objects.
    init_args = FakeArgs()
    init_args.version = INSTALL_LATEST_CONFIG
    initialise(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, init_args)

    # Check that our modify parser works. Modify statements are such as:
    # "add:attr:value". Replace is the exception as "replace:attr:old:new"

    # Check bad syntax
    modify_args = FakeArgs()
    modify_args.selector = "demo_user"
    modify_args.changes = ["tnaohtnsuahtnsouhtns"]

    with pytest.raises(ValueError):
        modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    modify_args.changes = ["add:attr:"]
    with pytest.raises(ValueError):
        modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    modify_args.changes = ["add:attr"]
    with pytest.raises(ValueError):
        modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    modify_args.changes = ["replace::"]
    with pytest.raises(ValueError):
        modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    modify_args.changes = ["replace:attr::new"]
    with pytest.raises(ValueError):
        modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    modify_args.changes = ["delete:attr:old:new"]
    with pytest.raises(ValueError):
        modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    # Check that even a single bad value causes error
    modify_args.changes = ["add:description:goodvalue", "add:attr:"]
    with pytest.raises(ValueError):
        modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    # check good syntax
    modify_args.changes = ["add:description:testvalue"]
    modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    modify_args.changes = ["replace:description:newvalue"]
    modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    modify_args.changes = ["delete:description:newvalue"]
    modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    modify_args.changes = ["add:description:testvalue"]
    modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    modify_args.changes = ["delete:description:"]
    modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)

    # check mixed type, with multiple actions

    modify_args.changes = ["add:objectclass:nsMemberOf", "add:description:anothervalue"]
    modify(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, modify_args)
Code example #19
def test_indexes(topology_st, create_backend):
    """Test creating, listing, getting, and deleting an index
    :id: 800f432a-52ab-4661-ac66-a2bdd9b984d78
    :setup: Standalone instance
    :steps:
        1. Add index (description)
        2. Verify index was added
        3. Modify index (Add type)
        4. Verify index was modified
        5. Modify index (Delete type)
        6. Verify index was modified
        7. Modify index (Add MR)
        8. Verify index was modified
        9. Modify index (Delete MR)
        10. Verify index was modified
        11. Reindex index
        12. Remove index
        13. Verify index was removed

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
        13. Success
    """
    sys.stdout = io.StringIO()

    args = FakeArgs()
    args.cn = BE_NAME
    args.be_name = BE_NAME
    args.suffix = False
    args.nsslapd_suffix = SUFFIX
    args.attr = 'description'
    args.index_type = 'eq'
    args.matching_rule = None
    args.reindex = False
    args.json = False
    args.just_names = False
    args.add_type = None
    args.del_type = None
    args.add_mr = None
    args.del_mr = None

    # Add an index
    backend_add_index(topology_st.standalone, None, None, args)
    check_output("added index")

    # List indexes
    backend_list_index(topology_st.standalone, None, None, args)
    check_output("cn: description")

    # Modify index (Add type)
    args.add_type = ['sub']
    backend_set_index(topology_st.standalone, None, None, args)
    args.add_type = None
    check_output("successfully updated")

    # Verify type was added
    args.attr = ['description']
    backend_get_index(topology_st.standalone, None, None, args)
    check_output("nsindextype: sub")

    # Remove index type sub
    args.attr = 'description'

    args.del_type = ['sub']
    backend_set_index(topology_st.standalone, None, None, args)
    args.del_type = None
    check_output("successfully updated")

    # Verify type was removed
    args.attr = ['description']
    backend_get_index(topology_st.standalone, None, None, args)
    check_output("nsindextype: sub", missing=True)

    # Modify index (add MR)
    args.attr = 'description'
    args.add_mr = ['1.1.1.1.1.1']
    backend_set_index(topology_st.standalone, None, None, args)
    args.add_mr = None
    check_output("successfully updated")

    # Verify MR was added
    args.attr = ['description']
    backend_get_index(topology_st.standalone, None, None, args)
    check_output("nsmatchingrule: 1.1.1.1.1.1")

    # Modify index (delete MR)
    args.attr = 'description'
    args.del_mr = ['1.1.1.1.1.1']
    backend_set_index(topology_st.standalone, None, None, args)
    args.del_mr = None
    check_output("successfully updated")

    # Verify MR was removed
    args.attr = ['description']
    backend_get_index(topology_st.standalone, None, None, args)
    check_output("nsmatchingrule: 1.1.1.1.1.1", missing=True)

    # Reindex index
    backend_reindex(topology_st.standalone, None, None, args)
    check_output("reindexed database")
    time.sleep(2)

    # Delete index
    backend_del_index(topology_st.standalone, None, None, args)
    check_output("deleted index")

    # Verify index was removed
    backend_list_index(topology_st.standalone, None, None, args)
    check_output("cn: description", missing=True)
Code example #20
def test_user_tasks(topology):
    be_args = FakeArgs()

    be_args.cn = 'userRoot'
    be_args.nsslapd_suffix = DEFAULT_SUFFIX
    backend_create(topology.standalone, None, topology.logcap.log, be_args)

    # And add the skeleton objects.
    init_args = FakeArgs()
    init_args.version = INSTALL_LATEST_CONFIG
    initialise(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, init_args)

    # First check that our test user isn't there:
    topology.logcap.flush()
    u_args = FakeArgs()
    u_args.selector = 'testuser'
    with pytest.raises(ldap.NO_SUCH_OBJECT):
        get(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, u_args)

    # Create the user
    topology.logcap.flush()
    u_args.uid = 'testuser'
    # u_args.sn = 'testuser'
    u_args.cn = 'Test User'
    u_args.displayName = 'Test User'
    u_args.homeDirectory = '/home/testuser'
    u_args.uidNumber = '5000'
    u_args.gidNumber = '5000'
    create(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, u_args)

    assert(topology.logcap.contains("Sucessfully created testuser"))
    # Assert they exist
    topology.logcap.flush()
    get(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, u_args)
    assert(topology.logcap.contains('dn: uid=testuser,ou=people,dc=example,dc=com'))

    # Reset the password

    # Lock the account, check status
    topology.logcap.flush()
    lock(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, u_args)
    assert(topology.logcap.contains('locked'))

    topology.logcap.flush()
    status(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, u_args)
    assert(topology.logcap.contains('locked: True'))

    # Unlock check status
    topology.logcap.flush()
    unlock(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, u_args)
    assert(topology.logcap.contains('unlocked'))

    topology.logcap.flush()
    status(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, u_args)
    assert(topology.logcap.contains('locked: False'))

    # Enroll a dummy cert

    # Enroll a dummy sshkey

    # Delete it 
    topology.logcap.flush()
    u_args.dn = 'uid=testuser,ou=people,dc=example,dc=com'
    delete(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, u_args, warn=False)
    assert(topology.logcap.contains('Sucessfully deleted uid=testuser,ou=people,dc=example,dc=com'))
Code example #21
@pytest.fixture
def create_backend(topology_st, request):
    """Create backend "dc=backend,dc=test" / backendRoot
    """
    sys.stdout = io.StringIO()

    args = FakeArgs()
    args.cn = BE_NAME
    args.be_name = BE_NAME
    args.suffix = False
    args.nsslapd_suffix = SUFFIX
    args.skip_subsuffixes = False
    args.json = False
    args.parent_suffix = False
    args.create_entries = True

    args.suffix = SUFFIX
    backend_create(topology_st.standalone, None, None, args)
    check_output("The database was successfully created")

    def fin():
        sys.stdout = io.StringIO()
        args = FakeArgs()
        args.cn = BE_NAME
        args.be_name = BE_NAME
        args.suffix = SUFFIX
        args.skip_subsuffixes = False
        args.json = False

        # Delete backend
        backend_delete(topology_st.standalone, None, None, args, warn=False)
        check_output("successfully deleted")

        # Verify it's removed
        args.suffix = False
        backend_list(topology_st.standalone, None, None, args)
        check_output("backendroot", missing=True)

    request.addfinalizer(fin)
Code example #22
def test_backend_cli(topology_st, create_backend):
    """Test creating, listing, getting, and deleting a backend (and subsuffix)
    :id: 800f432a-52ab-4661-ac66-a2bdd9b984d7
    :setup: Standalone instance
    :steps:
        1. List backends
        2. Get backend by suffix
        3. Get backend by DN
        4. Add subsuffix
        5. Verify subsuffix
        6. Modify subsuffix
        7. Delete subsuffix
        8. Verify subsuffix is removed
        9. Modify backend
        10. Verify modify worked
        11. Test monitor works
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
    """
    topology_st.logcap = LogCapture()
    sys.stdout = io.StringIO()

    args = FakeArgs()
    args.cn = BE_NAME
    args.be_name = BE_NAME
    args.suffix = False
    args.nsslapd_suffix = SUFFIX
    args.skip_subsuffixes = False
    args.json = False
    args.parent_suffix = False
    args.create_entries = True

    # List backend
    backend_list(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(SUFFIX)

    # Get backend by name
    args.selector = BE_NAME
    backend_get(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(BE_NAME)

    # Get backend by DN
    args.dn = 'cn=backendRoot,cn=ldbm database,cn=plugins,cn=config'
    backend_get_dn(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(BE_NAME)

    # Add subsuffix
    args.parent_suffix = SUFFIX
    args.suffix = SUB_SUFFIX
    args.be_name = SUB_BE_NAME
    backend_create(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("The database was successfully created")

    # Verify subsuffix
    args.suffix = False
    backend_list(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(SUB_SUFFIX)

    # Modify subsuffix
    args.enable = False
    args.disable = False
    args.add_referral = False
    args.del_referral = False
    args.cache_size = False
    args.cache_memsize = False
    args.dncache_memsize = False
    args.enable_readonly = True  # Setting nsslapd-readonly to "on"
    args.disable_readonly = False
    backend_set(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("successfully updated")

    # Verify modified worked
    args.selector = SUB_BE_NAME
    backend_get(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("nsslapd-readonly: on")

    # Delete subsuffix
    args.suffix = SUB_SUFFIX
    backend_delete(topology_st.standalone,
                   None,
                   topology_st.logcap.log,
                   args,
                   warn=False)
    check_output("successfully deleted")

    # Verify it is deleted
    args.suffix = False
    backend_list(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(SUB_BE_NAME, missing=True)

    # Modify backend (use same args from subsuffix modify)
    args.be_name = BE_NAME
    backend_set(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("successfully updated")

    # Verify modified worked
    args.selector = BE_NAME
    backend_get(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("nsslapd-readonly: on")

    # Run database monitor
    args.suffix = SUFFIX
    get_monitor(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("entrycachetries")
Code example #23
def test_vlv(topology_st, create_backend):
    """Test creating, listing, getting, and deleting vlv's
    :id: 800f432a-52ab-4661-ac66-a2bdd9b984d790
    :setup: Standalone instance
    :steps:
        1. Add VLV search and index entries
        2. Verify they are created
        3. Edit VLV search and verify change
        4. Create additional VLV indexes
        5. Verify new indexes were created
        6. Remove VLV indexes
        7. Verify indexes were removed
        8. Reindex VLV
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success

    """
    sys.stdout = io.StringIO()
    args = FakeArgs()
    args.cn = BE_NAME
    args.be_name = BE_NAME
    args.suffix = False
    args.nsslapd_suffix = SUFFIX
    args.json = False
    args.name = "myVLVSearch"
    args.index_name = "myVLVIndex"
    args.search_base = SUFFIX
    args.search_scope = '2'
    args.search_filter = "cn=*"
    args.parent_name = args.name
    args.index = False
    args.reindex = False
    args.sort = "cn sn"
    args.just_names = False

    # Create vlv search
    backend_create_vlv(topology_st.standalone, None, None, args)
    check_output("created new VLV Search entry")

    # Verify search is present
    backend_get_vlv(topology_st.standalone, None, None, args)
    check_output("VLV Search:")

    # Create VLV index under vlvSearch
    backend_create_vlv_index(topology_st.standalone, None, None, args)
    check_output("created new VLV index entry")

    # Verify index is present
    backend_get_vlv(topology_st.standalone, None, None, args)
    check_output("VLV Index:")

    # Edit VLV Search
    args.search_base = None
    args.search_scope = '0'
    args.search_filter = None
    args.sort = None
    backend_edit_vlv(topology_st.standalone, None, None, args)
    check_output("updated VLV search entry")

    # Verify edit was successful
    backend_get_vlv(topology_st.standalone, None, None, args)
    check_output("vlvscope: 0")

    # List vlv searches
    backend_list_vlv(topology_st.standalone, None, None, args)
    check_output("vlvbase: " + SUFFIX)

    # Add another index
    args.index_name = "my2ndVLVIndex"
    args.sort = "uid givenname"
    backend_create_vlv_index(topology_st.standalone, None, None, args)
    check_output("created new VLV index entry")

    # Verify new index was created
    backend_get_vlv(topology_st.standalone, None, None, args)
    check_output("vlvsort: uid givenname")

    # Reindex VLV
    backend_reindex_vlv(topology_st.standalone, None, None, args)
    check_output("reindexed VLV indexes")
    time.sleep(2)

    # Delete VLV search and indexes
    backend_del_vlv(topology_st.standalone, None, None, args)
    check_output("deleted VLV search and its indexes")

    # List vlv searches/indexes
    backend_list_vlv(topology_st.standalone, None, None, args)
    check_output("")
Code example #24
def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user):
    """ Test dsidm account entry-status option with account lock/unlock

    :id: d911bbf2-3a65-42a4-ad76-df1114caa396
    :setup: Standalone instance
    :steps:
         1. Create user account
         2. Run dsidm account entry status
         3. Run dsidm account lock
         4. Run dsidm account entry status
         5. Run dsidm account unlock
         6. Run dsidm account entry status
    :expectedresults:
         1. Success
         2. The state message should be Entry State: activated
         3. Success
         4. The state message should be Entry State: directly locked through nsAccountLock
         5. Success
         6. The state message should be Entry State: activated
    """

    standalone = topology_st.standalone
    users = nsUserAccounts(standalone, DEFAULT_SUFFIX)
    test_user = users.get('test_user_1000')

    entry_list = [
        'Entry DN: {}'.format(test_user.dn), 'Entry Creation Date',
        'Entry Modification Date'
    ]

    state_lock = 'Entry State: directly locked through nsAccountLock'
    state_unlock = 'Entry State: activated'

    lock_msg = 'Entry {} is locked'.format(test_user.dn)
    unlock_msg = 'Entry {} is unlocked'.format(test_user.dn)

    args = FakeArgs()
    args.dn = test_user.dn
    args.json = False

    log.info('Test dsidm account entry-status')
    entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
    check_value_in_log_and_reset(topology_st,
                                 content_list=entry_list,
                                 check_value=state_unlock)

    log.info('Test dsidm account lock')
    lock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
    check_value_in_log_and_reset(topology_st, check_value=lock_msg)

    log.info('Test dsidm account entry-status with locked account')
    entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
    check_value_in_log_and_reset(topology_st,
                                 content_list=entry_list,
                                 check_value=state_lock)

    log.info('Test dsidm account unlock')
    unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
    check_value_in_log_and_reset(topology_st, check_value=unlock_msg)

    log.info('Test dsidm account entry-status with unlocked account')
    entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
    check_value_in_log_and_reset(topology_st,
                                 content_list=entry_list,
                                 check_value=state_unlock)
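
# check_value_in_log_and_reset is a shared helper defined elsewhere in
# this suite; a minimal sketch, assuming only the LogCapture
# contains()/flush() API already used throughout these examples:
def check_value_in_log_and_reset(topology, check_value=None, content_list=None):
    if content_list is not None:
        for item in content_list:
            assert topology.logcap.contains(item)
    if check_value is not None:
        assert topology.logcap.contains(check_value)
    topology.logcap.flush()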
Code example #25
def test_conflict_cli(topo):
    """Test manageing replication conflict entries

    :id: 800f432a-52ab-4661-ac66-a2bdd9b984d8
    :setup: two masters
    :steps:
        1. Create replication conflict entries
        2. List conflicts
        3. Compare conflict entry
        4. Delete conflict
        5. Resurrect conflict
        6. Swap conflict
        7. List glue entry
        8. Delete glue entry
        9. Convert glue entry

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
    """

    # Setup our default parameters for CLI functions
    topo.logcap = LogCapture()
    sys.stdout = io.StringIO()
    args = FakeArgs()
    args.DN = ""
    args.suffix = DEFAULT_SUFFIX
    args.json = True

    m1 = topo.ms["master1"]
    m2 = topo.ms["master2"]

    topo.pause_all_replicas()

    # Create entries
    _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent1')
    _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent1')
    _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent2')
    _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent2')
    cont_parent_m1 = _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent3')
    cont_parent_m2 = _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent3')
    cont_glue_m1 = _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent4')
    cont_glue_m2 = _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent4')

    # Create the conflicts
    _delete_container(cont_parent_m1)
    _create_container(m2, cont_parent_m2.dn, 'conflict_child1')
    _delete_container(cont_glue_m1)
    _create_container(m2, cont_glue_m2.dn, 'conflict_child2')

    # Resume replication
    topo.resume_all_replicas()
    time.sleep(5)

    # Test "list"
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 4
    conflict_1_DN = conflicts['items'][0]['dn']
    conflict_2_DN = conflicts['items'][1]['dn']
    conflict_3_DN = conflicts['items'][2]['dn']
    topo.logcap.flush()

    # Test compare
    args.DN = conflict_1_DN
    cmp_conflict(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 2
    topo.logcap.flush()

    # Test delete
    del_conflict(m2, None, topo.logcap.log, args)
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 3
    topo.logcap.flush()

    # Test swap
    args.DN = conflict_2_DN
    swap_conflict(m2, None, topo.logcap.log, args)
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 2
    topo.logcap.flush()

    # Test conflict convert
    args.DN = conflict_3_DN
    args.new_rdn = "cn=testing convert"
    convert_conflict(m2, None, topo.logcap.log, args)
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 1
    topo.logcap.flush()

    # Test list glue entries
    list_glue(m2, None, topo.logcap.log, args)
    glues = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(glues['items']) == 2
    topo.logcap.flush()

    # Test delete glue entries
    args.DN = "cn=conflict_parent3,dc=example,dc=com"
    del_glue(m2, None, topo.logcap.log, args)
    list_glue(m2, None, topo.logcap.log, args)
    glues = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(glues['items']) == 1
    topo.logcap.flush()

    # Test convert glue entries
    args.DN = "cn=conflict_parent4,dc=example,dc=com"
    convert_glue(m2, None, topo.logcap.log, args)
    list_glue(m2, None, topo.logcap.log, args)
    glues = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(glues['items']) == 0
    topo.logcap.flush()
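
# _create_container and _delete_container are module-level helpers not
# shown in this excerpt; a minimal sketch, assuming lib389's
# nsContainers API:
from lib389.idm.nscontainer import nsContainers

def _create_container(inst, dn, name):
    """Create a container entry under dn and return it."""
    containers = nsContainers(inst, dn)
    return containers.create(properties={'cn': name})

def _delete_container(cont):
    """Delete a container entry created above."""
    cont.delete()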
Code example #26
def test_retrocl_exclude_attr_mod(topology_st):
    """ Test exclude attribute feature of the retrocl plugin for mod operation

    :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3

    :setup: Standalone instance

    :steps:
        1. Enable dynamic plugins
        2. Configure retro changelog plugin
        3. Add user1 entry
        4. Ensure entry attrs are in the changelog
        5. Exclude an attr
        6. Modify user1 entry
        7. Ensure excluded attr is not in the changelog

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """

    st = topology_st.standalone

    log.info('Configure retrocl plugin')
    rcl = RetroChangelogPlugin(st)
    rcl.disable()
    rcl.enable()
    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')

    log.info('Restarting instance')
    try:
        st.restart()
    except ldap.LDAPError as e:
        log.error('Failed to restart instance ' + e.args[0]['desc'])
        assert False

    users = UserAccounts(st, DEFAULT_SUFFIX)

    log.info('Adding user1')
    try:
        user1 = users.create(
            properties={
                'sn': '1',
                'cn': 'user 1',
                'uid': 'user1',
                'uidNumber': '11',
                'gidNumber': '111',
                'givenname': 'user1',
                'homePhone': '0861234567',
                'carLicense': '131D16674',
                'mail': '*****@*****.**',
                'homeDirectory': '/home/user1',
                'userpassword': USER_PW
            })
    except ldap.ALREADY_EXISTS:
        user1 = UserAccount(st, dn=USER1_DN)
    except ldap.LDAPError as e:
        log.error("Failed to add user1: " + str(e))

    log.info(
        'Verify homePhone and carLicense attrs are in the changelog changestring'
    )
    try:
        retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
        cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
    except ldap.LDAPError as e:
        log.fatal("Changelog search failed, error: " + str(e))
        assert False
    assert len(cllist) > 0
    if cllist[0].present('changes'):
        clstr = str(cllist[0].get_attr_vals_utf8('changes'))
        assert ATTR_HOMEPHONE in clstr
        assert ATTR_CARLICENSE in clstr

    log.info('Excluding attribute ' + ATTR_CARLICENSE)
    args = FakeArgs()
    args.connections = [
        st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM
    ]
    args.instance = 'standalone1'
    args.basedn = None
    args.binddn = None
    args.starttls = False
    args.pwdfile = None
    args.bindpw = None
    args.prompt = False
    args.exclude_attrs = ATTR_CARLICENSE
    args.func = retrochangelog_add
    dsrc_inst = dsrc_arg_concat(args, None)
    inst = connect_instance(dsrc_inst, False, args)
    result = args.func(inst, None, log, args)
    disconnect_instance(inst)
    assert result is None

    log.info('Restarting instance')
    try:
        st.restart()
    except ldap.LDAPError as e:
        log.error('Failed to restart instance ' + e.args[0]['desc'])
        assert False

    log.info('Modify user1 carLicense attribute')
    try:
        user1.replace(ATTR_CARLICENSE, "123WX321")
    except ldap.LDAPError as e:
        log.fatal(
            'test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error '
            + str(e))
        assert False

    log.info('Verify carLicense attr is not in the changelog changestring')
    try:
        cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
        assert len(cllist) > 0
        # There will be 2 entries in the changelog for this user; we are only
        # interested in the second one, the modify operation.
        if cllist[1].present('changes'):
            clstr = str(cllist[1].get_attr_vals_utf8('changes'))
            assert ATTR_CARLICENSE not in clstr
    except ldap.LDAPError as e:
        log.fatal("Changelog search failed, error: " + str(e))
        assert False
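
# The constants used above are defined at module scope; illustrative
# values, assuming the standard retro changelog suffix (USER_PW is a
# hypothetical test password):
RETROCL_SUFFIX = 'cn=changelog'
ATTR_HOMEPHONE = 'homePhone'
ATTR_CARLICENSE = 'carLicense'
USER1_DN = 'uid=user1,ou=People,' + DEFAULT_SUFFIX
USER_PW = 'password'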
Code example #27
File: idm_group_test.py  Project: nextoa/389-ds-base
def test_group_tasks(topology):
    # First check that our test group isn't there:
    topology.logcap.flush()
    g_args = FakeArgs()
    g_args.selector = 'testgroup'
    with pytest.raises(ldap.NO_SUCH_OBJECT):
        get(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, g_args)

    # Create a group
    topology.logcap.flush()
    g_args.cn = 'testgroup'
    create(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, g_args)
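    # Note: 'Sucessfully' (sic) mirrors the exact message emitted by this
    # version of the CLI, so the assertions keep the original spelling.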
    assert (topology.logcap.contains("Sucessfully created testgroup"))

    # Assert it exists
    topology.logcap.flush()
    g_args = FakeArgs()
    g_args.selector = 'testgroup'
    get(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, g_args)
    assert (topology.logcap.contains(
        "dn: cn=testgroup,ou=groups,dc=example,dc=com"))

    # Add a user
    topology.logcap.flush()
    u_args = FakeArgs()
    u_args.uid = 'testuser'
    u_args.cn = 'Test User'
    u_args.displayName = 'Test User'
    u_args.homeDirectory = '/home/testuser'
    u_args.uidNumber = '5000'
    u_args.gidNumber = '5000'
    create_user(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log,
                u_args)
    assert (topology.logcap.contains("Sucessfully created testuser"))

    # Add them to the group as a member
    topology.logcap.flush()
    g_args.cn = "testgroup"
    g_args.dn = "uid=testuser,ou=people,dc=example,dc=com"
    add_member(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log,
               g_args)
    assert (topology.logcap.contains("added member"))

    # Check they are a member
    topology.logcap.flush()
    g_args.cn = "testgroup"
    members(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, g_args)
    assert (
        topology.logcap.contains("uid=testuser,ou=people,dc=example,dc=com"))

    # Remove them from the group
    topology.logcap.flush()
    g_args.cn = "testgroup"
    g_args.dn = "uid=testuser,ou=people,dc=example,dc=com"
    remove_member(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log,
                  g_args)
    assert (topology.logcap.contains("removed member"))

    # Check they are not a member
    topology.logcap.flush()
    g_args.cn = "testgroup"
    members(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, g_args)
    assert (topology.logcap.contains("No members to display"))

    # Delete the group
    topology.logcap.flush()
    g_args.dn = "cn=testgroup,ou=groups,dc=example,dc=com"
    delete(topology.standalone,
           DEFAULT_SUFFIX,
           topology.logcap.log,
           g_args,
           warn=False)
    assert (topology.logcap.contains(
        "Sucessfully deleted cn=testgroup,ou=groups,dc=example,dc=com"))
Code example #28
def test_dsrc(topo, setup):
    """Test "dsctl dsrc" command

    :id: 0610de6c-e167-4761-bdab-3e677b2d44bb
    :setup: Standalone Instance
    :steps:
        1. Test creation works
        2. Test creating duplicate section
        3. Test adding an additional inst config works
        4. Test removing an instance works
        5. Test modify works
        6. Test delete works
        7. Test display fails when no file is present

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """

    inst = topo.standalone
    serverid = inst.serverid
    second_inst_name = "Second"
    second_inst_basedn = "o=second"
    different_suffix = "o=different"

    # Setup our args
    args = FakeArgs()
    args.basedn = DEFAULT_SUFFIX
    args.binddn = DN_DM
    args.json = None
    args.uri = None
    args.saslmech = None
    args.tls_cacertdir = None
    args.tls_cert = None
    args.tls_key = None
    args.tls_reqcert = None
    args.starttls = None
    args.cancel_starttls = None
    args.pwdfile = None
    args.do_it = True

    # Create a dsrc configuration entry
    create_dsrc(inst, log, args)
    display_dsrc(inst, topo.logcap.log, args)
    assert topo.logcap.contains("basedn = " + args.basedn)
    assert topo.logcap.contains("binddn = " + args.binddn)
    assert topo.logcap.contains("[" + serverid + "]")
    topo.logcap.flush()

    # Attempt to add duplicate instance section
    with pytest.raises(ValueError):
        create_dsrc(inst, log, args)

    # Test adding a second instance works correctly
    inst.serverid = second_inst_name
    args.basedn = second_inst_basedn
    create_dsrc(inst, log, args)
    display_dsrc(inst, topo.logcap.log, args)
    assert topo.logcap.contains("basedn = " + args.basedn)
    assert topo.logcap.contains("[" + second_inst_name + "]")
    topo.logcap.flush()

    # Delete second instance
    delete_dsrc(inst, log, args)
    inst.serverid = serverid  # Restore original instance name
    display_dsrc(inst, topo.logcap.log, args)
    assert not topo.logcap.contains("[" + second_inst_name + "]")
    assert not topo.logcap.contains("basedn = " + args.basedn)
    # Make sure first instance config is still present
    assert topo.logcap.contains("[" + serverid + "]")
    assert topo.logcap.contains("binddn = " + args.binddn)
    topo.logcap.flush()

    # Modify the config
    args.basedn = different_suffix
    modify_dsrc(inst, log, args)
    display_dsrc(inst, topo.logcap.log, args)
    assert topo.logcap.contains(different_suffix)
    topo.logcap.flush()

    # Remove an arg from the config
    args.basedn = ""
    modify_dsrc(inst, log, args)
    display_dsrc(inst, topo.logcap.log, args)
    assert not topo.logcap.contains(different_suffix)
    topo.logcap.flush()

    # Remove the last entry, which should delete the file
    delete_dsrc(inst, log, args)
    dsrc_file = f'{expanduser("~")}/.dsrc'
    assert not os.path.exists(dsrc_file)

    # Make sure display fails
    with pytest.raises(ValueError):
        display_dsrc(inst, log, args)
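
# For reference, the ~/.dsrc written by create_dsrc above is a plain INI
# file; based on the values asserted in this test it would contain
# roughly the following (other keys, e.g. a uri line, may also appear):
#
#   [standalone1]
#   basedn = dc=example,dc=com
#   binddn = cn=Directory Manager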