Example #1
0
def setup_kerberos(session_multihost, request):
    """Configure a Kerberos KDC (EXAMPLE.TEST) on the master host.

    Registers a finalizer that tears the KDC down and removes the
    host keytab when the fixture goes out of scope.
    """
    master = session_multihost.master[0]
    tools = sssdTools(master)
    tools.config_etckrb5('EXAMPLE.TEST')
    krb = krb5srv(master, 'EXAMPLE.TEST')
    krb.krb_setup_new()

    def remove_kerberos():
        """Destroy the KDC instance and delete /etc/krb5.keytab."""
        krb.destroy_krb5server()
        master.run_command('rm -f /etc/krb5.keytab')
    request.addfinalizer(remove_kerberos)
Example #2
0
def enable_kcm(session_multihost, request):
    """Enable the sssd-kcm credential cache service on the master.

    Backs up /etc/krb5.conf first; the finalizer restores it and
    stops sssd-kcm again.
    """
    master = session_multihost.master[0]
    master.run_command('cp /etc/krb5.conf /etc/krb5.conf.nokcm')
    master.service_sssd('stop')
    sssdTools(master).enable_kcm()
    master.service_sssd('start')
    master.run_command('systemctl start sssd-kcm')

    def disable_kcm():
        """Restore the pre-KCM krb5.conf and stop sssd-kcm."""
        master.run_command('cp /etc/krb5.conf.nokcm /etc/krb5.conf')
        master.run_command('systemctl stop sssd-kcm')
    request.addfinalizer(disable_kcm)
Example #3
0
 def test_0001_rotation(self, multihost):
     """
     :title: Verify machine passwd updates local smb secrets
     :id: 3d08ea1c-6724-4bc9-ac62-b8be66486ee4
     """
     # Get current hash of /var/lib/samba/private/secrets.tdb so it can be
     # compared after the machine password is rotated.
     hash_cmd = 'sha1hmac /var/lib/samba/private/secrets.tdb'
     cmd = multihost.client[0].run_command(hash_cmd, raiseonerr=False)
     before_hash = cmd.stdout_text.strip()
     print("hash before reseting machine passwd", before_hash)
     stat = 'stat /var/lib/samba/private/secrets.tdb'
     cmd = multihost.client[0].run_command(stat, raiseonerr=False)
     # Get tdb-dump (debug aid only; output is not asserted on)
     tdbdump = 'tdbdump /var/lib/samba/private/secrets.tdb'
     multihost.client[0].run_command(tdbdump, raiseonerr=False)
     client = sssdTools(multihost.client[0], multihost.ad[0])
     # NOTE(review): '******' looks like a value redacted by the scraper;
     # it is presumably meant to be 'True' -- confirm against upstream.
     sssd_params = {'ad_maximum_machine_account_password_age': '1',
                    'ad_machine_account_password_renewal_opts': '300:15',
                    'ad_update_samba_machine_account_password': '******',
                    'debug_level': '9'}
     domain_name = client.get_domain_section_name()
     domain_section = 'domain/{}'.format(domain_name)
     client.sssd_conf(domain_section, sssd_params,)
     client.reset_machine_password()
     # NetBIOS host names are limited to 15 characters.
     client_hostname = multihost.client[0].sys_hostname.split('.')[0]
     if len(client_hostname) > 15:
         client_hostname = client_hostname[:15]
     realm = multihost.ad[0].realm
     host_princ = 'HOST/%s@%s' % (client_hostname, realm)
     kvno_cmd = 'kvno %s' % (host_princ)
     cmd = multihost.client[0].run_command(kvno_cmd, raiseonerr=False)
     kvno = cmd.stdout_text.split('=')[1].strip()
     restart_sssd = 'systemctl restart sssd'
     try:
         multihost.client[0].run_command(restart_sssd)
     except subprocess.CalledProcessError:
         pytest.fail("Cannot restart sssd service")
     # Give the renewal task time to rotate the machine password.
     time.sleep(30)
     ls = 'cat /etc/sssd/sssd.conf'
     cmd = multihost.client[0].run_command(ls, raiseonerr=False)
     klist_cmd = "klist -k /etc/krb5.keytab"
     cmd = multihost.client[0].run_command(klist_cmd, raiseonerr=False)
     spn_list = [val.strip() for val in cmd.stdout_text.splitlines()]
     client_domain = multihost.client[0].sys_hostname.split('.')[0].upper()
     if len(client_domain) > 15:
         client_domain = client_domain[:15]
     old_kvno = int(kvno)
     new_kvno = int(kvno) + 1
     # older entry
     entry_1 = '{} {}/{}@{}'.format(old_kvno, 'host', client_domain,
                                    multihost.ad[0].realm)
     # new entry
     entry_2 = '{} {}/{}@{}'.format(new_kvno, 'host', client_domain,
                                    multihost.ad[0].realm)
     # Bug fix: the original condition 'entry_1 and entry_2 not in ...'
     # parsed as 'entry_1 and (entry_2 not in ...)', so entry_1 membership
     # was never checked.  Test both keytab entries explicitly.
     # spn_list[3:] skips the klist header lines.
     if entry_1 not in spn_list[3:] or entry_2 not in spn_list[3:]:
         pytest.fail("keytab rotation failed, host entries not rotated")
     cmd = multihost.client[0].run_command(hash_cmd, raiseonerr=False)
     after_hash = cmd.stdout_text.strip()
     print("hash after reseting machine passwd", after_hash)
     cmd = multihost.client[0].run_command(stat, raiseonerr=False)
     logs = "tail -n 500 /var/log/sssd/sssd_%s.log" % domain_name
     multihost.client[0].run_command(logs, raiseonerr=False)
     multihost.client[0].run_command(tdbdump, raiseonerr=False)
Example #4
0
    def test_innetgr_threads(self, multihost, backupsssdconf):
        """
        :title: Verify sssd is thread-safe in innetgr
        :id: d38a8279-312d-4f52-808c-17226e7168d3
        :customerscenario: True
        :description:
         SSSD was not thread safe in innetgr call when using nested netgroups
         resulting in nfs-ganesha not working correctly
        :setup:
          1. Configure client to use sssd proxy files
          2. Create /etc/netgroup file with two groups containing 1000+
             members.
          3. Add the group/host info to the c sources
          4. Compile the sources using gcc
        :steps:
          1. Restart sssd on client and clear caches
          2. Run first binary to verify that the setup is correct.
          3. Run second binary to verify that the bug is fixed.
        :expectedresults:
          1. SSSD restarted successfully
          2. Test binary returns 0 return code.
          3. Test binary returns 0 return code.
        :teardown:
          1. Remove the net groups
          2. Remove the sources and binaries.
        :bugzilla:
         https://bugzilla.redhat.com/show_bug.cgi?id=1703436
        """
        # SETUP
        # Configure proxy provider
        sssd_client = sssdTools(multihost.client[0])
        domain_params = {
            'id_provider': 'proxy',
            'proxy_lib_name': 'files',
            'auth_provider': 'none'
        }
        sssd_client.sssd_conf(
            'domain/%s' % sssd_client.get_domain_section_name(), domain_params)

        # Create the net groups in file
        # (trailing spaces below act as separators before member triples)
        client_shortname = multihost.client[0].shortname
        net_group1_name = "ngr"
        net_group1 = f"{net_group1_name} "
        net_group2 = f"{net_group1_name}2 "

        # We need long enough member searching so the issue appears.
        # This number of members reproduced failure in 2+ threads reliably.
        for i in range(1, 4096):
            net_group1 += "(host1%04d, user1%04d, domain1) " % (i, i)
            net_group2 += "(host2%04d, user2%04d, domain2) " % (i, i)
        # The client host itself is the member the C test will look up.
        net_group1 += f"({client_shortname}, myuser, domain6)"

        multihost.client[0].transport.put_file_contents(
            '/etc/netgroup', net_group1 + "\n" + net_group2 + "\n")

        # Prepare c code
        # $pre/$post wrap the innetgr() call: with the mutex (first binary)
        # the run must always pass; without it (second binary) it only
        # passes when SSSD's innetgr path is thread-safe.
        pre = "pthread_mutex_lock(&netg_lock);"
        post = "pthread_mutex_unlock(&netg_lock);"
        # string.Template source: $group, $host, $pre, $post are substituted
        # below; do not modify the literal itself.
        code_template_str = textwrap.dedent("""\
            #include <stdio.h>
            #include <stdlib.h>
            #include <netdb.h>
            #include <pthread.h>
            #define NTHREADS 256
            #define NUM_CALLS 1000
            static char *groups[] = {"$group"};
            static char *hosts[] = {"$host"};
            #define NHOSTS (sizeof(hosts)/sizeof(hosts[0]))
            #define NGROUPS (sizeof(groups)/sizeof(groups[0]))
            static int pass_count;
            static int fail_count;
            static int bogus_count;
            static pthread_mutex_t netg_lock = PTHREAD_MUTEX_INITIALIZER;
            static void *thread_main(void *arg)
            {
                unsigned long i;
                char *host;
                char *group;
                int rc;
                for (i = 0; i < NUM_CALLS; i++) {
                    host = hosts[rand() % NHOSTS];
                    group = groups[rand() % NGROUPS];
                    $pre
                    rc = innetgr(group, host, NULL, NULL);
                    $post
                    /* Ideally, atomic increments should be used, but
                     * rough numbers are OK for now
                     */
                    if (rc == 0)
                        fail_count++;
                    else if (rc == 1)
                        pass_count++;
                    else
                        bogus_count++;
                }
            }
            int main()
            {
                pthread_t threads[NTHREADS];
                int i;
                for (i = 0; i < NTHREADS; i++)
                    pthread_create(&threads[i], NULL, thread_main, (void *)0);
                for (i = 0; i < NTHREADS; i++)
                    pthread_join(threads[i], NULL);
                printf("pass:%d, fail:%d, bogus:%d\\n",
                        pass_count, fail_count, bogus_count);
                if ( fail_count > 0 )
                    exit(2);
            }""")
        code_template = Template(code_template_str)

        # Substitute c code and upload
        # First variant: innetgr() serialized by a mutex (sanity check).
        code = code_template.substitute(group=net_group1_name,
                                        host=client_shortname,
                                        pre=pre,
                                        post=post)
        multihost.client[0].transport.put_file_contents(
            '/root/netg-lock.c', code)
        # Second variant: no locking -- exercises the actual bug.
        code = code_template.substitute(group=net_group1_name,
                                        host=client_shortname,
                                        pre='',
                                        post='')
        multihost.client[0].transport.put_file_contents(
            '/root/netg-lock2.c', code)

        # Install packages including gcc
        sssdTools(multihost.client[0]).client_install_pkgs()

        # Compile c code
        compile_cmd = 'gcc -lpthread -o /root/netg-lock /root/netg-lock.c ' \
                      '&& gcc -lpthread -o /root/netg-lock2 /root/netg-lock2.c'
        gcc = multihost.client[0].run_command(compile_cmd, raiseonerr=False)
        chmod_cmd = 'chmod +x /root/netg-lock*'
        multihost.client[0].run_command(chmod_cmd, raiseonerr=False)

        # TEST EXECUTION
        sssd_client.clear_sssd_cache()
        cmd1 = multihost.client[0].run_command('/root/netg-lock',
                                               raiseonerr=False)
        cmd2 = multihost.client[0].run_command('/root/netg-lock2',
                                               raiseonerr=False)

        # TEARDOWN
        # Done before the asserts so cleanup happens even on failure.
        multihost.client[0].run_command(
            'rm -f /root/netg-lock*; rm -f /etc/netgroup', raiseonerr=False)

        # TEST EVALUATION
        assert gcc.returncode == 0, 'Compiling of binaries failed!'
        assert cmd1.returncode == 0, 'First binary failed, incorrect setup!'
        assert cmd2.returncode == 0, 'Second binary failed, test failed!'
Example #5
0
 def test_0003_update_removed_grp_membership(self, multihost,
                                             backupsssdconf):
     """
     :title: proxy: secondary group is shown in sssd cache after
      group is removed
     :id: 7cfb9aa9-6e68-4914-afb8-ecfae132aa84
     :bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1917970
     :customerscenario: true
     :steps:
       1. Edit sssd.conf and configure proxy provider with
          entry_cache_timeout = 1
       2. Restart SSSD with cleared cache
       3. Create a localuser and localgroup
       4. Add that localuser to the localgroup
       5. Assert localgroup is shown in localuser's group list
       6. Remove localuser from localgroup
       7. Assert that after entry_cache_timeout, localuser's groups
          are not listing localgroup
     :expectedresults:
       1. Should succeed
       2. Should succeed
       3. Should succeed
       4. Should succeed
       5. Should succeed
       6. Should succeed
       7. Should succeed
     """
     tools = sssdTools(multihost.client[0])
     domain_name = tools.get_domain_section_name()
     l_usr, l_grp = 'testuser', 'testgroup'
     # Create a local user and group and add the user as a secondary member.
     multihost.client[0].run_command(f'useradd {l_usr}')
     multihost.client[0].run_command(f'groupadd {l_grp}')
     multihost.client[0].run_command(f'usermod -aG {l_grp} {l_usr}')
     domain_params = {
         'id_provider': 'proxy',
         'proxy_lib_name': 'files',
         'auth_provider': 'krb5',
         'ignore_group_members': 'False',
         'cache_credentials': 'True',
         'entry_cache_timeout': '1',
         'krb5_validate': 'True'
     }
     tools.sssd_conf('domain/%s' % domain_name, domain_params)
     # Remove the LDAP-specific options so only the proxy provider is used.
     # Bug fix: the original interpolated the bound method
     # multihost.master[0].run_command into the URI (yielding a
     # "<bound method ...>" repr) and omitted '//' from the scheme; use the
     # master's hostname with a proper ldaps:// URI.
     del_domain_params = {
         'ldap_uri': 'ldaps://%s' % multihost.master[0].sys_hostname,
         'ldap_tls_cacert': '/etc/openldap/cacerts/cacert.pem',
         'ldap_search_base': ds_suffix,
         'use_fully_qualified_names': 'True'
     }
     tools.sssd_conf('domain/%s' % domain_name,
                     del_domain_params,
                     action='delete')
     tools.clear_sssd_cache()
     cmd = multihost.client[0].run_command(f'groups {l_usr}')
     assert 'testgroup' in cmd.stdout_text
     # Drop the secondary membership and wait out entry_cache_timeout (1s).
     multihost.client[0].run_command(f'gpasswd -d {l_usr} {l_grp}')
     time.sleep(1)
     cmd = multihost.client[0].run_command(f'groups {l_usr}')
     # Clean up the local user/group before evaluating the captured output.
     multihost.client[0].run_command(f'userdel -rf {l_usr}')
     multihost.client[0].run_command(f'groupdel -f {l_grp}')
     assert 'testgroup' not in cmd.stdout_text
Example #6
0
 def test_randomize_sudo_timeout(self, multihost, backupsssdconf,
                                 sudo_rule):
     """
     :title: sudo: randomize sudo refresh timeouts
     :id: 57720975-29ba-4ed7-868a-f9b784bbfed2
     :bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1925514
     :customerscenario: True
     :steps:
       1. Edit sssdconfig and specify sssd smart, full timeout option
       2. Restart sssd with cleared logs and cache
       3. Wait for 120 seconds
       4. Parse logs and confirm sudo refresh timeouts are random
     :expectedresults:
       1. Should succeed
       2. Should succeed
       3. Should succeed
       4. Should succeed
     """
     tools = sssdTools(multihost.client[0])
     multihost.client[0].service_sssd('stop')
     tools.remove_sss_cache('/var/lib/sss/db')
     tools.remove_sss_cache('/var/log/sssd')
     sudo_base = 'ou=sudoers,%s' % (ds_suffix)
     sudo_uri = "ldap://%s" % multihost.master[0].sys_hostname
     params = {
         'ldap_sudo_search_base': sudo_base,
         'ldap_uri': sudo_uri,
         'sudo_provider': "ldap",
         'ldap_sudo_full_refresh_interval': '25',
         'ldap_sudo_smart_refresh_interval': '15',
         'ldap_sudo_random_offset': '5'
     }
     domain_section = 'domain/%s' % ds_instance_name
     tools.sssd_conf(domain_section, params, action='update')
     section = "sssd"
     sssd_params = {'services': 'nss, pam, sudo'}
     tools.sssd_conf(section, sssd_params, action='update')
     multihost.client[0].service_sssd('start')
     # Let several smart (15s) and full (25s) refresh cycles elapse.
     time.sleep(120)
     logfile = '/var/log/sssd/sssd_%s.log' % ds_instance_name
     tmout_ptrn = r"(SUDO.*\:\sscheduling task \d+ seconds)"
     regex_tmout = re.compile(tmout_ptrn)
     smart_tmout = []
     full_tmout = []
     log = multihost.client[0].get_file_contents(logfile).decode('utf-8')
     for line in log.split('\n'):
         # Evaluate the regex once per line (originally called three times).
         matches = regex_tmout.findall(line)
         if matches:
             tokens = matches[0].split()
             # token[1] is the refresh type, token[5] the scheduled delay.
             rfrsh_type = tokens[1]
             timeout = tokens[5]
             if rfrsh_type == 'Smart':
                 smart_tmout.append(timeout)
             elif rfrsh_type == 'Full':
                 full_tmout.append(timeout)
     # With a random offset applied, consecutive scheduled delays should
     # differ more often than they repeat, for both refresh types.
     for timeout in (smart_tmout, full_tmout):
         rand_intvl, same_intvl = 0, 0
         for prev, curr in zip(timeout, timeout[1:]):
             if curr != prev:
                 rand_intvl += 1
             else:
                 same_intvl += 1
         assert rand_intvl > same_intvl
Example #7
0
 def test_improve_refresh_timers_sudo_timeout(self, multihost,
                                              backupsssdconf,
                                              sssd_sudo_conf, sudo_rule):
     """
     :title: sudo: improve sudo full and smart refresh timeouts
     :id: 3860d1b9-28fc-4d44-9537-caf28ab033c8
     :bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1925505
     :customerscenario: True
     :steps:
       1. Edit sssdconfig and specify sssd smart, full timeout option
       2. Restart sssd with cleared logs and cache
       3. Wait for 40 seconds
       4. Parse logs and confirm sudo full refresh and smart refresh
          timeout are not running at same time
       5. If sudo full refresh and smart refresh timer are scheduled at
          same time then smart refresh is rescheduled to the next cycle
     :expectedresults:
       1. Should succeed
       2. Should succeed
       3. Should succeed
       4. Should succeed
       5. Should succeed
     """
     tools = sssdTools(multihost.client[0])
     multihost.client[0].service_sssd('stop')
     tools.remove_sss_cache('/var/lib/sss/db')
     tools.remove_sss_cache('/var/log/sssd')
     params = {
         'ldap_sudo_full_refresh_interval': '10',
         'ldap_sudo_random_offset': '0',
         'ldap_sudo_smart_refresh_interval': '5'
     }
     domain_section = f'domain/{ds_instance_name}'
     tools.sssd_conf(domain_section, params, action='update')
     multihost.client[0].service_sssd('start')
     # Allow several 5s smart and 10s full refresh cycles to run.
     time.sleep(40)
     logfile = f'/var/log/sssd/sssd_{ds_instance_name}.log'
     # Compile the patterns directly; the original wrapped plain strings in
     # placeholder-free f-strings (flake8 F541) before re-wrapping them.
     regex_tmout = re.compile(r'(SUDO.*Refresh.*executing)')
     rgx_rs_tstmp = re.compile(r'(SUDO.*Refresh.*rescheduling)')
     full_rfsh_tstmp = []
     smrt_rfsh_tstmp = []
     rschdl_tstmp = []
     log = multihost.client[0].get_file_contents(logfile).decode('utf-8')
     for line in log.split('\n'):
         if regex_tmout.search(line):
             # Timestamp is the second field of the '(date time):' prefix.
             dt_time = line.split('):')[0]
             tstmp = dt_time.split()[1]
             ref_type = line.split()[7]
             if ref_type == 'Smart':
                 smrt_rfsh_tstmp.append(tstmp)
             elif ref_type == 'Full':
                 full_rfsh_tstmp.append(tstmp)
         if rgx_rs_tstmp.search(line):
             dt_time = line.split('):')[0]
             tstmp = dt_time.split()[1]
             rschdl_tstmp.append(tstmp)
     # A full refresh that coincides with a smart refresh must have caused
     # the smart refresh to be rescheduled to the next cycle.  (The
     # original else-branch assert was a tautology and is dropped.)
     for tm_stamp in full_rfsh_tstmp:
         if tm_stamp in smrt_rfsh_tstmp:
             assert tm_stamp in rschdl_tstmp
Example #8
0
    def test_0001_multiforest(multihost, newhostname, adjoin):
        """
        :title: IDM-SSSD-TC: ad_provider: admultiforest
        :id:
        :setup:
          1. Configure two domain controllers in different forests
          2. Join client to the first domain
          3. Update sssd.conf for second domain
          4. Update krb5.conf for second domain
          5. Create krb principal and update sssd.conf
        :steps:
          1. Lookup user in the first domain
          2. Lookup user in the second domain
        :expectedresults:
          1. User is found in the first domain
          2. User is found in the second domain
        :customerscenario: True
        """
        # Join the client to the first forest's domain via adcli.
        adjoin(membersw='adcli')
        ad_domain = multihost.ad[0].domainname
        ad_server = multihost.ad[0].hostname
        # This must be the last AD server in the metadata file
        # (i.e. the domain controller of the second forest).
        ad1_domain = multihost.ad[len(multihost.ad) - 1].domainname
        ad1_domain_upper = str.upper(ad1_domain)
        ad1_server = multihost.ad[len(multihost.ad) - 1].hostname
        ad1_password = multihost.ad[len(multihost.ad) - 1].ssh_password

        # Keep the second forest's keytab separate from /etc/krb5.keytab.
        get_keytab = f'adcli join --host-keytab=/etc/krb5.keytab-domain1 ' \
                     f'{ad1_domain}'
        change_context = 'chcon -t krb5_keytab_t /etc/krb5.keytab-domain1'
        backup_krb5 = 'cp -rf /etc/krb5.conf /etc/krb5.conf.bak'
        restore_krb5 = 'mv /etc/krb5.conf.bak /etc/krb5.conf ; ' \
                       'restorecon -Rv /etc/krb5.conf'
        cleanup_krb5 = 'rm -rf /etc/krb5.keytab-domain1'
        # Append domain_realm mappings for the second domain (both the
        # dotted-subdomain form and the bare domain form).
        edit_krb5_conf = f'sed -i "/domain_realm/a .{ad1_domain} ' \
                         f'= {ad1_domain_upper}" /etc/krb5.conf'
        edit1_krb5_conf = f'sed -i "/domain_realm/a {ad1_domain} ' \
                          f'= {ad1_domain_upper}" /etc/krb5.conf'

        try:
            multihost.client[0].run_command(get_keytab,
                                            stdin_text=ad1_password)
        except subprocess.CalledProcessError:
            pytest.fail("adcli join failed")
        multihost.client[0].run_command(backup_krb5, raiseonerr=False)
        multihost.client[0].run_command(edit_krb5_conf, raiseonerr=False)
        multihost.client[0].run_command(edit1_krb5_conf, raiseonerr=False)
        multihost.client[0].run_command(change_context, raiseonerr=False)

        # Configure sssd
        multihost.client[0].service_sssd('stop')
        client = sssdTools(multihost.client[0], multihost.ad[0])
        client.backup_sssd_conf()
        # Serve both forests' domains from a single sssd instance.
        client.sssd_conf("sssd", {'domains': f'{ad_domain}, {ad1_domain}'},
                         action='update')
        domain_params = {
            'ad_domain': f'{ad_domain}',
            'dns_discovery_domain': f'{ad_domain}',
            'ad_server': f'{ad_server}',
            'debug_level': '9',
            'use_fully_qualified_names': 'True',
            'cache_credentials': 'True',
            'dyndns_update': 'True'
        }
        client.sssd_conf(f'domain/{ad_domain}', domain_params, action='update')
        # Second domain uses its dedicated keytab and no dynamic DNS.
        domain1_params = {
            'ad_domain': f'{ad1_domain}',
            'ad_server': f'{ad1_server}',
            'krb5_realm': f'{ad1_domain_upper}',
            'debug_level': '9',
            'use_fully_qualified_names': 'False',
            'cache_credentials': 'True',
            'realmd_tags': 'manages-system joined-with-samba',
            'dyndns_update': 'False',
            'krb5_keytab': '/etc/krb5.keytab-domain1',
            'ldap_krb5_keytab': '/etc/krb5.keytab-domain1',
            'id_provider': 'ad',
            'access_provider': 'ad',
            'timeout': '3600',
            'krb5_store_password_if_offline': 'True',
            'default_shell': '/bin/bash',
            'ldap_id_mapping': 'True'
        }
        client.sssd_conf(f'domain/{ad1_domain}',
                         domain1_params,
                         action='update')
        # NOTE(review): clear_sssd_cache typically restarts sssd already, so
        # the explicit start below is presumably a no-op -- confirm.
        client.clear_sssd_cache()
        multihost.client[0].service_sssd('start')
        time.sleep(10)
        # Search for the user in same forest and domain
        getent_domain_user1 = multihost.client[0].run_command(
            f'getent passwd user1@{ad_domain}', raiseonerr=False)
        getent_domain_user2 = multihost.client[0].run_command(
            f'getent passwd user2@{ad_domain}', raiseonerr=False)
        id_domain_user1 = multihost.client[0].run_command(
            f'id user1@{ad_domain}', raiseonerr=False)
        id_domain_user2 = multihost.client[0].run_command(
            f'id user2@{ad_domain}', raiseonerr=False)
        # Search for the user in a different forest and domain
        getent_domain1_user1 = multihost.client[0].run_command(
            f'getent passwd user1@{ad1_domain}', raiseonerr=False)
        getent_domain1_user2 = multihost.client[0].run_command(
            f'getent passwd user2@{ad1_domain}', raiseonerr=False)
        id_domain1_user1 = multihost.client[0].run_command(
            f'id user1@{ad1_domain}', raiseonerr=False)
        id_domain1_user2 = multihost.client[0].run_command(
            f'id user2@{ad1_domain}', raiseonerr=False)

        # Teardown happens before the asserts so it runs even on failure.
        multihost.client[0].run_command(restore_krb5, raiseonerr=False)
        multihost.client[0].run_command(cleanup_krb5, raiseonerr=False)
        client.restore_sssd_conf()
        client.clear_sssd_cache()

        # Evaluate test results
        assert getent_domain_user1.returncode == 0
        assert getent_domain_user2.returncode == 0
        assert id_domain_user1.returncode == 0
        assert id_domain_user2.returncode == 0
        assert getent_domain1_user1.returncode == 0
        assert getent_domain1_user2.returncode == 0
        assert id_domain1_user1.returncode == 0
        assert id_domain1_user2.returncode == 0
Example #9
0
    def test_0001_bz2013297(multihost, newhostname, adchildjoin):
        """
        :title: IDM-SSSD-TC: ad_provider: forests: disabled root ad domain
        causes subdomains to be marked offline
        :id:
        :setup:
          1. Configure parent and child domain
          2. Join client to child domain
          3. ad_enabled_domains is not configured
          4. ad_enabled_domains to contain only the child domain
        :steps:
          1. Lookup user from the parent (root) domain
          2. Lookup user from the child domain
          3. Set ad_enabled_domains to contain only the child domain
          4. Lookup user from the parent (root) domain
          5. Lookup user from the child domain
        :expectedresults:
          1. Parent user is found
          2. Child user is found
          3. Option is applied successfully
          4. Parent user is not found
          5. Child user is found
        :customerscenario: True
        """
        # Join the client to the child domain.
        adchildjoin(membersw='adcli')
        ad_domain = multihost.ad[0].domainname
        child_domain = multihost.ad[1].domainname
        ad_server = multihost.ad[1].hostname

        # Configure sssd
        # Phase 1: ad_enabled_domains is NOT set, so the parent domain is
        # resolvable through the child domain's trust.
        multihost.client[0].service_sssd('stop')
        client = sssdTools(multihost.client[0], multihost.ad[1])
        client.backup_sssd_conf()
        dom_section = f'domain/{client.get_domain_section_name()}'
        sssd_params = {
            'ad_domain': child_domain,
            'debug_level': '9',
            'use_fully_qualified_names': 'True',
            'ad_server': ad_server,
            'cache_credentials': 'True',
        }
        client.sssd_conf(dom_section, sssd_params)
        client.clear_sssd_cache()

        # Search for the user in root domain
        getent_root_user1 = multihost.client[0].run_command(
            f'getent passwd user1@{ad_domain}', raiseonerr=False)
        # Search for the user in child domain
        getent_child_user1 = multihost.client[0].run_command(
            f'getent passwd child_user1@{child_domain}', raiseonerr=False)

        client.restore_sssd_conf()
        client.clear_sssd_cache()

        # Evaluate test results
        assert getent_root_user1.returncode == 0
        assert getent_child_user1.returncode == 0

        # Phase 2: restrict ad_enabled_domains to the child domain only;
        # the parent-domain user must no longer resolve.
        dom_section = f'domain/{client.get_domain_section_name()}'
        sssd_params = {
            'ad_domain': child_domain,
            'debug_level': '9',
            'use_fully_qualified_names': 'True',
            'cache_credentials': 'True',
            'ad_server': ad_server,
            'ad_enabled_domains': child_domain
        }
        client.sssd_conf(dom_section, sssd_params)
        client.clear_sssd_cache()

        # Search for the user in root domain
        getent_root_user2 = multihost.client[0].run_command(
            f'getent passwd user1@{ad_domain}', raiseonerr=False)
        # Search for the user in child domain
        getent_child_user2 = multihost.client[0].run_command(
            f'getent passwd child_user1@{child_domain}', raiseonerr=False)

        client.restore_sssd_conf()
        client.clear_sssd_cache()

        # Evaluate test results
        # getent exits with 2 when the key is not found in the database.
        assert getent_root_user2.returncode == 2
        assert getent_child_user2.returncode == 0
Example #10
0
def clear_sssd_cache(session_multihost):
    """Wipe the SSSD cache on the client host."""
    sssdTools(session_multihost.client[0]).clear_sssd_cache()
    def run_test(self, timeout, multihost):
        """
        Runs the remaining test
        :param str timeout: takes the value of timeout for ldap
         and string 'krb' in case of kerberos
        :param obj multihost: multihost object

        :Steps:
              1. Setup ldap_connection_expire_timeout to a certain timeout. For
              Kerberos, this is redundant as connection expires as soon as the
              ticked expires.
              2. Lookup a user and get the port number and sleep for the
              timeout period.
              3. Lookup another user and get the the port number.
              4. Compare the 2 port numbers.
        """
        sssdTools(multihost.client[0]).delete_sssd_domain_log(
            "/var/log/sssd/sssd_LDAP")
        logfile = '/var/log/sssd/sssd_%s.log' % ds_instance_name

        sssdTools(multihost.client[0]).clear_sssd_cache()

        if timeout == 'krb':
            # Kerberos connections expire with the ticket; use a fixed wait.
            timeout = 120
        else:
            # Confirm the configured expire timeout made it into the log.
            # Idiom fix: assert directly instead of the original
            # 'if x is True: assert True else: assert False' pattern.
            string = "Option ldap_connection_expire_timeout has value %s" % \
                     timeout
            file_content = multihost.client[0].get_file_contents(logfile)
            assert string.encode('utf-8') in file_content
        lookup_u = 'getent passwd foo1@%s' % ds_instance_name
        cmd = multihost.client[0].run_command(lookup_u)
        assert cmd.returncode == 0

        def find_local_port():
            """Return the local port of the single ESTABLISHED connection
            to port 389; fail if more than one such connection exists."""
            nsreport = multihost.client[0].run_command(
                ["ss", "-ant"], log_stdout=False).stdout_text
            established = [line for line in nsreport.splitlines()
                           if '389' in line and 'ESTAB' in line]
            assert len(established) <= 1
            conn = established[0]
            # Local port sits between the first ':' and the next space.
            port = conn[conn.find(':') + 1: conn.find(' ', conn.find(':'))]
            return int(port)

        localport1 = find_local_port()

        # Sleep past the expiry timeout (plus slack) so the connection is
        # released; a plain if-statement replaces the original conditional
        # expression that was used only for its side effect.
        if timeout > 0:
            time.sleep(timeout + 5)
        else:
            time.sleep(5)

        lookup_u = 'getent passwd foo2@%s' % ds_instance_name
        cmd = multihost.client[0].run_command(lookup_u)
        assert cmd.returncode == 0

        localport2 = find_local_port()

        # A different local port proves a fresh connection was opened.
        assert localport1 != localport2
        if timeout > 0:
            string = "connection is about to expire, releasing it"
            file_content = multihost.client[0].get_file_contents(logfile)
            assert string.encode('utf-8') in file_content
        # NOTE(review): hard-coded log name; presumably ds_instance_name is
        # 'example1' here -- consider removing logfile instead.
        cmd_remove_log = "rm /var/log/sssd/sssd_example1.log"
        multihost.client[0].run_command(cmd_remove_log)
Example #12
0
def clean_sys(multihost):
    """Truncate the client's /var/log/secure and restart SSSD via a cache clear."""
    client_tools = sssdTools(multihost.client[0])
    execute_cmd(multihost, "> /var/log/secure")
    client_tools.clear_sssd_cache()
Exemple #13
0
    def test_multiple_ad_groups(self, multihost):
        """
        :title: Verify hbac evaluation when user is member
         of multiple AD Groups and with different hbac rules
        :id: eb78448d-8a4d-4800-9334-8d8cdb8b0af2
        """
        # Helper wrappers for the IPA master and the IPA-enrolled client.
        ipa_server_tools = ipaTools(multihost.master[0])
        ipa_client = sssdTools(multihost.client[0])
        ipa_server = sssdTools(multihost.master[0])
        client_host = multihost.client[0].sys_hostname
        ad_domain_name = multihost.ad[0].domainname.lower()
        # NOTE(review): the '******' literal looks redacted in this copy;
        # '%' against a string with no conversion specifier raises
        # TypeError at runtime -- restore the original 'user@%s'-style
        # format before running.
        aduser = '******' % ad_domain_name
        adgroup = 'idm_group3@%s' % ad_domain_name
        status = ''
        # Build two parallel chains (i = 3 and i = 4):
        # AD group -> external IPA group -> POSIX IPA group.  Each setup
        # failure marks the test FAIL but the loop keeps going so the
        # cleanup below still runs.
        for i in range(3, 5, 1):
            ext_group = 'idm_ext_group%d' % i
            adgroup = 'idm_group%d@%s' % (i, ad_domain_name)
            posix_group = 'idm_posix_group%d' % i
            # NOTE(review): hbac_rule_name is computed but never used;
            # the rule names are hard-coded as 'ad_test3'/'ad_test4' below.
            hbac_rule_name = 'ad_test%d' % i
            try:
                ipa_server_tools.create_group(ext_group, external=True)
            except SSSDException:
                status = 'FAIL'
            try:
                ipa_server_tools.group_add_member(adgroup,
                                                  ext_group,
                                                  external=True)
            except SSSDException:
                status = 'FAIL'
            try:
                ipa_server_tools.create_group(posix_group)
            except SSSDException:
                status = 'FAIL'
            try:
                ipa_server_tools.group_add_member(ext_group, posix_group)
            except SSSDException:
                status = 'FAIL'

        # ad_test3 allows the sshd service for idm_posix_group3 on this
        # client; ad_test4 allows only sudo for idm_posix_group4.
        ipa_server_tools.add_hbac_rule('ad_test3',
                                       'idm_posix_group3',
                                       client_host,
                                       'sshd',
                                       group=True)
        ipa_server_tools.add_hbac_rule('ad_test4',
                                       'idm_posix_group4',
                                       client_host,
                                       'sudo',
                                       group=True)
        sssctl_cmd = 'sssctl user-checks -s sshd %s' % aduser
        test_pam = re.compile(r'%s' % 'pam_acct_mgmt: Success')
        cmd = multihost.client[0].run_command(sssctl_cmd, raiseonerr=False)
        # The PAM result is searched on stderr; a match means HBAC
        # evaluation granted access through the sshd service.
        result = test_pam.search(cmd.stderr_text)
        # NOTE(review): this assignment overwrites any 'FAIL' recorded
        # during setup, so setup errors alone cannot fail the test.
        if not result:
            status = 'FAIL'
        else:
            status = 'PASS'
        # Cleanup: delete the groups and HBAC rules and flush SSSD caches
        # on both hosts so later tests start from a clean state.
        for i in [
                'idm_ext_group3', 'idm_ext_group4', 'idm_posix_group3',
                'idm_posix_group4'
        ]:
            cmd = 'ipa group-del %s' % i
            multihost.master[0].run_command(cmd, raiseonerr=False)
        ipa_server_tools.del_hbac_rule('ad_test3')
        ipa_server_tools.del_hbac_rule('ad_test4')
        ipa_client.clear_sssd_cache()
        ipa_server.clear_sssd_cache()
        assert status == 'PASS'
Exemple #14
0
 def test_authentication_indicators(self, multihost):
     """
     :title: Add support to verify authentication
      indicators in pam_sss_gss
     :bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1926622
     :id: 4891ed62-7fc8-11eb-98be-002b677efe14
     :steps:
         1. Add pam_sss_gss configuration to /etc/sssd/sssd.conf
         2. Add pam_sss_gss.so to /etc/pam.d/sudo
         3. Restart SSSD
         4. Enable SSSD debug logs
         5. Switch to 'admin' user
         6. obtain Kerberos ticket and check that it
          was obtained using SPAKE pre-authentication.
         7. Create sudo configuration that allows an admin to
          run SUDO rules
         8. Try 'sudo -l' as admin
         9. As root, check content of sssd_pam.log
         10. Check if acquired service ticket has
          req. indicators: 0
         11. Add pam_sss_gss configuration to /etc/sssd/sssd.conf
         12. Check if acquired service ticket has req.
          indicators: 2
     :expectedresults:
         1. Should succeed
         2. Should succeed
         3. Should succeed
         4. Should succeed
         5. Should succeed
         6. Should succeed
         7. Should succeed
         8. Should succeed
         9. Should succeed
         10. Should succeed
         11. Should succeed
         12. Should succeed
     """
     # Phase 1: enable GSSAPI auth for sudo/sudo-i with a per-service
     # indicators map in the [pam] section of sssd.conf.
     client = sssdTools(multihost.client[0])
     domain_params = {'pam_gssapi_services': 'sudo, sudo-i',
                      'pam_gssapi_indicators_map': 'hardened, '
                                                   'sudo:pkinit, '
                                                   'sudo-i:otp'}
     client.sssd_conf('pam', domain_params)
     # Back up the PAM stacks, then prepend pam_sss_gss.so so sudo and
     # sudo-i attempt GSSAPI authentication first.
     multihost.client[0].run_command('cp -vf '
                                     '/etc/pam.d/sudo '
                                     '/etc/pam.d/sudo_indicators')
     multihost.client[0].run_command("sed -i "
                                     "'2s/^/auth sufficient "
                                     "pam_sss_gss.so debug\\n/' "
                                     "/etc/pam.d/sudo")
     multihost.client[0].run_command('cp -vf '
                                     '/etc/pam.d/sudo-i '
                                     '/etc/pam.d/sudo-i_indicators')
     multihost.client[0].run_command("sed -i "
                                     "'2s/^/auth sufficient "
                                     "pam_sss_gss.so debug\\n/' "
                                     "/etc/pam.d/sudo-i")
     # Fresh start: wipe SSSD logs and cache so the log greps below only
     # see activity from this run.
     multihost.client[0].run_command('systemctl stop sssd ; '
                                     'rm -rf /var/log/sssd/* ; '
                                     'rm -rf /var/lib/sss/db/* ; '
                                     'systemctl start sssd')
     multihost.client[0].run_command("sssctl debug-level 9")
     # NOTE(review): the SSH username/password are redacted ('******')
     # in this copy -- restore the real credentials before running.
     ssh = SSHClient(multihost.client[0].ip,
                     username='******', password='******')
     (_, _, exit_status) = ssh.execute_cmd('kinit admin',
                                           stdin='Secret123')
     (result, errors, exit_status) = ssh.exec_command('klist')
     # Create a sudo rule (testrule2 -> ALL2) that lets admin list sudo
     # privileges on any host.
     (result, errors, exit_status) = ssh.execute_cmd('ipa '
                                                     'sudocmd-add ALL2')
     (result, errors, exit_status) = ssh.execute_cmd('ipa '
                                                     'sudorule-add '
                                                     'testrule2')
     (result, errors, exit_status) = ssh.execute_cmd("ipa sudorule-add"
                                                     "-allow-command "
                                                     "testrule2 "
                                                     "--sudocmds 'ALL2'")
     (result, errors, exit_status) = ssh.execute_cmd('ipa '
                                                     'sudorule-mod '
                                                     'testrule2 '
                                                     '--hostcat=all')
     (result, errors, exit_status) = ssh.execute_cmd('ipa '
                                                     'sudorule-add-user '
                                                     'testrule2 '
                                                     '--users admin')
     # 'sudo -l' triggers pam_sss_gss, which logs the acquired ticket's
     # indicators into sssd_pam.log.
     (result, errors, exit_status) = ssh.execute_cmd('sudo -l')
     ssh.close()
     search = multihost.client[0].run_command('fgrep '
                                              'gssapi_ '
                                              '/var/log/sssd/sssd_pam.log '
                                              '|tail -10')
     assert 'indicators: 0' in search.stdout_text
     # Phase 2: switch the indicators map to 'sudo-i:hardened' and repeat
     # with a clean log/cache; this time the log must report indicators: 2.
     client = sssdTools(multihost.client[0])
     domain_params = {'pam_gssapi_services': 'sudo, sudo-i',
                      'pam_gssapi_indicators_map': 'sudo-i:hardened'}
     client.sssd_conf('pam', domain_params)
     multihost.client[0].run_command('systemctl stop sssd ; '
                                     'rm -rf /var/log/sssd/* ; '
                                     'rm -rf /var/lib/sss/db/* ; '
                                     'systemctl start sssd')
     # NOTE(review): credentials redacted here as well.
     ssh = SSHClient(multihost.client[0].ip,
                     username='******', password='******')
     (_, _, exit_status) = ssh.execute_cmd('kinit admin',
                                           stdin='Secret123')
     multihost.client[0].run_command("sssctl debug-level 9")
     (result, errors, exit_status) = ssh.execute_cmd('sudo -l')
     (result, errors, exit_status) = ssh.exec_command('klist')
     # Cleanup: remove the sudo command/rule and restore the original
     # PAM stacks from the backups taken above.
     (result, errors, exit_status) = ssh.execute_cmd('ipa '
                                                     'sudocmd-del ALL2')
     (result, errors, exit_status) = ssh.execute_cmd('ipa '
                                                     'sudorule-del '
                                                     'testrule2')
     multihost.client[0].run_command('cp -vf /etc/pam.d/sudo_indicators '
                                     '/etc/pam.d/sudo')
     multihost.client[0].run_command('cp -vf /etc/pam.d/sudo-i_indicators '
                                     '/etc/pam.d/sudo-i')
     search = multihost.client[0].run_command('fgrep gssapi_ '
                                              '/var/log/sssd/sssd_pam.log'
                                              ' |tail -10')
     ssh.close()
     assert 'indicators: 2' in search.stdout_text
Exemple #15
0
 def test_two_automount_maps(self, multihost, backupsssdconf):
     """
     :title: Automount sssd issue when 2 maps have same key in
      different case
     :bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1873715
     :id: d28e6eec-ac9f-11eb-b0f5-002b677efe14
     :customerscenario: true
     :steps:
         1. Configure SSSD with autofs, automountMap,
         automount, automountInformation
         2. Add 2 automount entries in LDAP with
         same key ( cn: MIT and cn: mit)
         3. We should have the 2 automounts working
     :expectedresults:
         1. Should succeed
         2. Should succeed
         3. Should succeed
     """
     # Enable the autofs responder and point SSSD at the rfc2307-style
     # automount schema in the client's domain section.
     tools = sssdTools(multihost.client[0])
     domain_name = tools.get_domain_section_name()
     client = sssdTools(multihost.client[0])
     domain_params = {'services': 'nss, pam, autofs'}
     client.sssd_conf('sssd', domain_params)
     domain_params = {
         'ldap_autofs_map_object_class': 'automountMap',
         'ldap_autofs_map_name': 'ou',
         'ldap_autofs_entry_object_class': 'automount',
         'ldap_autofs_entry_key': 'cn',
         'ldap_autofs_entry_value': 'automountInformation'
     }
     client.sssd_conf(f'domain/{domain_name}', domain_params)
     multihost.client[0].service_sssd('restart')
     # Export three NFS shares from the master to the client; strip
     # 'fsid=0' so none of them is treated as the NFSv4 pseudo-root.
     share_list = ['/export', '/export1', '/export2']
     nfs_server_ip = multihost.master[0].ip
     client_ip = multihost.client[0].ip
     server = sssdTools(multihost.master[0])
     bkup = 'cp -af /etc/exports /etc/exports.backup'
     multihost.master[0].run_command(bkup)
     server.export_nfs_fs(share_list, client_ip)
     search = multihost.master[0].run_command("grep 'fsid=0' "
                                              "/etc/exports")
     if search.returncode == 0:
         multihost.master[0].run_command("sed -i 's/,fsid=0//g' "
                                         "/etc/exports")
     start_nfs = 'systemctl start nfs-server'
     multihost.master[0].run_command(start_nfs)
     # Populate LDAP: the automount map OUs, the master-map entries, and
     # two auto.home keys that differ only in case ('MIT' vs 'mit').
     ldap_uri = 'ldap://%s' % (multihost.master[0].sys_hostname)
     ds_rootdn = 'cn=Directory Manager'
     ds_rootpw = 'Secret123'
     ldap_inst = LdapOperations(ldap_uri, ds_rootdn, ds_rootpw)
     for ou_ou in ['auto.master', 'auto.direct', 'auto.home']:
         user_info = {
             'ou': f'{ou_ou}'.encode('utf-8'),
             'objectClass': [b'top', b'automountMap']
         }
         user_dn = f'ou={ou_ou},dc=example,dc=test'
         (_, _) = ldap_inst.add_entry(user_info, user_dn)
     user_info = {
         'cn': '/-'.encode('utf-8'),
         'objectClass': [b'top', b'automount'],
         'automountInformation': 'auto.direct'.encode('utf-8')
     }
     user_dn = 'cn=/-,ou=auto.master,dc=example,dc=test'
     (_, _) = ldap_inst.add_entry(user_info, user_dn)
     user_info = {
         'cn': '/home'.encode('utf-8'),
         'objectClass': [b'top', b'automount'],
         'automountInformation': 'auto.home'.encode('utf-8')
     }
     user_dn = 'cn=/home,ou=auto.master,dc=example,dc=test'
     (_, _) = ldap_inst.add_entry(user_info, user_dn)
     # 'MIT' maps to /export1, lower-case 'mit' maps to /export2; the
     # bug under test made these collide.
     user_info = {
         'cn': 'MIT'.encode('utf-8'),
         'objectClass': [b'top', b'automount']
     }
     user_dn = f'automountinformation={nfs_server_ip}:/export1,' \
               f'ou=auto.home,dc=example,dc=test'
     (_, _) = ldap_inst.add_entry(user_info, user_dn)
     user_info = {
         'cn': 'mit'.encode('utf-8'),
         'objectClass': [b'top', b'automount']
     }
     user_dn = f'automountinformation={nfs_server_ip}:/export2,' \
               f'ou=auto.home,dc=example,dc=test'
     (_, _) = ldap_inst.add_entry(user_info, user_dn)
     # Restart SSSD with empty logs/cache, restart autofs, and create a
     # marker file in each export so the mounts can be told apart.
     multihost.client[0].run_command("systemctl stop sssd ; "
                                     "rm -rf /var/log/sssd/* ; "
                                     "rm -rf /var/lib/sss/db/* ; "
                                     "systemctl start sssd")
     multihost.client[0].run_command("systemctl restart autofs")
     multihost.client[0].run_command("automount -m")
     multihost.master[0].run_command("touch /export1/export1")
     multihost.master[0].run_command("touch /export2/export2")
     time.sleep(2)
     # Each key must trigger its own mount: /home/MIT -> /export1 and
     # /home/mit -> /export2.
     MIT_export = multihost.client[0].run_command("ls /home/MIT")
     mit_export = multihost.client[0].run_command("ls /home/mit")
     assert 'export1' in MIT_export.stdout_text
     assert 'export2' in mit_export.stdout_text
     # Cleanup: restore /etc/exports, stop the NFS server, and delete
     # the LDAP entries (children before their parent OUs).
     restore = 'cp -af /etc/exports.backup /etc/exports'
     multihost.master[0].run_command(restore)
     stop_nfs = 'systemctl stop nfs-server'
     multihost.master[0].run_command(stop_nfs)
     for dn_dn in [
             f'automountinformation={nfs_server_ip}:/export1,'
             f'ou=auto.home,dc=example,dc=test',
             f'automountinformation={nfs_server_ip}:/export2,'
             f'ou=auto.home,dc=example,dc=test',
             'cn=/-,ou=auto.master,dc=example,dc=test',
             'cn=/home,ou=auto.master,dc=example,dc=test',
             'ou=auto.master,dc=example,dc=test',
             'ou=auto.direct,dc=example,dc=test',
             'ou=auto.home,dc=example,dc=test'
     ]:
         multihost.master[0].run_command(f'ldapdelete -x -D '
                                         f'"cn=Directory Manager" '
                                         f'-w Secret123 -H ldap:// {dn_dn}')
Exemple #16
0
    def test_002_ad_startup_discovery_one_server_unreachable(
            self, multihost, adjoin):
        """
        @Title: IDM-SSSD-TC: ad_startup_discovery_one_server_unreachable
        * grep sssd domain logs for cldap ping
        * grep sssd logs for cldap ping parallel batch
        * grep sssd logs for cldap ping domain discovery
        """
        # Join the AD domain with adcli and raise the domain debug level
        # so the cldap-ping messages appear in the domain log.
        adjoin(membersw='adcli')
        client = sssdTools(multihost.client[0], multihost.ad[0])
        domain = client.get_domain_section_name()
        domain_section = 'domain/{}'.format(domain)
        sssd_params = {'debug_level': '0xFFF0'}
        client.sssd_conf(domain_section, sssd_params)

        ad1 = multihost.ad[0].hostname
        ad2 = multihost.ad[1].hostname
        ad2ip = multihost.ad[1].ip

        # Block all outbound IPv4 traffic from the client to the second
        # DC via a firewalld direct rule, making ad2 unreachable before
        # SSSD starts up and runs its cldap ping.
        cmd_dnf_firewalld = 'dnf install -y firewalld'
        multihost.client[0].run_command(cmd_dnf_firewalld)
        cmd_start_firewalld = 'systemctl start firewalld'
        multihost.client[0].run_command(cmd_start_firewalld)
        fw_add = 'firewall-cmd --permanent --direct --add-rule ipv4 ' \
                 'filter OUTPUT 0 -d %s -j DROP' % ad2ip
        fw_reload = 'firewall-cmd --reload'
        multihost.client[0].run_command(fw_add, raiseonerr=True)
        multihost.client[0].run_command(fw_reload, raiseonerr=True)
        multihost.client[0].service_sssd('start')

        # DNS discovery must still report both DCs for the site.
        cmd_check_ping = 'grep -ire ad_cldap_ping_send ' \
                         '/var/log/sssd/sssd_%s.log | ' \
                         'grep -ire \"Found 2 domain controllers in domain ' \
                         'Default-First-Site-Name._sites.%s\"'\
                         % (domain, domain)
        check_ping = multihost.client[0].run_command(cmd_check_ping,
                                                     raiseonerr=False)
        assert check_ping.returncode == 0
        # Per the asserts below: ad1 must NOT appear in the parallel
        # cldap-ping batch (grep rc 1) while the blocked ad2 must (rc 0).
        cmd_check_batch1 = 'grep -ire ad_cldap_ping_parallel_batch' \
                           ' /var/log/sssd/sssd_%s.log | ' \
                           'grep -ire \" %s\"' % (domain, ad1)
        check_batch1 = multihost.client[0].run_command(cmd_check_batch1,
                                                       raiseonerr=False)
        cmd_check_batch2 = 'grep -ire ad_cldap_ping_parallel_batch' \
                           ' /var/log/sssd/sssd_%s.log | ' \
                           'grep -ire \" %s\"' % (domain, ad2)
        check_batch2 = multihost.client[0].run_command(cmd_check_batch2,
                                                       raiseonerr=False)
        # NOTE(review): equivalent to a single
        # 'assert rc1 == 1 and rc2 == 0' with a message.
        if check_batch1.returncode == 1 and check_batch2.returncode == 0:
            assert True
        else:
            assert False
        # Domain discovery must complete and still report both DCs.
        cmd_check_discovery = 'grep -ire ad_cldap_ping_domain_discovery_done' \
                              ' /var/log/sssd/sssd_%s.log | ' \
                              'grep -ire \"Found 2 domain' \
                              ' controllers in domain ' \
                              'Default-First-Site-Name._sites.%s\"'\
                              % (domain, domain)
        check_discovery = multihost.client[0].run_command(cmd_check_discovery,
                                                          raiseonerr=False)
        assert check_discovery.returncode == 0

        # Cleanup: drop the firewall (and the package installed above).
        fw_stop = 'systemctl stop firewalld'
        multihost.client[0].run_command(fw_stop, raiseonerr=True)
        fw_remove = 'dnf remove -y firewalld'
        multihost.client[0].run_command(fw_remove, raiseonerr=True)