Example #1
def run(ceph_cluster, **kw):
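    """CEPH-11222: exercise directory operations on a multi-MDS CephFS.

    Mounts two FUSE and two kernel clients, creates nested directories
    with smallfile, runs parallel fio/dd/touch/smallfile stress IO and
    directory renames, and compares cluster health before and after IO.
    """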
    try:
        start = timeit.default_timer()
        tc = "11222"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )

        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info["clients"]:
            log.info("Creating directory:")
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir1))
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd=
                f"python3 smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 --files 1000 "
                f"--files-per-dir 10 --dirs-per-dir 2 --top {client_info['mounting_dir']}{dir1}",
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)

            break

        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify("", return_counts)
        print(result1)

        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf %s%s" %
                                (client_info["mounting_dir"], dir1))
            break

        for client in client_info["clients"]:
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation create --threads 10 "
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)
            log.info("Renaming the dirs:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation rename --threads 10 --file-size 4"
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)

            break
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify("", return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        client1[0].exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir_name))
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "read",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                "write",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client2,
                client_info["mounting_dir"],
                "read",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "write",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("11223", return_counts)
        print(result)

        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            if rc == 0:
                log.info("Cleaning up successfull")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #2
def run(ceph_cluster, **kw):
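    """CEPH-11227: directory pinning with MDS failover on a multi-MDS CephFS.

    Mounts FUSE and kernel clients, bulk-creates directories under
    testdir, pins them to MDS ranks, drives IO into the pinned
    directories, and keeps that IO running across repeated MDS failovers
    before cleaning up clients and MDS settings.
    """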
    try:
        start = timeit.default_timer()
        config = kw.get('config')
        num_of_dirs = config.get('num_of_dirs')
        tc = '11227'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                '',
                0,
                2,
                iotype='crefi',
            )
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    '',
                    0,
                    2,
                    iotype='crefi')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify('', return_counts)

        client1[0].exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], 'testdir'))

        if result == 'Data validation success':
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info['mds_nodes'])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            num_of_dirs = int(num_of_dirs / 5)
            with parallel() as p:
                p.spawn(fs_util.mkdir_bulk, client1, 0, num_of_dirs * 1,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client2, num_of_dirs * 1 + 1,
                        num_of_dirs * 2,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client1, num_of_dirs * 2 + 1,
                        num_of_dirs * 3,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client2, num_of_dirs * 3 + 1,
                        num_of_dirs * 4,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client1, num_of_dirs * 4 + 1,
                        num_of_dirs * 5,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                for op in p:
                    rc = op
            if rc == 0:
                log.info('Directories created successfully')
            else:
                raise CommandFailed('Directory creation failed')

            with parallel() as p:
                p.spawn(fs_util.max_dir_io, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        num_of_dirs * 1, 10)
                p.spawn(fs_util.max_dir_io, client2,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 1, num_of_dirs * 2, 10)
                rc = fs_util.check_mount_exists(client1[0])
                if rc == 0:
                    fs_util.pinning(client1, 0, 10,
                                    client_info['mounting_dir'] + 'testdir/',
                                    dir_name, 0)

                p.spawn(fs_util.max_dir_io, client3,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 3, num_of_dirs * 4, 10)
                p.spawn(fs_util.max_dir_io, client4,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 4, num_of_dirs * 5, 10)

            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])

            with parallel() as p:
                p.spawn(fs_util.pinning, client2, 10, num_of_dirs * 1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client3, num_of_dirs * 1,
                        num_of_dirs * 2,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client4, num_of_dirs * 2,
                        num_of_dirs * 3,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client1, num_of_dirs * 3,
                        num_of_dirs * 4,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client3, num_of_dirs * 4,
                        num_of_dirs * 5,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)

            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client2,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client3,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client4,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])

            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'], client_info['kernel_clients'],
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'], '',
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            if rc_client == 0 and rc_mds == 0:
                log.info('Cleaning up successful')
            else:
                return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                client_info['kernel_clients'],
                                                client_info['mounting_dir'],
                                                'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

        else:
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                '',
                                                client_info['mounting_dir'],
                                                'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)
        if rc_client == 0 and rc_mds == 0:
            log.info('Cleaning up successful')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #3
def run(ceph_cluster, **kw):
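    """CEPH-11230: pinned-directory IO across MDS failover.

    Mounts FUSE and kernel clients on a multi-MDS CephFS, creates 50
    directories, pins half of them to each of two MDS ranks, runs
    max_dir_io against the pinned directories, and verifies the IO
    survives MDS failovers triggered while it is in flight.
    """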
    try:
        start = timeit.default_timer()
        tc = '11230'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(
            client3,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                '',
                0,
                1,
                iotype='crefi',
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                1,
                iotype='crefi'
            )
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify('', return_counts)
        if result == 'Data validation success':
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info['mds_nodes'])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(
                    cmd='sudo mkdir %s%s_{1..50}' %
                        (client_info['mounting_dir'], dir_name))
                if client.node.exit_status == 0:
                    log.info("directories created succcessfully")
                else:
                    raise CommandFailed("directories creation failed")
            with parallel() as p:
                p.spawn(
                    fs_util.pinning,
                    client1,
                    1,
                    25,
                    client_info['mounting_dir'],
                    dir_name,
                    0)
                p.spawn(
                    fs_util.pinning,
                    client3,
                    26,
                    50,
                    client_info['mounting_dir'],
                    dir_name,
                    1)
            with parallel() as p:
                p.spawn(
                    fs_util.max_dir_io,
                    client1,
                    client_info['mounting_dir'],
                    dir_name,
                    1,
                    25,
                    1000)
                p.spawn(
                    fs_util.max_dir_io,
                    client3,
                    client_info['mounting_dir'],
                    dir_name,
                    26,
                    50,
                    1000)

            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client1,
                    client_info['mounting_dir'],
                    dir_name,
                    1,
                    25,
                    10,
                    fs_util.mds_fail_over,
                    client_info['mds_nodes'])
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client4,
                    client_info['mounting_dir'],
                    dir_name,
                    26,
                    50,
                    20,
                    fs_util.mds_fail_over,
                    client_info['mds_nodes'])
                for op in p:
                    return_counts, rc = op
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            log.info(result)
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'],
                    client_info['kernel_clients'],
                    client_info['mounting_dir'],
                    'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'], '',
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            if rc_client == 0 and rc_mds == 0:
                log.info('Cleaning up successful')
            else:
                return 1
            print('Script execution time:------')
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
            return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #4
def run(ceph_cluster, **kw):
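    """CEPH-11219 and CEPH-11224: mixed IO with cluster health checks.

    Mounts FUSE and kernel clients on a multi-MDS CephFS, runs read/write
    and fio/dd/smallfile stress IO (including smallfile create/delete
    cycles) against a set of directories, and confirms the cluster health
    reported before the IO matches the health reported after it.
    """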
    try:
        start = timeit.default_timer()
        tc = "11219,11224"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
        client1[0].exec_command(
            sudo=True,
            cmd=f"mkdir {client_info['mounting_dir']}{dir_name}",
        )
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                2,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            dirs, rc = fs_util.mkdir(client1, 0, 3,
                                     client_info["mounting_dir"], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split("\n")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    100,
                    iotype="touch",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile",
                )
                for op in p:
                    return_counts, rc = op

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_delete",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=10,
                    fsize=1024,
                )
                for op in p:
                    return_counts, rc = op
            cluster_health_afterIO = check_ceph_healthly(
                client_info["mon_node"][0],
                num_of_osds,
                len(client_info["mon_node"]),
                build,
                None,
                300,
            )

            log.info("Execution of Test case CEPH-%s ended" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            if cluster_health_beforeIO == cluster_health_afterIO:
                print(result)

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    5,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile",
                )

            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[2],
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    5,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile",
                )

            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )

            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                if rc == 0:
                    log.info("Cleaning up successfull")
                else:
                    return 1
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                if rc == 0:
                    log.info("Cleaning up successfull")
                else:
                    return 1

        log.info("Execution of Test case CEPH-%s ended" % (tc))
        print("Results:")
        result = fs_util.rc_verify(tc, return_counts)
        print(result)
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #5
def run(ceph_cluster, **kw):
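    """CEPH-10625 and CEPH-11225: file lifecycle and special-name tests.

    Mounts FUSE and kernel clients on a multi-MDS CephFS, runs smallfile
    create/rename/delete cycles with both many small files and single
    large files, then creates a directory whose name is made of special
    characters, writes 255-character file names into it, and drives
    random crefi workloads before cleaning up.
    """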
    try:
        start = timeit.default_timer()
        tc = '10625,11225'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                '',
                0,
                1,
                iotype='crefi',
            )
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify('', return_counts)
        if result == 'Data validation success':
            dirs, rc = fs_util.mkdir(client1, 0, 6,
                                     client_info['mounting_dir'], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split('\n')
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[1],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)

            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_rename',
                        fnum=1000,
                        fsize=10)
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_delete-renamed',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='smallfile_delete',
                        fnum=1000,
                        fsize=10)
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[3],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1,
                        fsize=1000000)
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[4],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1,
                        fsize=1000000)
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[5],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1,
                        fsize=1000000)
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[6],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1,
                        fsize=1000000)

            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[3],
                        0,
                        1,
                        iotype='smallfile_rename',
                        fnum=1,
                        fsize=1000000)
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[3],
                        0,
                        1,
                        iotype='smallfile_delete-renamed',
                        fnum=1,
                        fsize=1000000)
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[4],
                        0,
                        1,
                        iotype='smallfile_delete',
                        fnum=1,
                        fsize=1000000)
        dir_name = '!@#$%^&*()-_=+[]{};:,.<>?'
        out, rc = client1[0].exec_command(
            cmd="sudo mkdir '%s%s'" % (client_info['mounting_dir'], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Directory created")
        else:
            raise CommandFailed("Directory creation failed")
        for client in client_info['fuse_clients']:
            file_name = ''.join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in range(255))
            client.exec_command(
                cmd="sudo touch '%s%s/%s'" %
                (client_info['mounting_dir'], dir_name, file_name))
        for client in client_info['kernel_clients']:
            if client.pkg_type == 'rpm':
                file_name = ''.join(
                    random.choice(string.ascii_lowercase + string.digits)
                    for _ in range(255))
                client.exec_command(
                    cmd="sudo touch '%s%s/%s'" %
                    (client_info['mounting_dir'], dir_name, file_name))
        for num in range(0, 5):
            for client in client_info['fuse_clients']:
                client.exec_command(
                    cmd="sudo crefi %s'%s' --fop create -t %s "
                    "--multi -b 10 -d 10 -n 10 -T 10 "
                    "--random --min=1K --max=%dK" %
                    (client_info['mounting_dir'], dir_name, 'text', 5),
                    long_running=True)
                for i in range(0, 6):
                    ops = [
                        'create', 'rename', 'chmod', 'chown', 'chgrp',
                        'setxattr'
                    ]
                    rand_ops = random.choice(ops)
                    ftypes = ['text', 'sparse', 'binary', 'tar']
                    rand_filetype = random.choice(ftypes)
                    rand_count = random.randint(2, 10)
                    client.exec_command(cmd='sudo crefi %s%s --fop %s -t %s '
                                        '--multi -b 10 -d 10 -n 10 -T 10 '
                                        '--random --min=1K --max=%dK' %
                                        (client_info['mounting_dir'], dir_name,
                                         rand_ops, rand_filetype, rand_count),
                                        long_running=True)
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        else:
            return 1
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #6
def run(ceph_cluster, **kw):
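    """CEPH-11231: pinned-directory stress IO on a multi-MDS CephFS.

    Mounts FUSE and kernel clients, creates 50 directories, pins them
    across two MDS ranks, and runs max_dir_io plus fio/smallfile stress
    IO against the pinned directories.
    """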
    try:
        start = timeit.default_timer()
        tc = "11231"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = [client_info["fuse_clients"][0]]
        client2 = [client_info["fuse_clients"][1]]
        client3 = [client_info["kernel_clients"][0]]
        client4 = [client_info["kernel_clients"][1]]

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        client1[0].exec_command(sudo=True,
                                cmd="mkdir %s%s" %
                                (client_info["mounting_dir"], dir_name))
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info["mds_nodes"])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(cmd="sudo mkdir %s%s_{1..50}" %
                                    (client_info["mounting_dir"], dir_name))
                if client.node.exit_status == 0:
                    log.info("directories created succcessfully")
                else:
                    raise CommandFailed("directories creation failed")
            with parallel() as p:
                p.spawn(
                    fs_util.pinning,
                    client1,
                    1,
                    25,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    26,
                    50,
                    client_info["mounting_dir"],
                    dir_name,
                    1,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.max_dir_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    1,
                    25,
                    1000,
                )
                p.spawn(
                    fs_util.max_dir_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    26,
                    50,
                    1000,
                )
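            # Repeat the fio/dd/touch workloads while injecting failures:
            # mon reboots, network cuts and daemon kills, then the same for
            # OSD nodes, ending with a mon service restart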
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["mon_node"]:
                    p.spawn(fs_util.reboot, node)

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["mon_node"]:
                    fs_util.network_disconnect(node)
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["mon_node"]:
                    fs_util.pid_kill(node, "mon")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["osd_nodes"]:
                    p.spawn(fs_util.reboot, node)
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["osd_nodes"]:
                    fs_util.network_disconnect(node)
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["osd_nodes"]:
                    fs_util.pid_kill(node, "osd")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["osd_nodes"]:
                    fs_util.network_disconnect(node)
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["mon_node"]:
                    p.spawn(fs_util.daemon_systemctl, node, "mon", "restart")
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)
            if rc_client == 0 and rc_mds == 0:
                log.info("Cleaning up successfull")
            else:
                return 1
            print("Script execution time:------")
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
            return 0
        raise CommandFailed("Data validation failed")

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #7
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11255_11336-fuse client'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("creating auth failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(
            client3,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        rc = fs_util.standby_rank(
            client_info['mds_nodes'],
            client_info['mon_node'],
            todo='add_rank')
        if rc == 0:
            log.info("Added standby ranks")
        else:
            log.error("Failed to add standby ranks")
            return 1
        client1[0].exec_command(
            cmd='sudo mkdir %s%s' %
                (client_info['mounting_dir'], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Dir created")
        else:
            raise CommandFailed('Dir creation failed')
        rc1 = fs_util.fstab_entry(
            client1,
            client_info['mounting_dir'],
            action='doEntry')
        rc2 = fs_util.fstab_entry(
            client2,
            client_info['mounting_dir'],
            action='doEntry')
        if rc1 == 0 and rc2 == 0:
            log.info("FSentry for clients are done")
        else:
            raise CommandFailed("FsEntry failed")
        rc1 = fs_util.fstab_entry(
            client3,
            client_info['mounting_dir'],
            action='doEntry', mon_node_ip=client_info['mon_node_ip'])
        rc2 = fs_util.fstab_entry(
            client4,
            client_info['mounting_dir'],
            action='doEntry', mon_node_ip=client_info['mon_node_ip'])
        if rc1 == 0 and rc2 == 0:
            log.info("FSentry for clients are done")
        else:
            raise CommandFailed("FsEntry failed")
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                50,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                1,
                iotype='smallfile_create', fnum=1000, fsize=100)
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info['mounting_dir'],
                dir_name,
                0,
                1,
                iotype='crefi')
            p.spawn(fs_util.reboot, client1[0])
        res = []
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
            for op in p:
                res.append(op)
        print(res)
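        # Run another round of IO and reboot client2, then compare cluster
        # health against the pre-IO baseline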
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')

            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'read')

            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir_name,
                    0,
                    1,
                    iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            p.spawn(fs_util.reboot, client2[0])

        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info('cluster is healthy')
        else:
            log.error("cluster is not healty")
            return 1
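        # Stress IO while rebooting OSD nodes, then while disconnecting their
        # network, and finally while killing OSD daemons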
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                p.spawn(fs_util.reboot, node)
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                fs_util.network_disconnect(node)
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                fs_util.pid_kill(node, 'osd')

        time.sleep(100)
        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            return 1
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'],
                'umount')
            rc = fs_util.standby_rank(
                client_info['mds_nodes'],
                client_info['mon_node'],
                todo='add_rank_revert')
            if rc == 0:
                log.info("removed standby ranks")
            rc1 = fs_util.fstab_entry(
                client1,
                client_info['mounting_dir'],
                action='revertEntry')
            rc2 = fs_util.fstab_entry(
                client2,
                client_info['mounting_dir'],
                action='revertEntry')
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")
            else:
                return 1
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                '',
                client_info['mounting_dir'],
                'umount')
            rc = fs_util.standby_rank(
                client_info['mds_nodes'],
                client_info['mon_node'],
                todo='add_rank_revert')
            if rc == 0:
                log.info("removed standby ranks")
            rc1 = fs_util.fstab_entry(
                client1,
                client_info['mounting_dir'],
                action='revertEntry')
            rc2 = fs_util.fstab_entry(
                client2,
                client_info['mounting_dir'],
                action='revertEntry')
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")

            if rc == 0:
                log.info('Cleaning up successful')
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'],
                'umount')
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                '',
                client_info['mounting_dir'],
                'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #8
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11219,11224'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), build, None, 300)
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
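        # Baseline mixed IO from all clients; the collected return counts
        # feed the data validation below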
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dir_name,
                    0,
                    2,
                    iotype='crefi')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify('', return_counts)
        if result == 'Data validation success':
            print("Data validation success")
            dirs, rc = fs_util.mkdir(client1, 0, 3,
                                     client_info['mounting_dir'], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split('\n')
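            # Distribute fio/touch/dd/crefi workloads across the newly
            # created directories from the fuse clients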
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='fio')
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        100,
                        iotype='touch')
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[1],
                        0,
                        1,
                        iotype='dd')
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='crefi')
                for op in p:
                    return_counts, rc = op

            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=10,
                        fsize=1024)
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[1],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=10,
                        fsize=1024)
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=10,
                        fsize=1024)
                for op in p:
                    return_counts, rc = op
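            # Remove the smallfile data sets created above from the kernel
            # client (client3)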
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_delete',
                        fnum=10,
                        fsize=1024)
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[1],
                        0,
                        1,
                        iotype='smallfile_delete')
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='smallfile_delete',
                        fnum=10,
                        fsize=1024)
                for op in p:
                    return_counts, rc = op
            cluster_health_afterIO = check_ceph_healthly(
                client_info['mon_node'][0], num_of_osds,
                len(client_info['mon_node']), build, None, 300)

            log.info("Execution of Test case CEPH-%s ended" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            if cluster_health_beforeIO == cluster_health_afterIO:
                print(result)
            print('-----------------------------------------')
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        5,
                        iotype='fio')
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        10,
                        iotype='dd')
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='crefi')
            print('-------------------------------------------------------')
            with parallel() as p:
                p.spawn(fs_util.read_write_IO,
                        client1,
                        client_info['mounting_dir'],
                        'g',
                        'read',
                        dir_name=dirs[0])
                p.spawn(fs_util.read_write_IO,
                        client2,
                        client_info['mounting_dir'],
                        'g',
                        'read',
                        dir_name=dirs[0])
            print('-------------------------------------------------------')
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        5,
                        iotype='fio')
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        10,
                        iotype='dd')
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='crefi')
            print('-------------------------------------------------------')
            with parallel() as p:
                p.spawn(fs_util.read_write_IO,
                        client1,
                        client_info['mounting_dir'],
                        'g',
                        'read',
                        dir_name=dirs[0])
                p.spawn(fs_util.read_write_IO,
                        client2,
                        client_info['mounting_dir'],
                        'g',
                        'read',
                        dir_name=dirs[0])
            print('-------------------------------------------------------')
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                             client_info['kernel_clients'],
                                             client_info['mounting_dir'],
                                             'umount')
                if rc == 0:
                    log.info('Cleaning up successful')
                else:
                    return 1
            else:
                rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                             client_info['mounting_dir'],
                                             'umount')
                if rc == 0:
                    log.info('Cleaning up successful')
                else:
                    return 1

        log.info("Execution of Test case CEPH-%s ended" % (tc))
        print("Results:")
        result = fs_util.rc_verify(tc, return_counts)
        print(result)
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #9
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11222"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        cluster_health_beforeIO = check_ceph_healthly(client_info["mon_node"][0],
                                                      12, 1, build, None, 300)

        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info["clients"]:
            log.info("Creating directory:")
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir1))
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd="sudo crefi %s%s --fop create --multi -b 10 -d 10 "
                "--random --min=1K --max=10K" %
                (client_info["mounting_dir"], dir1))
            print(out.read().decode())
            break

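        # Stress the crefi-created tree with fio, touch, dd, and crefi from
        # both fuse clients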
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="crefi",
            )
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify("", return_counts)
        print(result1)

        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf %s%s" %
                                (client_info["mounting_dir"], dir1))
            break

        for client in client_info["clients"]:
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd="sudo crefi %s%s --fop create --multi -b 10 -d 10 "
                "--random --min=1K --max=10K" %
                (client_info["mounting_dir"], dir1))
            print(out.read().decode())
            log.info("Renaming the dirs:")
            out, rc = client.exec_command(
                cmd="sudo crefi "
                "%s%s --fop rename --multi -b 10 -d 10 --random "
                "--min=1K --max=10K" % (client_info["mounting_dir"], dir1))
            print(out.read().decode())

            break
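        # Repeat the same workload mix on the renamed tree before the
        # post-IO health check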
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="crefi",
            )
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify("", return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(client_info["mon_node"][0],
                                                     12, 1, build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            if rc == 0:
                log.info("Cleaning up successfull")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:

        log.info(e)

        log.info(traceback.format_exc())

        return 1
Example #10
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11335"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        active_mds_node_1, active_mds_node_2, rc = fs_util.get_active_mdss(
            client_info["mds_nodes"])
        if rc == 0:
            log.info("Got active mdss")
        else:
            raise CommandFailed("getting active-mdss failed")

        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "m",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "m", "read")
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "m",
                "readwrite",
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                "m",
                "readwrite",
            )
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        print(result)

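        # Auto eviction: capture "session ls" on the active MDS, kill the
        # client process, wait for the eviction interval, and confirm the
        # session list changed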
        log.info("Performing Auto Eviction:")
        mds1_before_evict, _, rc = fs_util.get_mds_info(active_mds_node_1,
                                                        active_mds_node_2,
                                                        info="session ls")
        rc = fs_util.auto_evict(active_mds_node_1, client_info["clients"], 0)
        if rc == 0:
            log.info("client process killed successfully for auto eviction")
        else:
            raise CommandFailed(
                "client process killing failed for auto eviction")
        log.info("Waiting 300 seconds for auto eviction---")
        time.sleep(300)
        mds1_after_evict, _, rc = fs_util.get_mds_info(active_mds_node_1,
                                                       active_mds_node_2,
                                                       info="session ls")
        if mds1_before_evict != mds1_after_evict:
            log.info("Auto eviction Passed")
        else:
            raise CommandFailed("Auto eviction Failed")
        print("-------------------------------------------------------")
        if client3[0].pkg_type == "deb" and client4[0].pkg_type == "deb":
            for client in client_info["fuse_clients"]:
                client.exec_command(cmd="sudo fusermount -u %s -z" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))
        else:
            for client in client_info["fuse_clients"]:
                client.exec_command(cmd="sudo fusermount -u %s -z" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))

            for client in client_info["kernel_clients"]:
                client.exec_command(cmd="sudo umount %s -l" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "m",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "m", "read")
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "m",
                "readwrite",
            )
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        print(result)
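        # Manual eviction: evict a client from the active MDS, verify its
        # session is gone, then remove the client from the OSD blacklist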
        log.info("Performing Manual eviction:")
        ip_addr = fs_util.manual_evict(active_mds_node_1, 0)
        mds1_after_evict, _, rc = fs_util.get_mds_info(active_mds_node_1,
                                                       active_mds_node_2,
                                                       info="session ls")
        print(mds1_before_evict)
        print("------------------------")
        print(mds1_after_evict)
        print("-----------------------")
        if mds1_before_evict != mds1_after_evict:
            log.info("Manual eviction success")
        else:
            raise CommandFailed("Manual Eviction Failed")
        log.info("Removing client from OSD blacklisting:")
        rc = fs_util.osd_blacklist(active_mds_node_1, ip_addr)
        if rc == 0:
            log.info("Removing client from OSD blacklisting successfull")
        else:
            raise CommandFailed("Removing client from OSD blacklisting Failed")
        print("-" * 10)

        if client3[0].pkg_type == "deb" and client4[0].pkg_type == "deb":
            for client in client_info["fuse_clients"]:
                client.exec_command(cmd="sudo fusermount -u %s -z" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))
        else:
            for client in client_info["fuse_clients"]:
                client.exec_command(cmd="sudo fusermount -u %s -z" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))

            for client in client_info["kernel_clients"]:
                client.exec_command(cmd="sudo umount %s -l" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "m",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "m", "read")
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "m",
                "readwrite",
            )
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        print(result)
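        # Toggle the blacklist-on-evict settings for manual and auto eviction
        # and revert each after verifying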
        log.info("Performing configuring blacklisting:")
        rc = fs_util.config_blacklist_manual_evict(active_mds_node_1, 0)
        if rc == 0:
            log.info("Configure blacklisting for manual evict success")
            rc = fs_util.config_blacklist_manual_evict(active_mds_node_1,
                                                       0,
                                                       revert=True)
            if rc == 0:
                log.info("Reverted successfully")
            else:
                raise CommandFailed(
                    "Reverting blacklist config for manual evict failed")
        else:
            raise CommandFailed(
                "Configure blacklisting for manual evict failed")
        print("-" * 10)
        rc = fs_util.config_blacklist_auto_evict(active_mds_node_1, 0)
        if rc == 0:
            log.info("Configure blacklisting for auto evict success")
            rc = fs_util.config_blacklist_auto_evict(active_mds_node_1,
                                                     0,
                                                     revert=True)
            if rc == 0:
                log.info("Reverted successfully")
            else:
                raise CommandFailed(
                    "Reverting blacklist config for auto evict failed")
        else:
            raise CommandFailed("Configure blacklisting for auto evict failed")

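        # Unmount and clean the clients again before remounting for the next check.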
        for client in client_info["fuse_clients"]:
            client.exec_command(cmd="sudo fusermount -u %s -z" %
                                (client_info["mounting_dir"]))
            client.exec_command(cmd="sudo rm -rf %s" %
                                (client_info["mounting_dir"]))
        if client3[0].pkg_type != "deb" or client4[0].pkg_type != "deb":
            for client in client_info["kernel_clients"]:
                client.exec_command(cmd="sudo umount %s -l" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        for client in client_info["fuse_clients"]:
            client.exec_command(cmd="sudo rm -rf %s*" %
                                (client_info["mounting_dir"]))
            client.exec_command(cmd="sudo fusermount -u %s -z" %
                                (client_info["mounting_dir"]))
            client.exec_command(cmd="sudo rm -rf %s" %
                                (client_info["mounting_dir"]))
        if client3[0].pkg_type != "deb" or client4[0].pkg_type != "deb":
            for client in client_info["kernel_clients"]:
                client.exec_command(cmd="sudo umount %s -l" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))

        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))

        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #11
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11254-fuse_clients"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1

        rc3 = fs_util.kernel_mount(
            client3, client_info["mounting_dir"], client_info["mon_node_ip"]
        )
        rc4 = fs_util.kernel_mount(
            client4, client_info["mounting_dir"], client_info["mon_node_ip"]
        )
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
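        # Record cluster health before IO so it can be compared after the failure scenarios below.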
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            raise CommandFailed("Activating multiple MDSs failed")
        rc = fs_util.standby_rank(
            client_info["mds_nodes"], client_info["mon_node"], todo="add_rank"
        )
        if rc == 0:
            log.info("Added standby ranks")
        else:
            raise Exception("Adding standby ranks failed")

        client1[0].exec_command(
            cmd="sudo mkdir %s%s" % (client_info["mounting_dir"], dir_name)
        )
        if client1[0].node.exit_status == 0:
            log.info("Dir created")
        else:
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
            )
            return 1
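        # Add fstab entries so the CephFS mounts persist across the client reboots triggered below.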
        rc1 = fs_util.fstab_entry(
            client1, client_info["mounting_dir"], action="doEntry"
        )
        rc2 = fs_util.fstab_entry(
            client2, client_info["mounting_dir"], action="doEntry"
        )
        if rc1 == 0 and rc2 == 0:
            log.info("fstab entries for clients are done")
        else:
            raise CommandFailed("fstab entry failed")

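        # Run mixed IO from all clients while rebooting client1.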
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(
                fs_util.read_write_IO, client3, client_info["mounting_dir"], "g", "read"
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                50,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="smallfile_create",
                fnum=1000,
                fsize=100,
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(fs_util.reboot, client1[0])

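        # Repeat the IO mix while rebooting client2.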
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )

            p.spawn(
                fs_util.read_write_IO, client4, client_info["mounting_dir"], "g", "read"
            )

            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            p.spawn(fs_util.reboot, client2[0])

        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info("cluster is healthy")
        else:
            log.error("cluster is not healty")
            return 1
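        # Run IO while rebooting each monitor node in parallel.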
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            for node in client_info["mon_node"]:
                p.spawn(fs_util.reboot, node)

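        # Run IO while disconnecting the network on each monitor node.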
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            for node in client_info["mon_node"]:
                fs_util.network_disconnect(node)
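        # Run IO while killing the mon daemon on each monitor node.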
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            for node in client_info["mon_node"]:
                fs_util.pid_kill(node, "mon")

        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            return 1
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
            rc = fs_util.standby_rank(
                client_info["mds_nodes"],
                client_info["mon_node"],
                todo="add_rank_revert",
            )
            if rc == 0:
                log.info("removed standby ranks")
            rc1 = fs_util.fstab_entry(
                client1, client_info["mounting_dir"], action="revertEntry"
            )
            rc2 = fs_util.fstab_entry(
                client2, client_info["mounting_dir"], action="revertEntry"
            )
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")
            else:
                return 1
        else:
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"], "", client_info["mounting_dir"], "umount"
            )

            rc = fs_util.standby_rank(
                client_info["mds_nodes"],
                client_info["mon_node"],
                todo="add_rank_revert",
            )
            if rc == 0:
                log.info("removed standby ranks")
            else:
                return 1
            rc1 = fs_util.fstab_entry(
                client1, client_info["mounting_dir"], action="revertEntry"
            )
            rc2 = fs_util.fstab_entry(
                client2, client_info["mounting_dir"], action="revertEntry"
            )
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")
            else:
                return 1
            if rc == 0:
                log.info("Cleaning up successfull")
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"], "", client_info["mounting_dir"], "umount"
            )
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #12
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11256-fuse'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise Exception("kernel mount failed")
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), None, 300)
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            raise CommandFailed("Activating multiple MDSs failed")
        rc = fs_util.standby_rank(client_info['mds_nodes'],
                                  client_info['mon_node'],
                                  todo='add_rank')
        if rc == 0:
            log.info("Added standby ranks")
        else:
            raise Exception("Adding standby ranks failed")

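        # Create a set of directories and spread IO across them while rebooting the MDS nodes.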
        dirs, rc = fs_util.mkdir(client1, 0, 4, client_info['mounting_dir'],
                                 dir_name)
        if rc == 0:
            log.info("Directories created")
        else:
            raise CommandFailed("Directory creation failed")
        dirs = dirs.split('\n')
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[1],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[2],
                    '',
                    '',
                    iotype='smallfile_create',
                    fnum=1000,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    10,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.reboot, node)
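        # Repeat the IO while restarting the active MDS daemons, then check the MDS heartbeat map.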
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[2],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[1],
                    '',
                    '',
                    iotype='smallfile_create',
                    fnum=1000,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    10,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.daemon_systemctl, node, 'mds',
                        'active_mds_restart')
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)

        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), None, 300)
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info('cluster is healthy')
        else:
            log.error("cluster is not healty")
            return 1
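        # Run IO while disconnecting the network on each MDS node, then check the heartbeat map.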
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[2],
                    0,
                    1,
                    iotype='smallfile_create',
                    fnum=10,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[1],
                    0,
                    1,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                fs_util.network_disconnect(node)
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
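        # Run IO while killing the MDS processes, then check the heartbeat map again.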
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[1],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    1,
                    iotype='smallfile_create',
                    fnum=10,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[2],
                    0,
                    1,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                fs_util.pid_kill(node, 'mds')
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            return 1
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')

        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
            if rc == 0:
                log.info('Cleaning up successful')
        fs_util.standby_rank(client_info['mds_nodes'],
                             client_info['mon_node'],
                             todo='add_rank_revert')
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
            rc = fs_util.standby_rank(client_info['mds_nodes'],
                                      client_info['mon_node'],
                                      todo='add_rank_revert')

        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
            rc = fs_util.standby_rank(client_info['mds_nodes'],
                                      client_info['mon_node'],
                                      todo='add_rank_revert')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #13
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11232 and 11233"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            raise CommandFailed("Activating multiple MDSs failed")

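        # Baseline mixed IO with data validation before the directory-fragmentation checks.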
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                "",
                0,
                2,
                iotype="crefi",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        if "Data validation success" in result:
            print("Data validation success")
            tc = "11232 and 11233"
            log.info("Execution of Test cases %s started:" % (tc))
            fs_util.allow_dir_fragmentation(client_info["mds_nodes"])
            log.info("Creating directory:")
            for node in client_info["fuse_clients"]:
                out, rc = node.exec_command(
                    cmd="sudo mkdir %s%s" %
                    (client_info["mounting_dir"], dir_name))
                print(out.read().decode())
                break
            active_mds_node_1, active_mds_node_2, rc = fs_util.get_active_mdss(
                client_info["mds_nodes"])
            if rc == 0:
                log.info("Got active mdss")
            else:
                raise CommandFailed("getting active-mdss failed")
            node1_before_io, _, rc = fs_util.get_mds_info(active_mds_node_1,
                                                          active_mds_node_2,
                                                          info="get subtrees")
            if rc == 0:
                log.info("Got mds subtree info")
            else:
                raise CommandFailed("Mds info command failed")

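            # Create 5000 files in one directory from all clients in parallel to drive directory fragmentation.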
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    1000,
                    iotype="touch",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    1000,
                    2000,
                    iotype="touch",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    2000,
                    3000,
                    iotype="touch",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dir_name,
                    3000,
                    4000,
                    iotype="touch",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    4000,
                    5000,
                    iotype="touch",
                )

            node1_after_io, _, rc = fs_util.get_mds_info(active_mds_node_1,
                                                         active_mds_node_2,
                                                         info="get subtrees")
            if rc == 0:
                log.info("Got mds subtree info")
            else:
                raise CommandFailed("Mds info command failed")

            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
            )
            if rc == 0:
                log.info("Cleaning mount success")
            else:
                raise CommandFailed("Cleaning mount failed")

            node1_after_del, _, rc = fs_util.get_mds_info(active_mds_node_1,
                                                          active_mds_node_2,
                                                          info="get subtrees")
            if rc == 0:
                log.info("Got mds subtree info")
            else:
                raise CommandFailed("Mds info command failed")

            log.info("Execution of Test case 11232 and 11233 ended:")
            print("Results:")
            if node1_before_io != node1_after_io and node1_after_io != node1_after_del:
                log.info("Test case %s Passed" % (tc))
            else:
                return 1

            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

            if rc_client == 0 and rc_mds == 0:
                log.info("Cleaning up successfull")
            else:
                return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))

        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #14
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "10625,11225"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            log.error("Activating multiple MDSs failed")
            return 1
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            dirs, rc = fs_util.mkdir(client1, 0, 7,
                                     client_info["mounting_dir"], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split("\n")
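            # Create 1000 small files per directory from each client in parallel.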
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )

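            # Rename the small files in the first directory, then delete both the renamed and the remaining files.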
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_rename",
                    fnum=1000,
                    fsize=10,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_delete-renamed",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=1000,
                    fsize=10,
                )
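            # Repeat with a single large file per directory: create, rename, then delete.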
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[4],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[5],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[6],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_rename",
                    fnum=1,
                    fsize=1000000,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_delete-renamed",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[4],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=1,
                    fsize=1000000,
                )
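        # Exercise names with special characters and 255-character file names from all clients.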
        dir_name = "!@#$%^&*()-_=+[]{};:,.<>?"
        out, rc = client1[0].exec_command(
            cmd="sudo mkdir '%s%s'" % (client_info["mounting_dir"], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Directory created")
        else:
            raise CommandFailed("Directory creation failed")
        for client in client_info["fuse_clients"]:
            file_name = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in range(255))
            client.exec_command(
                cmd="sudo touch '%s%s/%s'" %
                (client_info["mounting_dir"], dir_name, file_name))
        for client in client_info["kernel_clients"]:
            if client.pkg_type == "rpm":
                file_name = "".join(
                    random.choice(string.ascii_lowercase + string.digits)
                    for _ in range(255))
                client.exec_command(
                    cmd="sudo touch '%s%s/%s'" %
                    (client_info["mounting_dir"], dir_name, file_name))
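        # Repeatedly run several smallfile metadata operations against the special-character directory.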
        for num in range(0, 5):
            for client in client_info["fuse_clients"]:
                ops = ["create", "setxattr", "getxattr", "chmod", "rename"]
                for op in ops:
                    client.exec_command(
                        sudo=True,
                        cmd=
                        f"python3 smallfile/smallfile_cli.py --operation {op} --threads 10 --file-size 4 "
                        f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                        f"{client_info['mounting_dir']}{dir_name}",
                        long_running=True,
                        timeout=300,
                    )
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #15
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11222'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            raise CommandFailed("Activating multiple MDSs failed")
        cluster_health_beforeIO = check_ceph_healthly(client_info['mon_node'],
                                                      12, 1, None, 300)

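        # Create a directory and populate it with a breadth/depth tree using crefi.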
        dir1 = ''.join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info['clients']:
            log.info("Creating directory:")
            client.exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], dir1))
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd='sudo crefi %s%s --fop create --multi -b 10 -d 10 '
                '--random --min=1K --max=10K' %
                (client_info['mounting_dir'], dir1))
            print(out.read().decode())
            break

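        # Run fio, touch, dd and crefi IO against the tree in parallel and verify the return counts.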
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='crefi')
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify('', return_counts)
        print(result1)

        for client in client_info['clients']:
            client.exec_command(cmd='sudo rm -rf %s%s' %
                                (client_info['mounting_dir'], dir1))
            break

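        # Recreate the tree, rename the directories with crefi, and repeat the IO mix.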
        for client in client_info['clients']:
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd='sudo crefi %s%s --fop create --multi -b 10 -d 10 '
                '--random --min=1K --max=10K' %
                (client_info['mounting_dir'], dir1))
            print(out.read().decode())
            log.info("Renaming the dirs:")
            out, rc = client.exec_command(
                cmd='sudo crefi '
                '%s%s --fop rename --multi -b 10 -d 10 --random '
                '--min=1K --max=10K' % (client_info['mounting_dir'], dir1))
            print(out.read().decode())

            break
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='crefi')
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify('', return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(client_info['mon_node'],
                                                     12, 1, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                             client_info['kernel_clients'],
                                             client_info['mounting_dir'],
                                             'umount')
            else:
                rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                             client_info['mounting_dir'],
                                             'umount')
            if rc == 0:
                log.info('Cleaning up successful')
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')
        else:
            fs_util.client_clean_up(client_info['fuse_clients'], '',
                                    client_info['mounting_dir'], 'umount')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #16
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        config = kw.get("config")
        num_of_dirs = config.get("num_of_dirs")
        num_of_dirs = num_of_dirs // 5  # integer division so the directory counts stay integers
        tc = "11228"
        dir_name = "dir"
        test_dir = "testdir/"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)

        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
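        # Generate mixed IO (sequential read/write plus crefi metadata load) from all four clients in parallel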
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="crefi",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="crefi",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info["mds_nodes"])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(cmd="sudo mkdir %s%s" %
                                    (client_info["mounting_dir"], test_dir))

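            # Create the directory tree in bulk, splitting the num_of_dirs * 10 range across the two fuse clients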
            with parallel() as p:
                p.spawn(
                    fs_util.mkdir_bulk,
                    client1,
                    0,
                    num_of_dirs * 2,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 2 + 1,
                    num_of_dirs * 4,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 4 + 1,
                    num_of_dirs * 6,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 6 + 1,
                    num_of_dirs * 8,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 8 + 1,
                    num_of_dirs * 10,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                for op in p:
                    rc = op
            if rc == 0:
                log.info("Directories created successfully")
            else:
                raise CommandFailed("Directory creation failed")

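            # Pin the directory ranges across MDS ranks: the first three ranges go to rank 1, the remaining two to rank 0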
            with parallel() as p:
                p.spawn(
                    fs_util.pinning,
                    client2,
                    0,
                    num_of_dirs * 1,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    num_of_dirs * 1,
                    num_of_dirs * 2,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client4,
                    num_of_dirs * 2,
                    num_of_dirs * 3,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client1,
                    num_of_dirs * 3,
                    num_of_dirs * 4,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    0,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    num_of_dirs * 4,
                    num_of_dirs * 5,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    0,
                )

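            # Run IO on the pinned directories from the fuse client while an MDS failover is triggered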
            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client1,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    num_of_dirs * 1,
                    num_of_dirs * 5,
                    10,
                    fs_util.mds_fail_over,
                    client_info["mds_nodes"],
                )
                for op in p:
                    return_counts, rc = op
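            # Repeat the failover IO from the kernel client against a different directory range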
            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client3,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    num_of_dirs * 7,
                    num_of_dirs * 8,
                    20,
                    fs_util.mds_fail_over,
                    client_info["mds_nodes"],
                )
                for op in p:
                    return_counts, rc = op
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            print(result)
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

            if rc_client == 0 and rc_mds == 0:
                log.info("Cleaning up successfull")
            else:
                return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc_client = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

        else:
            rc_client = fs_util.client_clean_up(client_info["fuse_clients"],
                                                "",
                                                client_info["mounting_dir"],
                                                "umount")
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

        if rc_client == 0 and rc_mds == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #17
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11242'
        dir_name = 'dir'
        config = kw.get('config')
        num_of_dirs = config.get('num_of_dirs')
        num_of_dirs = num_of_dirs // 5  # integer division: the value is used as a range bound below
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
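        # Generate mixed read/write and crefi IO from all four clients in parallel before starting the test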
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dir_name,
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify('', return_counts)
        if result == 'Data validation success':
            print("Data validation success")
            log.info("Execution of Test cases %s started:" % (tc))
            with parallel() as p:
                p.spawn(fs_util.mkdir, client1, 0, num_of_dirs * 1,
                        client_info['mounting_dir'], dir_name)
                p.spawn(fs_util.mkdir, client2, num_of_dirs * 1,
                        num_of_dirs * 2, client_info['mounting_dir'], dir_name)
                p.spawn(fs_util.mkdir, client1, num_of_dirs * 2,
                        num_of_dirs * 3, client_info['mounting_dir'], dir_name)
                p.spawn(fs_util.mkdir, client2, num_of_dirs * 3,
                        num_of_dirs * 4, client_info['mounting_dir'], dir_name)
                p.spawn(fs_util.mkdir, client1, num_of_dirs * 4,
                        num_of_dirs * 5, client_info['mounting_dir'], dir_name)
                for op in p:
                    _, rc = op
            if rc == 0:
                log.info("Dirs created successfully")
            else:
                raise CommandFailed("Dirs creation failed")
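            # For each directory range, drive IO through MDS failovers on one client while a second client runs filesystem-utility operations in parallel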
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'], dir_name, 0,
                        num_of_dirs * 1, 1, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
                p.spawn(fs_util.filesystem_utilities, client2,
                        client_info['mounting_dir'], dir_name, 0,
                        num_of_dirs * 1)
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client3,
                        client_info['mounting_dir'], dir_name, num_of_dirs * 1,
                        num_of_dirs * 2, 1, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
                p.spawn(fs_util.filesystem_utilities, client4,
                        client_info['mounting_dir'], dir_name, num_of_dirs * 1,
                        num_of_dirs * 2)
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'], dir_name, num_of_dirs * 2,
                        num_of_dirs * 3, 1, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
                p.spawn(fs_util.filesystem_utilities, client2,
                        client_info['mounting_dir'], dir_name, num_of_dirs * 2,
                        num_of_dirs * 3)
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client3,
                        client_info['mounting_dir'], dir_name, num_of_dirs * 3,
                        num_of_dirs * 4, 1, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(fs_util.filesystem_utilities, client3,
                        client_info['mounting_dir'], dir_name, num_of_dirs * 3,
                        num_of_dirs * 4)
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client4,
                        client_info['mounting_dir'], dir_name, num_of_dirs * 4,
                        num_of_dirs * 5, 1, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
                p.spawn(fs_util.filesystem_utilities, client1,
                        client_info['mounting_dir'], dir_name, num_of_dirs * 4,
                        num_of_dirs * 5)
                for op in p:
                    return_counts, rc = op
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            print(result)
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                             client_info['kernel_clients'],
                                             client_info['mounting_dir'],
                                             'umount')
            else:
                rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                             client_info['mounting_dir'],
                                             'umount')
            if rc == 0:
                log.info('Cleaning up successful')
            else:
                return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))

        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1