Example #1
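Exercises CephFS test CEPH-11254 with two ceph-fuse and two kernel clients: activates multiple MDSs, adds standby ranks and fstab entries, then drives parallel I/O while rebooting clients and MON nodes, disconnecting the MON network, and killing MON daemons, checking cluster health before and after the churn.

All eight examples look like cephci CephFS test modules and omit the same preamble; a plausible sketch follows (module paths are assumptions, verify against your checkout):

import logging
import time
import timeit
import traceback

from ceph.ceph import CommandFailed          # raised when a remote command fails
from ceph.parallel import parallel           # context manager that runs spawned calls concurrently
from ceph.utils import check_ceph_healthly   # polls cluster health within a timeout
from tests.cephfs.cephfs_utils import FsUtils

log = logging.getLogger(__name__)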
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11254-fuse_clients"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1

        rc3 = fs_util.kernel_mount(
            client3, client_info["mounting_dir"], client_info["mon_node_ip"]
        )
        rc4 = fs_util.kernel_mount(
            client4, client_info["mounting_dir"], client_info["mon_node_ip"]
        )
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            raise CommandFailed("Activating multiple MDSs failed")
        rc = fs_util.standby_rank(
            client_info["mds_nodes"], client_info["mon_node"], todo="add_rank"
        )
        if rc == 0:
            log.info("Added standby ranks")
        else:
            raise Exception("Adding standby ranks failed")

        client1[0].exec_command(
            cmd="sudo mkdir %s%s" % (client_info["mounting_dir"], dir_name)
        )
        if client1[0].node.exit_status == 0:
            log.info("Dir created")
        else:
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
            )
            return 1
        rc1 = fs_util.fstab_entry(
            client1, client_info["mounting_dir"], action="doEntry"
        )
        rc2 = fs_util.fstab_entry(
            client2, client_info["mounting_dir"], action="doEntry"
        )
        if rc1 == 0 and rc2 == 0:
            log.info("fstab entries for clients are done")
        else:
            raise CommandFailed("fstab entry failed")

        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(
                fs_util.read_write_IO, client3, client_info["mounting_dir"], "g", "read"
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                50,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="smallfile_create",
                fnum=1000,
                fsize=100,
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(fs_util.reboot, client1[0])

        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )

            p.spawn(
                fs_util.read_write_IO, client4, client_info["mounting_dir"], "g", "read"
            )

            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            p.spawn(fs_util.reboot, client2[0])

        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info("cluster is healthy")
        else:
            log.error("cluster is not healty")
            return 1
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            for node in client_info["mon_node"]:
                p.spawn(fs_util.reboot, node)

        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            for node in client_info["mon_node"]:
                fs_util.network_disconnect(node)
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            for node in client_info["mon_node"]:
                fs_util.pid_kill(node, "mon")

        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            log.error("Cluster is not healthy")
            return 1
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
            rc = fs_util.standby_rank(
                client_info["mds_nodes"],
                client_info["mon_node"],
                todo="add_rank_revert",
            )
            if rc == 0:
                log.info("removed standby ranks")
            rc1 = fs_util.fstab_entry(
                client1, client_info["mounting_dir"], action="revertEntry"
            )
            rc2 = fs_util.fstab_entry(
                client2, client_info["mounting_dir"], action="revertEntry"
            )
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")
            else:
                return 1
        else:
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"], "", client_info["mounting_dir"], "umount"
            )

            rc = fs_util.standby_rank(
                client_info["mds_nodes"],
                client_info["mon_node"],
                todo="add_rank_revert",
            )
            if rc == 0:
                log.info("removed standby ranks")
            else:
                return 1
            rc1 = fs_util.fstab_entry(
                client1, client_info["mounting_dir"], action="revertEntry"
            )
            rc2 = fs_util.fstab_entry(
                client2, client_info["mounting_dir"], action="revertEntry"
            )
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")
            else:
                return 1
            if rc == 0:
                log.info("Cleaning up successfull")
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"], "", client_info["mounting_dir"], "umount"
            )
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #2
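Exercises CEPH-11298: generates data in a local /mnt/source directory with parallel touch/dd/crefi/fio workloads, rsyncs it into a target directory on the CephFS mount and back again from every client, and passes only if all recorded return counts agree.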
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11298'
        source_dir = '/mnt/source'
        target_dir = 'target'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        for client in client_info['clients']:
            client.exec_command(cmd='sudo rm -rf  %s' % source_dir)
            client.exec_command(cmd='sudo mkdir %s' % source_dir)

        for client in client_info['clients']:
            client.exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], target_dir))
            break
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    source_dir,
                    '',
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.read_write_IO, client1, source_dir, 'g', 'write')
            p.spawn(fs_util.stress_io,
                    client2,
                    source_dir,
                    '',
                    0,
                    10,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client3,
                    source_dir,
                    '',
                    0,
                    10,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client4,
                    source_dir,
                    '',
                    0,
                    1,
                    iotype='fio')
            for op in p:
                return_counts1, rc = op

        with parallel() as p:
            p.spawn(fs_util.rsync, client1, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client2, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client3, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client4, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            for op in p:
                return_counts2, rc = op

        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    11,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    3,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    1,
                    iotype='fio')
            for op in p:
                return_counts3, rc = op
        with parallel() as p:
            p.spawn(fs_util.rsync, client1,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client2,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client3,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client4,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            for op in p:
                return_counts4, rc = op

        rc = list(return_counts1.values()) + list(return_counts2.values()) + \
            list(return_counts3.values()) + list(return_counts4.values())
        rc_set = set(rc)
        if len(rc_set) == 1:
            print("Test case CEPH-%s passed" % (tc))
        else:
            print(("Test case CEPH-%s failed" % (tc)))
        log.info("Test completed for CEPH-%s" % (tc))
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        else:
            return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')
        else:
            fs_util.client_clean_up(client_info['fuse_clients'], '',
                                    client_info['mounting_dir'], 'umount')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #3
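Exercises CEPH-11228: after validating baseline I/O, bulk-creates directories under testdir/ from several clients in parallel, pins ranges of them to MDS ranks, and runs I/O on the pinned directories while failing over the active MDSs.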
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        config = kw.get("config")
        num_of_dirs = config.get("num_of_dirs")
        num_of_dirs = num_of_dirs // 5  # integer division: the value is used as a range bound below
        tc = "11228"
        dir_name = "dir"
        test_dir = "testdir/"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)

        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            raise CommandFailed("Activating multiple MDSs failed")
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="crefi",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="crefi",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info["mds_nodes"])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(cmd="sudo mkdir %s%s" %
                                    (client_info["mounting_dir"], test_dir))

            with parallel() as p:
                p.spawn(
                    fs_util.mkdir_bulk,
                    client1,
                    0,
                    num_of_dirs * 2,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 2 + 1,
                    num_of_dirs * 4,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 4 + 1,
                    num_of_dirs * 6,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 6 + 1,
                    num_of_dirs * 8,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 8 + 1,
                    num_of_dirs * 10,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                for op in p:
                    rc = op
            if rc == 0:
                log.info("Directories created successfully")
            else:
                raise CommandFailed("Directory creation failed")

            with parallel() as p:
                p.spawn(
                    fs_util.pinning,
                    client2,
                    0,
                    num_of_dirs * 1,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    num_of_dirs * 1,
                    num_of_dirs * 2,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client4,
                    num_of_dirs * 2,
                    num_of_dirs * 3,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client1,
                    num_of_dirs * 3,
                    num_of_dirs * 4,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    0,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    num_of_dirs * 4,
                    num_of_dirs * 5,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    0,
                )

            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client1,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    num_of_dirs * 1,
                    num_of_dirs * 5,
                    10,
                    fs_util.mds_fail_over,
                    client_info["mds_nodes"],
                )
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client3,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    num_of_dirs * 7,
                    num_of_dirs * 8,
                    20,
                    fs_util.mds_fail_over,
                    client_info["mds_nodes"],
                )
                for op in p:
                    return_counts, rc = op
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            print(result)
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

            if rc_client == 0 and rc_mds == 0:
                log.info("Cleaning up successfull")
            else:
                return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc_client = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

        else:
            rc_client = fs_util.client_clean_up(client_info["fuse_clients"],
                                                "",
                                                client_info["mounting_dir"],
                                                "umount")
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

        if rc_client == 0 and rc_mds == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #4
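Sets up an NFS-Ganesha export of CephFS: installs and configures Ganesha on one kernel client, mounts the export on another, runs parallel fio/dd/crefi/smallfile workloads against the exported directories, then unmounts and cleans up.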
def run(ceph_cluster, **kw):
    try:
        tc = 'nfs-ganesha'
        nfs_mounting_dir = '/mnt/nfs_mount/'
        log.info("Running cephfs %s test case" % (tc))

        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        nfs_server = client_info['kernel_clients'][0]
        nfs_client = [client_info['kernel_clients'][1]]
        client1 = [client_info['fuse_clients'][0]]
        client2 = [client_info['fuse_clients'][1]]
        client3 = [client_info['kernel_clients'][0]]
        client4 = [client_info['kernel_clients'][1]]
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(
            client3,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        dirs, rc = fs_util.mkdir(
            client1, 0, 4, client_info['mounting_dir'], 'dir')
        if rc == 0:
            log.info("Directories created")
        else:
            raise CommandFailed("Directory creation failed")
        dirs = dirs.split('\n')

        rc = fs_util.nfs_ganesha_install(nfs_server)
        if rc == 0:
            log.info('NFS ganesha installed successfully')
        else:
            raise CommandFailed('NFS ganesha installation failed')
        rc = fs_util.nfs_ganesha_conf(nfs_server, 'admin')
        if rc == 0:
            log.info('NFS ganesha config added successfully')
        else:
            raise CommandFailed('NFS ganesha config adding failed')
        rc = fs_util.nfs_ganesha_mount(
            nfs_client[0],
            nfs_mounting_dir,
            nfs_server.node.hostname)
        if rc == 0:
            log.info('NFS-ganesha mount passed')
        else:
            raise CommandFailed('NFS ganesha mount failed')
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[0],
                0,
                5,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[2],
                0,
                5,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[1],
                0,
                1,
                iotype='crefi')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[3],
                0,
                1,
                iotype='smallfile_create', fnum=1000, fsize=1024)

        for client in nfs_client:
            log.info('Unmounting nfs-ganesha mount on client:')
            client.exec_command(cmd='sudo umount %s -l' % (nfs_mounting_dir))
            log.info('Removing nfs-ganesha mount dir on client:')
            client.exec_command(cmd='sudo rm -rf  %s' % (nfs_mounting_dir))

        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'],
                'umount')
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                '',
                client_info['mounting_dir'],
                'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #5
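Exercises CEPH-11227: bulk directory creation under testdir/, heavy per-directory I/O, directory pinning to MDS ranks, and repeated pinned-directory I/O from each client while the active MDS is failed over.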
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        config = kw.get('config')
        num_of_dirs = config.get('num_of_dirs')
        tc = '11227'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            raise CommandFailed("Activating multiple MDSs failed")
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                '',
                0,
                2,
                iotype='crefi',
            )
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    '',
                    0,
                    2,
                    iotype='crefi')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify('', return_counts)

        client1[0].exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], 'testdir'))

        if result == 'Data validation success':
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info['mds_nodes'])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            num_of_dirs = int(num_of_dirs / 5)
            with parallel() as p:
                p.spawn(fs_util.mkdir_bulk, client1, 0, num_of_dirs * 1,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client2, num_of_dirs * 1 + 1,
                        num_of_dirs * 2,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client1, num_of_dirs * 2 + 1,
                        num_of_dirs * 3,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client2, num_of_dirs * 3 + 1,
                        num_of_dirs * 4,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client1, num_of_dirs * 4 + 1,
                        num_of_dirs * 5,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                for op in p:
                    rc = op
            if rc == 0:
                log.info('Directories created successfully')
            else:
                raise CommandFailed('Directory creation failed')

            with parallel() as p:
                p.spawn(fs_util.max_dir_io, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        num_of_dirs * 1, 10)
                p.spawn(fs_util.max_dir_io, client2,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 1, num_of_dirs * 2, 10)
                rc = fs_util.check_mount_exists(client1[0])
                if rc == 0:
                    fs_util.pinning(client1, 0, 10,
                                    client_info['mounting_dir'] + 'testdir/',
                                    dir_name, 0)

                p.spawn(fs_util.max_dir_io, client3,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 3, num_of_dirs * 4, 10)
                p.spawn(fs_util.max_dir_io, client4,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 4, num_of_dirs * 5, 10)

            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])

            with parallel() as p:
                p.spawn(fs_util.pinning, client2, 10, num_of_dirs * 1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client3, num_of_dirs * 1,
                        num_of_dirs * 2,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client4, num_of_dirs * 2,
                        num_of_dirs * 3,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client1, num_of_dirs * 3,
                        num_of_dirs * 4,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client3, num_of_dirs * 4,
                        num_of_dirs * 5,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)

            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client2,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client3,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client4,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])

            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'], client_info['kernel_clients'],
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'], '',
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            if rc_client == 0 and rc_mds == 0:
                log.info('Cleaning up successful')
            else:
                return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                client_info['kernel_clients'],
                                                client_info['mounting_dir'],
                                                'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

        else:
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                '',
                                                client_info['mounting_dir'],
                                                'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)
        if rc_client == 0 and rc_mds == 0:
            log.info('Cleaning up successful')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #6
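Exercises CEPH-11221: loops parallel read/write and crefi workloads until ceph_df(ceph_cluster) returns 0 (the loop guard, presumably tripping on a capacity threshold), then runs a cluster health check and verifies the recorded I/O return counts.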
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11221'
        log.info('Running cephfs %s test case' % tc)
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info('Got client info')
        else:
            raise CommandFailed('fetching client info failed')
        c1 = 1
        client1, client2, client3 = ([] for _ in range(3))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)

        print(rc1, rc2, rc3)
        if rc1 == 0 and rc2 == 0 and rc3 == 0:
            log.info('got auth keys')
        else:
            raise CommandFailed('auth list failed')
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info('Fuse mount passed')
        else:
            raise CommandFailed('Fuse mount failed')

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])

        if rc3 == 0:
            log.info('kernel mount passed')
        else:
            raise CommandFailed('kernel mount failed')

        while c1:

            with parallel() as p:
                p.spawn(fs_util.read_write_IO, client1,
                        client_info['mounting_dir'], 'g', 'write')
                p.spawn(fs_util.read_write_IO, client2,
                        client_info['mounting_dir'], 'g', 'read')
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    '',
                    0,
                    2,
                    iotype='crefi',
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    '',
                    0,
                    2,
                    iotype='crefi',
                )
                p.spawn(fs_util.read_write_IO, client3,
                        client_info['mounting_dir'])
                for op in p:
                    (return_counts, rc) = op
            c1 = ceph_df(ceph_cluster)

        check_health(ceph_cluster)
        log.info('Test completed for CEPH-%s' % tc)
        print('Results:')
        result = fs_util.rc_verify(tc, return_counts)
        print(result)
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        (mins, secs) = divmod(total_time, 60)
        (hours, mins) = divmod(mins, 60)
        print('Hours:%d Minutes:%d Seconds:%f' % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #7
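Recreates the filesystem on an erasure-coded data pool: skips cleanly on replicated-pool runs, rejects filestore (which does not support EC pools), then deletes the existing CephFS, builds an EC profile and pool from the configured k/m values, and creates a new filesystem on top.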
def run(ceph_cluster, **kw):
    new_fs_name = "cephfs_ec"
    new_fs_datapool = "ec_data_pool"
    fs_util = FsUtils(ceph_cluster)
    config = kw.get("config")
    build = config.get("build", config.get("rhbuild"))
    client_info, rc = fs_util.get_clients(build)
    filestore = config.get("filestore")
    k_and_m = config.get("ec-pool-k-m")
    # No EC profile supplied: nothing to do, the tests run on a replicated pool.
    if k_and_m is None:
        log.info("tests will run on replicated pool")
        return 0
    if filestore is not None:
        log.error("Filestore does not support EC pools")
        return 1

    fs_info = fs_util.get_fs_info(client_info["mon_node"][0])
    fs_util.del_cephfs(client_info["mds_nodes"], fs_info.get("fs_name"))
    profile_name = fs_util.create_erasure_profile(
        client_info["mon_node"][0], "ec_profile", k_and_m[0], k_and_m[2]
    )
    fs_util.create_pool(
        client_info["mon_node"][0],
        new_fs_datapool,
        64,
        64,
        pool_type="erasure",
        profile_name=profile_name,
    )
    fs_util.create_fs(
        client_info["mds_nodes"],
        new_fs_name,
        new_fs_datapool,
        fs_info.get("metadata_pool_name"),
        pool_type="erasure_pool",
    )
    time.sleep(100)
    return 0
Example #8
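Exercises CEPH-11335 client eviction: with multiple active MDSs, triggers auto eviction by killing a client process and manual eviction through the MDS, comparing 'session ls' output before and after each, and finally removes the evicted client's address from the OSD blacklist.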
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11335"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            raise CommandFailed("Activating multiple MDSs failed")
        active_mds_node_1, active_mds_node_2, rc = fs_util.get_active_mdss(
            client_info["mds_nodes"])
        if rc == 0:
            log.info("Got active mdss")
        else:
            raise CommandFailed("getting active-mdss failed")

        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "m",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "m", "read")
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "m",
                "readwrite",
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                "m",
                "readwrite",
            )
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        print(result)

        log.info("Performing Auto Eviction:")
        mds1_before_evict, _, rc = fs_util.get_mds_info(active_mds_node_1,
                                                        active_mds_node_2,
                                                        info="session ls")
        rc = fs_util.auto_evict(active_mds_node_1, client_info["clients"], 0)
        if rc == 0:
            log.info("client process killed successfully for auto eviction")
        else:
            raise CommandFailed(
                "failed to kill client process for auto eviction")
        log.info("Waiting 300 seconds for auto eviction---")
        time.sleep(300)
        mds1_after_evict, _, rc = fs_util.get_mds_info(active_mds_node_1,
                                                       active_mds_node_2,
                                                       info="session ls")
        if mds1_before_evict != mds1_after_evict:
            log.info("Auto eviction Passed")
        else:
            raise CommandFailed("Auto eviction Failed")
        print("-------------------------------------------------------")
        if client3[0].pkg_type == "deb" and client4[0].pkg_type == "deb":
            for client in client_info["fuse_clients"]:
                client.exec_command(cmd="sudo fusermount -u %s -z" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))
        else:
            for client in client_info["fuse_clients"]:
                client.exec_command(cmd="sudo fusermount -u %s -z" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))

            for client in client_info["kernel_clients"]:
                client.exec_command(cmd="sudo umount %s -l" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "m",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "m", "read")
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "m",
                "readwrite",
            )
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        print(result)
        log.info("Performing Manual eviction:")
        ip_addr = fs_util.manual_evict(active_mds_node_1, 0)
        mds1_after_evict, _, rc = fs_util.get_mds_info(active_mds_node_1,
                                                       active_mds_node_2,
                                                       info="session ls")
        print(mds1_before_evict)
        print("------------------------")
        print(mds1_after_evict)
        print("-----------------------")
        if mds1_before_evict != mds1_after_evict:
            log.info("Manual eviction success")
        else:
            raise CommandFailed("Manual Eviction Failed")
        log.info("Removing client from OSD blacklisting:")
        rc = fs_util.osd_blacklist(active_mds_node_1, ip_addr)
        if rc == 0:
            log.info("Removing client from OSD blacklisting successfull")
        else:
            raise CommandFailed("Removing client from OSD blacklisting Failed")
        print("-" * 10)

        for client in client_info["fuse_clients"]:
            client.exec_command(cmd="sudo fusermount -u %s -z" %
                                (client_info["mounting_dir"]))
            client.exec_command(cmd="sudo rm -rf %s" %
                                (client_info["mounting_dir"]))
        if not (client3[0].pkg_type == "deb" and client4[0].pkg_type == "deb"):
            for client in client_info["kernel_clients"]:
                client.exec_command(cmd="sudo umount %s -l" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "m",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "m", "read")
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "m",
                "readwrite",
            )
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        print(result)
        log.info("Performing configuring blacklisting:")
        rc = fs_util.config_blacklist_manual_evict(active_mds_node_1, 0)
        if rc == 0:
            log.info("Configure blacklisting for manual evict success")
            rc = fs_util.config_blacklist_manual_evict(active_mds_node_1,
                                                       0,
                                                       revert=True)
        else:
            raise CommandFailed(
                "Configure blacklisting for manual evict failed")
        print("-" * 10)
        rc = fs_util.config_blacklist_auto_evict(active_mds_node_1, 0)
        if rc == 0:
            log.info("Configure blacklisting for auto evict success")
            rc = fs_util.config_blacklist_auto_evict(active_mds_node_1,
                                                     0,
                                                     revert=True)
            if rc == 0:
                log.info("Reverted successfully")
            else:
                raise CommandFailed(
                    "Reverting blacklist config for auto evict failed")
        else:
            raise CommandFailed("Configure blacklisting for auto evict failed")

        for client in client_info["fuse_clients"]:
            client.exec_command(cmd="sudo fusermount -u %s -z" %
                                (client_info["mounting_dir"]))
            client.exec_command(cmd="sudo rm -rf %s" %
                                (client_info["mounting_dir"]))
        if not (client3[0].pkg_type == "deb" and client4[0].pkg_type == "deb"):
            for client in client_info["kernel_clients"]:
                client.exec_command(cmd="sudo umount %s -l" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        for client in client_info["fuse_clients"]:
            client.exec_command(cmd="sudo rm -rf %s*" %
                                (client_info["mounting_dir"]))
            client.exec_command(cmd="sudo fusermount -u %s -z" %
                                (client_info["mounting_dir"]))
            client.exec_command(cmd="sudo rm -rf %s" %
                                (client_info["mounting_dir"]))
        if not (client3[0].pkg_type == "deb" and client4[0].pkg_type == "deb"):
            for client in client_info["kernel_clients"]:
                client.exec_command(cmd="sudo umount %s -l" %
                                    (client_info["mounting_dir"]))
                client.exec_command(cmd="sudo rm -rf %s" %
                                    (client_info["mounting_dir"]))

        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))

        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
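
The eviction test above leans on three harness helpers (auto_evict, manual_evict, osd_blacklist). For reference, here is a standalone sketch of the raw Ceph commands such helpers presumably wrap; the session fields ("id", "inst") come from the MDS session dump, and "osd blacklist" was renamed to "osd blocklist" in newer Ceph releases, so treat this as illustrative rather than the harness implementation.

import json
import subprocess


def list_sessions(rank=0):
    # "ceph tell mds.<rank> session ls" dumps client sessions as JSON.
    out = subprocess.check_output(
        ["ceph", "tell", "mds.%d" % rank, "session", "ls"])
    return json.loads(out)


def evict_first_client(rank=0):
    sessions = list_sessions(rank)
    if not sessions:
        return
    sid = sessions[0]["id"]
    # "inst" looks like "client.4305 10.8.128.5:0/1234567"; the last
    # token is the address that gets blacklisted on eviction.
    addr = sessions[0]["inst"].split()[-1]
    subprocess.check_call(
        ["ceph", "tell", "mds.%d" % rank, "client", "evict", "id=%d" % sid])
    # Remove the blacklist entry so the client can reconnect
    # (use "blocklist" on Pacific and later).
    subprocess.check_call(["ceph", "osd", "blacklist", "rm", addr])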
Example #9
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()

        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = [client_info["fuse_clients"][0]]
        client2 = [client_info["fuse_clients"][1]]
        client3 = [client_info["kernel_clients"][0]]
        client4 = [client_info["kernel_clients"][1]]
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        tc1 = "11293"
        tc2 = "11296"
        tc3 = "11297"
        tc4 = "11295"
        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        dir2 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        dir3 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        results = []
        return_counts = []
        log.info("Create files and directories of 1000 depth and 1000 breadth")
        for client in client_info["fuse_clients"]:
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir1))
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir2))
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir3))
            log.info("Execution of testcase %s started" % tc1)
            out, rc = client.exec_command(
                sudo=True,
                cmd="python3 /home/cephuser/smallfile/smallfile_cli.py "
                "--operation create --threads 10 --file-size 4 "
                "--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                f"{client_info['mounting_dir']}{dir1}",
                long_running=True,
            )
            log.info("Execution of testcase %s ended" % tc1)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc1)

            log.info("Execution of testcase %s started" % tc2)
            client.exec_command(cmd="sudo cp -r  %s%s/* %s%s/" %
                                (client_info["mounting_dir"], dir1,
                                 client_info["mounting_dir"], dir2))
            client.exec_command(cmd="diff -qr  %s%s %s%s/" %
                                (client_info["mounting_dir"], dir1,
                                 client_info["mounting_dir"], dir2))
            log.info("Execution of testcase %s ended" % tc2)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc2)

            log.info("Execution of testcase %s started" % tc3)
            out, rc = client.exec_command(cmd="sudo mv  %s%s/* %s%s/" %
                                          (client_info["mounting_dir"], dir1,
                                           client_info["mounting_dir"], dir3))
            log.info("Execution of testcase %s ended" % tc3)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc3)
            log.info("Execution of testcase %s started" % tc4)
            for client in client_info["clients"]:
                if client.pkg_type != "deb":
                    client.exec_command(
                        cmd="sudo dd if=/dev/zero of=%s%s.txt bs=100M "
                        "count=5" %
                        (client_info["mounting_dir"], client.node.hostname))
                    out1, rc1 = client.exec_command(
                        cmd="sudo  ls -c -ltd -- %s%s.*" %
                        (client_info["mounting_dir"], client.node.hostname))
                    client.exec_command(
                        cmd="sudo dd if=/dev/zero of=%s%s.txt bs=200M "
                        "count=5" %
                        (client_info["mounting_dir"], client.node.hostname))
                    out2, rc2 = client.exec_command(
                        cmd="sudo  ls -c -ltd -- %s%s.*" %
                        (client_info["mounting_dir"], client.node.hostname))
                    a = out1.read().decode()
                    print("------------")
                    b = out2.read().decode()
                    if a != b:
                        return_counts.append(out1.channel.recv_exit_status())
                        return_counts.append(out2.channel.recv_exit_status())
                    else:
                        raise CommandFailed("Metadata info command failed")
                    break
            log.info("Execution of testcase %s ended" % tc4)
            print(return_counts)
            rc_set = set(return_counts)
            if len(rc_set) == 1:
                results.append("TC %s passed" % tc4)

            print("Testcase Results:")
            for res in results:
                print(res)
            break
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))

        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
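
TC 11295 in the example above checks that rewriting a file updates its metadata by diffing "ls -c" output before and after. The same check can be expressed more directly against the inode change time; the path below is illustrative, not taken from the harness.

import os
import time

path = "/mnt/cephfs/testfile.txt"  # illustrative CephFS mount path

with open(path, "wb") as f:
    f.write(b"\0" * (100 * 1024 * 1024))
ctime_before = os.stat(path).st_ctime

time.sleep(1)  # guarantee a visible delta even with coarse timestamps
with open(path, "wb") as f:
    f.write(b"\0" * (200 * 1024 * 1024))
ctime_after = os.stat(path).st_ctime

assert ctime_after != ctime_before, "metadata (ctime) did not change"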
Example #10
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        dir_name = "dir"
        log.info("Running cephfs 11338 test case")
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        dirs, rc = fs_util.mkdir(client1, 1, 3, client_info["mounting_dir"],
                                 dir_name)
        if rc == 0:
            log.info("Directories created")
        dirs = dirs.split("\n")
        """
        new clients with restrictions
        """
        new_client1_name = client_info["fuse_clients"][
            0].node.hostname + "_%s" % (dirs[0])
        new_client2_name = client_info["fuse_clients"][
            1].node.hostname + "_%s" % (dirs[0])
        new_client3_name = client_info["kernel_clients"][
            0].node.hostname + "_%s" % (dirs[1])
        new_client3_mouting_dir = "/mnt/%s_%s/" % (
            client_info["kernel_clients"][0].node.hostname,
            dirs[1],
        )
        new_client2_mouting_dir = "/mnt/%s_%s/" % (
            client_info["fuse_clients"][1].node.hostname,
            dirs[0],
        )
        new_client1_mouting_dir = "/mnt/%s_%s/" % (
            client_info["fuse_clients"][0].node.hostname,
            dirs[0],
        )
        rc1 = fs_util.auth_list(client1,
                                path=dirs[0],
                                permission="rw",
                                mds=True)
        rc2 = fs_util.auth_list(client2,
                                path=dirs[0],
                                permission="r",
                                mds=True)
        rc3 = fs_util.auth_list(client3,
                                path=dirs[1],
                                permission="*",
                                mds=True)
        # rc4 is stale here (client4 got no new restricted key), so only
        # the three freshly issued keys are checked.
        if rc1 == 0 and rc2 == 0 and rc3 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(
            client1,
            new_client1_mouting_dir,
            new_client=new_client1_name,
            sub_dir=dirs[0],
        )
        rc2 = fs_util.fuse_mount(
            client2,
            new_client2_mouting_dir,
            new_client=new_client2_name,
            sub_dir=dirs[0],
        )
        rc3 = fs_util.kernel_mount(
            client3,
            new_client3_mouting_dir,
            client_info["mon_node_ip"],
            new_client=new_client3_name,
            sub_dir=dirs[1],
        )
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        if rc3 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        _, rc = fs_util.stress_io(
            client1,
            new_client1_mouting_dir,
            "",
            0,
            1,
            iotype="smallfile_create",
            fnum=1000,
            fsize=10,
        )

        if rc == 0:
            log.info("Permissions set for client %s are working" %
                     new_client1_name)
        else:
            log.error("Permissions set for client %s failed" %
                      new_client1_name)
            return 1
        _, rc = fs_util.stress_io(
            client1,
            new_client1_mouting_dir,
            "",
            0,
            1,
            iotype="smallfile_delete",
            fnum=1000,
            fsize=10,
        )
        if rc == 0:
            log.info("Permissions set for client %s are working properly" %
                     new_client1_name)
        else:
            log.error("Permissions set for client %s failed" %
                      new_client1_name)
            return 1
        try:
            _, rc = fs_util.stress_io(client2,
                                      new_client2_mouting_dir,
                                      "",
                                      0,
                                      1,
                                      iotype="touch")
        except CommandFailed:
            log.info("Permissions set for client %s are working properly" %
                     new_client2_name)
        else:
            # client2 only has read permission, so a successful write
            # means the restriction was not enforced.
            log.error("Write succeeded on read-only mount for client %s" %
                      new_client2_name)
            return 1

        _, rc = fs_util.stress_io(
            client3,
            new_client3_mouting_dir,
            "",
            0,
            1,
            iotype="smallfile_create",
            fnum=1000,
            fsize=10,
        )

        if rc == 0:
            log.info("Permissions set for client %s are working properly" %
                     new_client3_name)
        else:
            log.error("Permissions set for client %s failed" %
                      new_client3_name)
            return 1
        _, rc = fs_util.stress_io(
            client3,
            new_client3_mouting_dir,
            "",
            0,
            1,
            iotype="smallfile_delete",
            fnum=1000,
            fsize=10,
        )
        if rc == 0:
            log.info("Permissions set for client %s are working properly" %
                     new_client3_name)
        else:
            log.error("Permissions set for client %s failed" %
                      new_client3_name)
            return 1

        fs_util.client_clean_up(client1,
                                "",
                                new_client1_mouting_dir,
                                "umount",
                                client_name=new_client1_name)
        fs_util.client_clean_up(client2,
                                "",
                                new_client2_mouting_dir,
                                "umount",
                                client_name=new_client2_name)
        fs_util.client_clean_up("",
                                client3,
                                new_client3_mouting_dir,
                                "umount",
                                client_name=new_client3_name)

        fs_util.auth_list(client1, path=dirs[0], permission="rw", osd=True)
        fs_util.auth_list(client3, path=dirs[1], permission="r", osd=True)

        fs_util.fuse_mount(client1,
                           new_client1_mouting_dir,
                           new_client=new_client1_name)
        fs_util.kernel_mount(
            client3,
            new_client3_mouting_dir,
            client_info["mon_node_ip"],
            new_client=new_client3_name,
        )

        fs_util.stress_io(
            client1,
            new_client1_mouting_dir,
            "",
            0,
            1,
            iotype="smallfile_delete",
            fnum=1000,
            fsize=10,
        )
        try:
            if client_info["kernel_clients"][0].pkg_type == "rpm":
                client_info["kernel_clients"][0].exec_command(
                    cmd="sudo dd if=/dev/zero of=%s/file bs=10M count=10" %
                    new_client3_mouting_dir)
                # dd succeeding means the read-only OSD caps were not
                # enforced for this client.
                log.error("Permissions set for client %s are not enforced" %
                          (client_info["kernel_clients"][0].node.hostname +
                           "_" + dirs[1]))
                return 1

        except CommandFailed as e:
            log.info(e)
            log.info("Permissions set for client %s are working properly" %
                     (client_info["kernel_clients"][0].node.hostname + "_" +
                      dirs[1]))

        fs_util.client_clean_up(
            client1,
            "",
            new_client1_mouting_dir,
            "umount",
            client_name=client_info["fuse_clients"][0].node.hostname + "_%s" %
            (dirs[0]),
        )

        fs_util.client_clean_up("",
                                client3,
                                new_client3_mouting_dir,
                                "umount",
                                client_name=new_client3_name)
        fs_util.auth_list(client1, path=dirs[0], layout_quota="p_flag")
        fs_util.auth_list(client3, path=dirs[1], layout_quota="!p_flag")

        fs_util.fuse_mount(client1,
                           new_client1_mouting_dir,
                           new_client=new_client1_name)
        fs_util.kernel_mount(
            client3,
            new_client3_mouting_dir,
            client_info["mon_node_ip"],
            new_client=new_client3_name,
        )
        file_name = "file1"
        client_info["fuse_clients"][0].exec_command(
            cmd="sudo touch %s/%s" % (new_client1_mouting_dir, file_name))
        client_info["fuse_clients"][0].exec_command(
            cmd="sudo mkdir  %s/%s" % (new_client1_mouting_dir, dirs[0]))

        try:
            fs_util.setfattr(client3, "stripe_unit", "1048576",
                             new_client3_mouting_dir, file_name)
            fs_util.setfattr(client3, "max_bytes", "100000000",
                             new_client3_mouting_dir, dirs[1])
        except CommandFailed:
            log.info("Permission denied for setting attrs, as expected")
        else:
            # client3 was issued caps without the p flag, so setting
            # layout/quota attributes should have been rejected.
            log.error("Setting attrs succeeded without the p flag")
            return 1
        fs_util.setfattr(client1, "stripe_unit", "1048576",
                         new_client1_mouting_dir, file_name)
        fs_util.setfattr(client1, "max_bytes", "100000000",
                         new_client1_mouting_dir, dirs[0])
        fs_util.client_clean_up(client1,
                                "",
                                new_client1_mouting_dir,
                                "umount",
                                client_name=new_client1_name)

        fs_util.client_clean_up("",
                                client3,
                                new_client3_mouting_dir,
                                "umount",
                                client_name=new_client3_name)
        fs_util.client_clean_up(
            client_info["fuse_clients"],
            client_info["kernel_clients"],
            client_info["mounting_dir"],
            "umount",
        )
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
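
Example #10 above exercises path-restricted client capabilities. The auth_list(..., path=..., permission=..., mds=True) helper presumably wraps "ceph fs authorize", which issues a keyring whose MDS caps are confined to one subtree; a minimal sketch with illustrative fs and client names:

import subprocess


def authorize_subdir(fs_name, client_id, path, perm):
    # e.g. "ceph fs authorize cephfs client.user1 /dir1 rw" prints a
    # keyring restricted to /dir1.
    out = subprocess.check_output(
        ["ceph", "fs", "authorize", fs_name,
         "client.%s" % client_id, path, perm])
    return out.decode()


print(authorize_subdir("cephfs", "user1", "/dir1", "rw"))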
Example #11
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11222"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = [client_info["fuse_clients"][0]]
        client2 = [client_info["fuse_clients"][1]]
        client3 = [client_info["kernel_clients"][0]]
        client4 = [client_info["kernel_clients"][1]]
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0], 12, 1, build, None, 300)

        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info["clients"]:
            log.info("Creating directory:")
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir1))
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd="sudo crefi %s%s --fop create --multi -b 10 -d 10 "
                "--random --min=1K --max=10K" %
                (client_info["mounting_dir"], dir1))
            print(out.read().decode())
            break

        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="crefi",
            )
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify("", return_counts)
        print(result1)

        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf %s%s" %
                                (client_info["mounting_dir"], dir1))
            break

        for client in client_info["clients"]:
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd="sudo crefi %s%s --fop create --multi -b 10 -d 10 "
                "--random --min=1K --max=10K" %
                (client_info["mounting_dir"], dir1))
            print(out.read().decode())
            log.info("Renaming the dirs:")
            out, rc = client.exec_command(
                cmd="sudo crefi "
                "%s%s --fop rename --multi -b 10 -d 10 --random "
                "--min=1K --max=10K" % (client_info["mounting_dir"], dir1))
            print(out.read().decode())

            break
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="crefi",
            )
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify("", return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0], 12, 1, build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
        else:
            raise CommandFailed("Cluster health degraded after IO")
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                "",
                client_info["mounting_dir"],
                "umount",
            )
        if rc == 0:
            log.info("Cleaning up successful")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
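
The parallel() context manager used throughout these examples spawns the IO helpers concurrently and yields each (return_counts, rc) pair as it completes. A rough standard-library analogue of that pattern (a sketch, not the harness implementation):

from concurrent.futures import ThreadPoolExecutor, as_completed


def io_task(iotype):
    # Stand-in for fs_util.stress_io; returns (return_counts, rc).
    return [0], 0


with ThreadPoolExecutor() as pool:
    futures = [pool.submit(io_task, t)
               for t in ("fio", "touch", "dd", "crefi")]
    for fut in as_completed(futures):
        return_counts, rc = fut.result()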
Example #12
def run(ceph_cluster, **kw):
    try:
        log.info("Running  11333 test")
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        k_and_m = config.get("ec-pool-k-m")
        new_fs_name = "cephfs_new"
        new_fs_datapool = "data_pool"
        new_pool = "new_pool"
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        fs_info = fs_util.get_fs_info(client_info["mon_node"][0])
        if k_and_m:
            fs_util.del_cephfs(client_info["mds_nodes"],
                               fs_info.get("fs_name"))
            profile_name = fs_util.create_erasure_profile(
                client_info["mon_node"][0], "ec_profile_new", k_and_m[0],
                k_and_m[2])
            fs_util.create_pool(
                client_info["mon_node"][0],
                new_fs_datapool,
                64,
                64,
                pool_type="erasure",
                profile_name=profile_name,
            )
            fs_util.create_fs(
                client_info["mds_nodes"],
                new_fs_name,
                new_fs_datapool,
                fs_info.get("metadata_pool_name"),
                pool_type="erasure_pool",
            )
            fs_util.del_cephfs(client_info["mds_nodes"], new_fs_name)
            fs_util.create_fs(
                client_info["mds_nodes"],
                new_fs_name,
                new_fs_datapool,
                fs_info.get("metadata_pool_name"),
                pool_type="erasure_pool",
            )
            fs_util.set_attr(client_info["mds_nodes"], new_fs_name)
            fs_util.create_pool(
                client_info["mon_node"][0],
                new_pool,
                64,
                64,
                pool_type="erasure",
                profile_name=profile_name,
            )
            fs_util.add_pool_to_fs(client_info["mon_node"][0], new_fs_name,
                                   new_pool)
            fs_util.remove_pool_from_fs(client_info["mon_node"][0],
                                        new_fs_name, new_pool)
            fs_util.del_cephfs(client_info["mds_nodes"], new_fs_name)
            fs_util.create_fs(
                client_info["mds_nodes"],
                new_fs_name,
                new_fs_datapool,
                fs_info.get("metadata_pool_name"),
                pool_type="erasure_pool",
            )
        else:
            fs_util.del_cephfs(client_info["mds_nodes"],
                               fs_info.get("fs_name"))
            fs_util.create_pool(client_info["mon_node"][0], new_fs_datapool,
                                64, 64)
            fs_util.create_fs(
                client_info["mds_nodes"],
                new_fs_name,
                new_fs_datapool,
                fs_info.get("metadata_pool_name"),
            )
            fs_util.del_cephfs(client_info["mds_nodes"], new_fs_name)
            fs_util.create_fs(
                client_info["mds_nodes"],
                new_fs_name,
                new_fs_datapool,
                fs_info.get("metadata_pool_name"),
            )
            fs_util.set_attr(client_info["mds_nodes"], new_fs_name)
            fs_util.create_pool(client_info["mon_node"][0], new_pool, 64, 64)
            fs_util.add_pool_to_fs(client_info["mon_node"][0], new_fs_name,
                                   new_pool)
            fs_util.remove_pool_from_fs(client_info["mon_node"][0],
                                        new_fs_name, new_pool)
            fs_util.del_cephfs(client_info["mds_nodes"], new_fs_name)
            fs_util.create_fs(
                client_info["mds_nodes"],
                new_fs_name,
                new_fs_datapool,
                fs_info.get("metadata_pool_name"),
            )

        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
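
The erasure-coded branch of Example #12 reduces to a handful of CLI calls. A sketch of that flow is below; the profile, pool, and metadata pool names are illustrative, an EC data pool on BlueStore needs allow_ec_overwrites before CephFS can use it, and "fs new" requires --force when the data pool is erasure coded.

import subprocess


def ceph(*args):
    subprocess.check_call(["ceph"] + list(args))


ceph("osd", "erasure-code-profile", "set", "ec_profile_new", "k=4", "m=2")
ceph("osd", "pool", "create", "data_pool", "64", "64", "erasure",
     "ec_profile_new")
ceph("osd", "pool", "set", "data_pool", "allow_ec_overwrites", "true")
ceph("fs", "new", "cephfs_new", "cephfs_metadata", "data_pool", "--force")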
Example #13
def run(ceph_cluster, **kw):
    new_fs_name = 'cephfs_ec'
    new_fs_datapool = 'ec_data_pool'
    fs_util = FsUtils(ceph_cluster)
    client_info, rc = fs_util.get_clients()
    if rc != 0:
        log.error('fetching client info failed')
        return 1
    config = kw.get('config')
    bluestore = config.get('bluestore')
    k_and_m = config.get('ec-pool-k-m')
    if k_and_m is None:
        # True for both "bluestore only" and "neither option set".
        log.info('tests will run on replicated pool')
        return 0
    elif bluestore is None:
        log.error('Filestore does not support ecpools')
        return 1

    fs_info = fs_util.get_fs_info(client_info['mon_node'][0])
    fs_util.del_cephfs(
        client_info['mds_nodes'], fs_info.get('fs_name'))
    profile_name = fs_util.create_erasure_profile(
        client_info['mon_node'][0],
        'ec_profile',
        k_and_m[0],
        k_and_m[2])
    fs_util.create_pool(
        client_info['mon_node'][0],
        new_fs_datapool,
        64,
        64,
        pool_type='erasure',
        profile_name=profile_name)
    fs_util.create_fs(
        client_info['mds_nodes'],
        new_fs_name,
        new_fs_datapool,
        fs_info.get('metadata_pool_name'),
        pool_type='erasure_pool')
    time.sleep(100)
    return 0
Example #14
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        bz = '1798719'
        log.info('Running cephfs test for bug %s' % bz)
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info('Got client info')
        else:
            raise CommandFailed('fetching client info failed')
        client1 = []
        client1.append(client_info['kernel_clients'][0])
        mon_node_ip = client_info['mon_node_ip']
        mounting_dir = client_info['mounting_dir']
        user_name = 'qwertyuiopasdfghjklzxcvbnm1234567890123'
        p_flag = 'rw'
        log.info("Creating user with more than 37 letters")
        for client in client1:
            client.exec_command(
                cmd="sudo ceph auth get-or-create client.%s "
                    "mon 'allow r' mds "
                    "'allow %s' osd 'allow rw' "
                    "-o /etc/ceph/ceph.client.%s.keyring" %
                    (user_name, p_flag, user_name))
            log.info("Creating mounting dir:")
            client.exec_command(cmd='sudo mkdir %s' % (mounting_dir))
            out, rc = client.exec_command(
                cmd='sudo ceph auth get-key client.%s' %
                    (user_name))
            secret_key = out.read().decode().rstrip('\n')
            key_file = client.write_file(
                sudo=True, file_name='/etc/ceph/%s.secret' %
                (user_name), file_mode='w')
            key_file.write(secret_key)
            key_file.flush()
            op, rc = client.exec_command(
                cmd='sudo mount -t ceph %s,%s,%s:/ '
                    '%s -o name=%s,secretfile=/etc/ceph/%s.secret' % (
                        mon_node_ip[0], mon_node_ip[1], mon_node_ip[2],
                        mounting_dir, user_name, user_name))
            out, rc = client.exec_command(cmd='mount')
            mount_output = out.read().decode()
            mount_output = mount_output.split()
            log.info("Checking if kernel mount is passed or failed:")
            assert mounting_dir.rstrip('/') in mount_output
            log.info("mount is passed")
            log.info("Execution of Test for bug %s ended:" % (bz))
            print('Script execution time:------')
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
            return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client1[0].pkg_type != 'deb':
            # client_clean_up(fuse_clients, kernel_clients, dir, action);
            # only kernel clients were mounted in this test.
            rc = fs_util.client_clean_up('', client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
            if rc == 0:
                log.info('Cleaning up successful')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
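
The kernel mount assembled in Example #14 boils down to the command printed below; the monitor addresses are placeholders, and secretfile keeps the key off the command line. The point of the test is that the 39-character client name survives the kernel mount path (bug 1798719).

mon_ips = ["10.0.0.1", "10.0.0.2", "10.0.0.3"]  # placeholder monitors
user = "qwertyuiopasdfghjklzxcvbnm1234567890123"

mount_cmd = (
    "sudo mount -t ceph %s:/ /mnt/cephfs "
    "-o name=%s,secretfile=/etc/ceph/%s.secret"
    % (",".join(mon_ips), user, user)
)
print(mount_cmd)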
Example #15
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11231"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = [client_info["fuse_clients"][0]]
        client2 = [client_info["fuse_clients"][1]]
        client3 = [client_info["kernel_clients"][0]]
        client4 = [client_info["kernel_clients"][1]]

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        client1[0].exec_command(sudo=True,
                                cmd="mkdir %s%s" %
                                (client_info["mounting_dir"], dir_name))
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info["mds_nodes"])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(cmd="sudo mkdir %s%s_{1..50}" %
                                    (client_info["mounting_dir"], dir_name))
                if client.node.exit_status == 0:
                    log.info("directories created succcessfully")
                else:
                    raise CommandFailed("directories creation failed")
            with parallel() as p:
                p.spawn(
                    fs_util.pinning,
                    client1,
                    1,
                    25,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    26,
                    50,
                    client_info["mounting_dir"],
                    dir_name,
                    1,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.max_dir_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    1,
                    25,
                    1000,
                )
                p.spawn(
                    fs_util.max_dir_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    26,
                    50,
                    1000,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["mon_node"]:
                    p.spawn(fs_util.reboot, node)

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["mon_node"]:
                    fs_util.network_disconnect(node)
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["mon_node"]:
                    fs_util.pid_kill(node, "mon")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["osd_nodes"]:
                    p.spawn(fs_util.reboot, node)
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["osd_nodes"]:
                    fs_util.network_disconnect(node)
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["osd_nodes"]:
                    fs_util.pid_kill(node, "osd")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["osd_nodes"]:
                    fs_util.network_disconnect(node)
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dir_name,
                    0,
                    500,
                    iotype="touch",
                )
                for node in client_info["mon_node"]:
                    p.spawn(fs_util.daemon_systemctl, node, "mon", "restart")
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)
            if rc_client == 0 and rc_mds == 0:
                log.info("Cleaning up successfull")
            else:
                return 1
            print("Script execution time:------")
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
            return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #16
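# CEPH-11255/11336: FUSE and kernel client IO under failure injection.
# Mounts two FUSE and two kernel clients, activates multiple MDSs with
# standby ranks, adds fstab entries, then drives mixed IO (fio, dd,
# smallfile, crefi, touch) while rebooting clients and OSD nodes, cutting
# the network and killing OSD processes, comparing cluster health before
# and after. Helpers such as FsUtils, parallel, check_ceph_healthly and
# CommandFailed are provided by the surrounding test framework (imports
# are not shown in these snippets).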
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11255_11336-fuse client'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("creating auth failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(
            client3,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        rc = fs_util.standby_rank(
            client_info['mds_nodes'],
            client_info['mon_node'],
            todo='add_rank')
        if rc == 0:
            log.info("Added standby ranks")
        else:
            log.error("Failed to add standby ranks")
            return 1
        client1[0].exec_command(
            cmd='sudo mkdir %s%s' %
                (client_info['mounting_dir'], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Dir created")
        else:
            raise CommandFailed('Dir creation failed')
        rc1 = fs_util.fstab_entry(
            client1,
            client_info['mounting_dir'],
            action='doEntry')
        rc2 = fs_util.fstab_entry(
            client2,
            client_info['mounting_dir'],
            action='doEntry')
        if rc1 == 0 and rc2 == 0:
            log.info("FSentry for clients are done")
        else:
            raise CommandFailed("FsEntry failed")
        rc1 = fs_util.fstab_entry(
            client3,
            client_info['mounting_dir'],
            action='doEntry', mon_node_ip=client_info['mon_node_ip'])
        rc2 = fs_util.fstab_entry(
            client4,
            client_info['mounting_dir'],
            action='doEntry', mon_node_ip=client_info['mon_node_ip'])
        if rc1 == 0 and rc2 == 0:
            log.info("FSentry for clients are done")
        else:
            raise CommandFailed("FsEntry failed")
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                50,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                1,
                iotype='smallfile_create', fnum=1000, fsize=100)
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info['mounting_dir'],
                dir_name,
                0,
                1,
                iotype='crefi')
            p.spawn(fs_util.reboot, client1[0])
        res = []
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
            for op in p:
                res.append(op)
        print(res)
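        # second mixed-IO pass, this time rebooting client2 mid-IO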
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')

            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'read')

            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir_name,
                    0,
                    1,
                    iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            p.spawn(fs_util.reboot, client2[0])

        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info('cluster is healthy')
        else:
            log.error("cluster is not healty")
            return 1
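        # repeat the stress IO while injecting OSD failures: node reboots,
        # network cuts and OSD process kills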
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                p.spawn(fs_util.reboot, node)
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                fs_util.network_disconnect(node)
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                fs_util.pid_kill(node, 'osd')

        # allow OSDs to rejoin and PGs to settle after the failure
        # injections before re-checking cluster health
        time.sleep(100)
        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            log.error("Cluster is not healthy")
            return 1
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'],
                'umount')
            rc = fs_util.standby_rank(
                client_info['mds_nodes'],
                client_info['mon_node'],
                todo='add_rank_revert')
            if rc == 0:
                log.info("removed standby ranks")
            rc1 = fs_util.fstab_entry(
                client1,
                client_info['mounting_dir'],
                action='revertEntry')
            rc2 = fs_util.fstab_entry(
                client2,
                client_info['mounting_dir'],
                action='revertEntry')
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")
            else:
                return 1
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                '',
                client_info['mounting_dir'],
                'umount')
            rc = fs_util.standby_rank(
                client_info['mds_nodes'],
                client_info['mon_node'],
                todo='add_rank_revert')
            if rc == 0:
                log.info("removed standby ranks")
            rc1 = fs_util.fstab_entry(
                client1,
                client_info['mounting_dir'],
                action='revertEntry')
            rc2 = fs_util.fstab_entry(
                client2,
                client_info['mounting_dir'],
                action='revertEntry')
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")

            if rc == 0:
                log.info('Cleaning up successful')
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'],
                'umount')
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                '',
                client_info['mounting_dir'],
                'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #17
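# CEPH-11333: CephFS pool management. Deletes the default filesystem and
# recreates it on a fresh data pool (erasure-coded when 'ec-pool-k-m' is
# configured, replicated otherwise), then exercises adding and removing an
# extra data pool on the new filesystem.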
def run(ceph_cluster, **kw):
    try:
        log.info("Running  11333 test")
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        config = kw.get('config')
        k_and_m = config.get('ec-pool-k-m')
        new_fs_name = 'cephfs_new'
        new_fs_datapool = 'data_pool'
        new_pool = 'new_pool'
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        fs_info = fs_util.get_fs_info(client_info['mon_node'][0])
        if k_and_m:
            fs_util.del_cephfs(
                client_info['mds_nodes'], fs_info.get('fs_name'))
            profile_name = fs_util.create_erasure_profile(
                client_info['mon_node'][0], 'ec_profile_new', k_and_m[0], k_and_m[2])
            fs_util.create_pool(
                client_info['mon_node'][0],
                new_fs_datapool,
                64,
                64,
                pool_type='erasure',
                profile_name=profile_name)
            fs_util.create_fs(
                client_info['mds_nodes'],
                new_fs_name,
                new_fs_datapool,
                fs_info.get('metadata_pool_name'),
                pool_type='erasure_pool')
            fs_util.del_cephfs(client_info['mds_nodes'], new_fs_name)
            fs_util.create_fs(
                client_info['mds_nodes'],
                new_fs_name,
                new_fs_datapool,
                fs_info.get('metadata_pool_name'),
                pool_type='erasure_pool')
            fs_util.set_attr(client_info['mds_nodes'], new_fs_name)
            fs_util.create_pool(
                client_info['mon_node'][0],
                new_pool,
                64,
                64,
                pool_type='erasure',
                profile_name=profile_name)
            fs_util.add_pool_to_fs(
                client_info['mon_node'][0], new_fs_name, new_pool)
            fs_util.remove_pool_from_fs(
                client_info['mon_node'][0], new_fs_name, new_pool)
            fs_util.del_cephfs(client_info['mds_nodes'], new_fs_name)
            fs_util.create_fs(
                client_info['mds_nodes'],
                new_fs_name,
                new_fs_datapool,
                fs_info.get('metadata_pool_name'),
                pool_type='erasure_pool')
        else:
            fs_util.del_cephfs(
                client_info['mds_nodes'],
                fs_info.get('fs_name'))
            fs_util.create_pool(
                client_info['mon_node'][0], new_fs_datapool, 64, 64)
            fs_util.create_fs(
                client_info['mds_nodes'],
                new_fs_name,
                new_fs_datapool,
                fs_info.get('metadata_pool_name'))
            fs_util.del_cephfs(client_info['mds_nodes'], new_fs_name)
            fs_util.create_fs(
                client_info['mds_nodes'],
                new_fs_name,
                new_fs_datapool,
                fs_info.get('metadata_pool_name'))
            fs_util.set_attr(client_info['mds_nodes'], new_fs_name)
            fs_util.create_pool(client_info['mon_node'][0], new_pool, 64, 64)
            fs_util.add_pool_to_fs(
                client_info['mon_node'][0], new_fs_name, new_pool)
            fs_util.remove_pool_from_fs(
                client_info['mon_node'][0], new_fs_name, new_pool)
            fs_util.del_cephfs(client_info['mds_nodes'], new_fs_name)
            fs_util.create_fs(
                client_info['mds_nodes'],
                new_fs_name,
                new_fs_datapool,
                fs_info.get('metadata_pool_name'))

        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #18
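# CEPH-11220: basic multi-client IO. Mounts two FUSE and two kernel
# clients, creates a directory, runs smallfile and fio stress alongside a
# read/write validation pass, and unmounts on success.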
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11220"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")

        log.info("Creating directory:")
        for node in client1:
            out, rc = node.exec_command(
                cmd="sudo mkdir %s%s" %
                (client_info["mounting_dir"], dir_name))
            print(out)
            break

        return_counts1, rc1 = fs_util.stress_io(client1,
                                                client_info["mounting_dir"],
                                                dir_name,
                                                0,
                                                1,
                                                iotype="smallfile")
        return_counts2, rc2 = fs_util.stress_io(client2,
                                                client_info["mounting_dir"],
                                                dir_name,
                                                0,
                                                1,
                                                iotype="fio")
        return_counts3, rc3 = fs_util.read_write_IO(
            client3, client_info["mounting_dir"], dir_name=dir_name)
        if rc1 == 0 and rc2 == 0 and rc3 == 0:
            log.info("IOs on clients successfull")
            log.info("Testcase %s passed" % (tc))
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            if rc == 0:
                log.info("Cleaning up successfull")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))

        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #19
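# CEPH-11230: directory pinning across MDS ranks. After a data-validation
# IO pass, creates dir_1..dir_50, pins the first half to rank 0 and the
# second half to rank 1, drives IO into the pinned directories, and repeats
# the IO during MDS failovers.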
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11230'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(
            client3,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                '',
                0,
                1,
                iotype='crefi',
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                1,
                iotype='crefi'
            )
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify('', return_counts)
        if result == 'Data validation success':
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info['mds_nodes'])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(
                    cmd='sudo mkdir %s%s_{1..50}' %
                        (client_info['mounting_dir'], dir_name))
                if client.node.exit_status == 0:
                    log.info("directories created succcessfully")
                else:
                    raise CommandFailed("directories creation failed")
            with parallel() as p:
                p.spawn(
                    fs_util.pinning,
                    client1,
                    1,
                    25,
                    client_info['mounting_dir'],
                    dir_name,
                    0)
                p.spawn(
                    fs_util.pinning,
                    client3,
                    26,
                    50,
                    client_info['mounting_dir'],
                    dir_name,
                    1)
            with parallel() as p:
                p.spawn(
                    fs_util.max_dir_io,
                    client1,
                    client_info['mounting_dir'],
                    dir_name,
                    1,
                    25,
                    1000)
                p.spawn(
                    fs_util.max_dir_io,
                    client3,
                    client_info['mounting_dir'],
                    dir_name,
                    26,
                    50,
                    1000)

            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client1,
                    client_info['mounting_dir'],
                    dir_name,
                    1,
                    25,
                    10,
                    fs_util.mds_fail_over,
                    client_info['mds_nodes'])
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client4,
                    client_info['mounting_dir'],
                    dir_name,
                    26,
                    50,
                    20,
                    fs_util.mds_fail_over,
                    client_info['mds_nodes'])
                for op in p:
                    return_counts, rc = op
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            log.info(result)
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'],
                    client_info['kernel_clients'],
                    client_info['mounting_dir'],
                    'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'], '',
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            if rc_client == 0 and rc_mds == 0:
                log.info('Cleaning up successful')
            else:
                return 1
            print('Script execution time:------')
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
            return 0
        else:
            log.error("Data validation failed")
            return 1

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #20
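# CEPH-11222: directory breadth/depth operations. Uses crefi to create and
# rename deep and wide directory trees, runs fio/dd/touch/crefi stress
# against them, and verifies that cluster health is unchanged afterwards.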
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11222'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        cluster_health_beforeIO = check_ceph_healthly(client_info['mon_node'],
                                                      12, 1, None, 300)

        dir1 = ''.join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info['clients']:
            log.info("Creating directory:")
            client.exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], dir1))
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd='sudo crefi %s%s --fop create --multi -b 10 -d 10 '
                '--random --min=1K --max=10K' %
                (client_info['mounting_dir'], dir1))
            print(out.read().decode())
            break

        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='crefi')
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify('', return_counts)
        print(result1)

        for client in client_info['clients']:
            client.exec_command(cmd='sudo rm -rf %s%s' %
                                (client_info['mounting_dir'], dir1))
            break

        for client in client_info['clients']:
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd='sudo crefi %s%s --fop create --multi -b 10 -d 10 '
                '--random --min=1K --max=10K' %
                (client_info['mounting_dir'], dir1))
            print(out.read().decode())
            log.info("Renaming the dirs:")
            out, rc = client.exec_command(
                cmd='sudo crefi '
                '%s%s --fop rename --multi -b 10 -d 10 --random '
                '--min=1K --max=10K' % (client_info['mounting_dir'], dir1))
            print(out.read().decode())

            break
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='crefi')
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify('', return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(client_info['mon_node'],
                                                     12, 1, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                             client_info['kernel_clients'],
                                             client_info['mounting_dir'],
                                             'umount')
            else:
                rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                             client_info['mounting_dir'],
                                             'umount')
            if rc == 0:
                log.info('Cleaning up successful')
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')
        else:
            fs_util.client_clean_up(client_info['fuse_clients'], '',
                                    client_info['mounting_dir'], 'umount')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #21
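# CEPH-11219/11224: smallfile create/delete workloads. After a read/write
# validation pass, creates a set of directories, runs fio/dd/touch stress
# plus smallfile_create and smallfile_delete cycles against them, and
# compares cluster health before and after the IO.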
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11219,11224"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
        client1[0].exec_command(
            sudo=True,
            cmd=f"mkdir {client_info['mounting_dir']}{dir_name}",
        )
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                2,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            dirs, rc = fs_util.mkdir(client1, 0, 3,
                                     client_info["mounting_dir"], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split("\n")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    100,
                    iotype="touch",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile",
                )
                for op in p:
                    return_counts, rc = op

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_delete",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=10,
                    fsize=1024,
                )
                for op in p:
                    return_counts, rc = op
            cluster_health_afterIO = check_ceph_healthly(
                client_info["mon_node"][0],
                num_of_osds,
                len(client_info["mon_node"]),
                build,
                None,
                300,
            )

            log.info("Execution of Test case CEPH-%s ended" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            if cluster_health_beforeIO == cluster_health_afterIO:
                print(result)

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    5,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile",
                )

            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[2],
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    5,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile",
                )

            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )

            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                if rc == 0:
                    log.info("Cleaning up successfull")
                else:
                    return 1
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                if rc == 0:
                    log.info("Cleaning up successfull")
                else:
                    return 1

        log.info("Execution of Test case CEPH-%s ended" % (tc))
        print("Results:")
        result = fs_util.rc_verify(tc, return_counts)
        print(result)
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #22
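# CEPH-11334: file layout attributes. Sets and reads back ceph.file.layout
# fields (stripe_unit, stripe_count, object_size, pool) with setfattr and
# getfattr, adding a new data pool to the filesystem for the pool test and
# removing it afterwards.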
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11334'
        file_name = 'file'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        vals, rc = fs_util.getfattr(client1, client_info['mounting_dir'],
                                    file_name)
        rc = fs_util.setfattr(client1, 'stripe_unit', '1048576',
                              client_info['mounting_dir'], file_name)
        if rc == 0:
            log.info("Setfattr stripe_unit for file %s success" % file_name)
        else:
            raise CommandFailed("Setfattr stripe_unit for file %s success" %
                                file_name)
        rc = fs_util.setfattr(client1, 'stripe_count', '8',
                              client_info['mounting_dir'], file_name)
        if rc == 0:
            log.info("Setfattr stripe_count for file %s success" % file_name)
        else:
            raise CommandFailed("Setfattr stripe_count for file %s success" %
                                file_name)
        rc = fs_util.setfattr(client1, 'object_size', '10485760',
                              client_info['mounting_dir'], file_name)
        if rc == 0:
            log.info("Setfattr object_size for file %s success" % file_name)
        else:
            raise CommandFailed("Setfattr object_size for file %s success" %
                                file_name)
        fs_info = fs_util.get_fs_info(client_info['mon_node'][0])
        fs_util.create_pool(client_info['mon_node'][0], 'new_data_pool', 64,
                            64)
        rc = fs_util.add_pool_to_fs(client_info['mon_node'][0],
                                    fs_info.get('fs_name'), 'new_data_pool')
        if 0 in rc:
            log.info("Adding new pool to cephfs success")
        else:
            raise CommandFailed("Adding new pool to cephfs failed")
        rc = fs_util.setfattr(client1, 'pool', 'new_data_pool',
                              client_info['mounting_dir'], file_name)
        if rc == 0:
            log.info("Setfattr pool for file %s success" % file_name)
        else:
            raise CommandFailed("Setfattr pool for file %s success" %
                                file_name)
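        # Note: ceph.file.layout.pool can only point at a pool that is
        # already attached to this filesystem, which is why add_pool_to_fs()
        # ran before this setfattr call.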

        vals, rc = fs_util.getfattr(client1, client_info['mounting_dir'],
                                    file_name)
        log.info("Read individual layout fields by using getfattr:")
        for client in client1:
            out, rc = client.exec_command(
                cmd="sudo getfattr -n ceph.file.layout.pool %s%s" %
                (client_info['mounting_dir'], file_name))
            if vals['pool'] in out.read().decode():
                log.info("reading pool by getfattr successful")
            out, rc = client.exec_command(
                cmd="sudo getfattr -n ceph.file.layout.stripe_unit %s%s" %
                (client_info['mounting_dir'], file_name))
            if vals['stripe_unit'] in out.read().decode():
                log.info("reading stripe_unit by getfattr successful")
            out, rc = client.exec_command(
                cmd="sudo getfattr -n ceph.file.layout.stripe_count %s%s" %
                (client_info['mounting_dir'], file_name))
            if vals['stripe_count'] in out.read().decode():
                log.info("reading stripe_count by getfattr successful")
            out, rc = client.exec_command(
                cmd="sudo getfattr -n ceph.file.layout.object_size %s%s" %
                (client_info['mounting_dir'], file_name))
            if vals['object_size'] in out.read().decode():
                log.info("reading object_size by getfattr successful")
            break
        rc = fs_util.remove_pool_from_fs(client_info['mon_node'][0],
                                         fs_info.get('fs_name'),
                                         'new_data_pool')
        if 0 in rc:
            log.info("Pool removal succeeded")
        else:
            raise CommandFailed("Pool removal failed")
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                client_info['kernel_clients'],
                                                client_info['mounting_dir'],
                                                'umount')

        else:
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                '',
                                                client_info['mounting_dir'],
                                                'umount')

        if rc_client == 0:
            log.info('Cleaning up successful')
        else:
            return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                client_info['kernel_clients'],
                                                client_info['mounting_dir'],
                                                'umount')
        else:
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                '',
                                                client_info['mounting_dir'],
                                                'umount')

        if rc_client == 0:
            log.info('Cleaning up successful')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
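The layout round trip above can be reproduced outside the test framework with the standard library alone, because CephFS exposes the file layout as virtual xattrs. A minimal sketch, assuming a CephFS mount at /mnt/cephfs (path and values are illustrative, not part of the test):

import os

path = "/mnt/cephfs/file1"  # assumed mount point and file name
open(path, "w").close()     # layout attrs are writable only while the file is empty
os.setxattr(path, "ceph.file.layout.stripe_unit", b"1048576")
os.setxattr(path, "ceph.file.layout.stripe_count", b"8")
print(os.getxattr(path, "ceph.file.layout.object_size").decode())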
Example #23
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11222"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            raise CommandFailed("Activating multiple MDSs failed")
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )

        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info["clients"]:
            log.info("Creating directory:")
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir1))
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd=
                f"python3 smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 --files 1000 "
                f"--files-per-dir 10 --dirs-per-dir 2 --top {client_info['mounting_dir']}{dir1}",
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)

            break

        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify("", return_counts)
        print(result1)

        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf %s%s" %
                                (client_info["mounting_dir"], dir1))
            break

        for client in client_info["clients"]:
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation create --threads 10 "
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)
            log.info("Renaming the dirs:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation rename --threads 10 --file-size 4"
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)

            break
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify("", return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        client1[0].exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir_name))
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "read",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                "write",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client2,
                client_info["mounting_dir"],
                "read",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "write",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("11223", return_counts)
        print(result)

        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            if rc == 0:
                log.info("Cleaning up successfull")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
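The parallel() context manager used above fans the helpers out to workers and yields each task's (return_counts, rc). A rough standard-library equivalent, sketched under the assumption that parallel() has thread-pool-like semantics (stress_io below is a stand-in, not the real FsUtils helper):

from concurrent.futures import ThreadPoolExecutor

def stress_io(client, mounting_dir, dir_name, start, end, iotype):
    # stand-in for FsUtils.stress_io; mirrors its (return_counts, rc) shape
    return [0] * (end - start), 0

with ThreadPoolExecutor() as pool:
    futures = [
        pool.submit(stress_io, "client1", "/mnt/cephfs/", "dir1", 0, 5, "fio"),
        pool.submit(stress_io, "client2", "/mnt/cephfs/", "dir1", 0, 5, "dd"),
    ]
    for fut in futures:
        return_counts, rc = fut.result()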
Example #24
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        bz = "1798719"
        log.info("Running cephfs test for bug %s" % bz)
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client1.append(client_info["kernel_clients"][0])
        mon_node_ip = client_info["mon_node_ip"]
        mounting_dir = client_info["mounting_dir"]
        user_name = "qwertyuiopasdfghjklzxcvbnm1234567890123"
        p_flag = "rw"
        log.info("Creating user with more than 37 letters")
        for client in client1:
            client.exec_command(cmd="sudo ceph auth get-or-create client.%s "
                                "mon 'allow r' mds "
                                "'allow %s' osd 'allow rw' "
                                "-o /etc/ceph/ceph.client.%s.keyring" %
                                (user_name, p_flag, user_name))
            log.info("Creating mounting dir:")
            client.exec_command(cmd="sudo mkdir %s" % (mounting_dir))
            out, rc = client.exec_command(
                cmd="sudo ceph auth get-key client.%s" % (user_name))
            secret_key = out.rstrip("\n")
            key_file = client.remote_file(sudo=True,
                                          file_name="/etc/ceph/%s.secret" %
                                          (user_name),
                                          file_mode="w")
            key_file.write(secret_key)
            key_file.flush()
            op, rc = client.exec_command(
                cmd="sudo mount -t ceph %s,%s,%s:/ "
                "%s -o name=%s,secretfile=/etc/ceph/%s.secret" % (
                    mon_node_ip[0],
                    mon_node_ip[1],
                    mon_node_ip[2],
                    mounting_dir,
                    user_name,
                    user_name,
                ))
            out, rc = client.exec_command(cmd="mount")
            mount_output = out.split()
            log.info("Checking if kernel mount is passed or failed:")
            assert mounting_dir.rstrip("/") in mount_output
            log.info("mount is passed")
            log.info("Execution of Test for bug %s ended:" % (bz))
            print("Script execution time:------")
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
            return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client1[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(client_info["kernel_clients"],
                                         client_info["mounting_dir"], "umount")
            if rc == 0:
                log.info("Cleaning up successfull")

            else:
                return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
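The kernel mount assembled above reduces to a single command string; the point of the test is that a client name longer than 37 characters still mounts. A sketch of the command shape with placeholder monitor IPs (assumed values, not cluster data):

mon_ips = ["10.0.0.1", "10.0.0.2", "10.0.0.3"]    # placeholder monitor addresses
user = "qwertyuiopasdfghjklzxcvbnm1234567890123"  # 39-character client name
mount_cmd = (
    "sudo mount -t ceph %s:/ /mnt/cephfs "
    "-o name=%s,secretfile=/etc/ceph/%s.secret"
    % (",".join(mon_ips), user, user)
)
print(mount_cmd)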
Example #25
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        dir_name = 'dir'
        log.info("Running cephfs 11338 test case")
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        dirs, rc = fs_util.mkdir(client1, 1, 3, client_info['mounting_dir'],
                                 dir_name)
        if rc == 0:
            log.info("Directories created")
        dirs = dirs.split('\n')
        # new clients with restrictions
        new_client1_name = client_info['fuse_clients'][
            0].node.hostname + '_%s' % (dirs[0])
        new_client2_name = client_info['fuse_clients'][
            1].node.hostname + '_%s' % (dirs[0])
        new_client3_name = client_info['kernel_clients'][
            0].node.hostname + '_%s' % (dirs[1])
        new_client3_mouting_dir = '/mnt/%s_%s/' % (
            client_info['kernel_clients'][0].node.hostname, dirs[1])
        new_client2_mouting_dir = '/mnt/%s_%s/' % (
            client_info['fuse_clients'][1].node.hostname, dirs[0])
        new_client1_mouting_dir = '/mnt/%s_%s/' % (
            client_info['fuse_clients'][0].node.hostname, dirs[0])
        rc1 = fs_util.auth_list(client1,
                                path=dirs[0],
                                permission='rw',
                                mds=True)
        rc2 = fs_util.auth_list(client2,
                                path=dirs[0],
                                permission='r',
                                mds=True)
        rc3 = fs_util.auth_list(client3,
                                path=dirs[1],
                                permission='*',
                                mds=True)
        if rc1 == 0 and rc2 == 0 and rc3 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1,
                                 new_client1_mouting_dir,
                                 new_client=new_client1_name,
                                 sub_dir=dirs[0])
        rc2 = fs_util.fuse_mount(client2,
                                 new_client2_mouting_dir,
                                 new_client=new_client2_name,
                                 sub_dir=dirs[0])
        rc3 = fs_util.kernel_mount(client3,
                                   new_client3_mouting_dir,
                                   client_info['mon_node_ip'],
                                   new_client=new_client3_name,
                                   sub_dir=dirs[1])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        if rc3 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        _, rc = fs_util.stress_io(client1,
                                  new_client1_mouting_dir,
                                  '',
                                  0,
                                  1,
                                  iotype='smallfile_create',
                                  fnum=1000,
                                  fsize=10)

        if rc == 0:
            log.info('Permissions set for client %s are working' %
                     new_client1_name)
        else:
            log.error('Permissions set for client %s failed' %
                      new_client1_name)
            return 1
        _, rc = fs_util.stress_io(client1,
                                  new_client1_mouting_dir,
                                  '',
                                  0,
                                  1,
                                  iotype='smallfile_delete',
                                  fnum=1000,
                                  fsize=10)
        if rc == 0:
            log.info('Permissions set for client %s are working properly' %
                     new_client1_name)
        else:
            log.error('Permissions set for client %s failed' %
                      new_client1_name)
            return 1
        try:
            _, rc = fs_util.stress_io(client2,
                                      new_client2_mouting_dir,
                                      '',
                                      0,
                                      1,
                                      iotype='touch')
        except CommandFailed:
            log.info('Permissions set for client %s are working properly' %
                     new_client2_name)

        _, rc = fs_util.stress_io(client3,
                                  new_client3_mouting_dir,
                                  '',
                                  0,
                                  1,
                                  iotype='smallfile_create',
                                  fnum=1000,
                                  fsize=10)

        if rc == 0:
            log.info('Permissions set for client %s are working properly' %
                     new_client3_name)
        else:
            log.error('Permissions set for client %s failed' %
                      new_client3_name)
            return 1
        _, rc = fs_util.stress_io(client3,
                                  new_client3_mouting_dir,
                                  '',
                                  0,
                                  1,
                                  iotype='smallfile_delete',
                                  fnum=1000,
                                  fsize=10)
        if rc == 0:
            log.info('Permissions set for client %s are working properly' %
                     new_client3_name)
        else:
            log.error('Permissions set for client %s failed' %
                      new_client3_name)
            return 1

        fs_util.client_clean_up(client1,
                                '',
                                new_client1_mouting_dir,
                                'umount',
                                client_name=new_client1_name)
        fs_util.client_clean_up(client2,
                                '',
                                new_client2_mouting_dir,
                                'umount',
                                client_name=new_client2_name)
        fs_util.client_clean_up('',
                                client3,
                                new_client3_mouting_dir,
                                'umount',
                                client_name=new_client3_name)

        fs_util.auth_list(client1, path=dirs[0], permission='rw', osd=True)
        fs_util.auth_list(client3, path=dirs[1], permission='r', osd=True)

        fs_util.fuse_mount(client1,
                           new_client1_mouting_dir,
                           new_client=new_client1_name)
        fs_util.kernel_mount(client3,
                             new_client3_mouting_dir,
                             client_info['mon_node_ip'],
                             new_client=new_client3_name)

        fs_util.stress_io(client1,
                          new_client1_mouting_dir,
                          '',
                          0,
                          1,
                          iotype='smallfile_delete',
                          fnum=1000,
                          fsize=10)
        try:
            if client_info['kernel_clients'][0].pkg_type == 'rpm':
                client_info['kernel_clients'][0].exec_command(
                    cmd='sudo dd if=/dev/zero of=%s/file bs=10M count=10' %
                    new_client3_mouting_dir)

        except CommandFailed as e:
            log.info(e)
            log.info('Permissions set  for client %s is working properly' %
                     (client_info['kernel_clients'][0].node.hostname + '_' +
                      (dirs[1])))

        fs_util.client_clean_up(
            client1,
            '',
            new_client1_mouting_dir,
            'umount',
            client_name=client_info['fuse_clients'][0].node.hostname + '_%s' %
            (dirs[0]))

        fs_util.client_clean_up('',
                                client3,
                                new_client3_mouting_dir,
                                'umount',
                                client_name=new_client3_name)
        fs_util.auth_list(client1, path=dirs[0], layout_quota='p_flag')
        fs_util.auth_list(client3, path=dirs[1], layout_quota='!p_flag')

        fs_util.fuse_mount(client1,
                           new_client1_mouting_dir,
                           new_client=new_client1_name)
        fs_util.kernel_mount(client3,
                             new_client3_mouting_dir,
                             client_info['mon_node_ip'],
                             new_client=new_client3_name)
        file_name = 'file1'
        client_info['fuse_clients'][0].exec_command(
            cmd='sudo touch %s/%s' % (new_client1_mouting_dir, file_name))
        client_info['fuse_clients'][0].exec_command(
            cmd='sudo mkdir  %s/%s' % (new_client1_mouting_dir, dirs[0]))

        try:
            fs_util.setfattr(client3, 'stripe_unit', '1048576',
                             new_client3_mouting_dir, file_name)
            fs_util.setfattr(client3, 'max_bytes', '100000000',
                             new_client3_mouting_dir, dirs[1])
        except CommandFailed:
            log.info('Permission denied for setting attrs, as expected')
        fs_util.setfattr(client1, 'stripe_unit', '1048576',
                         new_client1_mouting_dir, file_name)
        fs_util.setfattr(client1, 'max_bytes', '100000000',
                         new_client1_mouting_dir, dirs[0])
        fs_util.client_clean_up(client1,
                                '',
                                new_client1_mouting_dir,
                                'umount',
                                client_name=new_client1_name)

        fs_util.client_clean_up('',
                                client3,
                                new_client3_mouting_dir,
                                'umount',
                                client_name=new_client3_name)
        fs_util.client_clean_up(client_info['fuse_clients'],
                                client_info['kernel_clients'],
                                client_info['mounting_dir'], 'umount')
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
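The path-restricted clients exercised above boil down to cephx caps with an mds path clause. A sketch of the cap string only (client name, permission, and path are illustrative assumptions):

user, perm, path = "restricted_user", "rw", "dir_1"  # illustrative values
cap_cmd = (
    "ceph auth get-or-create client.%s "
    "mon 'allow r' mds 'allow %s path=/%s' osd 'allow rw'"
    % (user, perm, path)
)
print(cap_cmd)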
Example #26
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()

        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(
            client3,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        tc1 = '11293'
        tc2 = '11296'
        tc3 = '11297'
        tc4 = '11295'
        dir1 = ''.join(
            random.choice(
                string.ascii_lowercase
                + string.digits) for _ in range(10))
        dir2 = ''.join(
            random.choice(
                string.ascii_lowercase
                + string.digits) for _ in range(10))
        dir3 = ''.join(
            random.choice(
                string.ascii_lowercase
                + string.digits) for _ in range(10))
        results = []
        return_counts = []
        log.info("Create files and directories of 1000 depth and 1000 breadth")
        for client in client_info['fuse_clients']:
            client.exec_command(
                cmd='sudo mkdir %s%s' %
                    (client_info['mounting_dir'], dir1))
            client.exec_command(
                cmd='sudo mkdir %s%s' %
                    (client_info['mounting_dir'], dir2))
            client.exec_command(
                cmd='sudo mkdir %s%s' %
                    (client_info['mounting_dir'], dir3))
            log.info('Execution of testcase %s started' % tc1)
            out, rc = client.exec_command(
                cmd='sudo crefi %s%s --fop create --multi -b 1000 -d 1000 '
                    '-n 1 -T 5 --random --min=1K --max=10K' %
                    (client_info['mounting_dir'], dir1), long_running=True)
            log.info('Execution of testcase %s ended' % tc1)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc1)

            log.info('Execution of testcase %s started' % tc2)
            client.exec_command(
                cmd='sudo cp -r  %s%s/* %s%s/' %
                    (client_info['mounting_dir'], dir1,
                     client_info['mounting_dir'], dir2))
            client.exec_command(
                cmd="diff -qr  %s%s %s%s/" %
                    (client_info['mounting_dir'], dir1,
                     client_info['mounting_dir'], dir2))
            log.info('Execution of testcase %s ended' % tc2)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc2)

            log.info('Execution of testcase %s started' % tc3)
            out, rc = client.exec_command(
                cmd='sudo mv  %s%s/* %s%s/' %
                    (client_info['mounting_dir'], dir1,
                     client_info['mounting_dir'], dir3))
            log.info('Execution of testcase %s ended' % tc3)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc3)
            log.info('Execution of testcase %s started' % tc4)
            for client in client_info['clients']:
                if client.pkg_type != 'deb':
                    client.exec_command(
                        cmd='sudo dd if=/dev/zero of=%s%s.txt bs=100M '
                            'count=5' %
                            (client_info['mounting_dir'], client.node.hostname))
                    out1, rc1 = client.exec_command(
                        cmd='sudo  ls -c -ltd -- %s%s.*' %
                            (client_info['mounting_dir'], client.node.hostname))
                    client.exec_command(
                        cmd='sudo dd if=/dev/zero of=%s%s.txt bs=200M '
                            'count=5' %
                            (client_info['mounting_dir'], client.node.hostname))
                    out2, rc2 = client.exec_command(
                        cmd='sudo  ls -c -ltd -- %s%s.*' %
                            (client_info['mounting_dir'], client.node.hostname))
                    a = out1.read().decode()
                    print("------------")
                    b = out2.read().decode()
                    if a != b:
                        return_counts.append(out1.channel.recv_exit_status())
                        return_counts.append(out2.channel.recv_exit_status())
                    else:
                        raise CommandFailed("Metadata info command failed")
                    break
            log.info('Execution of testcase %s ended' % tc4)
            print(return_counts)
            rc_set = set(return_counts)
            if len(rc_set) == 1:
                results.append("TC %s passed" % tc4)

            print("Testcase Results:")
            for res in results:
                print(res)
            break
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        else:
            return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))

        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
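TC 11296 above validates the copy with diff -qr; the same check can be approximated in pure Python with filecmp (directory paths are assumptions; note filecmp's default comparison is shallow, by size and mtime, rather than byte-for-byte):

import filecmp
import os

def trees_match(a, b):
    cmp = filecmp.dircmp(a, b)
    if cmp.left_only or cmp.right_only or cmp.diff_files:
        return False
    return all(trees_match(os.path.join(a, d), os.path.join(b, d))
               for d in cmp.common_dirs)

print(trees_match("/mnt/cephfs/dir1", "/mnt/cephfs/dir2"))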
Example #27
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "10625,11225"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            log.error("Activating multiple MDSs failed")
            return 1
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            dirs, rc = fs_util.mkdir(client1, 0, 6,
                                     client_info["mounting_dir"], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split("\n")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_rename",
                    fnum=1000,
                    fsize=10,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_delete-renamed",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=1000,
                    fsize=10,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[4],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[5],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[6],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_rename",
                    fnum=1,
                    fsize=1000000,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_delete-renamed",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[4],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=1,
                    fsize=1000000,
                )
        dir_name = "!@#$%^&*()-_=+[]{};:,.<>?"
        out, rc = client1[0].exec_command(
            cmd="sudo mkdir '%s%s'" % (client_info["mounting_dir"], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Directory created")
        else:
            raise CommandFailed("Directory creation failed")
        for client in client_info["fuse_clients"]:
            file_name = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in range(255))
            client.exec_command(
                cmd="sudo touch '%s%s/%s'" %
                (client_info["mounting_dir"], dir_name, file_name))
        for client in client_info["kernel_clients"]:
            if client.pkg_type == "rpm":
                file_name = "".join(
                    random.choice(string.ascii_lowercase + string.digits)
                    for _ in range(255))
                client.exec_command(
                    cmd="sudo touch '%s%s/%s'" %
                    (client_info["mounting_dir"], dir_name, file_name))
        for num in range(0, 5):
            for client in client_info["fuse_clients"]:
                ops = ["create", "setxattr", "getxattr", "chmod", "rename"]
                for op in ops:
                    client.exec_command(
                        sudo=True,
                        cmd=
                        f"python3 smallfile/smallfile_cli.py --operation {op} --threads 10 --file-size 4 "
                        f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                        f"{client_info['mounting_dir']}{dir_name}",
                        long_running=True,
                        timeout=300,
                    )
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
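The special-character directory above works only because the path is single-quoted inside the shell command; shlex.quote does the same thing more robustly. A minimal sketch (the mount path is an assumption):

import shlex

dir_name = "!@#$%^&*()-_=+[]{};:,.<>?"
cmd = "sudo mkdir %s" % shlex.quote("/mnt/cephfs/" + dir_name)
print(cmd)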
Example #28
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '10625,11225'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            log.error("Activating multiple MDSs failed")
            return 1
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                '',
                0,
                1,
                iotype='crefi',
            )
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify('', return_counts)
        if result == 'Data validation success':
            dirs, rc = fs_util.mkdir(client1, 0, 6,
                                     client_info['mounting_dir'], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split('\n')
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[1],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)

            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_rename',
                        fnum=1000,
                        fsize=10)
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_delete-renamed',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='smallfile_delete',
                        fnum=1000,
                        fsize=10)
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[3],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1,
                        fsize=1000000)
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[4],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1,
                        fsize=1000000)
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[5],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1,
                        fsize=1000000)
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[6],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1,
                        fsize=1000000)

            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[3],
                        0,
                        1,
                        iotype='smallfile_rename',
                        fnum=1,
                        fsize=1000000)
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[3],
                        0,
                        1,
                        iotype='smallfile_delete-renamed',
                        fnum=1,
                        fsize=1000000)
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[4],
                        0,
                        1,
                        iotype='smallfile_delete',
                        fnum=1,
                        fsize=1000000)
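        # Robustness check: a directory named entirely with shell special
        # characters, populated with 255-character random file names.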
        dir_name = '!@#$%^&*()-_=+[]{};:,.<>?'
        out, rc = client1[0].exec_command(
            cmd="sudo mkdir '%s%s'" % (client_info['mounting_dir'], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Directory created")
        else:
            raise CommandFailed("Directory creation failed")
        for client in client_info['fuse_clients']:
            file_name = ''.join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in range(255))
            client.exec_command(
                cmd="sudo touch '%s%s/%s'" %
                (client_info['mounting_dir'], dir_name, file_name))
        for client in client_info['kernel_clients']:
            if client.pkg_type == 'rpm':
                file_name = ''.join(
                    random.choice(string.ascii_lowercase + string.digits)
                    for _ in range(255))
                client.exec_command(
                    cmd="sudo touch '%s%s/%s'" %
                    (client_info['mounting_dir'], dir_name, file_name))
        for _ in range(5):
            for client in client_info['fuse_clients']:
                client.exec_command(
                    cmd="sudo crefi %s'%s' --fop create -t %s "
                    "--multi -b 10 -d 10 -n 10 -T 10 "
                    "--random --min=1K --max=%dK" %
                    (client_info['mounting_dir'], dir_name, 'text', 5),
                    long_running=True)
                for _ in range(6):
                    ops = [
                        'create', 'rename', 'chmod', 'chown', 'chgrp',
                        'setxattr'
                    ]
                    rand_op = random.choice(ops)
                    ftypes = ['text', 'sparse', 'binary', 'tar']
                    rand_filetype = random.choice(ftypes)
                    rand_count = random.randint(2, 10)
                    # dir_name is the special-character directory created
                    # above, so it must be quoted just like in the first
                    # crefi call or the shell will mangle it.
                    client.exec_command(
                        cmd="sudo crefi %s'%s' --fop %s -t %s "
                        "--multi -b 10 -d 10 -n 10 -T 10 "
                        "--random --min=1K --max=%dK" %
                        (client_info['mounting_dir'], dir_name,
                         rand_op, rand_filetype, rand_count),
                        long_running=True)
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        else:
            return 1
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
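The test above hand-quotes the special-character directory in every crefi invocation. As a minimal sketch of the same idea using only the standard library, the path can be escaped once with shlex.quote; build_crefi_cmd below is a hypothetical helper for illustration, not part of the test framework:

import shlex


def build_crefi_cmd(mounting_dir, dir_name, fop="create", ftype="text", max_kb=5):
    # Quote the full target path so a name like '!@#$%^&*()-_=+[]{};:,.<>?'
    # reaches crefi intact instead of being interpreted by the shell.
    target = shlex.quote("%s%s" % (mounting_dir, dir_name))
    return ("sudo crefi %s --fop %s -t %s "
            "--multi -b 10 -d 10 -n 10 -T 10 "
            "--random --min=1K --max=%dK" % (target, fop, ftype, max_kb))


print(build_crefi_cmd("/mnt/cephfs/", "!@#$%^&*()-_=+[]{};:,.<>?"))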
Example #29
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11298"
        source_dir = "/mnt/source"
        target_dir = "target"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf  %s" % source_dir)
            client.exec_command(cmd="sudo mkdir %s" % source_dir)

        client_info["clients"][0].exec_command(
            cmd="sudo mkdir %s%s" % (client_info["mounting_dir"], target_dir))
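        # Seed phase: mixed IO (touch, large write, dd, smallfile, fio)
        # into the local source directory from all four clients at once.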
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    source_dir,
                    "",
                    0,
                    100,
                    iotype="touch")
            p.spawn(fs_util.read_write_IO, client1, source_dir, "g", "write")
            p.spawn(fs_util.stress_io,
                    client2,
                    source_dir,
                    "",
                    0,
                    10,
                    iotype="dd")
            p.spawn(fs_util.stress_io,
                    client3,
                    source_dir,
                    "",
                    0,
                    10,
                    iotype="smallfile")
            p.spawn(fs_util.stress_io,
                    client4,
                    source_dir,
                    "",
                    0,
                    1,
                    iotype="fio")
            for op in p:
                return_counts1, rc = op

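        # Push the seeded data into the CephFS target directory via rsync
        # from all four clients concurrently.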
        with parallel() as p:
            p.spawn(
                fs_util.rsync,
                client1,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            p.spawn(
                fs_util.rsync,
                client2,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            p.spawn(
                fs_util.rsync,
                client3,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )

            p.spawn(
                fs_util.rsync,
                client4,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            for op in p:
                return_counts2, rc = op

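        # Mutate the CephFS copy with further IO, then rsync it back into
        # the local source directory.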
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                target_dir,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                target_dir,
                0,
                11,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                target_dir,
                0,
                3,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                target_dir,
                0,
                1,
                iotype="fio",
            )
            for op in p:
                return_counts3, rc = op
        with parallel() as p:
            p.spawn(
                fs_util.rsync,
                client1,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client2,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client3,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client4,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            for op in p:
                return_counts4, rc = op

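        # Flatten the per-phase return-count maps; a single distinct value
        # across every client and phase means all operations agreed.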
        rc = (list(return_counts1.values()) + list(return_counts2.values()) +
              list(return_counts3.values()) + list(return_counts4.values()))
        rc_set = set(rc)
        if len(rc_set) == 1:
            print("Test case CEPH-%s passed" % (tc))
        else:
            print(("Test case CEPH-%s failed" % (tc)))
        log.info("Test completed for CEPH-%s" % (tc))
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
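The pass/fail decision above hinges on a set collapse: if every rsync and IO phase reported the same return count, the set of all values has exactly one element. A self-contained illustration with hypothetical values (all_counts_agree is not a framework API):

def all_counts_agree(*count_maps):
    # True when every value across every per-client map is identical.
    values = [v for m in count_maps for v in m.values()]
    return len(set(values)) == 1


assert all_counts_agree({"client1": 0, "client2": 0}, {"client3": 0})
assert not all_counts_agree({"client1": 0}, {"client2": 1})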
Example #30
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11256-fuse'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise Exception("kernel mount failed")
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), build, None, 300)
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        rc = fs_util.standby_rank(client_info['mds_nodes'],
                                  client_info['mon_node'],
                                  todo='add_rank')
        if rc == 0:
            log.info("Added standby ranks")
        else:
            raise Exception("Adding standby ranks failed")

        dirs, rc = fs_util.mkdir(client1, 0, 4, client_info['mounting_dir'],
                                 dir_name)
        if rc == 0:
            log.info("Directories created")
        else:
            raise CommandFailed("Directory creation failed")
        dirs = dirs.split('\n')
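        # Fault phase 1: IO from all four clients while every MDS node
        # reboots in parallel.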
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[1],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[2],
                    0,
                    1,
                    iotype='smallfile_create',
                    fnum=1000,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    10,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.reboot, node)
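        # Fault phase 2: same IO mix while the active MDS daemons are
        # restarted through systemctl.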
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[2],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[1],
                    0,
                    1,
                    iotype='smallfile_create',
                    fnum=1000,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    10,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.daemon_systemctl, node, 'mds',
                        'active_mds_restart')
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)

        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), build, None, 300)
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info('cluster is healthy')
        else:
            log.error("cluster is not healty")
            return 1
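        # Fault phase 3: IO continues while the MDS network is cut.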
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[2],
                    0,
                    1,
                    iotype='smallfile_create',
                    fnum=10,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[1],
                    0,
                    1,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                fs_util.network_disconnect(node)
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
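        # Fault phase 4: IO continues while the MDS processes are killed.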
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[1],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    1,
                    iotype='smallfile_create',
                    fnum=10,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[2],
                    0,
                    1,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                fs_util.pid_kill(node, 'mds')
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            log.error("cluster is not healthy")
            return 1
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        fs_util.standby_rank(client_info['mds_nodes'],
                             client_info['mon_node'],
                             todo='add_rank_revert')
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')
        else:
            fs_util.client_clean_up(client_info['fuse_clients'], '',
                                    client_info['mounting_dir'], 'umount')
        rc = fs_util.standby_rank(client_info['mds_nodes'],
                                  client_info['mon_node'],
                                  todo='add_rank_revert')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
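All three tests above follow the same skeleton: record a cluster-health baseline, drive client IO concurrently with a disruption (reboot, daemon restart, network cut, or process kill), let the heartbeat checks settle, and compare health afterwards. The stand-in below mimics that skeleton with concurrent.futures in place of the framework's parallel() helper; run_io_with_fault, io_tasks, fault_task and health_check are illustrative names only:

from concurrent.futures import ThreadPoolExecutor


def run_io_with_fault(io_tasks, fault_task, health_check):
    # Baseline first, then IO and one injected fault in parallel,
    # then a fresh health check to compare against the baseline.
    before = health_check()
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(task) for task in io_tasks]
        futures.append(pool.submit(fault_task))
        for future in futures:
            future.result()  # re-raise anything the IO or the fault threw
    return health_check() == before


assert run_io_with_fault(
    io_tasks=[lambda: None, lambda: None],  # stand-ins for stress_io calls
    fault_task=lambda: None,                # stand-in for reboot/pid_kill
    health_check=lambda: "HEALTH_OK",
)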