Example #1
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11222"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )

        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info["clients"]:
            log.info("Creating directory:")
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir1))
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd=
                f"python3 smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 --files 1000 "
                f"--files-per-dir 10 --dirs-per-dir 2 --top {client_info['mounting_dir']}{dir1}",
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)

            break

        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify("", return_counts)
        print(result1)

        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf %s%s" %
                                (client_info["mounting_dir"], dir1))
            break

        for client in client_info["clients"]:
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation create --threads 10 "
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)
            log.info("Renaming the dirs:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation rename --threads 10 "
                "--file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)

            break
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify("", return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        client1[0].exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir_name))
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "read",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                "write",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client2,
                client_info["mounting_dir"],
                "read",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "write",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("11223", return_counts)
        print(result)

        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            if rc == 0:
                log.info("Cleaning up successfull")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
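
The snippets in this section are shown without their import headers. A minimal header satisfying the names they use is sketched below; the standard-library imports are certain from usage, while FsUtils, parallel, check_ceph_healthly, CommandFailed, and log come from the test framework, so their import paths are assumptions and are shown only as comments.

# Hypothetical import header for these snippets. The stdlib imports are
# implied directly by the code; the framework import paths are NOT
# confirmed and must be adapted to the actual package layout.
import random
import string
import timeit
import traceback

# Framework utilities used throughout (illustrative placeholders only):
# from <framework>.parallel import parallel
# from <framework>.exceptions import CommandFailed
# from <framework>.cephfs_utils import FsUtils
# from <framework>.utils import check_ceph_healthly
# log = <framework logger>
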
Example #2
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11222'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        cluster_health_beforeIO = check_ceph_healthly(client_info['mon_node'],
                                                      12, 1, None, 300)

        dir1 = ''.join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info['clients']:
            log.info("Creating directory:")
            client.exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], dir1))
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd='sudo crefi %s%s --fop create --multi -b 10 -d 10 '
                '--random --min=1K --max=10K' %
                (client_info['mounting_dir'], dir1))
            print(out.read().decode())
            break

        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='crefi')
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify('', return_counts)
        print(result1)

        for client in client_info['clients']:
            client.exec_command(cmd='sudo rm -rf %s%s' %
                                (client_info['mounting_dir'], dir1))
            break

        for client in client_info['clients']:
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd='sudo crefi %s%s --fop create --multi -b 10 -d 10 '
                '--random --min=1K --max=10K' %
                (client_info['mounting_dir'], dir1))
            print(out.read().decode())
            log.info("Renaming the dirs:")
            out, rc = client.exec_command(
                cmd='sudo crefi '
                '%s%s --fop rename --multi -b 10 -d 10 --random '
                '--min=1K --max=10K' % (client_info['mounting_dir'], dir1))
            print(out.read().decode())

            break
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dir1,
                    0,
                    5,
                    iotype='crefi')
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify('', return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(client_info['mon_node'],
                                                     12, 1, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                             client_info['kernel_clients'],
                                             client_info['mounting_dir'],
                                             'umount')
            else:
                rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                             client_info['mounting_dir'],
                                             'umount')
            if rc == 0:
                log.info('Cleaning up successful')
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')
        else:
            fs_util.client_clean_up(client_info['fuse_clients'], '',
                                    client_info['mounting_dir'], 'umount')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
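
All of these examples drive concurrent IO through the same parallel() pattern: spawn workers inside the context manager, then iterate the manager to collect each worker's (return_counts, rc) tuple. The framework's implementation is not shown here; the thread-based stand-in below is a sketch of only its observable surface.

from concurrent.futures import ThreadPoolExecutor


class parallel:
    """Minimal stand-in for the parallel() helper used in these tests:
    p.spawn(fn, *args, **kw) schedules work, iterating p yields each
    worker's return value, and leaving the with-block joins all workers.
    The real helper may be greenlet-based; this sketch uses threads.
    """

    def __init__(self):
        self._pool = ThreadPoolExecutor()
        self._futures = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self._pool.shutdown(wait=True)  # join all spawned workers
        return False

    def spawn(self, fn, *args, **kwargs):
        self._futures.append(self._pool.submit(fn, *args, **kwargs))

    def __iter__(self):
        for future in self._futures:
            yield future.result()  # re-raises a worker's exception, if any
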
Example #3
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11219,11224"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
        client1[0].exec_command(
            sudo=True,
            cmd=f"mkdir {client_info['mounting_dir']}{dir_name}",
        )
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                2,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            dirs, rc = fs_util.mkdir(client1, 0, 3,
                                     client_info["mounting_dir"], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split("\n")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    100,
                    iotype="touch",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile",
                )
                for op in p:
                    return_counts, rc = op

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_delete",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=10,
                    fsize=1024,
                )
                for op in p:
                    return_counts, rc = op
            cluster_health_afterIO = check_ceph_healthly(
                client_info["mon_node"][0],
                num_of_osds,
                len(client_info["mon_node"]),
                build,
                None,
                300,
            )

            log.info("Execution of Test case CEPH-%s ended" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            if cluster_health_beforeIO == cluster_health_afterIO:
                print(result)

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    5,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile",
                )

            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[2],
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    5,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile",
                )

            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )

            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                if rc == 0:
                    log.info("Cleaning up successfull")
                else:
                    return 1
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                if rc == 0:
                    log.info("Cleaning up successfull")
                else:
                    return 1

        log.info("Execution of Test case CEPH-%s ended" % (tc))
        print("Results:")
        result = fs_util.rc_verify(tc, return_counts)
        print(result)
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
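
Every example closes with the same elapsed-time arithmetic: divmod the timer delta into hours, minutes, and seconds. Factored out, a small helper (hypothetical, not part of the framework) reads:

import timeit


def elapsed_hms(start):
    """Format the time since `start` (a timeit.default_timer() value) as
    the "Hours:H Minutes:M Seconds:S" string printed by each test."""
    total_time = timeit.default_timer() - start
    mins, secs = divmod(total_time, 60)
    hours, mins = divmod(mins, 60)
    return "Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs)
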
Example #4
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11255_11336-fuse client'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("creating auth failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(
            client3,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        rc = fs_util.standby_rank(
            client_info['mds_nodes'],
            client_info['mon_node'],
            todo='add_rank')
        if rc == 0:
            log.info("Added standby ranks")
        else:
            log.error("Failed to add standby ranks")
            return 1
        client1[0].exec_command(
            cmd='sudo mkdir %s%s' %
                (client_info['mounting_dir'], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Dir created")
        else:
            raise CommandFailed('Dir creation failed')
        rc1 = fs_util.fstab_entry(
            client1,
            client_info['mounting_dir'],
            action='doEntry')
        rc2 = fs_util.fstab_entry(
            client2,
            client_info['mounting_dir'],
            action='doEntry')
        if rc1 == 0 and rc2 == 0:
            log.info("FSentry for clients are done")
        else:
            raise CommandFailed("FsEntry failed")
        rc1 = fs_util.fstab_entry(
            client3,
            client_info['mounting_dir'],
            action='doEntry', mon_node_ip=client_info['mon_node_ip'])
        rc2 = fs_util.fstab_entry(
            client4,
            client_info['mounting_dir'],
            action='doEntry', mon_node_ip=client_info['mon_node_ip'])
        if rc1 == 0 and rc2 == 0:
            log.info("FSentry for clients are done")
        else:
            raise CommandFailed("FsEntry failed")
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                50,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                1,
                iotype='smallfile_create', fnum=1000, fsize=100)
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info['mounting_dir'],
                dir_name,
                0,
                1,
                iotype='crefi')
            p.spawn(fs_util.reboot, client1[0])
        res = []
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
            for op in p:
                res.append(op)
        print(res)
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')

            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'read')

            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dir_name,
                    0,
                    1,
                    iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            p.spawn(fs_util.reboot, client2[0])

        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info('cluster is healthy')
        else:
            log.error("cluster is not healty")
            return 1
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                p.spawn(fs_util.reboot, node)
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                fs_util.network_disconnect(node)
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                fs_util.pid_kill(node, 'osd')

        time.sleep(100)
        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            return 1
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'],
                'umount')
            rc = fs_util.standby_rank(
                client_info['mds_nodes'],
                client_info['mon_node'],
                todo='add_rank_revert')
            if rc == 0:
                log.info("removed standby ranks")
            rc1 = fs_util.fstab_entry(
                client1,
                client_info['mounting_dir'],
                action='revertEntry')
            rc2 = fs_util.fstab_entry(
                client2,
                client_info['mounting_dir'],
                action='revertEntry')
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")
            else:
                return 1
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                '',
                client_info['mounting_dir'],
                'umount')
            rc = fs_util.standby_rank(
                client_info['mds_nodes'],
                client_info['mon_node'],
                todo='add_rank_revert')
            if rc == 0:
                log.info("removed standby ranks")
            rc1 = fs_util.fstab_entry(
                client1,
                client_info['mounting_dir'],
                action='revertEntry')
            rc2 = fs_util.fstab_entry(
                client2,
                client_info['mounting_dir'],
                action='revertEntry')
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")

            if rc == 0:
                log.info('Cleaning up successful')
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'],
                'umount')
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                '',
                client_info['mounting_dir'],
                'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
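
The auth-and-mount prologue (auth_list on four single-client lists, fuse_mount on two of them, kernel_mount on the other two) is repeated verbatim in every example. A refactor sketch using only the FsUtils calls already seen above (the helper name and error style are hypothetical):

def setup_clients(fs_util, client_info):
    """Authorize and mount two FUSE and two kernel clients; raises
    CommandFailed (the framework exception used above) on any failure."""
    fuse = [[c] for c in client_info["fuse_clients"][:2]]
    kernel = [[c] for c in client_info["kernel_clients"][:2]]
    for client in fuse + kernel:
        if fs_util.auth_list(client) != 0:
            raise CommandFailed("auth list failed")
    for client in fuse:
        if fs_util.fuse_mount(client, client_info["mounting_dir"]) != 0:
            raise CommandFailed("Fuse mount failed")
    for client in kernel:
        if fs_util.kernel_mount(client, client_info["mounting_dir"],
                                client_info["mon_node_ip"]) != 0:
            raise CommandFailed("kernel mount failed")
    return fuse + kernel

# Usage sketch:
# client1, client2, client3, client4 = setup_clients(fs_util, client_info)
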
Example #5
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11222"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        cluster_health_beforeIO = check_ceph_healthly(client_info["mon_node"],
                                                      12, 1, build, None, 300)

        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info["clients"]:
            log.info("Creating directory:")
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir1))
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd="sudo crefi %s%s --fop create --multi -b 10 -d 10 "
                "--random --min=1K --max=10K" %
                (client_info["mounting_dir"], dir1))
            print(out.read().decode())
            break

        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="crefi",
            )
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify("", return_counts)
        print(result1)

        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf %s%s" %
                                (client_info["mounting_dir"], dir1))
            break

        for client in client_info["clients"]:
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd="sudo crefi %s%s --fop create --multi -b 10 -d 10 "
                "--random --min=1K --max=10K" %
                (client_info["mounting_dir"], dir1))
            print(out.read().decode())
            log.info("Renaming the dirs:")
            out, rc = client.exec_command(
                cmd="sudo crefi "
                "%s%s --fop rename --multi -b 10 -d 10 --random "
                "--min=1K --max=10K" % (client_info["mounting_dir"], dir1))
            print(out.read().decode())

            break
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="crefi",
            )
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify("", return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(client_info["mon_node"],
                                                     12, 1, build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            if rc == 0:
                log.info("Cleaning up successfull")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
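
Examples #1, #2, and #5 gate the pass verdict on the cluster health summary being identical before and after the IO phase. Expressed as a guard, that pattern might look like the hypothetical sketch below; it forwards check_ceph_healthly's arguments unchanged, since the examples call it with different signatures.

from contextlib import contextmanager


@contextmanager
def unchanged_cluster_health(check, *args):
    """Run the wrapped IO phase and raise if the health summary returned
    by `check` (e.g. check_ceph_healthly) differs afterwards."""
    before = check(*args)
    yield
    after = check(*args)
    if before != after:
        raise CommandFailed("cluster health changed across the IO phase")
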
Example #6
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11254-fuse_clients"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1

        rc3 = fs_util.kernel_mount(
            client3, client_info["mounting_dir"], client_info["mon_node_ip"]
        )
        rc4 = fs_util.kernel_mount(
            client4, client_info["mounting_dir"], client_info["mon_node_ip"]
        )
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        rc = fs_util.standby_rank(
            client_info["mds_nodes"], client_info["mon_node"], todo="add_rank"
        )
        if rc == 0:
            log.info("Added standby ranks")
        else:
            raise Exception("Adding standby ranks failed")

        client1[0].exec_command(
            cmd="sudo mkdir %s%s" % (client_info["mounting_dir"], dir_name)
        )
        if client1[0].node.exit_status == 0:
            log.info("Dir created")
        else:
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        rc1 = fs_util.fstab_entry(
            client1, client_info["mounting_dir"], action="doEntry"
        )
        rc2 = fs_util.fstab_entry(
            client2, client_info["mounting_dir"], action="doEntry"
        )
        if rc1 == 0 and rc2 == 0:
            log.info("FSentry for clients are done")
        else:
            raise CommandFailed("FsEntry failed")

        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(
                fs_util.read_write_IO, client3, client_info["mounting_dir"], "g", "read"
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                50,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="smallfile_create",
                fnum=1000,
                fsize=100,
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(fs_util.reboot, client1[0])

        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )

            p.spawn(
                fs_util.read_write_IO, client4, client_info["mounting_dir"], "g", "read"
            )

            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            p.spawn(fs_util.reboot, client2[0])

        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info("cluster is healthy")
        else:
            log.error("cluster is not healthy")
            return 1
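        # Stress IO from the clients while every MON node reboots in parallel.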
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            for node in client_info["mon_node"]:
                p.spawn(fs_util.reboot, node)

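        # Same IO load, but each MON now takes a network disconnect; the
        # disconnects run inline in the parent while the spawned IO jobs proceed.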
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            for node in client_info["mon_node"]:
                fs_util.network_disconnect(node)
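        # Final fault: kill the mon daemon process on every MON node under IO.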
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir_name,
                0,
                10,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                500,
                iotype="touch",
            )
            for node in client_info["mon_node"]:
                fs_util.pid_kill(node, "mon")

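        # Re-check cluster health against the pre-IO baseline to confirm the
        # MON reboots, disconnects, and daemon kills left no lasting damage.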
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            log.error("Cluster is not healthy")
            return 1
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
            rc = fs_util.standby_rank(
                client_info["mds_nodes"],
                client_info["mon_node"],
                todo="add_rank_revert",
            )
            if rc == 0:
                log.info("removed standby ranks")
            rc1 = fs_util.fstab_entry(
                client1, client_info["mounting_dir"], action="revertEntry"
            )
            rc2 = fs_util.fstab_entry(
                client2, client_info["mounting_dir"], action="revertEntry"
            )
            if rc1 == 0 and rc2 == 0:
                log.info("fstab entries for the clients were reverted")
            else:
                return 1
        else:
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"], "", client_info["mounting_dir"], "umount"
            )

            rc = fs_util.standby_rank(
                client_info["mds_nodes"],
                client_info["mon_node"],
                todo="add_rank_revert",
            )
            if rc == 0:
                log.info("removed standby ranks")
            else:
                return 1
            rc1 = fs_util.fstab_entry(
                client1, client_info["mounting_dir"], action="revertEntry"
            )
            rc2 = fs_util.fstab_entry(
                client2, client_info["mounting_dir"], action="revertEntry"
            )
            if rc1 == 0 and rc2 == 0:
                log.info("fstab entries for the clients were reverted")
            else:
                return 1
            if rc == 0:
                log.info("Cleaning up successfull")
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"], "", client_info["mounting_dir"], "umount"
            )
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #7
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11256-fuse'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise Exception("kernel mount failed")
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), None, 300)
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            raise CommandFailed("Activating multiple MDSs failed")
        rc = fs_util.standby_rank(client_info['mds_nodes'],
                                  client_info['mon_node'],
                                  todo='add_rank')
        if rc == 0:
            log.info("Added standby ranks")
        else:
            raise Exception("Adding standby ranks failed")

        dirs, rc = fs_util.mkdir(client1, 0, 4, client_info['mounting_dir'],
                                 dir_name)
        if rc == 0:
            log.info("Directories created")
        else:
            raise CommandFailed("Directory creation failed")
        dirs = dirs.split('\n')
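        # Spread fio/crefi/smallfile/dd IO across the new directories while
        # every MDS node reboots in parallel.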
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[1],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[2],
                    '',
                    '',
                    iotype='smallfile_create',
                    fnum=1000,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    10,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.reboot, node)
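        # Repeat the IO mix while restarting the active MDS daemons via systemctl.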
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[2],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[1],
                    '',
                    '',
                    iotype='smallfile_create',
                    fnum=1000,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    10,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.daemon_systemctl, node, 'mds',
                        'active_mds_restart')
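        # After the restarts, check each MDS for heartbeat-map issues
        # (fs_util.heartbeat_map is assumed to scan the daemon logs for
        # heartbeat timeout messages).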
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)

        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), None, 300)
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info('cluster is healthy')
        else:
            log.error("cluster is not healthy")
            return 1
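        # Run IO while cutting the network on every MDS node; the disconnects
        # run inline in the parent while the spawned IO jobs continue.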
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[2],
                    0,
                    1,
                    iotype='smallfile_create',
                    fnum=10,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[1],
                    0,
                    1,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                fs_util.network_disconnect(node)
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
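        # Last fault: kill the mds process on each MDS node under IO, then
        # check the heartbeat map again.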
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    dirs[0],
                    0,
                    1,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    dirs[1],
                    0,
                    1,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dirs[3],
                    0,
                    1,
                    iotype='smallfile_create',
                    fnum=10,
                    fsize=1024)
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    dirs[2],
                    0,
                    1,
                    iotype='dd')
            for node in client_info['mds_nodes']:
                fs_util.pid_kill(node, 'mds')
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            log.error("Cluster is not healthy")
            return 1
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')

        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
            if rc == 0:
                log.info('Cleaning up successful')
        fs_util.standby_rank(client_info['mds_nodes'],
                             client_info['mon_node'],
                             todo='add_rank_revert')
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
            rc = fs_util.standby_rank(client_info['mds_nodes'],
                                      client_info['mon_node'],
                                      todo='add_rank_revert')

        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
            rc = fs_util.standby_rank(client_info['mds_nodes'],
                                      client_info['mon_node'],
                                      todo='add_rank_revert')
        if rc == 0:
            log.info('Cleaning up successful')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #8
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11219,11224'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), build, None, 300)
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activated multiple MDSs successfully")
        else:
            log.error("Activating multiple MDSs failed")
            return 1
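        # Baseline IO: parallel write, read, and readwrite jobs from the fuse
        # and kernel mounts; each spawned job yields (return_counts, rc), which
        # feeds the data validation below.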
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    dir_name,
                    0,
                    2,
                    iotype='crefi')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify('', return_counts)
        if result == 'Data validation success':
            print("Data validation success")
            dirs, rc = fs_util.mkdir(client1, 0, 3,
                                     client_info['mounting_dir'], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split('\n')
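            # Phase 1: mixed fio/touch/dd/crefi IO from the two fuse clients
            # across the new directories.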
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='fio')
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        100,
                        iotype='touch')
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[1],
                        0,
                        1,
                        iotype='dd')
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='crefi')
                for op in p:
                    return_counts, rc = op

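            # Phase 2: create small files in each directory from the kernel
            # client (client3).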
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=10,
                        fsize=1024)
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[1],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=10,
                        fsize=1024)
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=10,
                        fsize=1024)
                for op in p:
                    return_counts, rc = op
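            # Phase 3: delete the small files again, exercising a full
            # create/delete cycle on the kernel mount.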
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_delete',
                        fnum=10,
                        fsize=1024)
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[1],
                        0,
                        1,
                        iotype='smallfile_delete')
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[2],
                        0,
                        1,
                        iotype='smallfile_delete',
                        fnum=10,
                        fsize=1024)
                for op in p:
                    return_counts, rc = op
            cluster_health_afterIO = check_ceph_healthly(
                client_info['mon_node'][0], num_of_osds,
                len(client_info['mon_node']), build, None, 300)

            log.info("Execution of Test case CEPH-%s ended" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            if cluster_health_beforeIO == cluster_health_afterIO:
                print(result)
            else:
                log.error("cluster is not healthy")
                return 1
            print('-----------------------------------------')
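            # Contention test: all four clients hammer the same directory
            # (dirs[0]) at once, then both fuse clients read the data back.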
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        5,
                        iotype='fio')
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        10,
                        iotype='dd')
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='crefi')
            print('-------------------------------------------------------')
            with parallel() as p:
                p.spawn(fs_util.read_write_IO,
                        client1,
                        client_info['mounting_dir'],
                        'g',
                        'read',
                        dir_name=dirs[0])
                p.spawn(fs_util.read_write_IO,
                        client2,
                        client_info['mounting_dir'],
                        'g',
                        'read',
                        dir_name=dirs[0])
            print('-------------------------------------------------------')
            with parallel() as p:
                p.spawn(fs_util.stress_io,
                        client1,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='smallfile_create',
                        fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io,
                        client2,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        5,
                        iotype='fio')
                p.spawn(fs_util.stress_io,
                        client3,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        10,
                        iotype='dd')
                p.spawn(fs_util.stress_io,
                        client4,
                        client_info['mounting_dir'],
                        dirs[0],
                        0,
                        1,
                        iotype='crefi')
            print('-------------------------------------------------------')
            with parallel() as p:
                p.spawn(fs_util.read_write_IO,
                        client1,
                        client_info['mounting_dir'],
                        'g',
                        'read',
                        dir_name=dirs[0])
                p.spawn(fs_util.read_write_IO,
                        client2,
                        client_info['mounting_dir'],
                        'g',
                        'read',
                        dir_name=dirs[0])
            print('-------------------------------------------------------')
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                             client_info['kernel_clients'],
                                             client_info['mounting_dir'],
                                             'umount')
                if rc == 0:
                    log.info('Cleaning up successful')
                else:
                    return 1
            else:
                rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                             client_info['mounting_dir'],
                                             'umount')
                if rc == 0:
                    log.info('Cleaning up successful')
                else:
                    return 1

        log.info("Execution of Test case CEPH-%s ended" % (tc))
        print("Results:")
        result = fs_util.rc_verify(tc, return_counts)
        print(result)
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1