Example #1
def run(ceph_cluster, **kw):
    """
    BZ https://bugzilla.redhat.com/show_bug.cgi?id=1754078:
    Run scrub/deep scrub and check OSD memory usage.
    1. Run a deep scrub on the cluster in parallel while IOs are running.
    2. Check that the memory usage of the OSD daemons does not cross the
       'osd memory target' value set in ceph.conf on each OSD node.
    Args:
        ceph_cluster (ceph.ceph.Ceph): ceph cluster
    """
    log.info("Running bz-1754078")
    log.info(run.__doc__)

    ceph_nodes = kw.get("ceph_nodes")
    config = kw.get("config")
    pg_count = config.get("pg_count", 8)
    timeout = config.get("timeout", 10)
    mons = []
    role = "mon"
    for mnode in ceph_nodes:
        if mnode.role == role:
            mons.append(mnode)

    ctrlr = mons[0]
    log.info("chosing mon {cmon} as ctrlrmon".format(cmon=ctrlr.hostname))
    helper = RadosHelper(ctrlr, config, log)
    with parallel() as p:
        p.spawn(helper.run_radosbench, pg_count, timeout)
        helper = RadosHelper(mons[1], config, log)
        p.spawn(helper.run_scrub)
        helper = RadosHelper(mons[2], config, log)
        p.spawn(helper.run_deep_scrub)

        time.sleep(10)
        osd_nodes = []
        role = "osd"
        with parallel() as p:
            for ceph in ceph_nodes:
                if ceph.role == role:
                    osd_nodes.append(ceph)
                    out, err = ceph.exec_command(
                        cmd="sudo ceph osd ls-tree {host}".format(
                            host=ceph.hostname))
                    osd_id_list_on_node = out.split()
                    log.info("osds on node {}".format(ceph.hostname))
                    log.info(osd_id_list_on_node)
                    osd_mem_target = check_osd_memory_target_of_node(
                        ceph, osd_id_list_on_node)
                    log.info("Node {a} osd_memory_target in bytes: {b}".format(
                        a=ceph.hostname, b=osd_mem_target))
                    p.spawn(
                        check_osd_daemon_memory_usage,
                        ceph,
                        osd_id_list_on_node,
                        osd_mem_target,
                    )
                    time.sleep(1)

    return 0
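
The helpers check_osd_memory_target_of_node and check_osd_daemon_memory_usage are not shown in this example. Below is a minimal sketch of the latter, assuming exec_command returns decoded stdout/stderr strings (as the out.split() call above already implies), that the OSD processes expose their id via a --id argument on the command line, and that exceeding the target should fail the check:

def check_osd_daemon_memory_usage(ceph, osd_id_list_on_node, osd_mem_target):
    """Assert that every OSD daemon on the node stays below osd_memory_target (bytes)."""
    # ps reports the resident set size (KiB) and the full command line of each ceph-osd
    out, _ = ceph.exec_command(cmd="sudo ps -C ceph-osd -o rss=,args=")
    for line in out.splitlines():
        if not line.strip():
            continue
        rss_kib, args = line.strip().split(None, 1)
        tokens = args.split()
        if "--id" not in tokens:
            continue
        osd_id = tokens[tokens.index("--id") + 1]
        if osd_id not in [str(i) for i in osd_id_list_on_node]:
            continue
        rss_bytes = int(rss_kib) * 1024
        assert rss_bytes <= int(osd_mem_target), (
            "osd.{} on {} uses {} bytes, above osd_memory_target {}".format(
                osd_id, ceph.hostname, rss_bytes, osd_mem_target
            )
        )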
Example #2
def run(**kw):
    log.info("Starting CEPH-9475")
    mirror1 = kw.get('test_data')['mirror1']
    mirror2 = kw.get('test_data')['mirror2']
    config = kw.get('config')
    poolname = mirror1.random_string() + '9475pool'
    imagename = mirror1.random_string() + '9475image'
    imagespec = poolname + '/' + imagename

    mirror1.create_pool(poolname=poolname)
    mirror2.create_pool(poolname=poolname)
    mirror1.create_image(imagespec=imagespec, size=config.get('imagesize'))
    mirror1.config_mirror(mirror2, poolname=poolname, mode='pool')
    mirror2.wait_for_status(poolname=poolname, images_pattern=1)
    mirror1.wait_for_status(imagespec=imagespec, state_pattern='up+stopped')
    mirror2.wait_for_status(imagespec=imagespec, state_pattern='up+replaying')

    with parallel() as p:
        for node in mirror2.ceph_nodes:
            p.spawn(mirror2.exec_cmd,
                    ceph_args=False,
                    cmd='reboot',
                    node=node,
                    check_ec=False)
        p.spawn(mirror1.benchwrite,
                imagespec=imagespec,
                io=config.get('io-total'))
    time.sleep(10)
    rc = mirror1.check_data(peercluster=mirror2, imagespec=imagespec)
    if rc == 0:
        mirror1.clean_up(peercluster=mirror2, pools=[poolname])
        return 0
    else:
        return 1
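
check_data above verifies that the image contents match on both clusters once mirroring catches up. The mirror object's API is not included here, so the following is only a standalone sketch of the comparison it implies; the helper name, and the assumption that exec_cmd can return command output when asked (output=True), are mine:

def compare_mirrored_image(primary, secondary, imagespec):
    """Return 0 when the image checksums match on both clusters, 1 otherwise."""
    # export each copy of the image to stdout and checksum it in place
    cmd = "rbd export {} - | md5sum | cut -d ' ' -f 1".format(imagespec)
    md5_primary = primary.exec_cmd(cmd=cmd, output=True)
    md5_secondary = secondary.exec_cmd(cmd=cmd, output=True)
    return 0 if md5_primary == md5_secondary else 1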
Example #3
def run(**kw):
    try:
        log.info("Starting CEPH-9474")
        mirror1 = kw.get('test_data')['mirror1']
        mirror2 = kw.get('test_data')['mirror2']
        config = kw.get('config')
        osd_cred = config.get('osp_cred')
        poolname = mirror1.random_string() + '9474pool'
        imagename = mirror1.random_string() + '9474image'
        imagespec = poolname + '/' + imagename

        mirror1.create_pool(poolname=poolname)
        mirror2.create_pool(poolname=poolname)
        mirror1.create_image(imagespec=imagespec, size=config.get('imagesize'))
        mirror1.config_mirror(mirror2, poolname=poolname, mode='pool')
        mirror2.wait_for_status(poolname=poolname, images_pattern=1)
        mirror1.wait_for_status(imagespec=imagespec,
                                state_pattern='up+stopped')
        mirror2.wait_for_status(imagespec=imagespec,
                                state_pattern='up+replaying')

        with parallel() as p:
            p.spawn(mirror1.benchwrite,
                    imagespec=imagespec,
                    io=config.get('io-total'))
            p.spawn(hard_reboot, osd_cred, name='ceph-rbd2')
        time.sleep(60)
        mirror1.check_data(peercluster=mirror2, imagespec=imagespec)
        mirror1.clean_up(peercluster=mirror2, pools=[poolname])
        return 0

    except Exception as e:
        log.exception(e)
        return 1
Example #4
def run(**kw):
    try:
        log.info("Starting CEPH-9474")
        mirror1 = kw.get("test_data")["mirror1"]
        mirror2 = kw.get("test_data")["mirror2"]
        config = kw.get("config")
        osd_cred = config.get("osp_cred")
        poolname = mirror1.random_string() + "9474pool"
        imagename = mirror1.random_string() + "9474image"
        imagespec = poolname + "/" + imagename

        mirror1.create_pool(poolname=poolname)
        mirror2.create_pool(poolname=poolname)
        mirror1.create_image(imagespec=imagespec, size=config.get("imagesize"))
        mirror1.config_mirror(mirror2, poolname=poolname, mode="pool")
        mirror2.wait_for_status(poolname=poolname, images_pattern=1)
        mirror1.wait_for_status(imagespec=imagespec,
                                state_pattern="up+stopped")
        mirror2.wait_for_status(imagespec=imagespec,
                                state_pattern="up+replaying")

        with parallel() as p:
            p.spawn(mirror1.benchwrite,
                    imagespec=imagespec,
                    io=config.get("io-total"))
            p.spawn(hard_reboot, osd_cred, name="ceph-rbd2")
        time.sleep(60)
        mirror1.check_data(peercluster=mirror2, imagespec=imagespec)
        mirror1.clean_up(peercluster=mirror2, pools=[poolname])
        return 0

    except Exception as e:
        log.exception(e)
        return 1
Example #5
def run(**kw):
    log.info("Running test")
    ceph_nodes = kw.get("ceph_nodes")
    # skip subscription manager if testing beta RHEL
    config = kw.get("config")
    skip_subscription = config.get("skip_subscription", False)
    enable_eus = config.get("enable_eus", False)
    repo = config.get("add-repo", False)
    rhbuild = config.get("rhbuild")
    skip_enabling_rhel_rpms = config.get("skip_enabling_rhel_rpms", False)
    is_production = config.get("is_production", False)
    with parallel() as p:
        for ceph in ceph_nodes:
            p.spawn(
                install_prereq,
                ceph,
                1800,
                skip_subscription,
                repo,
                rhbuild,
                enable_eus,
                skip_enabling_rhel_rpms,
                is_production,
            )
            time.sleep(20)
    return 0
Example #6
def run(**kw):
    log.info("Running test")
    ceph_nodes = kw.get("ceph_nodes")
    # skip subscription manager if testing beta RHEL
    config = kw.get("config")
    skip_subscription = config.get("skip_subscription", False)
    enable_eus = config.get("enable_eus", False)
    repo = config.get("add-repo", False)
    rhbuild = config.get("rhbuild")
    skip_enabling_rhel_rpms = config.get("skip_enabling_rhel_rpms", False)
    is_production = config.get("is_production", False)
    build_type = config.get("build_type", None)
    # when the build type is "released", subscribe the nodes with CDN credentials
    if build_type == "released":
        is_production = True
    cloud_type = config.get("cloud-type", "openstack")
    with parallel() as p:
        for ceph in ceph_nodes:
            p.spawn(
                install_prereq,
                ceph,
                1800,
                skip_subscription,
                repo,
                rhbuild,
                enable_eus,
                skip_enabling_rhel_rpms,
                is_production,
                cloud_type,
            )
            time.sleep(20)
    return 0
Example #7
def run(**kwargs):
    results = dict()

    parallel_tests = kwargs["parallel"]

    with parallel() as p:
        for test in parallel_tests:
            p.spawn(execute, test, kwargs, results)
    return 0
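
Every example in this collection leans on the framework's parallel() context manager. The real helper is gevent based in the framework these examples come from; the thread-based stand-in below is only a sketch of the contract the examples rely on: spawn() schedules a call, iterating the manager inside the with-block yields each call's return value, and leaving the block waits for every spawned call to finish.

from concurrent.futures import ThreadPoolExecutor


class parallel:
    """Simplified stand-in for the framework helper, not the real implementation."""

    def __init__(self):
        self._pool = ThreadPoolExecutor()
        self._futures = []

    def __enter__(self):
        return self

    def spawn(self, fn, *args, **kwargs):
        # schedule fn to run concurrently with everything else spawned in this block
        self._futures.append(self._pool.submit(fn, *args, **kwargs))

    def __iter__(self):
        # yields results in spawn order, e.g. `for op in p: ...` inside the with-block
        for future in self._futures:
            yield future.result()

    def __exit__(self, exc_type, exc, tb):
        # block until every spawned call has completed, re-raising any exception
        for future in self._futures:
            future.result()
        self._pool.shutdown(wait=True)
        return False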
Example #8
def run(args: dict) -> int:
    """Standard script to collect all the logs from ceph cluster through installer node

    Through installer node get all other nodes in the cluster, generate sosreport for all the nodes obtained.
    Run shell script on installer node, then upload all the collected logs to magna

    Args:
       ip         installer IP address
       username   username to be used to access the system other than root
       password   password for installer node

    Returns:
        0 on success or 1 for failures

    Raises:
        AssertionError: if the given IP does not belong to the installer node
    """
    results = []
    run_id = generate_unique_id(length=6)
    ip = args["--ip"]
    uname = args["--username"]
    pword = args["--password"]
    directory = args["--directory"]
    log_dir = f"ceph_logs_{run_id}"

    ssh_install = SSHConnectionManager(ip, uname, pword).get_client()
    stdin, stdout, stderr = ssh_install.exec_command("hostname")
    if "installer" not in stdout.read().decode():
        raise AssertionError("Please provide installer node details")

    ssh_install.exec_command(f"sudo mkdir -p {log_dir}")
    ssh_install.exec_command(f"sudo chown -R {uname}:{uname} {log_dir}")
    stdin, stdout, stderr = ssh_install.exec_command(
        "cut -f 1 /etc/hosts | cut -d ' ' -f 3")
    nodes = stdout.read().decode().split("\n")
    print(f"Host that are obtained from given host: {nodes}")
    collect_logs(ip, ssh_install, uname, log_dir, results)
    with parallel() as p:
        for nodeip in nodes:
            if nodeip:
                p.spawn(
                    generate_sosreport_in_node,
                    ssh_install,
                    nodeip,
                    uname,
                    pword,
                    log_dir,
                    results,
                )
    upload_logs(log_dir, ssh_install, ip, results, directory)
    print(f"Failed to collect logs from nodes :{results}")
    return 1 if results else 0
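
generate_sosreport_in_node, collect_logs, and upload_logs are framework helpers that are not reproduced here. A rough sketch of the first one, reusing the SSHConnectionManager from the example and assuming paramiko-style clients plus a passwordless copy from the node back to the installer; any failure is appended to the shared results list:

def generate_sosreport_in_node(ssh_install, nodeip, uname, pword, log_dir, results):
    """Run sosreport on one node and stage the archive in the installer's log directory."""
    try:
        ssh_node = SSHConnectionManager(nodeip, uname, pword).get_client()
        _, stdout, _ = ssh_node.exec_command("sudo sosreport --batch --tmp-dir /var/tmp")
        if stdout.channel.recv_exit_status() != 0:  # wait for sosreport to finish
            raise RuntimeError("sosreport failed")
        # pull the archive onto the installer node so upload_logs() can find it
        _, stdout, _ = ssh_install.exec_command(
            "scp {user}@{node}:/var/tmp/sosreport-*.tar.xz {logs}/".format(
                user=uname, node=nodeip, logs=log_dir
            )
        )
        stdout.channel.recv_exit_status()
    except Exception as err:
        results.append("{}: {}".format(nodeip, err))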
Example #9
def run(**kw):
    log.info("Running test")
    ceph_nodes = kw.get('ceph_nodes')
    # skip subscription manager if testing beta RHEL
    config = kw.get('config')
    skip_subscription = config.get('skip_subscription', False)
    repo = config.get('add-repo', False)
    rhbuild = config.get('rhbuild')
    with parallel() as p:
        for ceph in ceph_nodes:
            p.spawn(install_prereq, ceph, 1800, skip_subscription, repo, rhbuild)
            time.sleep(20)
    return 0
Example #10
def run(**kw):
    log.info("Running test")
    ceph_nodes = kw.get("ceph_nodes")
    # config = kw.get('config')

    with parallel() as p:
        for ceph in ceph_nodes:
            distro_info = ceph.distro_info
            distro_ver = distro_info["VERSION_ID"]
            if distro_ver.startswith("8"):
                p.spawn(add_recent_rhel8_product_cert, ceph)
            time.sleep(5)
    return 0
Example #11
def cleanup_ceph_vols(osp_cred):
    """
    Clean up stale volumes whose status is 'deleting' or 'error'.
    """
    vol_states = ["deleting", "error"]
    driver = get_openstack_driver(osp_cred)
    with parallel() as p:
        for volume in driver.list_volumes():
            if volume.state in vol_states:
                if volume.name.startswith("ceph-"):
                    p.spawn(volume_cleanup, volume, driver)
                    sleep(1)
    sleep(10)
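
volume_cleanup is not shown above. A best-effort sketch against the libcloud driver returned by get_openstack_driver, assuming stale volumes may still report attachments in volume.extra and that a few retries are enough while the volume finishes transitioning:

from time import sleep


def volume_cleanup(volume, driver):
    """Detach (if needed) and destroy a stale volume, retrying while it settles."""
    for _ in range(3):
        try:
            if volume.extra.get("attachments"):
                driver.detach_volume(volume)
            driver.destroy_volume(volume)
            return
        except Exception:
            # the volume may still be changing state on the OpenStack side
            sleep(10)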
Example #12
def run(**kw):
    try:
        log.info("Starting CEPH-9471")
        mirror1 = kw.get("test_data")["mirror1"]
        mirror2 = kw.get("test_data")["mirror2"]
        config = kw.get("config")
        poolname = mirror1.random_string() + "9471pool"
        imagename = mirror1.random_string() + "9471image"
        imagespec = poolname + "/" + imagename
        state_after_demote = "up+stopped" if mirror1.ceph_version < 3 else "up+unknown"

        mirror1.create_pool(poolname=poolname)
        mirror2.create_pool(poolname=poolname)
        mirror1.create_image(imagespec=imagespec, size=config.get("imagesize"))
        mirror1.config_mirror(mirror2, poolname=poolname, mode="pool")
        mirror2.wait_for_status(poolname=poolname, images_pattern=1)
        mirror1.benchwrite(imagespec=imagespec, io=config.get("io-total"))
        mirror2.wait_for_replay_complete(imagespec=imagespec)
        mirror1.demote(imagespec=imagespec)
        mirror1.wait_for_status(imagespec=imagespec,
                                state_pattern=state_after_demote)
        mirror2.wait_for_status(imagespec=imagespec,
                                state_pattern=state_after_demote)
        with parallel() as p:
            for node in mirror1.ceph_nodes:
                p.spawn(
                    mirror1.exec_cmd,
                    ceph_args=False,
                    cmd="reboot",
                    node=node,
                    check_ec=False,
                )
        mirror2.promote(imagespec=imagespec)
        mirror2.benchwrite(imagespec=imagespec, io=config.get("io-total"))
        time.sleep(30)
        mirror2.check_data(peercluster=mirror1, imagespec=imagespec)
        mirror2.demote(imagespec=imagespec)
        mirror2.wait_for_status(imagespec=imagespec,
                                state_pattern=state_after_demote)
        mirror1.wait_for_status(imagespec=imagespec,
                                state_pattern=state_after_demote)
        mirror1.promote(imagespec=imagespec)
        mirror1.benchwrite(imagespec=imagespec, io=config.get("io-total"))
        mirror1.check_data(peercluster=mirror2, imagespec=imagespec)
        mirror1.clean_up(peercluster=mirror2, pools=[poolname])
        return 0

    except Exception as e:
        log.exception(e)
        return 1
Example #13
def run(args: Dict) -> int:
    """
    Using the provided credential file, this method removes the instances that have
    been running past the allowable duration.

    Arguments:
        args: Dict - containing the key/value pairs passed by the user

    Returns:
        0 on success or 1 for failures
    """
    osp_cred_file = args["--osp-cred"]

    with open(osp_cred_file, "r") as osp_cred_stream:
        yh = yaml.safe_load(osp_cred_stream)
        osp_cred = yh["globals"]["openstack-credentials"]

        results = dict()

        tenants = ["ceph-ci", "ceph-core", "ceph-jenkins"]
        for tenant in tenants:
            driver_ = get_driver(Provider.OPENSTACK)
            osp_driver = driver_(
                osp_cred["username"],
                osp_cred["password"],
                api_version="2.2",
                ex_force_auth_url=osp_cred["auth-url"],
                ex_force_auth_version=osp_cred["auth-version"],
                ex_tenant_name=tenant,
                ex_force_service_region=osp_cred["service-region"],
                ex_domain_name=osp_cred["domain"],
                ex_tenant_domain_id=osp_cred["tenant-domain-id"],
            )

            osp_identity = osp_driver.connection.get_auth_class()
            osp_identity.connect()

            with parallel() as p:
                for node in osp_driver.list_nodes():
                    p.spawn(cleanup, osp_identity, osp_cred, node, results,
                            tenant)

    response = send_email(results)

    if response:
        print(f"Failed to delete Instances/nodes: {response}")
        return 1
    else:
        return 0
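
The cleanup routine spawned above is not included in this snippet. A sketch of the age check it implies, assuming libcloud nodes expose their creation time as node.extra["created"] and that the allowable duration is a fixed number of hours (the 72-hour threshold below is only a placeholder):

from datetime import datetime, timedelta


def cleanup(osp_identity, osp_cred, node, results, tenant, max_hours=72):
    """Destroy an instance that has been running past the allowable duration."""
    # osp_identity/osp_cred would be needed for project-level lookups; unused in this sketch
    created = datetime.strptime(node.extra["created"], "%Y-%m-%dT%H:%M:%SZ")
    if datetime.utcnow() - created < timedelta(hours=max_hours):
        return
    try:
        node.destroy()
    except Exception as err:
        results.setdefault(tenant, []).append("{}: {}".format(node.name, err))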
Example #14
def run(**kw):
    try:
        log.info("Starting CEPH-9471")
        mirror1 = kw.get('test_data')['mirror1']
        mirror2 = kw.get('test_data')['mirror2']
        config = kw.get('config')
        poolname = mirror1.random_string() + '9471pool'
        imagename = mirror1.random_string() + '9471image'
        imagespec = poolname + '/' + imagename
        state_after_demote = 'up+stopped' if mirror1.ceph_version < 3 else 'up+unknown'

        mirror1.create_pool(poolname=poolname)
        mirror2.create_pool(poolname=poolname)
        mirror1.create_image(imagespec=imagespec, size=config.get('imagesize'))
        mirror1.config_mirror(mirror2, poolname=poolname, mode='pool')
        mirror2.wait_for_status(poolname=poolname, images_pattern=1)
        mirror1.benchwrite(imagespec=imagespec, io=config.get('io-total'))
        mirror2.wait_for_replay_complete(imagespec=imagespec)
        mirror1.demote(imagespec=imagespec)
        mirror1.wait_for_status(imagespec=imagespec,
                                state_pattern=state_after_demote)
        mirror2.wait_for_status(imagespec=imagespec,
                                state_pattern=state_after_demote)
        with parallel() as p:
            for node in mirror1.ceph_nodes:
                p.spawn(mirror1.exec_cmd,
                        ceph_args=False,
                        cmd='reboot',
                        node=node,
                        check_ec=False)
        mirror2.promote(imagespec=imagespec)
        mirror2.benchwrite(imagespec=imagespec, io=config.get('io-total'))
        time.sleep(30)
        mirror2.check_data(peercluster=mirror1, imagespec=imagespec)
        mirror2.demote(imagespec=imagespec)
        mirror2.wait_for_status(imagespec=imagespec,
                                state_pattern=state_after_demote)
        mirror1.wait_for_status(imagespec=imagespec,
                                state_pattern=state_after_demote)
        mirror1.promote(imagespec=imagespec)
        mirror1.benchwrite(imagespec=imagespec, io=config.get('io-total'))
        mirror1.check_data(peercluster=mirror2, imagespec=imagespec)
        mirror1.clean_up(peercluster=mirror2, pools=[poolname])
        return 0

    except Exception as e:
        log.exception(e)
        return 1
Example #15
def run(**kwargs):
    results = {}
    parallel_tests = kwargs["parallel"]

    with parallel() as p:
        for test in parallel_tests:
            p.spawn(execute, test, kwargs, results)
            sleep(1)

    test_rc = 0
    for key, value in results.items():
        log.info(f"{key} test result is {'PASS' if value == 0 else 'FAILED'}")
        if value != 0:
            test_rc = value

    return test_rc
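
The execute helper that both parallel-suite runners spawn is not shown. A minimal sketch, assuming each entry in kwargs["parallel"] is a test block whose module name sits under test["test"]["module"] and whose result is keyed by the test name (the exact layout of the test dictionary is an assumption):

import importlib


def execute(test, args, results):
    """Import a test module, run its run() entry point, and record the return code."""
    test = test.get("test", test)
    module_name = test["module"].replace(".py", "")
    mod = importlib.import_module("tests." + module_name)
    rc = mod.run(**args)
    # each spawned call writes a distinct key, so updating the shared dict is safe
    results[test.get("name", module_name)] = rc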
Example #16
def run(**kw):
    """
    :param kw:
       - ceph_nodes: ceph node list representing a cluster
    :return: 0 on success, 1 for failures
    """
    ceph_nodes = kw.get("ceph_nodes")
    results = dict()
    log.info("Running sosreport test")
    with parallel() as p:
        for cnode in ceph_nodes:
            if cnode.role != "client":
                p.spawn(generate_sosreport, cnode, results)
    log.info(results)
    if all(results.values()):
        return 0
    return 1
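
One possible shape for the generate_sosreport helper used here, assuming the node object accepts the sudo/long_running keywords seen in other examples in this collection and that the shared results dict maps hostname to True/False:

def generate_sosreport(cnode, results):
    """Run sosreport on a node and record whether it succeeded."""
    try:
        cnode.exec_command(
            sudo=True,
            cmd="sosreport --batch --tmp-dir /var/tmp",
            long_running=True,
        )
        results[cnode.hostname] = True
    except Exception:
        results[cnode.hostname] = False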
Example #17
def run(**kw):
    """
    Rbd Workflow module to manage ceph-rbd services

    Sample test script

        - test:
            abort-on-fail: true
            name: snap and clone operations on imported image
            desc: Snap and clone operations on imported image
            module: test_rbdV2.py
            clusters:
                ceph-rbd1:
                config:
                    node: node6
                    test_config: test_snap_clone_imported_image.yaml

    Arguments:
        args: Dict - containing the key/value pairs passed from the test-suite

    Returns:
        0 on success or 1 for failures
    """
    try:
        config = kw["config"]
        test_configs_path = "tests/rbd/test_configs/" + config["test_config"]
        with open(os.path.abspath(test_configs_path)) as test_config_file:
            test_config = yaml.safe_load(test_config_file)
            # Step 2: get the class and method names for the entry point to trigger
            if test_config.get("parallel", False):
                log.info("execution started")
                with parallel() as p:
                    for step in test_config["steps"]:
                        p.spawn(operator, test_config, step, **kw)
                        sleep(1)
            else:
                for step in test_config["steps"]:
                    operator(test_config, step, **kw)

    except CommandFailed as error:
        log.error(error)
        return 1

    finally:
        log.info("")
    return 0
Example #18
def run(**kw):
    log.info("Running workunit test")
    ceph_nodes = kw.get('ceph_nodes')
    config = kw.get('config')

    role = 'client'
    if config.get('role'):
        role = config.get('role')
    with parallel() as p:
        for cnode in ceph_nodes:
            if cnode.role == role:
                if config.get('kernel-repo'):
                    repo = config.get('kernel-repo')
                    log.info("writing " + repo)
                    p.spawn(update_kernel_and_reboot, cnode, repo)
                elif os.environ.get('KERNEL-REPO-URL', None) is not None:
                    log.info("writing from ENV " + repo)
                    repo = os.environ['KERNEL-REPO-URL']
                    p.spawn(update_kernel_and_reboot, cnode, repo)
    return 0
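
update_kernel_and_reboot is not defined in this snippet. A rough sketch, assuming the repo argument is a yum repository URL and that the node object supports the sudo/check_ec/long_running keywords used elsewhere in these examples:

def update_kernel_and_reboot(cnode, repo):
    """Point the node at the kernel repo, update the kernel package, and reboot."""
    cnode.exec_command(
        sudo=True, cmd="yum-config-manager --add-repo {}".format(repo)
    )
    cnode.exec_command(
        sudo=True, cmd="yum -y update kernel --nogpgcheck", long_running=True
    )
    # the reboot drops the SSH connection, so do not check the exit code
    cnode.exec_command(sudo=True, cmd="reboot", check_ec=False)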
Example #19
def run(**kw):
    log.info("Running workunit test")
    ceph_nodes = kw.get("ceph_nodes")
    config = kw.get("config")

    role = "client"
    if config.get("role"):
        role = config.get("role")
    with parallel() as p:
        for cnode in ceph_nodes:
            if cnode.role == role:
                if config.get("kernel-repo"):
                    repo = config.get("kernel-repo")
                    log.info("writing " + repo)
                    p.spawn(update_kernel_and_reboot, cnode, repo)
                elif os.environ.get("KERNEL-REPO-URL", None) is not None:
                    log.info("writing from ENV " + repo)
                    repo = os.environ["KERNEL-REPO-URL"]
                    p.spawn(update_kernel_and_reboot, cnode, repo)
    return 0
Example #20
def run(**kw):
    log.info("Running test")
    ceph_nodes = kw.get("ceph_nodes")
    test_data = kw.get("test_data")
    iscsi_util = IscsiUtils(ceph_nodes)
    iscsi_initiators = iscsi_util.get_iscsi_initiator_linux()
    initiatorname = iscsi_util.get_initiatorname()
    iscsi_util.write_multipath(iscsi_initiators)
    iscsi_util.write_chap(initiatorname, iscsi_initiators)
    no_of_luns = test_data["no_of_luns"]
    rc = []

    device_list = iscsi_util.get_devicelist_luns(no_of_luns)
    iscsi_util.create_directory_with_io(device_list,
                                        iscsi_initiators,
                                        io_size="1G")
    with parallel() as p:
        p.spawn(iscsi_util.do_ios, iscsi_initiators, device_list)
        p.spawn(do_failover, iscsi_initiators, device_list, ceph_nodes)
        for op in p:
            rc.append(op)

    print("return   " + str(rc))
    rc = set(rc)
    if len(rc) == 1:
        print(rc)
        iscsi_util.umount_directory(device_list, iscsi_initiators)
        iscsi_initiators.exec_command(
            sudo=True,
            cmd="iscsiadm -m node -T iqn.2003-01.com.redhat.iscsi-"
            "gw:ceph-igw -u",
            long_running=True,
        )
        iscsi_initiators.exec_command(sudo=True,
                                      cmd="systemctl stop multipathd",
                                      long_running=True)
        return 0
    else:
        print(rc)
        return 1
Example #21
def cleanup_ceph_vols(osp_cred):
    """
    Clean up stale volumes whose status is 'deleting', 'error', or 'available'
    (available volumes are removed only when older than 30 minutes).
    """
    projects = ["ceph-jenkins", "ceph-core", "ceph-ci"]
    vol_states = ["deleting", "error", "available"]
    for each_project in projects:
        print("Checking in project : ", each_project)
        driver = get_openstack_driver(osp_cred, each_project)
        with parallel() as p:
            for volume in driver.list_volumes():
                if volume.state not in vol_states:
                    continue
                if volume.state == "available":
                    create_datetime = datetime.strptime(
                        volume.extra["created_at"], "%Y-%m-%dT%H:%M:%S.%f")
                    allowed_duration = create_datetime + timedelta(minutes=30)
                    if allowed_duration > datetime.utcnow():
                        continue
                p.spawn(volume_cleanup, volume, driver)
                sleep(1)
        sleep(10)
Example #22
def run(**kw):
    """Entry point for execution used by cephci framework.

    :param kw:
       - ceph_nodes: ceph node list representing a cluster

    :return: 0 on success, 1 for failures
    """
    log.info(f"MetaData Information {log.metadata} in {__name__}")
    ceph_nodes = kw.get("ceph_nodes")
    results = dict()

    log.info("Running sosreport test")
    with parallel() as p:
        for cnode in ceph_nodes:
            if cnode.role != "client":
                p.spawn(generate_sosreport, cnode, results)

    log.info(results)
    if all(results.values()):
        return 0

    return 1
Example #23
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        config = kw.get("config")
        num_of_dirs = config.get("num_of_dirs")
        num_of_dirs = num_of_dirs // 5  # keep this an integer; it is used to build directory ranges
        tc = "11228"
        dir_name = "dir"
        test_dir = "testdir/"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)

        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="crefi",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="crefi",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info["mds_nodes"])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(cmd="sudo mkdir %s%s" %
                                    (client_info["mounting_dir"], test_dir))

            with parallel() as p:
                p.spawn(
                    fs_util.mkdir_bulk,
                    client1,
                    0,
                    num_of_dirs * 2,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 2 + 1,
                    num_of_dirs * 4,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 4 + 1,
                    num_of_dirs * 6,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 6 + 1,
                    num_of_dirs * 8,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 8 + 1,
                    num_of_dirs * 10,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                for op in p:
                    rc = op
            if rc == 0:
                log.info("Directories created successfully")
            else:
                raise CommandFailed("Directory creation failed")

            with parallel() as p:
                p.spawn(
                    fs_util.pinning,
                    client2,
                    0,
                    num_of_dirs * 1,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    num_of_dirs * 1,
                    num_of_dirs * 2,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client4,
                    num_of_dirs * 2,
                    num_of_dirs * 3,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client1,
                    num_of_dirs * 3,
                    num_of_dirs * 4,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    0,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    num_of_dirs * 4,
                    num_of_dirs * 5,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    0,
                )

            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client1,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    num_of_dirs * 1,
                    num_of_dirs * 5,
                    10,
                    fs_util.mds_fail_over,
                    client_info["mds_nodes"],
                )
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client3,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    num_of_dirs * 7,
                    num_of_dirs * 8,
                    20,
                    fs_util.mds_fail_over,
                    client_info["mds_nodes"],
                )
                for op in p:
                    return_counts, rc = op
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            print(result)
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

            if rc_client == 0 and rc_mds == 0:
                log.info("Cleaning up successfull")
            else:
                return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc_client = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

        else:
            rc_client = fs_util.client_clean_up(client_info["fuse_clients"],
                                                "",
                                                client_info["mounting_dir"],
                                                "umount")
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

        if rc_client == 0 and rc_mds == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
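
Helpers such as mkdir_bulk, pinning, and pinned_dir_io_mdsfailover live in FsUtils and are not reproduced here. As an illustration, a sketch of what mkdir_bulk could boil down to, assuming exec_command runs through a shell that supports brace expansion and that returning 0 signals success to the `for op in p` loop:

def mkdir_bulk(clients, start, end, mount_point, dir_base):
    """Create mount_point/dir_base<start> .. dir_base<end> from the first client."""
    client = clients[0]
    client.exec_command(
        sudo=True,
        cmd="mkdir -p {base}{{{first}..{last}}}".format(
            base=mount_point + dir_base, first=int(start), last=int(end)
        ),
        long_running=True,
    )
    return 0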
Example #24
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "10625,11225"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            dirs, rc = fs_util.mkdir(client1, 0, 6,
                                     client_info["mounting_dir"], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split("\n")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_rename",
                    fnum=1000,
                    fsize=10,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_delete-renamed",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=1000,
                    fsize=10,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[4],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[5],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[6],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_rename",
                    fnum=1,
                    fsize=1000000,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_delete-renamed",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[4],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=1,
                    fsize=1000000,
                )
        dir_name = "!@#$%^&*()-_=+[]{};:,.<>?"
        out, rc = client1[0].exec_command(
            cmd="sudo mkdir '%s%s'" % (client_info["mounting_dir"], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Directory created")
        else:
            raise CommandFailed("Directory creation failed")
        for client in client_info["fuse_clients"]:
            file_name = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in range(255))
            client.exec_command(
                cmd="sudo touch '%s%s/%s'" %
                (client_info["mounting_dir"], dir_name, file_name))
        for client in client_info["kernel_clients"]:
            if client.pkg_type == "rpm":
                file_name = "".join(
                    random.choice(string.ascii_lowercase + string.digits)
                    for _ in range(255))
                client.exec_command(
                    cmd="sudo touch '%s%s/%s'" %
                    (client_info["mounting_dir"], dir_name, file_name))
        for num in range(0, 5):
            for client in client_info["fuse_clients"]:
                ops = ["create", "setxattr", "getxattr", "chmod", "rename"]
                for op in ops:
                    client.exec_command(
                        sudo=True,
                        cmd=
                        f"python3 smallfile/smallfile_cli.py --operation {op} --threads 10 --file-size 4 "
                        f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                        f"{client_info['mounting_dir']}{dir_name}",
                        long_running=True,
                        timeout=300,
                    )
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #25
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11298'
        source_dir = '/mnt/source'
        target_dir = 'target'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        for client in client_info['clients']:
            client.exec_command(cmd='sudo rm -rf  %s' % source_dir)
            client.exec_command(cmd='sudo mkdir %s' % source_dir)

        for client in client_info['clients']:
            client.exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], target_dir))
            break
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    source_dir,
                    '',
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.read_write_IO, client1, source_dir, 'g', 'write')
            p.spawn(fs_util.stress_io,
                    client2,
                    source_dir,
                    '',
                    0,
                    10,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client3,
                    source_dir,
                    '',
                    0,
                    10,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client4,
                    source_dir,
                    '',
                    0,
                    1,
                    iotype='fio')
            for op in p:
                return_counts1, rc = op

        with parallel() as p:
            p.spawn(fs_util.rsync, client1, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client2, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client3, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client4, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            for op in p:
                return_counts2, rc = op

        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    11,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    3,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    1,
                    iotype='fio')
            for op in p:
                return_counts3, rc = op
        with parallel() as p:
            p.spawn(fs_util.rsync, client1,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client2,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client3,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client4,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            for op in p:
                return_counts4, rc = op

        rc = list(return_counts1.values()) + list(return_counts2.values()) + \
            list(return_counts3.values()) + list(return_counts4.values())
        rc_set = set(rc)
        if len(rc_set) == 1:
            print("Test case CEPH-%s passed" % (tc))
        else:
            print(("Test case CEPH-%s failed" % (tc)))
        log.info("Test completed for CEPH-%s" % (tc))
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        else:
            return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')
        else:
            fs_util.client_clean_up(client_info['fuse_clients'], '',
                                    client_info['mounting_dir'], 'umount')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #26
def run(ceph_cluster, **kw):
    """
     1. Create a LRC profile and then create a ec pool
            #ceph osd erasure-code-profile set $profile \
            plugin=lrc \
            k=4 m=2 l=3 \
            ruleset-failure-domain=osd
             # ceph osd pool create $poolname 1 1  erasure $profile

    2. start writing a large object so that we will get \
            sometime to fail the osd while the reads and writes are
            in progress on an object

    # rados put -p lrcpool obj1 /src/path
    #rados get -p lrcpool obj1 /tmp/obj1

    while above command is in progress kill primary
    osd responsible for the PG.
    primary can be found from
    # ceph pg dump

    3. Bring back primary

    4. Repeat the step 2 but this time kill some secondary osds

    Args:
        ceph_cluster (ceph.ceph.Ceph):
    """

    log.info("Running test CEPH-9281")
    ceph_nodes = kw.get("ceph_nodes")
    config = kw.get("config")
    build = config.get("build", config.get("rhbuild"))

    mons = []
    role = "client"

    for mnode in ceph_nodes:
        if mnode.role == role:
            mons.append(mnode)

    ctrlr = mons[0]
    log.info("chosing mon {cmon} as ctrlrmon".format(cmon=ctrlr.hostname))

    helper = RadosHelper(ctrlr, config, log)
    """ create LRC profile """
    sufix = random.randint(0, 10000)
    prof_name = "LRCprofile{suf}".format(suf=sufix)
    if build.startswith("4"):
        profile = "osd erasure-code-profile set {LRCprofile} plugin=lrc k=4 m=2 l=3 \
            crush-failure-domain=osd".format(LRCprofile=prof_name)
    else:
        profile = "osd erasure-code-profile set {LRCprofile} plugin=lrc k=4 m=2 l=3 \
            ruleset-failure-domain=osd crush-failure-domain=osd".format(
            LRCprofile=prof_name)
    try:
        (outbuf, err) = helper.raw_cluster_cmd(profile)
        log.info(outbuf)
        log.info("created profile {LRCprofile}".format(LRCprofile=prof_name))
    except Exception:
        log.error("LRC profile creation failed")
        log.error(traceback.format_exc())
        return 1
    """create LRC ec pool"""
    pool_name = "lrcpool{suf}".format(suf=sufix)
    try:
        helper.create_pool(pool_name, 1, prof_name)
        log.info("Pool {pname} created".format(pname=pool_name))
    except Exception:
        log.error("lrcpool create failed")
        log.error(traceback.format_exc())
        return 1
    """rados put and get in a parallel task"""
    with parallel() as p:
        p.spawn(do_rados_put, ctrlr, pool_name, 20)
        p.spawn(do_rados_get, ctrlr, pool_name, 10)

        for res in p:
            log.info(res)

    try:
        pri_osd_id = helper.get_pg_primary(pool_name, 0)
        log.info("PRIMARY={pri}".format(pri=pri_osd_id))
    except Exception:
        log.error("getting primary failed")
        log.error(traceback.format_exc())
        return 1

    log.info("SIGTERM osd")
    target_osd_hostname = ceph_cluster.get_osd_metadata(pri_osd_id).get(
        "hostname")
    pri_osd_node = ceph_cluster.get_node_by_hostname(target_osd_hostname)
    pri_osd_service = ceph_cluster.get_osd_service_name(pri_osd_id)
    try:
        helper.kill_osd(pri_osd_node, pri_osd_service)
        log.info("osd killed")
    except Exception:
        log.error("killing osd failed")
        log.error(traceback.format_exc())
    if not helper.wait_until_osd_state(osd_id=pri_osd_id, down=True):
        log.error("unexpected! osd is still up")
        return 1
    time.sleep(5)
    log.info("Reviving osd {osd}".format(osd=pri_osd_id))

    try:
        if helper.revive_osd(pri_osd_node, pri_osd_service):
            log.error("revive failed")
            return 1
    except Exception:
        log.error("revive failed")
        log.error(traceback.format_exc())
        return 1
    if not helper.wait_until_osd_state(pri_osd_id):
        log.error("osd is DOWN")
        return 1
    log.info(
        f"Revival of Primary OSD : {pri_osd_id} is complete\n Killing random OSD"
    )

    time.sleep(10)
    try:
        rand_osd_id = helper.get_pg_random(pool_name, 0)
        log.info("RANDOM OSD={rosd}".format(rosd=rand_osd_id))
    except Exception:
        log.error("getting  random osd failed")
        log.error(traceback.format_exc())
        return 1
    log.info("SIGTERM osd")
    target_osd_hostname = ceph_cluster.get_osd_metadata(rand_osd_id).get(
        "hostname")
    rand_osd_node = ceph_cluster.get_node_by_hostname(target_osd_hostname)
    rand_osd_service = ceph_cluster.get_osd_service_name(rand_osd_id)
    try:
        helper.kill_osd(rand_osd_node, rand_osd_service)
        log.info("osd killed")
    except Exception:
        log.error("killing osd failed")
        log.error(traceback.format_exc())
    if not helper.wait_until_osd_state(osd_id=rand_osd_id, down=True):
        log.error("unexpected! osd is still up")
        return 1
    time.sleep(5)
    log.info("Reviving osd {osd}".format(osd=rand_osd_id))
    try:
        if helper.revive_osd(rand_osd_node, rand_osd_service):
            log.error("revive failed")
            return 1
    except Exception:
        log.error("revive failed")
        log.error(traceback.format_exc())
        return 1
    if not helper.wait_until_osd_state(rand_osd_id):
        log.error("osd is DOWN")
        return 1
    log.info(f"Revival of Random OSD : {rand_osd_id} is complete")
    return 0
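The test above resolves the PG's primary and a random acting OSD through the framework's RadosHelper. Purely as an illustrative, standalone sketch (it shells out to the ceph CLI directly; the PG id below is a placeholder, not anything produced by the test), the acting set of a PG can be looked up like this:

import json
import subprocess


def pg_acting_set(pgid):
    """Return (primary_osd, acting_set) for a PG id such as '12.0'."""
    out = subprocess.check_output(
        ["ceph", "pg", "map", pgid, "--format", "json"])
    pg_map = json.loads(out)
    acting = pg_map["acting"]
    # The first OSD in the acting set is the primary; fall back to it if this
    # Ceph release does not report an explicit acting_primary field.
    return pg_map.get("acting_primary", acting[0]), acting


if __name__ == "__main__":
    primary, acting = pg_acting_set("12.0")  # placeholder PG id
    print("primary: osd.%s, acting set: %s" % (primary, acting))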
Example #27
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        config = kw.get('config')
        num_of_dirs = config.get('num_of_dirs')
        tc = '11227'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                '',
                0,
                2,
                iotype='crefi',
            )
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    '',
                    0,
                    2,
                    iotype='crefi')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify('', return_counts)

        client1[0].exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], 'testdir'))

        if result == 'Data validation success':
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info['mds_nodes'])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            num_of_dirs = int(num_of_dirs / 5)
            with parallel() as p:
                p.spawn(fs_util.mkdir_bulk, client1, 0, num_of_dirs * 1,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client2, num_of_dirs * 1 + 1,
                        num_of_dirs * 2,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client1, num_of_dirs * 2 + 1,
                        num_of_dirs * 3,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client2, num_of_dirs * 3 + 1,
                        num_of_dirs * 4,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client1, num_of_dirs * 4 + 1,
                        num_of_dirs * 5,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                for op in p:
                    rc = op
            if rc == 0:
                log.info('Directories created successfully')
            else:
                raise CommandFailed('Directory creation failed')

            with parallel() as p:
                p.spawn(fs_util.max_dir_io, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        num_of_dirs * 1, 10)
                p.spawn(fs_util.max_dir_io, client2,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 1, num_of_dirs * 2, 10)
                rc = fs_util.check_mount_exists(client1[0])
                if rc == 0:
                    fs_util.pinning(client1, 0, 10,
                                    client_info['mounting_dir'] + 'testdir/',
                                    dir_name, 0)

                p.spawn(fs_util.max_dir_io, client3,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 3, num_of_dirs * 4, 10)
                p.spawn(fs_util.max_dir_io, client4,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 4, num_of_dirs * 5, 10)

            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])

            with parallel() as p:
                p.spawn(fs_util.pinning, client2, 10, num_of_dirs * 1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client3, num_of_dirs * 1,
                        num_of_dirs * 2,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client4, num_of_dirs * 2,
                        num_of_dirs * 3,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client1, num_of_dirs * 3,
                        num_of_dirs * 4,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client3, num_of_dirs * 4,
                        num_of_dirs * 5,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)

            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client2,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client3,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client4,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])

            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'], client_info['kernel_clients'],
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'], '',
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            if rc_client == 0 and rc_mds == 0:
                log.info('Cleaning up successful')
            else:
                return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                client_info['kernel_clients'],
                                                client_info['mounting_dir'],
                                                'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

        else:
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                '',
                                                client_info['mounting_dir'],
                                                'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)
        if rc_client == 0 and rc_mds == 0:
            log.info('Cleaning up successful')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
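Example #27 exercises directory pinning through fs_util.pinning and MDS failover helpers. As a minimal standalone sketch (the mount path is a placeholder and pin_directory is not a helper from this framework), a CephFS directory subtree is pinned to an MDS rank by setting its ceph.dir.pin extended attribute:

import subprocess


def pin_directory(path, mds_rank):
    """Pin a CephFS directory subtree to the given MDS rank via its xattr."""
    subprocess.check_call(
        ["setfattr", "-n", "ceph.dir.pin", "-v", str(mds_rank), path])


if __name__ == "__main__":
    # Spread ten directories across MDS ranks 0 and 1; the path is a
    # placeholder for the mounted CephFS tree used in the test.
    for i in range(10):
        pin_directory("/mnt/cephfs/testdir/dir_%d" % i, i % 2)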
Example #28
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11222"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )

        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info["clients"]:
            log.info("Creating directory:")
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir1))
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd=
                f"python3 smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 --files 1000 "
                f"--files-per-dir 10 --dirs-per-dir 2 --top {client_info['mounting_dir']}{dir1}",
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)

            break

        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify("", return_counts)
        print(result1)

        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf %s%s" %
                                (client_info["mounting_dir"], dir1))
            break

        for client in client_info["clients"]:
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation create --threads 10 "
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)
            log.info("Renaming the dirs:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation rename --threads 10 --file-size 4"
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)

            break
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify("", return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        client1[0].exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir_name))
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "read",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                "write",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client2,
                client_info["mounting_dir"],
                "read",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "write",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("11223", return_counts)
        print(result)

        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            if rc == 0:
                log.info("Cleaning up successfull")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
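Example #28 passes only if check_ceph_healthly reports the same cluster health before and after the IO. A minimal standalone sketch of that idea, assuming direct access to the ceph CLI rather than the framework's node objects, could look like this:

import json
import subprocess


def cluster_health_status():
    """Return the overall health string, e.g. 'HEALTH_OK' or 'HEALTH_WARN'."""
    out = subprocess.check_output(["ceph", "status", "--format", "json"])
    return json.loads(out)["health"]["status"]


def assert_health_unchanged(run_io):
    """Run an IO-generating callable and verify cluster health did not change."""
    before = cluster_health_status()
    run_io()
    after = cluster_health_status()
    if before != after:
        raise RuntimeError(
            "cluster health changed during IO: %s -> %s" % (before, after))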
Example #29
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11298"
        source_dir = "/mnt/source"
        target_dir = "target"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf  %s" % source_dir)
            client.exec_command(cmd="sudo mkdir %s" % source_dir)

        for client in client_info["clients"]:
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], target_dir))
            break
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    source_dir,
                    "",
                    0,
                    100,
                    iotype="touch")
            p.spawn(fs_util.read_write_IO, client1, source_dir, "g", "write")
            p.spawn(fs_util.stress_io,
                    client2,
                    source_dir,
                    "",
                    0,
                    10,
                    iotype="dd")
            p.spawn(fs_util.stress_io,
                    client3,
                    source_dir,
                    "",
                    0,
                    10,
                    iotype="smallfile")
            p.spawn(fs_util.stress_io,
                    client4,
                    source_dir,
                    "",
                    0,
                    1,
                    iotype="fio")
            for op in p:
                return_counts1, rc = op

        with parallel() as p:
            p.spawn(
                fs_util.rsync,
                client1,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            p.spawn(
                fs_util.rsync,
                client2,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            p.spawn(
                fs_util.rsync,
                client3,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )

            p.spawn(
                fs_util.rsync,
                client4,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            for op in p:
                return_counts2, rc = op

        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                target_dir,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                target_dir,
                0,
                11,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                target_dir,
                0,
                3,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                target_dir,
                0,
                1,
                iotype="fio",
            )
            for op in p:
                return_counts3, rc = op
        with parallel() as p:
            p.spawn(
                fs_util.rsync,
                client1,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client2,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client3,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client4,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            for op in p:
                return_counts4, rc = op

        rc = (list(return_counts1.values()) + list(return_counts2.values()) +
              list(return_counts3.values()) + list(return_counts4.values()))
        rc_set = set(rc)
        if len(rc_set) == 1:
            print("Test case CEPH-%s passed" % (tc))
        else:
            print(("Test case CEPH-%s failed" % (tc)))
        log.info("Test completed for CEPH-%s" % (tc))
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
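Example #29 judges the rsync round-trip between /mnt/source and the CephFS mount only by the return counts collected from each client. A stricter, framework-independent check (the paths below are placeholders) would compare the two trees file by file with checksums:

import hashlib
import os


def dir_checksums(root):
    """Map each file path, relative to root, to the MD5 digest of its content."""
    sums = {}
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            with open(path, "rb") as fh:
                sums[os.path.relpath(path, root)] = hashlib.md5(
                    fh.read()).hexdigest()
    return sums


def trees_match(source, target):
    """Return True if both trees hold the same files with identical content."""
    return dir_checksums(source) == dir_checksums(target)


if __name__ == "__main__":
    print(trees_match("/mnt/source", "/mnt/cephfs/target"))  # placeholder paths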
Example #30
0
def run(ceph_cluster, **kw):
    try:
        tc = 'nfs-ganesha'
        nfs_mounting_dir = '/mnt/nfs_mount/'
        log.info("Running cephfs %s test case" % (tc))

        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        nfs_server = client_info['kernel_clients'][0]
        nfs_client = [client_info['kernel_clients'][1]]
        client1 = [client_info['fuse_clients'][0]]
        client2 = [client_info['fuse_clients'][1]]
        client3 = [client_info['kernel_clients'][0]]
        client4 = [client_info['kernel_clients'][1]]
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(
            client3,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        dirs, rc = fs_util.mkdir(
            client1, 0, 4, client_info['mounting_dir'], 'dir')
        if rc == 0:
            log.info("Directories created")
        else:
            raise CommandFailed("Directory creation failed")
        dirs = dirs.split('\n')

        rc = fs_util.nfs_ganesha_install(nfs_server)
        if rc == 0:
            log.info('NFS ganesha installed successfully')
        else:
            raise CommandFailed('NFS ganesha installation failed')
        rc = fs_util.nfs_ganesha_conf(nfs_server, 'admin')
        if rc == 0:
            log.info('NFS ganesha config added successfully')
        else:
            raise CommandFailed('NFS ganesha config adding failed')
        rc = fs_util.nfs_ganesha_mount(
            nfs_client[0],
            nfs_mounting_dir,
            nfs_server.node.hostname)
        if rc == 0:
            log.info('NFS-ganesha mount passed')
        else:
            raise CommandFailed('NFS ganesha mount failed')
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[0],
                0,
                5,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[2],
                0,
                5,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[1],
                0,
                1,
                iotype='crefi')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[3],
                0,
                1,
                iotype='smallfile_create', fnum=1000, fsize=1024)

        for client in nfs_client:
            log.info('Unmounting nfs-ganesha mount on client:')
            client.exec_command(cmd='sudo umount %s -l' % (nfs_mounting_dir))
            log.info('Removing nfs-ganesha mount dir on client:')
            client.exec_command(cmd='sudo rm -rf %s' % (nfs_mounting_dir))

        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'],
                'umount')
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                '',
                client_info['mounting_dir'],
                'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
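Example #30 drives the NFS-Ganesha install, export configuration and mount through fs_util helpers. For reference only, a minimal sketch of the client-side mount step (the server name, export path, mount point and the mount_nfs_export helper are assumptions for illustration, not part of the framework):

import subprocess


def mount_nfs_export(server, export_path, mount_point):
    """Mount an NFS-Ganesha export over NFSv4.1 at the given mount point."""
    subprocess.check_call(["mkdir", "-p", mount_point])
    subprocess.check_call(
        ["mount", "-t", "nfs", "-o", "vers=4.1",
         "%s:%s" % (server, export_path), mount_point])


if __name__ == "__main__":
    mount_nfs_export("nfs-server.example.com", "/", "/mnt/nfs_mount")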