# Example 1
def run(ceph_cluster, **kw):
    """Execute one RGW test script from the ceph-qe-scripts repository.

    Prepares the RGW node (test-specific extra packages, optional Kafka
    broker, iptables flush), clones the test repository, bootstraps a
    Python virtualenv on first use, runs the configured test script and
    optionally a follow-up IO-verification pass.

    Args:
        ceph_cluster (ceph.ceph.Ceph): ceph cluster
        **kw: carries the test ``config`` dict (script name, config file,
            test version, env vars, timeouts, ...).

    Returns:
        int: 0 on completion.
    """
    config = kw.get("config")
    log.info("Running RGW test version: %s", config.get("test-version", "v2"))

    rgw_ceph_object = ceph_cluster.get_ceph_object("rgw")
    run_io_verify = config.get("run_io_verify", False)
    extra_pkgs = config.get("extra-pkgs")
    install_start_kafka_broker = config.get("install_start_kafka")
    cloud_type = config.get("cloud-type")
    test_config = {"config": config.get("test-config", {})}
    rgw_node = rgw_ceph_object.node
    distro_version_id = rgw_node.distro_info["VERSION_ID"]

    # Install extra packages which are test specific. A dict maps the
    # distro major version (first char of VERSION_ID) to its package list;
    # a plain list applies to all distros.
    if extra_pkgs:
        log.info(f"got extra pkgs: {extra_pkgs}")
        if isinstance(extra_pkgs, dict):
            _pkgs = extra_pkgs.get(int(distro_version_id[0]))
            pkgs = " ".join(_pkgs)
        else:
            pkgs = " ".join(extra_pkgs)

        rgw_node.exec_command(sudo=True,
                              cmd=f"yum install -y {pkgs}",
                              long_running=True)

    if install_start_kafka_broker:
        install_start_kafka(rgw_node, cloud_type)

    log.info("Flushing iptables")
    rgw_node.exec_command(cmd="sudo iptables -F", check_ec=False)
    config["git-url"] = config.get(
        "git-url", "https://github.com/red-hat-storage/ceph-qe-scripts.git")

    test_folder = "rgw-tests"
    test_folder_path = f"~/{test_folder}"
    rgw_node.exec_command(cmd=f"sudo rm -rf {test_folder}")
    rgw_node.exec_command(cmd=f"sudo mkdir {test_folder}")
    utils.clone_the_repo(config, rgw_node, test_folder_path)

    # Bootstrap the virtualenv only once for the entire test suite: if
    # "ls -l venv" produced no output, the venv does not exist yet.
    pip_cmd = "venv/bin/pip"
    python_cmd = "venv/bin/python"
    out, err = rgw_node.exec_command(cmd="ls -l venv", check_ec=False)

    # NOTE(review): assumes exec_command returns the output as a plain
    # string here (other variants of this test call out.read().decode());
    # confirm against the node implementation in use.
    if not out:
        rgw_node.exec_command(cmd="yum install python3 -y --nogpgcheck",
                              check_ec=False,
                              sudo=True)
        rgw_node.exec_command(cmd="python3 -m venv venv")
        rgw_node.exec_command(cmd=f"{pip_cmd} install --upgrade pip")

        rgw_node.exec_command(
            cmd=f"{pip_cmd} install " +
            f"-r {test_folder}/ceph-qe-scripts/rgw/requirements.txt")

        if ceph_cluster.rhcs_version.version[0] > 4:
            setup_cluster_access(ceph_cluster, rgw_node)
            rgw_node.exec_command(
                sudo=True, cmd="yum install -y ceph-common --nogpgcheck")

        if ceph_cluster.rhcs_version.version[0] in [3, 4]:
            if ceph_cluster.containerized:
                # install ceph-common on the host hosting the container
                rgw_node.exec_command(
                    sudo=True, cmd="yum install -y ceph-common --nogpgcheck")

    script_name = config.get("script-name")
    config_file_name = config.get("config-file-name")
    test_version = config.get("test-version", "v2")
    script_dir = DIR[test_version]["script"]
    config_dir = DIR[test_version]["config"]
    lib_dir = DIR[test_version]["lib"]
    timeout = config.get("timeout", 300)

    # When a custom test config is supplied, materialize it on the remote
    # node at the location the test script reads from.
    if test_config["config"]:
        log.info("creating custom config")
        f_name = test_folder + config_dir + config_file_name
        remote_fp = rgw_node.remote_file(file_name=f_name, file_mode="w")
        remote_fp.write(yaml.dump(test_config, default_flow_style=False))

    # BUGFIX: the env-var prefix must be separated from "sudo" by a space;
    # the previous code concatenated them directly, producing e.g.
    # "VAR=1sudo python ..." whenever env-vars were configured.
    env_vars = config.get("env-vars", [])
    cmd_env = " ".join(env_vars) + " " if env_vars else ""
    rgw_node.exec_command(
        cmd=cmd_env + f"sudo {python_cmd} " + test_folder_path + script_dir +
        script_name + " -c " + test_folder + config_dir + config_file_name,
        long_running=True,
    )

    if run_io_verify:
        log.info("running io verify script")
        verify_out, err = rgw_node.exec_command(
            cmd=f"sudo {python_cmd} " + test_folder_path + lib_dir +
            "read_io_info.py",
            timeout=timeout,
        )
        log.info(verify_out)
        log.error(err)

    return 0
def run(ceph_cluster, **kw):
    """Run an NFS-Ganesha RGW test from the ceph-qe-scripts repository.

    Clones the test repository, bootstraps a virtualenv on first use,
    mounts the nfs-ganesha export on the RGW node, extracts the RGW user
    credentials from /etc/ganesha/ganesha.conf into a remote
    ``rgw_user.yaml`` and finally executes the configured test script.

    Args:
        ceph_cluster (ceph.ceph.Ceph): ceph cluster
        **kw: carries the test ``config`` dict (script name, config file,
            nfs/mount settings, timeouts, ...).

    Returns:
        int: 0 on completion.

    Raises:
        ValueError: if the RGW credentials cannot be parsed out of
            /etc/ganesha/ganesha.conf.
    """
    rgw_ceph_object = ceph_cluster.get_ceph_object("rgw")
    rgw_node = rgw_ceph_object.node
    rgw_node_host = rgw_node.shortname
    test_folder = "rgw-tests"
    test_folder_path = f"~/{test_folder}"
    git_url = "https://github.com/red-hat-storage/ceph-qe-scripts.git"
    git_clone = f"git clone {git_url} -b master"
    rgw_node.exec_command(cmd=f"sudo rm -rf {test_folder}" +
                          f" && mkdir {test_folder}" +
                          f" && cd {test_folder}" + f" && {git_clone}")

    config = kw.get("config")
    script_name = config.get("script-name")
    config_file_name = config.get("config-file-name")
    test_version = config.get("test-version", "v2")
    script_dir = DIR[test_version]["script"]
    config_dir = DIR[test_version]["config"]
    timeout = config.get("timeout", 300)

    # Bootstrap the virtualenv only once for the entire test suite: if
    # "ls -l venv" produced no output, the venv does not exist yet.
    pip_cmd = "venv/bin/pip"
    python_cmd = "venv/bin/python"
    out, err = rgw_node.exec_command(cmd="ls -l venv", check_ec=False)

    if not out.read().decode():
        rgw_node.exec_command(cmd="yum install python3 -y --nogpgcheck",
                              check_ec=False,
                              sudo=True)
        rgw_node.exec_command(cmd="python3 -m venv venv")
        rgw_node.exec_command(cmd=f"{pip_cmd} install --upgrade pip")

        rgw_node.exec_command(
            cmd=f"{pip_cmd} install " +
            f"-r {test_folder}/ceph-qe-scripts/rgw/requirements.txt")

        if ceph_cluster.rhcs_version.version[0] == 5:
            setup_cluster_access(ceph_cluster, rgw_node)
            rgw_node.exec_command(
                sudo=True,
                cmd="yum install -y ceph-common ceph-radosgw --nogpgcheck")
            rgw_node.exec_command(
                sudo=True, cmd="yum install -y ceph-common --nogpgcheck")
        if ceph_cluster.rhcs_version.version[0] in [3, 4]:
            if ceph_cluster.containerized:
                # install ceph-radosgw on the host hosting the container
                rgw_node.exec_command(
                    sudo=True,
                    cmd="yum install -y ceph-common ceph-radosgw --nogpgcheck",
                )
                rgw_node.exec_command(
                    sudo=True, cmd="yum install -y ceph-common --nogpgcheck")
    # Mount point ops
    mount_dir = config.get("mount-dir", "/mnt/ganesha/")

    # BUGFIX: '||' may not start a new line in sh, so the whole check must
    # stay on one logical line (the previous triple-quoted version was a
    # shell syntax error). Result is only informational (check_ec=False).
    checkdir_cmd = (
        f"[ -d '{mount_dir}' ] && [ ! -L '{mount_dir}' ]"
        f" && echo 'Directory {mount_dir} exists.'"
        f" || echo 'Error: Directory {mount_dir} exists but point to"
        f" $(readlink -f {mount_dir}).'"
    )
    out, err = rgw_node.exec_command(sudo=True,
                                     cmd=checkdir_cmd,
                                     check_ec=False)

    rgw_node.exec_command(cmd=f"mkdir {mount_dir}", check_ec=False, sudo=True)
    nfs_version = config.get("nfs-version", "4")
    # Mount cmd: mount -t nfs -o nfsvers=<nfs version>,noauto,soft,sync,proto=tcp `hostname -s`:/ /mnt/ganesha/
    mount_cmd = f"mount -t nfs -o nfsvers={nfs_version},noauto,soft,sync,proto=tcp {rgw_node_host}:/ {mount_dir}"
    rgw_node.exec_command(cmd=mount_cmd, check_ec=False, sudo=True)
    log.info("nfs ganesha mounted successfully on the mountpoint")
    # To parse the nfs-ganesha configuration file : /etc/ganesha/ganesha.conf
    v_as_out, err = rgw_node.exec_command(cmd="cat /etc/ganesha/ganesha.conf",
                                          check_ec=False,
                                          sudo=True)

    def clean(x):
        # strip quotes/punctuation so only the bare credential value remains
        return re.sub("[^A-Za-z0-9]+", "", x)

    # Initialize so a config file missing any key yields a clear error
    # below instead of an opaque NameError.
    access_key = secret_key = rgw_user_id = None
    ganesha_conf_out = v_as_out.read().decode()
    ganesha_conf = ganesha_conf_out.split("\n")
    for content in ganesha_conf:
        if "Access_Key_Id" in content:
            access_key = clean(content.split("=")[1])
        if "Secret_Access_Key" in content:
            secret_key = clean(content.split("=")[1])
        if "User_Id" in content:
            rgw_user_id = clean(content.split("=")[1])

    if None in (access_key, secret_key, rgw_user_id):
        raise ValueError(
            "could not parse RGW credentials from /etc/ganesha/ganesha.conf")

    rgw_user_config = dict(
        user_id=rgw_user_id,
        access_key=access_key,
        secret_key=secret_key,
        rgw_hostname=
        rgw_node_host,  # short hostname of rgw to populate under rgw_user.yaml
        ganesha_config_exists=True,
        already_mounted=False,
        cleanup=True,
        do_unmount=True,
        nfs_version=nfs_version,
        nfs_mnt_point=mount_dir,
        Pseudo="cephobject",
    )

    rgw_user_config_fname = (
        "rgw_user.yaml"  # Destination: ceph-qe-scripts/rgw/v2/tests/nfs_ganesha/config/
    )
    local_file = (
        "/home/cephuser/rgw-tests/ceph-qe-scripts/rgw/v2/tests/nfs_ganesha/config/"
        + rgw_user_config_fname)
    log.info("creating rgw_user.yaml : %s" % rgw_user_config)
    local_conf_file = rgw_node.remote_file(file_name=local_file, file_mode="w")
    local_conf_file.write(yaml.dump(rgw_user_config, default_flow_style=False))
    log.info("rgw_user.yaml file written")

    # When a custom test config is supplied, materialize it on the remote
    # node at the location the test script reads from.
    test_config = {"config": config.get("test-config", {})}
    if test_config["config"]:
        log.info("creating custom config")
        f_name = test_folder + config_dir + config_file_name
        remote_fp = rgw_node.remote_file(file_name=f_name, file_mode="w")
        remote_fp.write(yaml.dump(test_config, default_flow_style=False))

    out, err = rgw_node.exec_command(
        cmd=f"sudo {python_cmd} " + test_folder_path + script_dir +
        script_name + " -c " + test_folder + config_dir + config_file_name +
        " -r " + local_file,
        timeout=timeout,
    )
    log.info(out.read().decode())
    log.error(err.read().decode())

    return 0
def run(**kw):
    """Run a multisite RGW test and optionally verify IO on remote sites.

    Sets up the test environment on both clusters when requested, runs the
    configured test script on the designated test site, optionally copies
    the generated user details to another site, and verifies the written
    IO on every site listed under ``verify-io-on-site``.

    Args:
        **kw: carries ``ceph_cluster_dict`` (site name -> cluster),
            ``ceph_cluster`` (the site to run the test on) and the test
            ``config`` dict.

    Returns:
        int: 0 on completion.
    """
    log.info("Running test")
    clusters = kw.get("ceph_cluster_dict")
    config = kw.get("config")
    test_site = kw.get("ceph_cluster")
    log.info(f"test site: {test_site.name}")
    test_site_node = test_site.get_ceph_object("rgw").node

    set_env = config.get("set-env", False)
    # Fall back to dict order when the conventional site names are absent.
    primary_cluster = clusters.get("ceph-rgw1",
                                   clusters[list(clusters.keys())[0]])
    secondary_cluster = clusters.get("ceph-rgw2",
                                     clusters[list(clusters.keys())[1]])
    primary_rgw_node = primary_cluster.get_ceph_object("rgw").node
    secondary_rgw_node = secondary_cluster.get_ceph_object("rgw").node

    test_folder = "rgw-ms-tests"
    test_folder_path = f"/home/cephuser/{test_folder}"
    home_dir_path = "/home/cephuser/"
    config["test_folder"] = test_folder
    config["test_folder_path"] = test_folder_path

    if set_env:
        set_test_env(config, primary_rgw_node)
        set_test_env(config, secondary_rgw_node)

        if primary_cluster.rhcs_version.version[0] == 5:
            setup_cluster_access(primary_cluster, primary_rgw_node)
            setup_cluster_access(secondary_cluster, secondary_rgw_node)

    # run the test
    script_name = config.get("script-name")
    config_file_name = config.get("config-file-name")
    test_config = {"config": config.get("test-config", {})}
    test_version = config.get("test-version", "v2")
    script_dir = TEST_DIR[test_version]["script"]
    config_dir = TEST_DIR[test_version]["config"]
    lib_dir = TEST_DIR[test_version]["lib"]
    timeout = config.get("timeout", 300)

    log.info("flushing iptables")
    test_site_node.exec_command(cmd="sudo iptables -F", check_ec=False)

    # When a custom test config is supplied, materialize it on the remote
    # node at the location the test script reads from.
    if test_config["config"]:
        log.info("creating custom config")
        f_name = test_folder_path + config_dir + config_file_name
        remote_fp = test_site_node.remote_file(file_name=f_name,
                                               file_mode="w",
                                               sudo=True)
        remote_fp.write(yaml.dump(test_config, default_flow_style=False))

    out, err = test_site_node.exec_command(
        cmd="sudo python3 " + test_folder_path + script_dir + script_name +
        " -c " + test_folder + config_dir + config_file_name,
        timeout=timeout,
    )

    log.info(out.read().decode())
    log.error(err.read().decode())

    copy_user_to_site = clusters.get(config.get("copy-user-info-to-site"))
    if copy_user_to_site:
        log.info(f'copy_user_to_site: {config.get("copy-user-info-to-site")}')
        copy_user_to_site_node = copy_user_to_site.get_ceph_object("rgw").node
        user_details_file = test_folder_path + lib_dir + "user_details.json"
        copy_file_from_node_to_node(user_details_file, test_site_node,
                                    copy_user_to_site_node, user_details_file)

    verify_io_on_sites = config.get("verify-io-on-site", [])
    if verify_io_on_sites:
        io_info = home_dir_path + "io_info.yaml"
        for site in verify_io_on_sites:
            verify_io_on_site_node = clusters.get(site).get_ceph_object(
                "rgw").node
            log.info(f"Check sync status on {site}")
            sync_status_on_primary(verify_io_on_site_node)
            # adding sleep for 60 seconds before verification of data starts
            time.sleep(60)
            log.info(f"verification IO on {site}")
            # BUGFIX: compare the cluster's name to the site key; the
            # previous "test_site != site" compared a cluster object with a
            # string and was therefore always True, copying io_info even to
            # the test site itself.
            if test_site.name != site:
                copy_file_from_node_to_node(io_info, test_site_node,
                                            verify_io_on_site_node, io_info)

            verify_out, err = verify_io_on_site_node.exec_command(
                cmd="sudo python3 " + test_folder_path + lib_dir +
                "read_io_info.py",
                timeout=timeout,
            )
            log.info(verify_out.read().decode())
            # BUGFIX: log stderr; the previous code re-read the already
            # consumed verify_out stream, always logging an empty string.
            log.error(err.read().decode())

    return 0
# Example 4
def run(ceph_cluster, **kw):
    """Execute one RGW test script from the ceph-qe-scripts repository.

    Installs test-specific extra packages, flushes iptables, clones the
    test repository, bootstraps a virtualenv on first use, runs the
    configured test script and optionally an IO-verification pass.

    Args:
        ceph_cluster (ceph.ceph.Ceph): ceph cluster
        **kw: carries the test ``config`` dict (script name, config file,
            test version, timeouts, ...).

    Returns:
        int: 0 on completion.
    """
    config = kw.get("config")
    log.info("Running RGW test version: %s", config.get("test-version", "v2"))

    rgw_ceph_object = ceph_cluster.get_ceph_object("rgw")
    run_io_verify = config.get("run_io_verify", False)
    extra_pkgs = config.get("extra-pkgs")
    rgw_node = rgw_ceph_object.node
    distro_version_id = rgw_node.distro_info["VERSION_ID"]

    # Install extra packages which are test specific. A dict maps the
    # distro major version (first char of VERSION_ID) to its package list;
    # a plain list applies to all distros.
    if extra_pkgs:
        log.info(f"got extra pkgs: {extra_pkgs}")
        if isinstance(extra_pkgs, dict):
            _pkgs = extra_pkgs.get(int(distro_version_id[0]))
            pkgs = " ".join(_pkgs)
        else:
            pkgs = " ".join(extra_pkgs)

        rgw_node.exec_command(
            sudo=True, cmd=f"yum install -y {pkgs}", long_running=True
        )

    log.info("Flushing iptables")
    rgw_node.exec_command(cmd="sudo iptables -F", check_ec=False)

    test_folder = "rgw-tests"
    test_folder_path = f"~/{test_folder}"
    git_url = "https://github.com/red-hat-storage/ceph-qe-scripts.git"
    git_clone = f"git clone {git_url} -b master"
    rgw_node.exec_command(
        cmd=f"sudo rm -rf {test_folder}"
        + f" && mkdir {test_folder}"
        + f" && cd {test_folder}"
        + f" && {git_clone}"
    )

    # Bootstrap the virtualenv only once for the entire test suite: if
    # "ls -l venv" produced no output, the venv does not exist yet.
    pip_cmd = "venv/bin/pip"
    python_cmd = "venv/bin/python"
    out, err = rgw_node.exec_command(cmd="ls -l venv", check_ec=False)

    if not out.read().decode():
        rgw_node.exec_command(
            cmd="yum install python3 -y --nogpgcheck", check_ec=False, sudo=True
        )
        rgw_node.exec_command(cmd="python3 -m venv venv")
        rgw_node.exec_command(cmd=f"{pip_cmd} install --upgrade pip")

        rgw_node.exec_command(
            cmd=f"{pip_cmd} install "
            + f"-r {test_folder}/ceph-qe-scripts/rgw/requirements.txt"
        )

        # Ceph object's containerized attribute is not initialized and bound to err
        # as a workaround for 5.x, checking if the environment is 5.0
        if ceph_cluster.rhcs_version == "5.0" or ceph_cluster.containerized:
            if ceph_cluster.rhcs_version == "5.0":
                setup_cluster_access(ceph_cluster, rgw_node)

            rgw_node.exec_command(
                sudo=True, cmd="yum install -y ceph-radosgw --nogpgcheck"
            )

    script_name = config.get("script-name")
    config_file_name = config.get("config-file-name")
    test_version = config.get("test-version", "v2")
    script_dir = DIR[test_version]["script"]
    config_dir = DIR[test_version]["config"]
    lib_dir = DIR[test_version]["lib"]
    timeout = config.get("timeout", 300)

    out, err = rgw_node.exec_command(
        cmd=f"sudo {python_cmd} "
        + test_folder_path
        + script_dir
        + script_name
        + " -c "
        + test_folder
        + config_dir
        + config_file_name,
        timeout=timeout,
    )
    log.info(out.read().decode())
    log.error(err.read().decode())

    if run_io_verify:
        log.info("running io verify script")
        verify_out, err = rgw_node.exec_command(
            cmd=f"sudo {python_cmd} " + test_folder_path + lib_dir + "read_io_info.py",
            timeout=timeout,
        )
        log.info(verify_out.read().decode())
        # BUGFIX: log stderr; the previous code re-read the already
        # consumed verify_out stream, always logging an empty string.
        log.error(err.read().decode())

    return 0