Example #1
def setup_rgw_conf(node: CephNode) -> None:
    """
    Execute the pre-setup workflow on the provided node.

    Below are the steps executed
        - Stop the RadosGW service
        - Add `rgw_lc_debug_interval` with interval as 10 seconds
        - Start the RadosGW service

    Args:
        node:   The node object that has the rgw role

    Returns:
        None

    Raises:
        CommandFailed: when any remote command execution returns a non-zero exit code.
    """
    commands = [
        "sudo systemctl stop ceph-radosgw.target",
        "sudo sed -i -e '$a rgw_lc_debug_interval = 10' /etc/ceph/ceph.conf",
        "sudo systemctl start ceph-radosgw.target",
    ]

    for cmd in commands:
        node.exec_command(cmd=cmd)
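For reference, the sed expression above uses GNU sed's "$a" (append after the last line) command, so running setup_rgw_conf simply adds one line to the end of /etc/ceph/ceph.conf:

rgw_lc_debug_interval = 10

This option is commonly set in test environments so that RGW processes lifecycle rules at a short, debug-friendly interval.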
Example #2
def teardown_rgw_conf(node: CephNode) -> None:
    """
    Execute the pre-setup cleanup workflow on the given node.

    Below are the steps executed as part of the cleanup activity
        - Stop the RadosGW service
        - Remove the conf changes
        - Start the RadosGW service

    Args:
        node: The node that has the rgw role

    Returns:
        None

    Raises:
        CommandFailed: when any of the remote commands returns a non-zero exit code.
    """
    commands = [
        "sudo systemctl stop ceph-radosgw.target",
        "sudo sed -i '/rgw_lc_debug_interval/d' /etc/ceph/ceph.conf",
        "sudo systemctl start ceph-radosgw.target",
    ]

    for cmd in commands:
        node.exec_command(cmd=cmd)
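A minimal sketch of how these two helpers might be paired in a test, assuming rgw_node is a connected CephNode with the rgw role obtained from the test framework, and that CommandFailed and log are the same framework exception and logger used throughout these examples; run_lifecycle_checks is a hypothetical placeholder for the actual test body:

def run_lc_debug_test(rgw_node: CephNode) -> int:
    """Run a lifecycle check with the debug interval enabled, then clean up."""
    setup_rgw_conf(rgw_node)
    try:
        run_lifecycle_checks(rgw_node)  # hypothetical test body
        return 0
    except CommandFailed as err:
        log.warning(err)
        return 1
    finally:
        # Always restore ceph.conf, even if the checks fail.
        teardown_rgw_conf(rgw_node)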
Example #3
def enable_ports(node: CephNode, port: int = 18088) -> None:
    """
    Opens the required network port on the given COSBench node.

    Args:
        node (CephNode):    The node on which the port has to be opened.
        port (int):         The network port that needs to be opened.

    Returns:
        None

    Raises:
        CommandFailed
    """
    LOG.debug("Opening the required network ports if firewall is configured.")

    try:
        out, err = node.exec_command(sudo=True, cmd="firewall-cmd --state")

        if out.strip().lower() != "running":
            return
    except CommandFailed:
        LOG.debug(f"{node.shortname} has no firewall configuration.")
        return

    node.exec_command(
        sudo=True,
        cmd=f"firewall-cmd --zone=public --permanent --add-port={port}/tcp")
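Note that --permanent only updates firewalld's stored configuration; the runtime firewall is unchanged until it is reloaded. A hedged follow-up (same node object as above) that one might append to enable_ports so the rule takes effect immediately:

    # Apply the permanent rule to the running firewall as well.
    node.exec_command(sudo=True, cmd="firewall-cmd --reload")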
Example #4
def _create_agent_systemd(node: CephNode) -> None:
    """
    Configures and runs the vault-agent as a system daemon.

    This method creates two files, i.e. a launcher script and a systemd service unit.
    It then starts the service and enables it to start on boot.

    Args:
        node    The node on which the vault agent needs to be set up.

    Returns:
        None

    Raises:
        CommandFailed
    """
    _write_remote_file(
        node=node,
        file_name="/usr/bin/vault-agent",
        content=AGENT_LAUNCHER,
    )
    _write_remote_file(
        node=node,
        file_name="/usr/lib/systemd/system/vault-agent.service",
        content=AGENT_SYSTEMD,
    )

    commands = [
        "chmod +x /usr/bin/vault-agent",
        "systemctl start vault-agent.service",
        "systemctl enable vault-agent.service",
    ]
    for command in commands:
        node.exec_command(sudo=True, cmd=command)
Example #5
def restart_service(node: CephNode) -> None:
    """
    Restart the HAProxy service.

    Args:
        node: The node on which the service needs to be restarted.

    Returns:
        None

    Raises:
        CommandFailed
    """
    node.exec_command(sudo=True, cmd="systemctl restart haproxy")
    LOG.info("HAproxy Service restarted!!!")
Example #6
def create_s3_user(node: CephNode, user_prefix: str, data: Dict) -> None:
    """
    Create an S3 user with the given display_name.

    The other information required for creating the user is auto-generated.

    Args:
        node: node in the cluster to create the user on
        user_prefix: Prefix to be added to the new user
        data: a reference to the payload that needs to be updated.

    Returns:
        None. The created user's details are stored in data under user_prefix.
    """
    uid = binascii.hexlify(os.urandom(32)).decode()
    display_name = f"{user_prefix}-user"
    log.info("Creating user: {display_name}".format(display_name=display_name))

    cmd = f"radosgw-admin user create --uid={uid} --display_name={display_name}"
    cmd += " --email={email}@foo.bar".format(email=uid)

    out, err = node.exec_command(sudo=True, cmd=cmd)
    user_info = json.loads(out)

    data[user_prefix] = {
        "id": user_info["keys"][0]["user"],
        "access_key": user_info["keys"][0]["access_key"],
        "secret_key": user_info["keys"][0]["secret_key"],
        "name": user_info["display_name"],
        "email": user_info["email"],
    }
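A minimal usage sketch, assuming client_node is a connected CephNode from the test framework; it shows how the shared data payload is populated and read back (the "tenant1" prefix is an arbitrary illustrative value):

user_data: Dict = {}
create_s3_user(node=client_node, user_prefix="tenant1", data=user_data)

creds = user_data["tenant1"]
log.info(f"Created {creds['name']} with access key {creds['access_key']}")
# creds also carries the 'id', 'secret_key' and 'email' fields for use by S3 clients.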
Example #7
def install(node: CephNode) -> None:
    """
    Installs HAProxy.

    Args:
        node (CephNode):    The node on which the package is installed.

    Returns:
        None

    Raises:
        CommandFailed
    """
    node.exec_command(sudo=True, cmd="yum install -y haproxy")
    node.exec_command(sudo=True, cmd="chmod 666 /etc/haproxy/haproxy.cfg")
    LOG.info("Successfully installed HAproxy!!!")
Example #8
def execute_s3_tests(node: CephNode, build: str) -> int:
    """
    Return the result of S3 test run.

    Args:
        node: The node from which the test execution is triggered.
        build: the RH build version

    Returns:
        0 - Success
        1 - Failure
    """
    log.info("Executing s3-tests")
    try:
        base_cmd = "cd s3-tests; S3TEST_CONF=config.yaml virtualenv/bin/nosetests -v"
        extra_args = "-a '!fails_on_rgw,!fails_strict_rfc2616,!encryption'"
        tests = "s3tests"

        if build.startswith("5"):
            extra_args = "-a '!fails_on_rgw,!fails_strict_rfc2616,!encryption"
            extra_args += ",!test_of_sts,!lifecycle,!s3select,!user-policy"
            extra_args += ",!webidentity_test'"
            tests = "s3tests_boto3"

        cmd = f"{base_cmd} {extra_args} {tests}"
        out, err = node.exec_command(cmd=cmd, timeout=3600)
        log.info(out.read().decode())
        log.error(err.read().decode())
    except CommandFailed as e:
        log.warning("Received CommandFailed")
        log.warning(e)
        return 1

    return 0
Example #9
def execute_s3_tests(node: CephNode,
                     build: str,
                     encryption: bool = False) -> int:
    """
    Return the result of S3 test run.

    Args:
        node        The node from which the test execution is triggered.
        build       The RH build version
        encryption  Whether to include the encryption tests.
    Returns:
        0 - Success
        1 - Failure
    """
    log.debug("Executing s3-tests")
    try:
        base_cmd = "cd s3-tests; S3TEST_CONF=config.yaml virtualenv/bin/nosetests -v"
        extra_args = "-a '!fails_on_rgw,!fails_strict_rfc2616,!encryption'"
        tests = "s3tests"

        if build.startswith("5"):
            extra_args = "-a '!fails_on_rgw,!fails_strict_rfc2616"

            if not encryption:
                extra_args += ",!encryption"

            extra_args += ",!test_of_sts,!s3select,!user-policy,!webidentity_test'"
            tests = "s3tests_boto3"

        cmd = f"{base_cmd} {extra_args} {tests}"
        return node.exec_command(cmd=cmd, long_running=True)
    except CommandFailed as e:
        log.warning("Received CommandFailed")
        log.warning(e)
        return 1
Example #10
def _install_vault_packages(node: CephNode) -> None:
    """
    Installs the packages required for vault.

    Args:
        node    The system on which the package needs to be installed

    Returns:
        None

    Raises:
        CommandFailed
    """
    vault_repo = "https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo"
    commands = [f"yum-config-manager --add-repo {vault_repo}", "yum install -y vault"]
    for command in commands:
        node.exec_command(sudo=True, cmd=command, check_ec=False)
Example #11
def get_or_create_user(node: CephNode) -> Dict:
    """Creates or retrieves a RADOS Gateway (RGW) user.

    Returns:
         Dictionary holding the keys user, access_key & secret_key
    """
    LOG.debug("Get or Create cosbench01 user using radosgw-admin.")
    user = "******"
    try:
        out, err = node.exec_command(
            cmd=f"radosgw-admin user info --uid {user}")
        out = loads(out)
        return out["keys"][0]
    except CommandFailed:
        out, err = node.exec_command(
            cmd=f"radosgw-admin user create --uid {user} --display-name {user}"
            f" --email {user}@noreply.com")
        out = loads(out)
        return out["keys"][0]
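A small usage sketch, assuming rgw_node is a connected CephNode; the returned dictionary mirrors one entry of the "keys" array printed by radosgw-admin, so the credentials can be consumed directly:

keys = get_or_create_user(rgw_node)
access_key = keys["access_key"]
secret_key = keys["secret_key"]
LOG.debug(f"Using RGW user {keys['user']} for the workload.")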
Example #12
def exec_command(
    node: CephNode,
    command: str,
    sudo: Optional[bool] = False,
    check_ec: Optional[bool] = True,
) -> Tuple:
    """Executes the given command on the provided node."""
    out, err = node.exec_command(sudo=sudo, cmd=command, check_ec=check_ec)
    LOG.debug(f"Output: {out}")
    LOG.debug(f"Error: {err}")

    return out, err
Example #13
def _s3tests_req_install(node: CephNode, os_ver: str) -> None:
    """Install S3 prerequisites via pip."""
    packages = [
        "python2-virtualenv",
        "python2-devel",
        "libevent-devel",
        "libffi-devel",
        "libxml2-devel",
        "libxslt-devel",
        "zlib-devel",
    ]
    node.exec_command(sudo=True,
                      cmd="yum groupinstall -y 'Development Tools'",
                      check_ec=False)
    node.exec_command(sudo=True,
                      cmd=f"yum install -y --nogpgcheck {' '.join(packages)}")

    venv_cmd = "virtualenv" if os_ver == "7" else "virtualenv-2"
    commands = [
        f"{venv_cmd} -p python2 --no-site-packages --distribute s3-tests/virtualenv",
        "s3-tests/virtualenv/bin/pip install --upgrade pip setuptools",
        "s3-tests/virtualenv/bin/pip install -r s3-tests/requirements.txt",
        "s3-tests/virtualenv/bin/python s3-tests/setup.py develop",
    ]
    for cmd in commands:
        node.exec_command(cmd=cmd)
Example #14
def create_nodes(conf, inventory, osp_cred, run_id,
                 report_portal_session=None, instances_name=None):
    if report_portal_session:
        name = create_unique_test_name("ceph node creation", test_names)
        test_names.append(name)
        desc = "Ceph cluster preparation"
        report_portal_session.start_test_item(name=name,
                                              description=desc,
                                              start_time=timestamp(),
                                              item_type="STEP")
    log.info("Destroying existing osp instances..")
    cleanup_ceph_nodes(osp_cred, instances_name)
    ceph_cluster_dict = {}
    clients = []
    log.info('Creating osp instances')
    for cluster in conf.get('globals'):
        ceph_vmnodes = create_ceph_nodes(cluster, inventory, osp_cred, run_id, instances_name)
        ceph_nodes = []
        for node in ceph_vmnodes.values():
            if node.role == 'win-iscsi-clients':
                clients.append(WinNode(ip_address=node.ip_address,
                                       private_ip=node.get_private_ip()))
            else:
                ceph = CephNode(username='******',
                                password='******',
                                root_password='******',
                                root_login=node.root_login,
                                role=node.role,
                                no_of_volumes=node.no_of_volumes,
                                ip_address=node.ip_address,
                                subnet=node.subnet,
                                private_ip=node.get_private_ip(),
                                hostname=node.hostname,
                                ceph_vmnode=node)
                ceph_nodes.append(ceph)
        cluster_name = cluster.get('ceph-cluster').get('name', 'ceph')
        ceph_cluster_dict[cluster_name] = Ceph(cluster_name, ceph_nodes)
    # TODO: refactor cluster dict to cluster list
    log.info('Done creating osp instances')
    log.info("Waiting for Floating IPs to be available")
    log.info("Sleeping 15 Seconds")
    time.sleep(15)
    for cluster_name, cluster in ceph_cluster_dict.items():
        for instance in cluster:
            try:
                instance.connect()
            except BaseException:
                if report_portal_session:
                    report_portal_session.finish_test_item(end_time=timestamp(), status="FAILED")
                raise
    if report_portal_session:
        report_portal_session.finish_test_item(end_time=timestamp(), status="PASSED")
    return ceph_cluster_dict, clients
Example #15
def _s3tests_req_install(node: CephNode) -> None:
    """Install S3 prerequisites via pip."""
    packages = [
        "python2-virtualenv",
        "python2-devel",
        "libevent-devel",
        "libffi-devel",
        "libxml2-devel",
        "libxslt-devel",
        "zlib-devel",
    ]
    node.exec_command(sudo=True,
                      cmd=f"yum install -y --nogpgcheck {' '.join(packages)}")

    commands = [
        "virtualenv -p python2 --no-site-packages --distribute s3-tests/virtualenv",
        "s3-tests/virtualenv/bin/pip install --upgrade pip setuptools",
        "s3-tests/virtualenv/bin/pip install -r s3-tests/requirements.txt",
        "s3-tests/virtualenv/bin/python s3-tests/setup.py develop",
    ]
    for cmd in commands:
        node.exec_command(cmd=cmd)
Example #16
def _create_agent_config(node: CephNode, config: Dict) -> None:
    """
    Writes the required configuration file to the provided node.

    The following files are created: .app-role-id, .app-secret-id and agent.hcl

    Args:
        node    The system on which files have to be copied
        config  Dictionary holding the tokens

    Returns:
        None

    Raises:
        CommandFailed
    """
    node.exec_command(sudo=True, cmd="mkdir -p /usr/local/etc/vault/")

    _write_remote_file(
        node=node,
        file_name="/usr/local/etc/vault/.app-role-id",
        content=config["agent"]["role-id"],
    )
    _write_remote_file(
        node=node,
        file_name="/usr/local/etc/vault/.app-secret-id",
        content=config["agent"]["secret-id"],
    )
    # hcl file
    agent_conf = {"url": config["url"], "auth": config["agent"]["auth"]}
    tmpl = Template(AGENT_HCL)
    data = tmpl.render(data=agent_conf)
    _write_remote_file(
        node=node,
        file_name="/usr/local/etc/vault/agent.hcl",
        content=data,
    )
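A hedged sketch of how the vault-agent helpers in this listing might be chained together, assuming rgw_node is a connected CephNode and vault_config is a dictionary carrying the url and agent keys these helpers read (its exact shape comes from the test suite configuration, not from this sketch):

def setup_vault_agent(rgw_node: CephNode, vault_config: Dict) -> None:
    """Install vault, write the agent configuration and start the agent daemon."""
    _install_vault_packages(rgw_node)             # Example #10
    _create_agent_config(rgw_node, vault_config)  # this example
    _create_agent_systemd(rgw_node)               # Example #4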
Example #17
def config(node: CephNode, data: List) -> None:
    """
    Writes the haproxy.cfg configuration file.

    Args:
        node:   Haproxy client node
        data:   A list of rgw endpoints (ip_address & port)
    Returns:
        None
    Raises:
        CommandFailed
    """
    LOG.info("Generating the HAproxy.cfg file.")
    templ = Template(HAPRX_CONF)
    conf = templ.render(data=data)

    conf_file = node.remote_file(file_name="/etc/haproxy/haproxy.cfg",
                                 file_mode="w")
    conf_file.write(conf)
    conf_file.flush()
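A minimal end-to-end sketch for the HAProxy helpers in this listing, assuming haproxy_node is a connected CephNode; the docstring only states that data is a list of rgw endpoints (ip_address & port), so the dictionary shape below is an assumption and the addresses are illustrative:

rgw_endpoints = [
    {"ip_address": "10.0.0.11", "port": 8080},
    {"ip_address": "10.0.0.12", "port": 8080},
]

install(haproxy_node)                # Example #7
config(haproxy_node, rgw_endpoints)  # this example
restart_service(haproxy_node)        # Example #5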
Example #18
def _write_remote_file(node: CephNode, file_name: str, content: str) -> None:
    """
    Copies the provided content to the specified file on the given node.

    Args:
        node        The target system
        file_name   The name of the remote file to which the content needs to be written
        content     The content of the file to be written

    Returns:
          None

    Raises:
          CommandFailed
    """
    LOG.debug(f"Writing to remote file {file_name}")
    file_handle = node.remote_file(sudo=True, file_mode="w", file_name=file_name)
    file_handle.write(data=content)
    file_handle.flush()
    file_handle.close()
Example #19
def config(node: CephNode, data: List) -> None:
    """
    Writes the COS Bench controller configuration file.

    Args:
        node:   The node that is designated to be a COS controller
        data:   A list of dictionaries having driver details (name & ip_address)
    Returns:
        None
    Raises:
        CommandFailed
    """
    LOG.info("Generating the COS Bench controller file.")
    templ = Template(CTRL_CONF)
    conf = templ.render(data=data)

    conf_file = node.remote_file(
        file_name="/opt/cosbench/conf/controller.conf", file_mode="w")
    conf_file.write(conf)
    conf_file.flush()
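A short usage sketch, assuming controller_node is the CephNode designated as the COSBench controller; per the docstring, data is a list of driver dictionaries with name and ip_address (the values below are illustrative):

drivers = [
    {"name": "driver-1", "ip_address": "10.0.0.21"},
    {"name": "driver-2", "ip_address": "10.0.0.22"},
]
config(controller_node, data=drivers)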
Example #20
def install_s3test_requirements(node: CephNode, branch: str) -> None:
    """
    Install the packages required by S3tests.

    The S3test requirements are normally installed via the bootstrap script; however,
    for the ceph-nautilus branch on RHEL 8 the manual installation steps are run instead.

    Args:
        node:   The node that is considered for running S3Tests.
        branch: The s3-tests branch being used.

    Raises:
        CommandFailed:  Whenever a command returns a non-zero value part of the method.
    """
    rhel8, err = node.exec_command(
        cmd="grep -i 'release 8' /etc/redhat-release", check_ec=False)
    rhel8 = rhel8.read().decode()

    if branch == "ceph-nautilus" and rhel8:
        return _s3tests_req_install(node)

    _s3tests_req_bootstrap(node)
Example #21
def _configure_rgw_daemons(node: CephNode, config: Dict) -> None:
    """
    Updates the RGW daemons with the provided configuration.

    Args:
         node       Server that has privilege to perform ceph config set commands.
         config     Key/value pairs to be used for configuration
    Returns:
        None
    Raises:
        CommandFailed
    """
    out, err = node.exec_command(
        sudo=True, cmd="ceph orch ps --daemon_type rgw --format json")
    rgw_daemons = [
        f"client.rgw.{x['daemon_id']}" for x in loads(out.read().decode())
    ]

    out, err = node.exec_command(
        sudo=True, cmd="ceph orch ls --service_type rgw --format json")
    rgw_services = [x["service_name"] for x in loads(out.read().decode())]

    configs = [
        ("rgw_crypt_s3_kms_backend", "vault"),
        ("rgw_crypt_vault_secret_engine", config["agent"]["engine"]),
        ("rgw_crypt_vault_auth", config["agent"]["auth"]),
    ]

    if config["agent"]["auth"] == "token":
        configs += [
            ("rgw_crypt_vault_token_file", config["agent"]["token_file"]),
            ("rgw_crypt_vault_addr", config["url"]),
        ]
    else:
        configs += [
            ("rgw_crypt_vault_prefix", config["agent"]["prefix"]),
            ("rgw_crypt_vault_addr", "http://127.0.0.1:8100"),
        ]

    for daemon in rgw_daemons:
        for key, value in configs:
            node.exec_command(sudo=True,
                              cmd=f"ceph config set {daemon} {key} {value}")

    for service in rgw_services:
        node.exec_command(sudo=True, cmd=f"ceph orch restart {service}")
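For clarity, a hedged example of the config dictionary this helper expects, reconstructed only from the keys the code above reads (node is the same admin CephNode used above; the values are illustrative, not taken from a real deployment):

vault_config = {
    "url": "http://vault.example.com:8200",
    "agent": {
        "engine": "kv",               # rgw_crypt_vault_secret_engine
        "auth": "agent",              # anything other than "token" selects the vault-agent branch
        "prefix": "/v1/secret/data",  # used when auth is not "token"
        # "token_file": "/etc/ceph/vault.token",  # used instead when auth == "token"
    },
}
_configure_rgw_daemons(node, vault_config)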
Example #22
def create_nodes(
    conf,
    inventory,
    osp_cred,
    run_id,
    cloud_type="openstack",
    report_portal_session=None,
    instances_name=None,
    enable_eus=False,
    rp_logger: Optional[ReportPortal] = None,
):
    """Creates the system under test environment."""
    if report_portal_session:
        name = create_unique_test_name("ceph node creation", test_names)
        test_names.append(name)
        desc = "Ceph cluster preparation"
        rp_logger.start_test_item(name=name,
                                  description=desc,
                                  item_type="STEP")

    log.info("Destroying existing osp instances..")
    if cloud_type == "openstack":
        cleanup_ceph_nodes(osp_cred, instances_name)
    elif cloud_type == "ibmc":
        cleanup_ibmc_ceph_nodes(osp_cred, instances_name)

    ceph_cluster_dict = {}

    log.info("Creating osp instances")
    clients = []
    for cluster in conf.get("globals"):
        if cloud_type == "openstack":
            ceph_vmnodes = create_ceph_nodes(
                cluster,
                inventory,
                osp_cred,
                run_id,
                instances_name,
                enable_eus=enable_eus,
            )
        elif cloud_type == "ibmc":
            ceph_vmnodes = create_ibmc_ceph_nodes(cluster, inventory, osp_cred,
                                                  run_id, instances_name)

        elif cloud_type == "baremetal":
            ceph_vmnodes = create_baremetal_ceph_nodes(cluster)

        ceph_nodes = []
        root_password = None
        for node in ceph_vmnodes.values():
            look_for_key = False
            private_key_path = ""

            if cloud_type == "openstack":
                private_ip = node.get_private_ip()
            elif cloud_type == "baremetal":
                private_key_path = node.private_key if node.private_key else ""
                private_ip = node.ip_address
                look_for_key = True if node.private_key else False
                root_password = node.root_password
            elif cloud_type == "ibmc":
                glbs = osp_cred.get("globals")
                ibmc = glbs.get("ibm-credentials")
                private_key_path = ibmc.get("private_key_path")
                private_ip = node.ip_address
                look_for_key = True

            if node.role == "win-iscsi-clients":
                clients.append(
                    WinNode(ip_address=node.ip_address, private_ip=private_ip))
            else:
                ceph = CephNode(
                    username="******",
                    password="******",
                    root_password="******"
                    if not root_password else root_password,
                    look_for_key=look_for_key,
                    private_key_path=private_key_path,
                    root_login=node.root_login,
                    role=node.role,
                    no_of_volumes=node.no_of_volumes,
                    ip_address=node.ip_address,
                    subnet=node.subnet,
                    private_ip=private_ip,
                    hostname=node.hostname,
                    ceph_vmnode=node,
                )
                ceph_nodes.append(ceph)

        cluster_name = cluster.get("ceph-cluster").get("name", "ceph")
        ceph_cluster_dict[cluster_name] = Ceph(cluster_name, ceph_nodes)

    # TODO: refactor cluster dict to cluster list
    log.info("Done creating osp instances")
    log.info("Waiting for Floating IPs to be available")
    log.info("Sleeping 15 Seconds")
    time.sleep(15)

    for cluster_name, cluster in ceph_cluster_dict.items():
        for instance in cluster:
            try:
                instance.connect()
            except BaseException:
                if rp_logger:
                    rp_logger.finish_test_item(status="FAILED")
                raise

    if rp_logger:
        rp_logger.finish_test_item(status="PASSED")

    return ceph_cluster_dict, clients
Example #23
def _s3tests_req_bootstrap(node: CephNode) -> None:
    """Install the S3tests using bootstrap script."""
    node.exec_command(cmd="cd s3-tests; ./bootstrap")
Example #24
def clone_s3_tests(node: CephNode, branch="ceph-luminous") -> None:
    """Clone the S3 repository on the given node."""
    repo_url = "https://github.com/ceph/s3-tests.git"
    node.exec_command(cmd="if test -d s3-tests; then sudo rm -r s3-tests; fi")
    node.exec_command(cmd=f"git clone -b {branch} {repo_url}")
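Putting the s3-tests helpers from this listing together, a hedged sketch of the overall flow, assuming client_node is a connected CephNode and that branch and build come from the test configuration:

def run_s3_tests(client_node: CephNode, branch: str, build: str) -> int:
    """Clone s3-tests, install its requirements and execute the suite."""
    clone_s3_tests(client_node, branch=branch)        # this example
    install_s3test_requirements(client_node, branch)  # Example #20
    return execute_s3_tests(client_node, build)       # Example #8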