Example #1
def del_lc_debug(cluster: Ceph, build: str) -> None:
    """
    Modifies the RGW conf files to support lifecycle actions.

    Args:
        cluster:    The cluster participating in the tests.
        build:      The RHCS build version

    Raises:
        CommandFailed:  Whenever a command executed as part of the method returns a non-zero value.
    """
    node = cluster.get_nodes(role="rgw")[0]
    commands = [
        "sed -i '/rgw_lc_debug_interval/d' /etc/ceph/ceph.conf",
        "systemctl restart ceph-radosgw.target",
    ]

    if build.startswith("5"):
        node = cluster.get_nodes(role="client")[0]
        rgw_service_name = _get_rgw_service_name(cluster)
        commands = [
            "ceph config rm client.rgw.* rgw_lc_debug_interval",
            f"ceph orch restart {rgw_service_name}",
        ]

    for cmd in commands:
        node.exec_command(sudo=True, cmd=cmd)
Example #2
def rgw_frontend_port(cluster: Ceph, build: str) -> str:
    """
    Return the configured port number of RadosGW.

    For versions prior to RHCS 5.0, the port number is determined from ceph.conf,
    while for later versions the value is retrieved via cephadm.

    Note: In the case of RHCS 5.0, we assume that the installer node is provided.

    Args:
        cluster:    The cluster participating in the test.
        build:      RHCS version string.

    Returns:
        port_number:    The configured port number
    """
    node = cluster.get_nodes(role="rgw")[0]

    if build.startswith("5"):
        node = cluster.get_nodes(role="client")[0]
        # Allow realm & zone variability hence using config dump instead of config-key
        cmd1 = "ceph config dump | grep client.rgw | grep port | head -n 1"
        cmd2 = "cut -d '=' -f 2 | cut -d ' ' -f 1"
        command = f"{cmd1} | {cmd2}"
    else:
        # To determine the configured port, the line must start with rgw frontends
        # in ceph.conf. An example is given below,
        #
        # rgw frontends = civetweb port=192.168.122.199:8080 num_threads=100
        cmd1 = "grep -e '^rgw frontends' /etc/ceph/ceph.conf"
        cmd2 = "cut -d ':' -f 2 | cut -d ' ' -f 1"
        command = f"{cmd1} | {cmd2}"

    out, _ = node.exec_command(sudo=True, cmd=command)
    return out.read().decode().strip()
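To make the parsing above concrete, here is a minimal sketch that mirrors the two cut pipelines; the sample lines are hypothetical stand-ins for the real output of `ceph config dump` and /etc/ceph/ceph.conf.

# Hypothetical sample lines; only the parsing step is illustrated here.
# RHCS 5.x path: cut -d '=' -f 2 | cut -d ' ' -f 1
dump_line = "client.rgw.india  basic  rgw_frontends  beast port=8080"
print(dump_line.split("=")[1].split(" ")[0])  # -> 8080

# Pre-5.0 path: cut -d ':' -f 2 | cut -d ' ' -f 1
conf_line = "rgw frontends = civetweb port=192.168.122.199:8080 num_threads=100"
print(conf_line.split(":")[1].split(" ")[0])  # -> 8080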
Example #3
def _rgw_lc_debug_conf(cluster: Ceph, add: bool = True) -> None:
    """
    Modifies the LC debug config entry based on add being set or unset.

    The LC debug value is set in the ceph conf file.

    Args:
        cluster     The cluster participating in the tests.
        add         If set, the config flag is added; otherwise it is removed.

    Returns:
        None
    """
    if add:
        command = (
            r"sed -i '/\[global\]/a rgw lc debug interval = 10' /etc/ceph/ceph.conf"
        )
    else:
        command = "sed -i '/rgw lc debug interval/d' /etc/ceph/ceph.conf"

    command += " && systemctl restart ceph-radosgw@rgw.`hostname -s`.rgw0.service"

    for node in cluster.get_nodes(role="rgw"):
        node.exec_command(sudo=True, cmd=command)

    # Service restart can take time
    sleep(60)

    log.debug("Lifecycle dev configuration set to 10")
Example #4
def run(ceph_cluster: Ceph, config: Dict, **kwargs) -> int:
    """
    Entry point for module execution.

    Args:
        ceph_cluster    The cluster participating in the test.
        config          Configuration passed to the test
        kwargs          Additional configurations passed to the test.

    Returns:
        0 on Success else 1

    Raises:
        CommandFailed

    Example:

        - test:
            abort-on-fail: false
            config:
              install:
                - agent
            desc: Install and configure vault agent
            module: install_vault.py
            name: install vault agent
    """
    if "agent" in config["install"]:
        vault_cfg = get_cephci_config().get("vault")
        _install_agent(ceph_cluster, vault_cfg)

        client = ceph_cluster.get_nodes(role="client")[0]
        _configure_rgw_daemons(client, vault_cfg)

    return 0
Example #5
def _get_rgw_service_name(cluster: Ceph) -> str:
    """Return the RGW service name."""
    node = cluster.get_nodes(role="client")[0]
    cmd_ = "ceph orch ls rgw --format json"
    out, err = node.exec_command(sudo=True, cmd=cmd_)

    json_out = json.loads(out.read().decode().strip())
    return json_out[0]["service_name"]
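For reference, a hypothetical sample of the JSON that `ceph orch ls rgw --format json` may return, showing the only field _get_rgw_service_name reads; the field values are made up.

import json

# Hypothetical orchestrator output; values are illustrative only.
sample = '[{"service_type": "rgw", "service_name": "rgw.india", "status": {"running": 2, "size": 2}}]'
print(json.loads(sample)[0]["service_name"])  # -> rgw.india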
Example #6
def execute_teardown(cluster: Ceph, build: str) -> None:
    """
    Execute the test teardown phase.
    """
    command = "rm -rf s3-tests"

    node = cluster.get_nodes(role="client")[0]
    node.exec_command(sudo=True, cmd=command)

    del_lc_debug(cluster, build)
Example #7
def create_s3_conf(
    cluster: Ceph,
    build: str,
    host: str,
    port: str,
    secure: bool,
    kms_keyid: Optional[str] = None,
) -> None:
    """
    Generate the S3TestConf for test execution.

    Args:
        cluster     The cluster participating in the test
        build       The RHCS version string
        host        The RGW hostname to be set in the conf
        port        The RGW port number to be used in the conf
        secure      Whether the connection is secure or insecure.
        kms_keyid   key to be used for encryption
    """
    log.info("Creating the S3TestConfig file")
    data = {"host": host, "port": int(port), "secure": secure}

    rgw_node = cluster.get_nodes(role="rgw")[0]
    client_node = cluster.get_nodes(role="client")[0]

    if build.startswith("5"):
        rgw_node = client_node

    create_s3_user(node=rgw_node, user_prefix="main", data=data)
    create_s3_user(node=rgw_node, user_prefix="alt", data=data)
    create_s3_user(node=rgw_node, user_prefix="tenant", data=data)

    if kms_keyid:
        data["main"]["kms_keyid"] = kms_keyid

    templ = Template(S3CONF)
    _config = templ.render(data=data)

    conf_file = client_node.remote_file(file_name="s3-tests/config.yaml",
                                        file_mode="w")
    conf_file.write(_config)
    conf_file.flush()
Example #8
def run(ceph_cluster: Ceph, **kwargs) -> int:
    """
    Entry point to this module that executes the set of workflows.

    Here, Cloud Object Store Benchmark tool (COSBench) is installed on the nodes in the
    cluster having the following roles

        - cosbench-controller

    Args:
        ceph_cluster:   Cluster participating in the test.

    Returns:
        0 on Success and 1 on Failure.
    """
    LOG.info("Being COSBench deploy and configuration workflow.")
    client = ceph_cluster.get_nodes(role="client")[0]
    controllers = get_nodes_by_ids(ceph_cluster,
                                   kwargs["config"]["controllers"])
    drivers = get_nodes_by_ids(ceph_cluster,
                               kwargs["config"]["drivers"]["hosts"])

    try:
        install(controllers)
        for ctrl in controllers:
            enable_ports(ctrl, port=19088)

        install(drivers)
        data = list()
        driver_count = kwargs["config"]["drivers"].get("count", 1)
        for driver in drivers:
            for i in range(driver_count):
                port = 18088 + 100 * i
                enable_ports(driver, port)
                data.append({
                    "name": driver.shortname,
                    "ip_address": driver.ip_address,
                    "port": port,
                })

        config(controllers[0], data=data)

        execute_cosbench_script(drivers,
                                script=f"start-driver.sh {driver_count}")
        execute_cosbench_script(controllers, script="start-controller.sh")

        get_or_create_user(client)
    except BaseException as be:  # noqa
        LOG.error(be)
        return 1

    LOG.info("Successfully deployed COSBench!!!")
    return 0
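The driver port scheme used above is easiest to see with a small worked example; assuming count is set to 3 (an illustrative value), each driver process i on a host listens on 18088 + 100 * i.

# Worked example of the COSBench driver port assignment (driver_count=3 is assumed).
driver_count = 3
print([18088 + 100 * i for i in range(driver_count)])  # -> [18088, 18188, 18288]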
Example #9
def execute_setup(cluster: Ceph, config: dict) -> None:
    """
    Execute the prerequisites required to run the tests.

    It involves the following steps
        - install the required software (radosgw for CLI execution)
        - Clone the S3 tests repo in the client node
        - Install S3 pre-requisites in the client node
        - Open the firewall port on the RGW node
        - Add 'rgw_lc_debug_interval' key to the RGW node config
        - Restart the service

    Args:
        cluster: Ceph cluster participating in the test.
        config:  The key/value pairs passed by the tester.

    Raises:
        CommandFailed:  When a remote command returns a non-zero value.
    """
    build = config.get("build", config.get("rhbuild"))
    client_node = cluster.get_nodes(role="client")[0]
    rgw_node = cluster.get_nodes(role="rgw")[0]

    branch = config.get("branch", "ceph-luminous")
    clone_s3_tests(node=client_node, branch=branch)
    install_s3test_requirements(client_node, branch)

    host = rgw_node.shortname
    secure = config.get("is_secure", "no")
    port = "443" if secure.lower() == "yes" else rgw_frontend_port(
        cluster, build)
    create_s3_conf(cluster, build, host, port, secure)

    if not build.startswith("5"):
        open_firewall_port(rgw_node, port=port, protocol="tcp")

    add_lc_debug(cluster, build)
Example #10
def _install_agent(cluster: Ceph, config: Dict) -> None:
    """
    Installs and configures the vault-agent on all RGW nodes

    Args:
        cluster     Ceph cluster participating in the test
        config      key/value pairs useful for customization

    Returns:
        None

    Raises:
        CommandFailed
    """
    rgw_nodes = cluster.get_nodes(role="rgw")
    for node in rgw_nodes:
        LOG.debug(f"Vault install and configuration on {node.shortname}")
        _install_vault_packages(node)
        _create_agent_config(node, config)
        _create_agent_systemd(node)
Example #11
def _rgw_lc_debug(cluster: Ceph, add: bool = True) -> None:
    """
    Modifies the Lifecycle interval parameter used for testing.

    The configurable is enabled if add is set; otherwise it is disabled. In the case
    of cephadm, the value has to be set for every daemon.

    Args:
        cluster     The cluster participating in the test
        add         If set adds the configurable else unsets it.

    Returns:
        None
    """
    node = cluster.get_nodes(role="client")[0]

    out, err = node.exec_command(
        sudo=True, cmd="ceph orch ps --daemon_type rgw --format json")
    rgw_daemons = [
        f"client.rgw.{x['daemon_id']}" for x in loads(out.read().decode())
    ]

    out, err = node.exec_command(
        sudo=True, cmd="ceph orch ls --service_type rgw --format json")
    rgw_services = [x["service_name"] for x in loads(out.read().decode())]

    # Set (or) Unset the lc_debug_interval for all daemons
    for daemon in rgw_daemons:
        if add:
            command = f"ceph config set {daemon} rgw_lc_debug_interval 10"
        else:
            command = f"ceph config rm {daemon} rgw_lc_debug_interval"

        node.exec_command(sudo=True, cmd=command)

    for service in rgw_services:
        node.exec_command(sudo=True, cmd=f"ceph orch restart {service}")

    # Restart can take time
    sleep(60)
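To show how the daemon and service names are derived before the config set/rm calls, here is a small sketch that applies the same comprehensions to hypothetical `ceph orch` JSON output; the daemon_id and service_name values are made up.

from json import loads

# Hypothetical orchestrator output, trimmed to the fields actually read.
ps_out = '[{"daemon_type": "rgw", "daemon_id": "india.node1.abcdef"}]'
ls_out = '[{"service_type": "rgw", "service_name": "rgw.india"}]'

print([f"client.rgw.{x['daemon_id']}" for x in loads(ps_out)])  # -> ['client.rgw.india.node1.abcdef']
print([x["service_name"] for x in loads(ls_out)])               # -> ['rgw.india']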
Example #12
def get_rgw_frontend(cluster: Ceph) -> Tuple:
    """
    Returns the RGW frontend information.

    The frontend information is found by
        - getting the config dump (or)
        - reading the config file

    Args:
         cluster     The cluster participating in the test

    Returns:
         Tuple(str, bool, int)
            lib         beast or civetweb
            secure      True if secure else False
            port        the configured port
    """
    frontend_value = None

    try:
        node = cluster.get_nodes(role="client")[0]
        out, err = node.exec_command(sudo=True,
                                     cmd="ceph config dump --format json")
        configs = loads(out.read().decode())

        for config in configs:
            if config.get("name").lower() != "rgw_frontends":
                continue

            frontend_value = config.get("value").split()

        # the command would work but may not have the required values
        if not frontend_value:
            raise AttributeError(
                "Config has no frontend information. Trying conf file")

    except BaseException as e:
        log.debug(e)

        # Process via config
        node = cluster.get_nodes(role="rgw")[0]

        # rgw frontends = civetweb port=192.168.122.199:8080 num_threads=100
        command = "grep -e '^rgw frontends' /etc/ceph/ceph.conf"
        out, err = node.exec_command(sudo=True, cmd=command)
        key, sep, value = out.read().decode().partition("=")
        frontend_value = value.lstrip().split()

        if not frontend_value:
            raise AttributeError("RGW frontend details not found in conf.")

    lib = "beast" if "beast" in frontend_value else "civetweb"
    secure = False
    port = 80

    # Double check the port number
    for value in frontend_value:
        # support values like endpoint=x.x.x.x:port ssl_port=443 port=x.x.x.x:8080
        if "port" in value or "endpoint" in value:
            sep = ":" if ":" in value else "="
            port = value.split(sep)[-1]
            continue

        if not secure and "ssl" in value.lower():
            secure = True
            continue

    return lib, secure, port
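The parsing loop above can be sanity-checked in isolation; the following sketch runs the same loop over a made-up frontend string and prints the resulting tuple.

# Same loop as in get_rgw_frontend, applied to a hypothetical frontend value.
frontend_value = "beast ssl_certificate=/etc/ceph/server.pem port=443".split()

lib = "beast" if "beast" in frontend_value else "civetweb"
secure, port = False, 80
for value in frontend_value:
    if "port" in value or "endpoint" in value:
        sep = ":" if ":" in value else "="
        port = value.split(sep)[-1]
        continue
    if not secure and "ssl" in value.lower():
        secure = True

print(lib, secure, port)  # -> beast True 443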
Example #13
def run(ceph_cluster: Ceph, config: Dict, **kwargs) -> int:
    """CephCI framework entry point for the module.

    This method tunes the network based on the given set of configurations for the
    mentioned set of nodes.

    Note: Exercise the test module before deploying RHCS.

    Examples:
      # Apply network delay for the given nodes
        config:
          nodes:
            - node1
            - node2
          network-device: eth0
          rule: root netem delay 100ms 50ms distribution normal

      # Apply network delay for the given CephNode role
        config:
          roles:
            - rgw
            - client
          network-device: eth0
          rule: root netem loss 0.5%
          modify: true

    Args:
        ceph_cluster (Ceph):    Ceph Cluster participating in the test environment.
        config (dict):          Configuration that needs to be applied
        kwargs (dict):          Key-value pairs that can be leveraged.

    Returns:
        0 on success else 1 on failures
    """
    LOG.info("Executing network shaping workflow.")
    rc = 0

    nodes = list()
    if "roles" in config.keys():
        for role in config["roles"]:
            nodes += ceph_cluster.get_nodes(role=role)

    if "nodes" in config.keys():
        nodes += get_nodes_by_ids(ceph_cluster, config["nodes"])

    for node in nodes:
        dev = get_network_device(node)

        if config.get("network-device", dev) != dev:
            LOG.debug(f"The default network device is {dev}")
            LOG.error(f"{config['network-device']} is not found in {node.vmshortname}")
            rc = 1
            continue

        verb = "change" if config.get("modify", False) else "add"
        rule = config["rule"]

        try:
            exec_command(
                node=node,
                check_ec=True,
                sudo=True,
                command=f"tc qdisc {verb} dev {dev} {rule}",
            )
        except Exception as be:  # noqa
            LOG.debug(f"Failed to apply tc rule on {node.vmshortname}")
            LOG.warning(be)
            rc = 1

    return rc
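For the first docstring example, and assuming get_network_device returns eth0 with modify unset, the command assembled for each node looks like this; the device name is an assumption for illustration.

# Sketch only; eth0 and the rule come from the docstring example above.
verb = "add"  # becomes "change" when config["modify"] is true
dev = "eth0"
rule = "root netem delay 100ms 50ms distribution normal"
print(f"tc qdisc {verb} dev {dev} {rule}")
# -> tc qdisc add dev eth0 root netem delay 100ms 50ms distribution normal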
Example #14
def create_s3_conf(cluster: Ceph, build: str, host: str, port: str,
                   secure: str) -> None:
    """
    Generate the S3TestConf for test execution.

    Args:
        cluster:    The cluster participating in the test
        build:      The RHCS version string
        host:       The RGW hostname to be set in the conf
        port:       The RGW port number to be used in the conf
        secure:     Whether the connection is secure or insecure.
    """
    log.info("Creating the S3TestConfig file")

    rgw_node = cluster.get_nodes(role="rgw")[0]
    client_node = cluster.get_nodes(role="client")[0]

    if build.startswith("5"):
        rgw_node = client_node

    main_user = create_s3_user(node=rgw_node,
                               display_name="main-user",
                               email=True)
    alt_user = create_s3_user(node=rgw_node,
                              display_name="alt-user",
                              email=True)
    tenant_user = create_s3_user(node=rgw_node,
                                 display_name="tenant",
                                 email=True)

    _config = """
[DEFAULT]
host = {host}
port = {port}
is_secure = {secure}

[fixtures]
bucket prefix = cephuser-{random}-

[s3 main]
user_id = {main_id}
display_name = {main_name}
access_key = {main_access_key}
secret_key = {main_secret_key}
email = {main_email}
api_name = default

[s3 alt]
user_id = {alt_id}
display_name = {alt_name}
email = {alt_email}
access_key = {alt_access_key}
secret_key = {alt_secret_key}

[s3 tenant]
user_id = {tenant_id}
display_name = {tenant_name}
email = {tenant_email}
access_key = {tenant_access_key}
secret_key = {tenant_secret_key}""".format(
        host=host,
        port=port,
        secure=secure,
        random="{random}",
        main_id=main_user["user_id"],
        main_name=main_user["display_name"],
        main_access_key=main_user["keys"][0]["access_key"],
        main_secret_key=main_user["keys"][0]["secret_key"],
        main_email=main_user["email"],
        alt_id=alt_user["user_id"],
        alt_name=alt_user["display_name"],
        alt_email=alt_user["email"],
        alt_access_key=alt_user["keys"][0]["access_key"],
        alt_secret_key=alt_user["keys"][0]["secret_key"],
        tenant_id=tenant_user["user_id"],
        tenant_name=tenant_user["display_name"],
        tenant_email=tenant_user["email"],
        tenant_access_key=tenant_user["keys"][0]["access_key"],
        tenant_secret_key=tenant_user["keys"][0]["secret_key"],
    )

    conf_file = client_node.remote_file(file_name="s3-tests/config.yaml",
                                        file_mode="w")
    conf_file.write(_config)
    conf_file.flush()
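One detail worth noting in the str.format call above: passing random="{random}" keeps the {random} placeholder literal in the rendered config, so it can be filled in later at test time rather than here. A quick check of that trick:

# Demonstrates the random="{random}" substitution used in the template above.
template = "bucket prefix = cephuser-{random}-"
print(template.format(random="{random}"))  # -> bucket prefix = cephuser-{random}-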