Example #1
File: cosbench.py    Project: udaysk23/cephci
def run(ceph_cluster: Ceph, **kwargs) -> int:
    """
    Entry point to this module that executes the set of workflows.

    Here, the Cloud Object Store Benchmark tool (COSBench) is installed on the
    cluster nodes having the following roles

        - cosbench-controller

    Args:
        ceph_cluster:   Cluster participating in the test.

    Returns:
        0 on Success and 1 on Failure.
    """
    LOG.info("Being COSBench deploy and configuration workflow.")
    client = ceph_cluster.get_nodes(role="client")[0]
    controllers = get_nodes_by_ids(ceph_cluster,
                                   kwargs["config"]["controllers"])
    drivers = get_nodes_by_ids(ceph_cluster,
                               kwargs["config"]["drivers"]["hosts"])

    try:
        install(controllers)
        for ctrl in controllers:
            enable_ports(ctrl, port=19088)

        install(drivers)
        data = list()
        driver_count = kwargs["config"]["drivers"].get("count", 1)
        for driver in drivers:
            for i in range(driver_count):
                port = 18088 + 100 * i
                enable_ports(driver, port)
                data.append({
                    "name": driver.shortname,
                    "ip_address": driver.ip_address,
                    "port": port,
                })

        config(controllers[0], data=data)

        execute_cosbench_script(drivers,
                                script=f"start-driver.sh {driver_count}")
        execute_cosbench_script(controllers, script="start-controller.sh")

        get_or_create_user(client)
    except BaseException as be:  # noqa
        LOG.error(be)
        return 1

    LOG.info("Successfully deployed COSBench!!!")
    return 0
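
The driver bookkeeping above maps each COSBench driver to driver_count ports starting at 18088 in steps of 100. Below is a standalone sketch of that mapping, using plain dicts as stand-ins for cephci node objects (the "shortname" and "ip_address" fields mirror the attributes used above and are illustrative).

def build_driver_data(drivers, driver_count=1, base_port=18088, step=100):
    """Sketch of the driver/port table handed to config(); not the cephci helper."""
    data = []
    for driver in drivers:
        for i in range(driver_count):
            data.append(
                {
                    "name": driver["shortname"],
                    "ip_address": driver["ip_address"],
                    "port": base_port + step * i,
                }
            )
    return data


# Illustrative input; real values come from get_nodes_by_ids().
print(build_driver_data([{"shortname": "node1", "ip_address": "10.0.0.11"}], driver_count=2))
# [{'name': 'node1', 'ip_address': '10.0.0.11', 'port': 18088},
#  {'name': 'node1', 'ip_address': '10.0.0.11', 'port': 18188}]
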
Example #2
File: test_rbd.py    Project: udaysk23/cephci
def run(ceph_cluster, **kwargs) -> int:
    """
    Method that executes the external test suite.

    Args:
        ceph_cluster    The storage cluster participating in the test.
        kwargs          The supported keys are
                        config  contains the test configuration

    Returns:
        0 - Success
        1 - Failure
    """
    log.info("Running RBD Sanity tests.")

    config = kwargs["config"]
    script_dir = config["script_path"]
    script = config["script"]

    branch = config.get("branch", "pacific")
    nodes = config.get("nodes", [])
    rhbuild = config.get("rhbuild")

    if nodes:
        nodes = get_nodes_by_ids(ceph_cluster, nodes)
    else:
        # By default, tests would be executed on a single client node
        nodes = [ceph_cluster.get_nodes(role="client")[0]]

    os_ver = rhbuild.split("-")[-1]
    if "4." in rhbuild and os_ver == "8":
        nodes[0].exec_command(
            cmd="sudo /usr/sbin/alternatives --set python /usr/bin/python3"
        )

    if rhbuild[0] > "4":
        out, err = nodes[0].exec_command(
            sudo=True, cmd="ceph config get mon mon_allow_pool_delete --format json"
        )

        if not json.loads(out):
            nodes[0].exec_command(
                sudo=True, cmd="ceph config set mon mon_allow_pool_delete true"
            )
            nodes[0].exec_command(sudo=True, cmd="ceph orch restart mon")

    for node in nodes:
        one_time_setup(node, rhbuild, branch=branch)

        cmd = f"cd ceph/{script_dir}; sudo bash {script}"
        if script == "*":
            cmd = f"cd ceph/{script_dir}; for test in $(ls); do sudo bash $test; done"

        node.exec_command(cmd=cmd, check_ec=True, timeout=1200)

    return 0
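
The mon_allow_pool_delete guard above relies on the --format json output being a bare JSON boolean, so json.loads yields a Python bool directly. A minimal sketch with a hard-coded sample output (the real value comes from the cluster):

import json

out = "false\n"  # sample output of `ceph config get mon mon_allow_pool_delete --format json`
if not json.loads(out):
    print("would run: ceph config set mon mon_allow_pool_delete true")
    print("would run: ceph orch restart mon")
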
Example #3
File: test_rbd.py    Project: pdhiran/cephci
def run(ceph_cluster, **kwargs) -> int:
    """
    Method that executes the external test suite.

    Args:
        ceph_cluster    The storage cluster participating in the test.
        kwargs          The supported keys are
                        config  contains the test configuration

    Returns:
        0 - Success
        1 - Failure
    """
    LOG.info("Running RBD Sanity tests.")

    config = kwargs["config"]
    script_dir = config["script_path"]
    script = config["script"]

    branch = config.get("branch", "pacific")
    nodes = config.get("nodes", [])
    rhbuild = config.get("rhbuild")

    if nodes:
        nodes = get_nodes_by_ids(ceph_cluster, nodes)
    else:
        # By default, tests would be executed on a single client node
        nodes = [ceph_cluster.get_nodes(role="client")[0]]

    if "5." in rhbuild:
        nodes[0].exec_command(
            cmd="ceph config set mon mon_allow_pool_delete true")
        nodes[0].exec_command(cmd="ceph orch restart mon")
    else:
        nodes[0].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")
        mon_nodes = ceph_cluster.get_nodes(role="mon")
        for ceph_mon in mon_nodes:
            ceph_mon.exec_command(
                sudo=True,
                cmd=f"systemctl restart ceph-mon@{ceph_mon.hostname}",
                long_running=True,
            )

    for node in nodes:
        one_time_setup(node, branch=branch)

        cmd = f"cd ceph/{script_dir}; sudo bash {script}"
        if script == "*":
            cmd = f"cd ceph/{script_dir}; for test in $(ls); do sudo bash $test; done"

        node.exec_command(cmd=cmd, check_ec=True, timeout=1200)

    return 0
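
The build-dependent restart above boils down to: cephadm-managed 5.x builds restart the monitors through the orchestrator, while older builds restart each ceph-mon unit directly. A small sketch that only assembles the commands (hostnames are illustrative):

def mon_restart_cmds(rhbuild, mon_hostnames):
    if "5." in rhbuild:
        return ["ceph orch restart mon"]
    return [f"systemctl restart ceph-mon@{host}" for host in mon_hostnames]


print(mon_restart_cmds("4.3", ["mon-a", "mon-b"]))
# ['systemctl restart ceph-mon@mon-a', 'systemctl restart ceph-mon@mon-b']
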
Example #4
    def get_hostnames(self, node_names):
        """
        Return list of hostnames
        Args:
            node_names: node names

        Returns:
            list of hostnames
        """
        nodes = get_nodes_by_ids(self.cluster, node_names)
        return [node.shortname for node in nodes]
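
get_nodes_by_ids itself is defined elsewhere in cephci; the behaviour every example here relies on, looking cluster nodes up by their short names, can be sketched as below. The get_nodes() call and the shortname attribute are assumptions inferred from the callers, not the actual implementation.

def get_nodes_by_ids_sketch(cluster, node_names):
    # Return the node objects whose short name appears in node_names (assumed lookup).
    wanted = set(node_names)
    return [node for node in cluster.get_nodes() if node.shortname in wanted]
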
Example #5
def generate_endpoint_list(ceph_cluster: Ceph, endpoints):
    """Generates the list of endpoints.

    Returns:
         list of '<ip_address>:<port>' strings, one per endpoint
    """
    LOG.debug("Get endpoints")
    rgw_endpoints = []
    for endpt in endpoints:
        host = []
        if ":" in endpt:
            host.append(endpt.split(":")[0])
            hostname = get_nodes_by_ids(ceph_cluster, host)
            rgw_node_ip = hostname[0].ip_address
            port = endpt.split(":")[1]
        else:
            host.append(endpt)
            hostname = get_nodes_by_ids(ceph_cluster, host)
            rgw_node_ip = hostname[0].ip_address
            port = 80
        endpoint = str(rgw_node_ip) + ":" + str(port)
        rgw_endpoints.append(endpoint)
    return rgw_endpoints
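
Stripped of the cluster lookup, the endpoint handling above is plain "host[:port]" parsing with a default port of 80. A standalone sketch with a hypothetical resolve_ip callable standing in for the get_nodes_by_ids lookup:

def parse_endpoint(endpt, resolve_ip, default_port=80):
    host, _, port = endpt.partition(":")
    return f"{resolve_ip(host)}:{port or default_port}"


print(parse_endpoint("node1:8080", lambda host: "10.0.0.11"))  # 10.0.0.11:8080
print(parse_endpoint("node2", lambda host: "10.0.0.12"))       # 10.0.0.12:80
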
Example #6
    def apply(self, config: Dict) -> None:
        """
        Deploy ISCSI client daemon using the provided arguments.

        Args:
            config (Dict) : Key/value pairs provided from the test scenario

        Example::

            config:
                command: apply
                service: iscsi
                base_cmd_args:          # arguments to ceph orch
                    concise: true
                    verbose: true
                    input_file: <name of spec>
                pos_args:
                    - india             # name of the pool
                    - api_user          # name of the API user
                    - api_pass          # password of the api_user.

                args:
                    trusted_ip_list:    # can be used as either a positional or keyword arg in 5.x
                        - node1
                        - node2         # space-separated list of IPs
                    placement:
                        label: iscsi    # either label or node.
                        nodes:
                            - node1
                        limit: 3    # number of daemons
                        sep: " "    # separator to be used for placements
                    dry-run: true
                    unmanaged: true

        """
        args = config.get("args")
        trusted_ip_list = args.get("trusted_ip_list")
        if trusted_ip_list:
            node_ips = get_nodes_by_ids(self.cluster, trusted_ip_list)
            args["trusted_ip_list"] = repr(" ".join(
                [node.ip_address for node in node_ips]))
        super().apply(config=config)
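
The trusted_ip_list conversion above resolves each node ID, joins the IP addresses with spaces, and wraps the result in repr() so the quoted string survives shell expansion. A sketch with sample addresses:

ips = ["10.0.0.11", "10.0.0.12"]       # sample resolved addresses
trusted_ip_list = repr(" ".join(ips))
print(trusted_ip_list)                 # '10.0.0.11 10.0.0.12'
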
Example #7
def run(ceph_cluster: Ceph, **kwargs) -> int:
    """
    Entry point to this module that configures haproxy.
    """
    LOG.info("Configuring HAProxy")
    haproxy_clients = get_nodes_by_ids(ceph_cluster,
                                       kwargs["config"]["haproxy_clients"])

    try:
        rgw_endpoints = generate_endpoint_list(
            ceph_cluster, kwargs["config"]["rgw_endpoints"])
        for hprx in haproxy_clients:
            install(hprx)
            enable_ports(hprx, port=5000)
            config(hprx, rgw_endpoints)
            restart_service(hprx)

    except BaseException as be:  # noqa
        LOG.error(be)
        return 1

    LOG.info("Successfully Configured HAProxy!!!")
    return 0
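
For reference, a hedged example of the configuration this module reads through kwargs["config"]; the node IDs and endpoint strings are purely illustrative:

config = {
    "haproxy_clients": ["node1"],
    "rgw_endpoints": ["node2:8080", "node3"],   # a bare host defaults to port 80
}
# run(ceph_cluster, config=config) installs HAProxy on node1, opens port 5000,
# and configures it to balance across the resolved RGW endpoints.
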
Example #8
    def apply(self: ServiceProtocol, config: Dict) -> None:
        """
        Execute the apply method using the object's service name and provided input.

        Args:
            config (Dict):     Key/value pairs passed from the test suite.


        Example::

            base_cmd_args   - key/value pairs to set for base command
            pos_args        - List to be added as positional params
            args            - Key/value pairs as optional arguments.

            config:
                command: apply
                service: rgw
                base_cmd_args:          # arguments to ceph orch
                    concise: true
                    verbose: true
                    input_file: <name of spec>
                pos_args:               # positional arguments
                    - india             # realm
                    - south             # zone
                args:
                    placement:
                        label: rgw_south
                        nodes:              # A list of strings that would be looked up
                            - node1
                        limit: 3            # number of daemons
                        sep: " "            # separator to be used for placements
                    dry-run: true
                    unmanaged: true
        """
        base_cmd = ["ceph", "orch"]

        if config.get("base_cmd_args"):
            base_cmd_args_str = config_dict_to_string(
                config.get("base_cmd_args"))
            base_cmd.append(base_cmd_args_str)

        base_cmd.append("apply")
        base_cmd.append(self.SERVICE_NAME)

        pos_args = config.get("pos_args")
        if pos_args:
            base_cmd += pos_args

        args = config.get("args")

        node_names = None
        verify_service = False
        placement = args.pop("placement", {})

        if placement:
            placement_str = "--placement="
            verify_service = True

            if "label" in placement:
                label = placement["label"]
                node_names = [
                    node["hostname"] for node in self.get_hosts_by_label(label)
                ]
                placement_str += f'"label:{label}"'
                base_cmd.append(placement_str)

            if "nodes" in placement:
                nodes = placement.get("nodes")

                if "*" in nodes:
                    placement_str += '"*"'
                    node_names = list()
                    for node in self.cluster.node_list:
                        if (len(node.role.role_list) == 1
                                and ["client"] == node.role.role_list):
                            continue
                        node_names.append(node.shortname)
                elif "[" in nodes:
                    placement_str += '"%s"' % nodes
                    verify_service = False
                else:
                    nodes_ = get_nodes_by_ids(self.cluster, nodes)
                    node_names = [node.shortname for node in nodes_]

                    sep = placement.get("sep", " ")

                    # Support RGW count-per-host placement option
                    if placement.get(
                            "count-per-host") and self.SERVICE_NAME == "rgw":
                        node_names.append(
                            f"count-per-host:{placement['count-per-host']}")

                    node_str = f"{sep}".join(node_names)

                    limit = placement.pop("limit", None)
                    if limit:
                        placement_str += f"'{limit}{sep}{node_str}'"
                    else:
                        placement_str += f"'{node_str}'"

                base_cmd.append(placement_str)

            # Odd scenario when limit is specified but without nodes
            if "limit" in placement:
                base_cmd.append(f"--placement={placement.get('limit')}")

        # At this junction, optional arguments are left in dict
        if args:
            base_cmd.append(config_dict_to_string(args))

        out, err = self.shell(args=base_cmd)

        if not out:
            raise OrchApplyServiceFailure(self.SERVICE_NAME)

        # out value is "Scheduled <service_name> update..."
        service_name = re.search(r"Scheduled\s(.*)\supdate", out).group(1)

        if not verify_service:
            return

        if not self.check_service_exists(service_name=service_name):
            raise OrchApplyServiceFailure(self.SERVICE_NAME)
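
For the plain node-list case (no label, no "*"), the placement string above reduces to joining the short names with the chosen separator and optionally prefixing the limit. A minimal sketch with hypothetical inputs:

def build_placement(node_names, limit=None, sep=" "):
    node_str = sep.join(node_names)
    body = f"{limit}{sep}{node_str}" if limit else node_str
    return f"--placement='{body}'"


print(build_placement(["node1", "node2"], limit=3))
# --placement='3 node1 node2'
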
Example #9
def run(ceph_cluster: Ceph, config: Dict, **kwargs) -> int:
    """CephCI framework entry point for the module.

    This method tunes the network based on the given set of configurations for the
    mentioned set of nodes.

    Note: Exercise the test module before deploying RHCS.

    Examples:
      # Apply network delay for the given nodes
        config:
          nodes:
            - node1
            - node2
          network-device: eth0
          rule: root netem delay 100ms 50ms distribution normal

      # Apply network packet loss for the given CephNode roles
        config:
          roles:
            - rgw
            - client
          network-device: eth0
          rule: root netem loss 0.5%
          modify: true

    Args:
        ceph_cluster (Ceph):    Ceph Cluster participating in the test environment.
        config (dict):          Configuration that needs to be applied
        kwargs (dict):          Key-value pairs that can be leveraged.

    Returns:
        0 on success else 1 on failures
    """
    LOG.info("Executing network shaping workflow.")
    rc = 0

    nodes = list()
    if "roles" in config.keys():
        for role in config["roles"]:
            nodes += ceph_cluster.get_nodes(role=role)

    if "nodes" in config.keys():
        nodes += get_nodes_by_ids(ceph_cluster, config["nodes"])

    for node in nodes:
        dev = get_network_device(node)

        if config.get("network-device", dev) != dev:
            LOG.debug(f"The default network device is {dev}")
            LOG.error(f"{config['network-device']} is not found in {node.vmshortname}")
            rc = 1
            continue

        verb = "change" if config.get("modify", False) else "add"
        rule = config["rule"]

        try:
            exec_command(
                node=node,
                check_ec=True,
                sudo=True,
                command=f"tc qdisc {verb} dev {dev} {rule}",
            )
        except Exception as be:  # noqa
            LOG.debug(f"Failed to apply tc rule on {node.vmshortname}")
            LOG.warning(be)
            rc = 1

    return rc
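
The command assembled per node above is a plain tc invocation whose verb depends on the modify flag. A sketch that only builds the command string (device and rule are illustrative):

def tc_command(dev, rule, modify=False):
    verb = "change" if modify else "add"
    return f"tc qdisc {verb} dev {dev} {rule}"


print(tc_command("eth0", "root netem delay 100ms 50ms distribution normal"))
# tc qdisc add dev eth0 root netem delay 100ms 50ms distribution normal
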