Code example #1
0
File: scale_lib.py  Project: ypersky1980/ocs-ci
def add_worker_based_on_pods_count_per_node(node_count,
                                            expected_count,
                                            role_type=None,
                                            machineset_name=None):
    """
    Evaluate the running pod count on each node and add worker nodes when
    almost every node has already reached the expected pod count.

    Args:
        node_count (int): Number of additional nodes to be added
        expected_count (int): Expected pod count in one node
        role_type (str): Node role type used to select the nodes to inspect
        machineset_name (list): Machineset names to scale up if required

    Returns:
        bool: True if nodes were added, else False.

    Raises:
        UnsupportedPlatformError: For UPI vSphere/baremetal/Azure deployments

    """
    deployment_type = config.ENV_DATA["deployment_type"]
    platform = config.ENV_DATA["platform"].lower()

    # Automated scaling via machinesets is only supported on AWS IPI
    if deployment_type == "ipi" and platform == "aws":
        app_nodes = node.get_nodes(node_type=role_type)
        pod_count_dict = node.get_running_pod_count_from_node(
            node_type=role_type)
        saturated_nodes = []
        spare_nodes = []
        for node_obj in app_nodes:
            if pod_count_dict[f"{node_obj.name}"] >= expected_count:
                saturated_nodes.append(node_obj.name)
            else:
                spare_nodes.append(node_obj.name)
        # At most one node still has spare pod capacity: grow each machineset
        if len(spare_nodes) <= 1:
            for name in machineset_name:
                replicas = machine.get_replica_count(machine_set=name)
                machine.add_node(machine_set=name,
                                 count=(replicas + node_count))
                machine.wait_for_new_node_to_be_ready(name)
            return True
        logging.info(
            f"Enough pods can be created with available nodes {pod_count_dict}"
        )
        return False
    elif deployment_type == "upi" and platform in (
            "vsphere", "baremetal", "azure"):
        raise UnsupportedPlatformError("Unsupported Platform to add worker")
Code example #2
0
def add_worker_based_on_cpu_utilization(node_count,
                                        expected_percent,
                                        role_type=None,
                                        machineset_name=None):
    """
    Evaluate the CPU utilization of each node and add worker nodes when
    nearly all nodes exceed the expected utilization percentage.

    Args:
        node_count (int): Number of additional nodes to be added
        expected_percent (int): Expected utilization percent
        role_type (str): Node role type used to select the nodes to inspect
        machineset_name (list): Machineset names to scale up if required

    Returns:
        bool: True if nodes were added, else False.

    Raises:
        UnsupportedPlatformError: For UPI vSphere/baremetal/Azure deployments

    """
    deployment_type = config.ENV_DATA["deployment_type"]
    platform = config.ENV_DATA["platform"].lower()

    # Automated scaling via machinesets is only supported on AWS IPI
    if deployment_type == "ipi" and platform == "aws":
        app_nodes = node.get_nodes(node_type=role_type)
        uti_dict = node.get_node_resource_utilization_from_oc_describe(
            node_type=role_type)
        busy_nodes = []
        idle_nodes = []
        for node_obj in app_nodes:
            if uti_dict[f"{node_obj.name}"]["cpu"] > expected_percent:
                busy_nodes.append(node_obj.name)
            else:
                idle_nodes.append(node_obj.name)
        # At most one node is below the threshold: grow each machineset
        if len(idle_nodes) <= 1:
            for name in machineset_name:
                replicas = machine.get_replica_count(machine_set=name)
                machine.add_node(machine_set=name,
                                 count=(replicas + node_count))
                machine.wait_for_new_node_to_be_ready(name)
            return True
        logging.info(
            f"Enough resource available for more pod creation {uti_dict}")
        return False
    elif deployment_type == "upi" and platform in (
            "vsphere", "baremetal", "azure"):
        raise UnsupportedPlatformError("Unsupported Platform to add worker")
Code example #3
0
def check_enough_resource_available_in_workers(ms_name=None,
                                               pod_dict_path=None):
    """
    Check whether the workers have enough resources and, if not, add
    workers on the automation supported platforms.

    Args:
        ms_name (list): Required param in-case of aws platform to increase the worker
        pod_dict_path (str): Pod dict path for nginx pod.

    Raises:
        UnsupportedPlatformError: For UPI vSphere/baremetal/Azure deployments

    """
    deployment_type = config.ENV_DATA["deployment_type"]
    platform = config.ENV_DATA["platform"].lower()

    # Worker addition is only automated on AWS IPI
    if deployment_type == "ipi" and platform == "aws":
        if pod_dict_path == constants.NGINX_POD_YAML:
            # The expected_count value of 140 is based on manual execution
            # results with m5.4xlarge instances and the nginx pod
            # TODO: Revisit the expected_count value once there is support for
            # TODO: more pod creation in one worker node
            nodes_added = add_worker_based_on_pods_count_per_node(
                machineset_name=ms_name,
                node_count=1,
                expected_count=140,
                role_type="app,worker",
            )
        else:
            nodes_added = add_worker_based_on_cpu_utilization(
                machineset_name=ms_name,
                node_count=1,
                expected_percent=59,
                role_type="app,worker",
            )
        if nodes_added:
            logging.info("Nodes added for app pod creation")
        else:
            logging.info("Existing resource are enough to create more pods")
    elif deployment_type == "upi" and platform in (
            "vsphere", "baremetal", "azure"):
        raise UnsupportedPlatformError("Unsupported Platform")
Code example #4
0
def get_expected_worker_count(scale_count=1500):
    """
    Get the expected worker count for the current platform, needed to
    scale the requested number of pods in the cluster.

    Args:
        scale_count (int): Scale count of the PVC+POD to be created

    Returns:
        int: Expected worker count to scale the required number of pods

    Raises:
        UnsupportedPlatformError: On an unrecognized deployment/platform combo
        UnexpectedBehaviour: When scale_count has no entry in the dict

    """
    # Expected worker counts come from the dict in constants.py
    worker_count_dict = constants.SCALE_WORKER_DICT
    if scale_count not in worker_count_dict:
        raise UnexpectedBehaviour(
            "Scale_count value is not matching the dict key")
    # Map (deployment_type, platform) to the per-platform dict key
    platform_key_map = {
        ("ipi", "aws"): "aws",
        ("upi", "vsphere"): "vmware",
        ("upi", "baremetal"): "bm",
        ("upi", "azure"): "azure",
    }
    deployment_type = config.ENV_DATA["deployment_type"]
    platform = config.ENV_DATA["platform"].lower()
    try:
        platform_key = platform_key_map[(deployment_type, platform)]
    except KeyError:
        raise UnsupportedPlatformError("Unsupported Platform")
    return worker_count_dict[scale_count][platform_key]
Code example #5
0
def get_device_paths(worker_names):
    """
    Retrieve a list of the device paths for each worker node

    Args:
        worker_names (list): worker node names

    Returns:
        list: device path ids

    Raises:
        UnsupportedPlatformError: if LSO deployment is not supported on the
            configured platform

    """
    device_paths = []
    platform = config.ENV_DATA.get('platform').lower()
    if platform == 'aws':
        pattern = 'nvme-Amazon_EC2_NVMe_Instance_Storage'
    # TODO: add patterns for vsphere and bare metal
    else:
        # Fix: exception args are not lazily %-formatted the way logging
        # calls are, so the old ('... %s', platform) form produced an
        # unformatted tuple as the exception message
        raise UnsupportedPlatformError(
            f'LSO deployment is not supported for platform: {platform}')
    for worker in worker_names:
        logger.info("Retrieving device path for node: %s", worker)
        # List the stable by-id symlinks on the node via a debug pod
        cmd = (f"oc debug nodes/{worker} "
               f"-- chroot /host ls -la /dev/disk/by-id/")
        out = run_cmd(cmd)
        out_lines = out.split('\n')
        nvme_lines = [line for line in out_lines if pattern in line]
        for nvme_line in nvme_lines:
            # The token matching the pattern in the `ls -la` line is the
            # device id itself
            device_path = [
                part for part in nvme_line.split(' ') if pattern in part
            ][0]
            logger.info("Adding %s to device paths", device_path)
            device_paths.append(f'/dev/disk/by-id/{device_path}')

    return device_paths
Code example #6
0
File: lso_helpers.py  Project: sidhant-agrawal/ocs-ci
def get_device_paths(worker_names):
    """
    Retrieve a list of the device paths for each worker node

    Args:
        worker_names (list): worker node names

    Returns:
        list: device path ids

    Raises:
        UnsupportedPlatformError: if LSO deployment is not supported on the
            configured platform

    """
    device_paths = []
    platform = config.ENV_DATA.get("platform").lower()

    # IBM Power clusters supply the disk path directly through config
    if platform == constants.IBM_POWER_PLATFORM:
        device_paths = config.ENV_DATA.get("disk_pattern").lower()
        return [device_paths]
    if platform == "aws":
        pattern = "nvme-Amazon_EC2_NVMe_Instance_Storage"
    elif platform == "vsphere":
        pattern = "wwn"
    elif platform == "baremetal":
        pattern = config.ENV_DATA.get("disk_pattern")
    elif platform == "baremetalpsi":
        pattern = "virtio"
    # TODO: add patterns bare metal
    else:
        # Fix: exception args are not lazily %-formatted the way logging
        # calls are, so the old ("... %s", platform) form produced an
        # unformatted tuple as the exception message
        raise UnsupportedPlatformError(
            f"LSO deployment is not supported for platform: {platform}")
    for worker in worker_names:
        logger.info("Retrieving device path for node: %s", worker)
        out = _get_disk_by_id(worker)
        out_lines = out.split("\n")
        # Keep only the by-id entries matching the platform pattern while
        # excluding the node's root disk
        nvme_lines = [
            line for line in out_lines
            if (pattern in line and constants.ROOT_DISK_NAME not in line)
        ]
        for nvme_line in nvme_lines:
            # The token matching the pattern in the `ls -la` line is the
            # device id itself
            device_path = [
                part for part in nvme_line.split(" ") if pattern in part
            ][0]
            logger.info("Adding %s to device paths", device_path)
            device_paths.append(f"/dev/disk/by-id/{device_path}")

    return device_paths
Code example #7
0
File: deployment.py  Project: phlogistonjohn/ocs-ci
def get_device_paths(worker_names):
    """
    Retrieve a list of the device paths for each worker node

    Args:
        worker_names (list): worker node names

    Returns:
        list: device path ids

    Raises:
        UnsupportedPlatformError: if LSO deployment is not supported on the
            configured platform

    """
    device_paths = []
    platform = config.ENV_DATA.get('platform').lower()
    if platform == 'aws':
        pattern = 'nvme-Amazon_EC2_NVMe_Instance_Storage'
    elif platform == 'vsphere':
        pattern = 'wwn'
    elif platform == 'baremetal':
        pattern = config.ENV_DATA.get('disk_pattern')
    elif platform == 'baremetalpsi':
        pattern = 'virtio'
    # TODO: add patterns bare metal
    else:
        # Fix: exception args are not lazily %-formatted the way logging
        # calls are, so the old ('... %s', platform) form produced an
        # unformatted tuple as the exception message
        raise UnsupportedPlatformError(
            f'LSO deployment is not supported for platform: {platform}'
        )
    for worker in worker_names:
        logger.info("Retrieving device path for node: %s", worker)
        out = _get_disk_by_id(worker)
        out_lines = out.split('\n')
        # Keep only the by-id entries matching the platform pattern while
        # excluding the node's root disk
        nvme_lines = [
            line for line in out_lines if (
                pattern in line and constants.ROOT_DISK_NAME not in line
            )
        ]
        for nvme_line in nvme_lines:
            # The token matching the pattern in the `ls -la` line is the
            # device id itself
            device_path = [
                part for part in nvme_line.split(' ') if pattern in part
            ][0]
            logger.info("Adding %s to device paths", device_path)
            device_paths.append(f'/dev/disk/by-id/{device_path}')

    return device_paths
Code example #8
0
def get_gather_bootstrap_node_data():
    """
    Retrieve node IPs required by the gather bootstrap command

    Raises:
        UnsupportedPlatformError: If we do not support gathering bootstrap
            data for the configured provider

    Returns:
        dict: Public IP of the bootstrap node and Private IPs of master nodes

    """
    logger.info("Retrieving bootstrap node data")
    platform = config.ENV_DATA['platform'].lower()
    if platform == constants.AWS_PLATFORM:
        return get_node_data_aws()
    elif platform == constants.VSPHERE_PLATFORM:
        return get_node_data_vsphere()
    # Fix: exception args are not lazily %-formatted the way logging calls
    # are, so the old ("... %s", platform) form produced an unformatted
    # tuple as the exception message
    raise UnsupportedPlatformError(
        f"Platform '{platform}' is not supported, "
        "unable to retrieve gather bootstrap node data")
Code example #9
0
def check_and_add_enough_worker(worker_count):
    """
    Function to check if there are enough workers available to scale pods.
    If there are not enough workers, workers are added on the automation
    supported platforms (currently only AWS IPI).
    The function also adds the scale label to the respective worker nodes.

    Args:
        worker_count (int): Expected worker count to be present in the setup

    Returns:
        bool: True if there is enough worker count, else raises exception.

    Raises:
        UnsupportedPlatformError: For UPI vSphere/baremetal/Azure deployments
        UnavailableResourceException: When workers cannot be added on any
            other platform

    """
    # Check either to use OCS workers for scaling app pods
    # Further continue to label the worker with scale label else not
    worker_list = node.get_worker_nodes()
    ocs_worker_list = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    scale_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
    if config.RUN.get("use_ocs_worker_for_scale"):
        # OCS workers may be reused for scaling: label every worker,
        # but only when no scale-labeled workers exist yet
        if not scale_worker:
            helpers.label_worker_node(node_list=worker_list,
                                      label_key="scale-label",
                                      label_value="app-scale")
    else:
        # Exclude OCS workers and label only the remaining workers
        if not scale_worker:
            for node_item in ocs_worker_list:
                worker_list.remove(node_item)
            if worker_list:
                helpers.label_worker_node(
                    node_list=worker_list,
                    label_key="scale-label",
                    label_value="app-scale",
                )
    scale_worker_list = machine.get_labeled_nodes(constants.SCALE_LABEL)
    logging.info(f"Print existing scale worker {scale_worker_list}")

    # Check if there is enough nodes to continue scaling of app pods
    if len(scale_worker_list) >= worker_count:
        logging.info(f"Setup has expected worker count {worker_count} "
                     "to continue scale of pods")
        return True
    else:
        logging.info(
            "There is no enough worker in the setup, will add enough worker "
            "for the automation supported platforms")
        # Add enough worker for AWS
        if (config.ENV_DATA["deployment_type"] == "ipi"
                and config.ENV_DATA["platform"].lower() == "aws"):
            # Create machineset for app worker nodes on each aws zone
            # Each zone will have one app worker node
            ms_name = list()
            labels = [("node-role.kubernetes.io/app", "app-scale")]
            # Reuse any existing app machinesets
            for obj in machine.get_machineset_objs():
                if "app" in obj.name:
                    ms_name.append(obj.name)
            if not ms_name:
                # No app machinesets yet: create one per zone when the
                # cluster spans three zones, otherwise a single one in "a"
                if len(machine.get_machineset_objs()) == 3:
                    for zone in ["a", "b", "c"]:
                        ms_name.append(
                            machine.create_custom_machineset(
                                instance_type="m5.4xlarge",
                                labels=labels,
                                zone=zone,
                            ))
                else:
                    ms_name.append(
                        machine.create_custom_machineset(
                            instance_type="m5.4xlarge",
                            labels=labels,
                            zone="a",
                        ))
                for ms in ms_name:
                    machine.wait_for_new_node_to_be_ready(ms)
            # Spread the expected worker count evenly across machinesets
            if len(ms_name) == 3:
                exp_count = int(worker_count / 3)
            else:
                exp_count = worker_count
            for name in ms_name:
                machine.add_node(machine_set=name, count=exp_count)
            for ms in ms_name:
                machine.wait_for_new_node_to_be_ready(ms)
            # Label the newly added workers: take all workers, remove
            # OCS-labeled and already scale-labeled ones, label the rest
            worker_list = node.get_worker_nodes()
            ocs_worker_list = machine.get_labeled_nodes(
                constants.OPERATOR_NODE_LABEL)
            scale_label_worker = machine.get_labeled_nodes(
                constants.SCALE_LABEL)
            ocs_worker_list.extend(scale_label_worker)
            # dict.fromkeys preserves order while dropping duplicates
            final_list = list(dict.fromkeys(ocs_worker_list))
            for node_item in final_list:
                if node_item in worker_list:
                    worker_list.remove(node_item)
            if worker_list:
                helpers.label_worker_node(
                    node_list=worker_list,
                    label_key="scale-label",
                    label_value="app-scale",
                )
            return True
        elif (config.ENV_DATA["deployment_type"] == "upi"
              and config.ENV_DATA["platform"].lower() == "vsphere"):
            raise UnsupportedPlatformError(
                "Unsupported Platform to add worker")
        elif (config.ENV_DATA["deployment_type"] == "upi"
              and config.ENV_DATA["platform"].lower() == "baremetal"):
            raise UnsupportedPlatformError(
                "Unsupported Platform to add worker")
        elif (config.ENV_DATA["deployment_type"] == "upi"
              and config.ENV_DATA["platform"].lower() == "azure"):
            raise UnsupportedPlatformError(
                "Unsupported Platform to add worker")
        else:
            raise UnavailableResourceException(
                "There is no enough worker nodes to continue app pod scaling")
Code example #10
0
def create_custom_machineset(
    role="app",
    instance_type="m4.xlarge",
    labels=None,
    taints=None,
    zone="a",
):
    """
    Function to create custom machineset; works only for AWS IPI.
    i.e. Using this, the user can create nodes with a different instance
    type and role.
    https://docs.openshift.com/container-platform/4.1/machine_management/creating-machineset.html

    Args:
        role (str): Role type to be added for node eg: it will be app,worker
        instance_type (str): Type of aws instance
        labels (list): List of Labels (key, val) to be added to the node
        taints (list): List of taints to be applied
        zone (str): Machineset zone for node creation.

    Returns:
        machineset (str): Created machineset name

    Raise:
        ResourceNotFoundError: Incase machineset creation failed
        UnsupportedPlatformError: Incase of wrong platform

    """
    # check for platform, since it's supported only for IPI
    if config.ENV_DATA["deployment_type"] == "ipi":
        machinesets_obj = OCP(
            kind=constants.MACHINESETS,
            namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE,
        )
        # Scan existing machinesets; the one in the requested zone supplies
        # the region, AMI, and cluster id reused for the new machineset
        for machine in machinesets_obj.get()["items"]:
            # Get inputs from existing machineset config.
            region = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("placement").get("region"))
            aws_zone = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("placement").get(
                    "availabilityZone"))
            cls_id = (machine.get("spec").get("selector").get(
                "matchLabels").get("machine.openshift.io/cluster-api-cluster"))
            ami_id = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("ami").get("id"))
            # Only the machineset whose availability zone matches the
            # requested zone is used as the template
            if aws_zone == f"{region}{zone}":
                machineset_yaml = templating.load_yaml(
                    constants.MACHINESET_YAML)

                # Update machineset_yaml with required values.
                # The new machineset is named <cluster-id>-<role>-<zone>
                machineset_yaml["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["metadata"][
                    "name"] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml["spec"]["selector"]["matchLabels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["spec"]["selector"]["matchLabels"][
                    "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machine-role"] = role
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machine-type"] = role
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["ami"]["id"] = ami_id
                # IAM profile, security group, and subnet names follow the
                # installer's <cluster-id>-... naming convention
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["iamInstanceProfile"][
                        "id"] = f"{cls_id}-worker-profile"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["instanceType"] = instance_type
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["placement"]["availabilityZone"] = aws_zone
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["placement"]["region"] = region
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["securityGroups"][0]["filters"][0]["values"][
                        0] = f"{cls_id}-worker-sg"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["subnet"]["filters"][0]["values"][
                        0] = f"{cls_id}-private-{aws_zone}"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["tags"][0][
                        "name"] = f"kubernetes.io/cluster/{cls_id}"

                # Apply the labels
                if labels:
                    for label in labels:
                        machineset_yaml["spec"]["template"]["spec"][
                            "metadata"]["labels"][label[0]] = label[1]

                # Apply the Taints
                # ex taint list looks like:
                # [ {'effect': 'NoSchedule',
                #    'key': 'node.ocs.openshift.io/storage',
                #    'value': 'true',
                #  }, {'effect': 'Schedule', 'key': 'xyz', 'value': 'False'} ]
                if taints:
                    machineset_yaml["spec"]["template"]["spec"].update(
                        {"taints": taints})

                # Create new custom machineset
                ms_obj = OCS(**machineset_yaml)
                ms_obj.create()
                if check_machineset_exists(f"{cls_id}-{role}-{aws_zone}"):
                    logging.info(
                        f"Machineset {cls_id}-{role}-{aws_zone} created")
                    return f"{cls_id}-{role}-{aws_zone}"
                else:
                    raise ResourceNotFoundError(
                        "Machineset resource not found")
    else:
        raise UnsupportedPlatformError("Functionality not supported in UPI")
Code example #11
0
File: mcg.py  Project: pkalever/ocs-ci
    def create_connection(self, cld_mgr, platform, conn_name=None):
        """
        Create a new NooBaa external connection for the given platform.

        Args:
            cld_mgr (obj): A cloud manager instance
            platform (str): Platform to use for new connection
            conn_name (str): The connection name to be used
                If None provided then the name will be generated

        Returns:
            bool: False if the connection creation failed

        Raises:
            UnsupportedPlatformError: If the platform is not AWS/Azure/RGW

        """
        if conn_name is None:
            conn_name = create_unique_resource_name(f"{platform}-connection",
                                                    "mcgconn")

        # Build the platform-specific RPC parameters; credentials come from
        # the corresponding cloud manager client
        conn_params = {"name": conn_name}
        if platform == constants.AWS_PLATFORM:
            conn_params.update({
                "auth_method": "AWS_V4",
                "endpoint": constants.MCG_NS_AWS_ENDPOINT,
                "endpoint_type": "AWS",
                "identity": cld_mgr.aws_client.access_key,
                "secret": cld_mgr.aws_client.secret_key,
            })
        elif platform == constants.AZURE_PLATFORM:
            conn_params.update({
                "endpoint": constants.MCG_NS_AZURE_ENDPOINT,
                "endpoint_type": "AZURE",
                "identity": cld_mgr.azure_client.account_name,
                "secret": cld_mgr.azure_client.credential,
            })
        elif platform == constants.RGW_PLATFORM:
            conn_params.update({
                "auth_method": "AWS_V4",
                "endpoint": cld_mgr.rgw_client.endpoint,
                "endpoint_type": "S3_COMPATIBLE",
                "identity": cld_mgr.rgw_client.access_key,
                "secret": cld_mgr.rgw_client.secret_key,
            })
        else:
            raise UnsupportedPlatformError(f"Unsupported Platform: {platform}")

        # Retry the RPC for up to 30s (every 3s) to allow newly created IAM
        # credentials to propagate on the provider side
        try:
            sampler = TimeoutSampler(
                30,
                3,
                self.send_rpc_query,
                "account_api",
                "add_external_connection",
                conn_params,
            )
            for resp in sampler:
                if "error" in resp.text:
                    logger.info(
                        f"{platform} IAM {conn_name} did not yet propagate: {resp.text}"
                    )
                else:
                    logger.info(f"Connection {conn_name} created successfully")
                    return True
        except TimeoutExpiredError:
            logger.error(f"Could not create connection {conn_name}")
            assert False
Code example #12
0
def create_custom_machineset(
    role="app",
    instance_type=None,
    labels=None,
    taints=None,
    zone="a",
):
    """
    Function to create a custom machineset; works for AWS, Azure, RHV and
    vSphere IPI clusters.
    i.e. Using this user can create nodes with different instance type and role.
    https://docs.openshift.com/container-platform/4.1/machine_management/creating-machineset.html

    Args:
        role (str): Role type to be added for node eg: it will be app,worker
        instance_type (str): Type of instance (platform default when None)
        labels (list): List of Labels (key, val) to be added to the node
        taints (list): List of taints to be applied
        zone (str): Machineset zone for node creation.

    Returns:
        machineset (str): Created machineset name. Implicitly returns None
            when no existing machineset matches the requested zone.

    Raise:
        ResourceNotFoundError: Incase machineset creation failed
        UnsupportedPlatformError: Incase of wrong platform

    """
    # Normalize once; the previous code lowercased the platform only for the
    # AWS branch, so e.g. "Azure" would incorrectly fall through to the
    # UnsupportedPlatformError branch.
    platform = config.ENV_DATA["platform"].lower()
    if platform == "aws":
        return _create_aws_machineset(role, instance_type, labels, taints, zone)
    elif platform == "azure":
        return _create_azure_machineset(role, instance_type, labels, taints, zone)
    elif platform == "rhv":
        return _create_rhv_machineset(role, labels, taints, zone)
    elif platform == constants.VSPHERE_PLATFORM:
        return _create_vsphere_machineset(role, labels, taints)
    else:
        raise UnsupportedPlatformError(
            "Functionality not supported in this platform")


def _get_machineset_items():
    """Return the machineset items from the openshift-machine-api namespace."""
    machinesets_obj = OCP(
        kind=constants.MACHINESETS,
        namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE,
    )
    return machinesets_obj.get()["items"]


def _set_common_machineset_fields(machineset_yaml, cls_id, role, ms_name):
    """Set the cluster/role/name labels shared by every platform template."""
    machineset_yaml["metadata"]["labels"][
        "machine.openshift.io/cluster-api-cluster"] = cls_id
    machineset_yaml["metadata"]["name"] = ms_name
    machineset_yaml["spec"]["selector"]["matchLabels"][
        "machine.openshift.io/cluster-api-cluster"] = cls_id
    machineset_yaml["spec"]["selector"]["matchLabels"][
        "machine.openshift.io/cluster-api-machineset"] = ms_name
    template_labels = machineset_yaml["spec"]["template"]["metadata"]["labels"]
    template_labels["machine.openshift.io/cluster-api-cluster"] = cls_id
    template_labels["machine.openshift.io/cluster-api-machine-role"] = role
    template_labels["machine.openshift.io/cluster-api-machine-type"] = role
    template_labels["machine.openshift.io/cluster-api-machineset"] = ms_name


def _apply_node_labels_and_taints(machineset_yaml, role, labels, taints):
    """Apply optional node labels and taints to the machineset template.

    Args:
        machineset_yaml (dict): Machineset definition to mutate in place.
        role (str): Node role; for "infra" the app label is removed.
        labels (list): Optional list of (key, value) label tuples.
        taints (list): Optional list of taint dicts, e.g.
            [{'effect': 'NoSchedule',
              'key': 'node.ocs.openshift.io/storage',
              'value': 'true'}]
    """
    template_spec = machineset_yaml["spec"]["template"]["spec"]
    if labels:
        for key, value in labels:
            template_spec["metadata"]["labels"][key] = value
        # Remove app label in case of infra nodes
        if role == "infra":
            template_spec["metadata"]["labels"].pop(constants.APP_LABEL, None)
    if taints:
        template_spec.update({"taints": taints})


def _create_and_verify_machineset(machineset_yaml, ms_name):
    """Create the machineset resource and verify that it exists.

    Returns:
        str: The machineset name.

    Raises:
        ResourceNotFoundError: When the machineset is not found after create.
    """
    ms_obj = OCS(**machineset_yaml)
    ms_obj.create()
    if check_machineset_exists(ms_name):
        log.info(f"Machineset {ms_name} created")
        return ms_name
    raise ResourceNotFoundError("Machineset resource not found")


def _create_aws_machineset(role, instance_type, labels, taints, zone):
    """Create a custom machineset on an AWS IPI cluster in the given zone."""
    aws_instance = instance_type if instance_type else "m4.xlarge"
    # `ms` avoids shadowing the `machine` module used elsewhere in this file.
    for ms in _get_machineset_items():
        # Get inputs from existing machineset config.
        provider_value = ms["spec"]["template"]["spec"]["providerSpec"]["value"]
        region = provider_value["placement"]["region"]
        aws_zone = provider_value["placement"]["availabilityZone"]
        cls_id = ms["spec"]["selector"]["matchLabels"][
            "machine.openshift.io/cluster-api-cluster"]
        ami_id = provider_value["ami"]["id"]
        # Only clone the machineset that lives in the requested zone.
        if aws_zone != f"{region}{zone}":
            continue
        ms_name = f"{cls_id}-{role}-{aws_zone}"
        machineset_yaml = templating.load_yaml(constants.MACHINESET_YAML)

        # Update machineset_yaml with required values.
        _set_common_machineset_fields(machineset_yaml, cls_id, role, ms_name)
        value = machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
            "value"]
        value["ami"]["id"] = ami_id
        value["iamInstanceProfile"]["id"] = f"{cls_id}-worker-profile"
        value["instanceType"] = aws_instance
        value["placement"]["availabilityZone"] = aws_zone
        value["placement"]["region"] = region
        value["securityGroups"][0]["filters"][0]["values"][
            0] = f"{cls_id}-worker-sg"
        value["subnet"]["filters"][0]["values"][
            0] = f"{cls_id}-private-{aws_zone}"
        value["tags"][0]["name"] = f"kubernetes.io/cluster/{cls_id}"

        _apply_node_labels_and_taints(machineset_yaml, role, labels, taints)
        return _create_and_verify_machineset(machineset_yaml, ms_name)


def _create_azure_machineset(role, instance_type, labels, taints, zone):
    """Create a custom machineset on an Azure IPI cluster in the given zone."""
    azure_instance = (
        instance_type if instance_type
        else constants.AZURE_PRODUCTION_INSTANCE_TYPE)
    for ms in _get_machineset_items():
        # Get inputs from existing machineset config.
        provider_value = ms["spec"]["template"]["spec"]["providerSpec"]["value"]
        region = provider_value["location"]
        azure_zone = provider_value["zone"]
        cls_id = ms["spec"]["selector"]["matchLabels"][
            "machine.openshift.io/cluster-api-cluster"]
        if azure_zone != zone:
            continue
        az_zone = f"{region}{zone}"
        ms_name = f"{cls_id}-{role}-{az_zone}"
        machineset_yaml = templating.load_yaml(constants.MACHINESET_YAML_AZURE)

        # Update machineset_yaml with required values.
        _set_common_machineset_fields(machineset_yaml, cls_id, role, ms_name)
        value = machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
            "value"]
        value["image"]["resourceID"] = (
            f"/resourceGroups/{cls_id}-rg/providers/"
            f"Microsoft.Compute/images/{cls_id}")
        value["location"] = region
        value["managedIdentity"] = f"{cls_id}-identity"
        value["resourceGroup"] = f"{cls_id}-rg"
        value["subnet"] = f"{cls_id}-worker-subnet"
        value["vmSize"] = azure_instance
        value["vnet"] = f"{cls_id}-vnet"
        value["zone"] = zone

        _apply_node_labels_and_taints(machineset_yaml, role, labels, taints)
        return _create_and_verify_machineset(machineset_yaml, ms_name)


def _create_rhv_machineset(role, labels, taints, zone):
    """Create a custom machineset on an RHV IPI cluster.

    Acts on the first machineset found (RHV has no zone placement to match).
    """
    for ms in _get_machineset_items():
        # Get inputs from existing machineset config.
        provider_value = ms["spec"]["template"]["spec"]["providerSpec"]["value"]
        cls_uuid = provider_value["cluster_id"]
        template_name = provider_value["template_name"]
        cls_id = ms["spec"]["selector"]["matchLabels"][
            "machine.openshift.io/cluster-api-cluster"]
        socket = provider_value["cpu"]["sockets"]
        ms_name = f"{cls_id}-{role}-{zone}"
        machineset_yaml = templating.load_yaml(constants.MACHINESET_YAML_RHV)

        # Update machineset_yaml with required values.
        _set_common_machineset_fields(machineset_yaml, cls_id, role, ms_name)
        value = machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
            "value"]
        value["cluster_id"] = cls_uuid
        value["template_name"] = template_name
        value["cpu"]["sockets"] = socket

        _apply_node_labels_and_taints(machineset_yaml, role, labels, taints)
        return _create_and_verify_machineset(machineset_yaml, ms_name)


def _create_vsphere_machineset(role, labels, taints):
    """Create a custom machineset on a vSphere IPI cluster.

    Acts on the first machineset found (vSphere has no zone placement to
    match) and copies its sizing/network/workspace settings.
    """
    for ms in _get_machineset_items():
        # Get inputs from existing machineset config.
        provider_value = ms["spec"]["template"]["spec"]["providerSpec"]["value"]
        cls_id = ms["spec"]["selector"]["matchLabels"][
            "machine.openshift.io/cluster-api-cluster"]
        workspace = provider_value["workspace"]
        ms_name = f"{cls_id}-{role}"
        machineset_yaml = templating.load_yaml(constants.MACHINESET_YAML_VMWARE)

        # Update machineset_yaml with required values.
        _set_common_machineset_fields(machineset_yaml, cls_id, role, ms_name)
        value = machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
            "value"]
        value["diskGiB"] = provider_value["diskGiB"]
        value["memoryMiB"] = provider_value["memoryMiB"]
        value["network"]["devices"][0]["networkName"] = (
            provider_value["network"]["devices"][0]["networkName"])
        value["numCPUs"] = provider_value["numCPUs"]
        value["numCoresPerSocket"] = provider_value["numCoresPerSocket"]
        value["template"] = provider_value["template"]
        value["workspace"]["datacenter"] = workspace["datacenter"]
        value["workspace"]["datastore"] = workspace["datastore"]
        value["workspace"]["folder"] = workspace["folder"]
        # BUGFIX: the vSphere providerSpec key is 'resourcePool' (camelCase).
        # The previous code wrote 'resourcepool', which added a bogus key and
        # left the template's resource pool untouched.
        value["workspace"]["resourcePool"] = workspace["resourcePool"]
        value["workspace"]["server"] = workspace["server"]

        _apply_node_labels_and_taints(machineset_yaml, role, labels, taints)
        return _create_and_verify_machineset(machineset_yaml, ms_name)
コード例 #13
0
ファイル: machine.py プロジェクト: humblec/ocs-ci
def create_custom_machineset(role='app',
                             instance_type='m4.xlarge',
                             label='app-scale',
                             zone='a'):
    """
    Function to create custom machineset works only for AWS
    i.e. Using this user can create nodes with different instance type and role.
    https://docs.openshift.com/container-platform/4.1/machine_management/creating-machineset.html

    Args:
        role (str): Role type to be added for node eg: it will be app,worker
        instance_type (str): Type of aws instance
        label (str): Label to be added to the node
        zone (str): Machineset zone for node creation.

    Returns:
        machineset (str): Created machineset name. Implicitly returns None
            when no existing machineset matches the requested zone.

    Raise:
        ResourceNotFoundError: Incase machineset creation failed
        UnsupportedPlatformError: Incase of wrong platform

    """
    # Custom machinesets rely on the machine-api operator, which manages
    # nodes only on IPI deployments; bail out early otherwise.
    if config.ENV_DATA['deployment_type'] != 'ipi':
        raise UnsupportedPlatformError("Functionality not supported in UPI")

    machinesets_obj = OCP(
        kind=constants.MACHINESETS,
        namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE)
    for machine in machinesets_obj.get()['items']:
        # Get inputs from existing machineset config so the new nodes
        # match the cluster (AMI, region/zone, cluster id).
        provider_value = machine.get('spec').get('template').get('spec').get(
            'providerSpec').get('value')
        region = provider_value.get('placement').get('region')
        aws_zone = provider_value.get('placement').get('availabilityZone')
        cls_id = machine.get('spec').get('selector').get(
            'matchLabels').get('machine.openshift.io/cluster-api-cluster')
        ami_id = provider_value.get('ami').get('id')
        # Only clone the machineset that lives in the requested zone.
        if aws_zone != f"{region}{zone}":
            continue
        ms_name = f"{cls_id}-{role}-{aws_zone}"
        machineset_yaml = templating.load_yaml(constants.MACHINESET_YAML)

        # Update machineset_yaml with required values.
        machineset_yaml['metadata']['labels'][
            'machine.openshift.io/cluster-api-cluster'] = cls_id
        machineset_yaml['metadata']['name'] = ms_name
        machineset_yaml['spec']['selector']['matchLabels'][
            'machine.openshift.io/cluster-api-cluster'] = cls_id
        machineset_yaml['spec']['selector']['matchLabels'][
            'machine.openshift.io/cluster-api-machineset'] = ms_name
        template_labels = machineset_yaml['spec']['template']['metadata'][
            'labels']
        template_labels['machine.openshift.io/cluster-api-cluster'] = cls_id
        template_labels['machine.openshift.io/cluster-api-machine-role'] = role
        template_labels['machine.openshift.io/cluster-api-machine-type'] = role
        template_labels[
            'machine.openshift.io/cluster-api-machineset'] = ms_name
        # Node-role label carrying the user supplied label value.
        machineset_yaml['spec']['template']['spec']['metadata'][
            'labels'][f"node-role.kubernetes.io/{role}"] = f"{label}"
        value = machineset_yaml['spec']['template']['spec']['providerSpec'][
            'value']
        value['ami']['id'] = ami_id
        value['iamInstanceProfile']['id'] = f"{cls_id}-worker-profile"
        value['instanceType'] = instance_type
        value['placement']['availabilityZone'] = aws_zone
        value['placement']['region'] = region
        value['securityGroups'][0]['filters'][0]['values'][
            0] = f"{cls_id}-worker-sg"
        value['subnet']['filters'][0]['values'][
            0] = f"{cls_id}-private-{aws_zone}"
        value['tags'][0]['name'] = f"kubernetes.io/cluster/{cls_id}"

        # Create new custom machineset
        ms_obj = OCS(**machineset_yaml)
        ms_obj.create()
        if check_machineset_exists(ms_name):
            logging.info(f"Machineset {ms_name} created")
            return ms_name
        else:
            # Fixed: message previously had a pointless f-string prefix (F541).
            raise ResourceNotFoundError("Machineset resource not found")
コード例 #14
0
def add_worker_node(instance_type=None):
    """
    Add and label worker nodes for scale testing.

    On AWS IPI clusters, "app" machinesets are created (one per zone when
    the cluster already has three machinesets, otherwise a single one in
    zone "a") and the non-OCS workers are labeled with the scale label.
    On vSphere/baremetal UPI the existing workers are used as-is.

    Args:
        instance_type (str): AWS instance type for the new app nodes.
            Defaults to "m5.4xlarge" when not provided.

    Returns:
        bool: True when worker nodes were added (AWS IPI path). Other
            supported platform combinations fall through and return None.

    Raises:
        UnsupportedPlatformError: For Azure UPI deployments.
    """
    # ms_name is module-level so teardown code elsewhere can delete the
    # machinesets created here.
    global ms_name
    ms_name = list()
    worker_list = node.get_worker_nodes()
    ocs_worker_list = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    scale_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
    if config.RUN.get("use_ocs_worker_for_scale"):
        # Scale pods are allowed on OCS workers: label every worker.
        if not scale_worker:
            helpers.label_worker_node(node_list=worker_list,
                                      label_key="scale-label",
                                      label_value="app-scale")
    else:
        # Label only the workers that are not running OCS.
        if not scale_worker:
            for node_item in ocs_worker_list:
                worker_list.remove(node_item)
            if worker_list:
                helpers.label_worker_node(
                    node_list=worker_list,
                    label_key="scale-label",
                    label_value="app-scale",
                )
    scale_worker_list = machine.get_labeled_nodes(constants.SCALE_LABEL)
    logging.info(f"Print existing scale worker {scale_worker_list}")

    if (config.ENV_DATA["deployment_type"] == "ipi"
            and config.ENV_DATA["platform"].lower() == "aws"):
        log.info("Adding worker nodes on the current cluster")
        labels = [("node-role.kubernetes.io/app", "app-scale")]
        # Reuse any pre-existing app machinesets.
        for obj in machine.get_machineset_objs():
            if "app" in obj.name:
                ms_name.append(obj.name)
        # Fixed: previous code assigned `instance_type = instance_type`
        # in the non-None branch, a no-op.
        if instance_type is None:
            instance_type = "m5.4xlarge"
        if not ms_name:
            # Three machinesets implies a three-zone cluster: create one
            # app machineset per zone; otherwise a single one in zone "a".
            if len(machine.get_machineset_objs()) == 3:
                for zone in ["a", "b", "c"]:
                    ms_name.append(
                        machine.create_custom_machineset(
                            instance_type=instance_type,
                            labels=labels,
                            zone=zone,
                        ))
            else:
                ms_name.append(
                    machine.create_custom_machineset(
                        instance_type=instance_type,
                        labels=labels,
                        zone="a",
                    ))
            for ms in ms_name:
                machine.wait_for_new_node_to_be_ready(ms)

        # Label the newly added workers: everything that is neither an
        # OCS worker nor already carrying the scale label.
        worker_list = node.get_worker_nodes()
        ocs_worker_list = machine.get_labeled_nodes(
            constants.OPERATOR_NODE_LABEL)
        scale_label_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
        ocs_worker_list.extend(scale_label_worker)
        final_list = list(dict.fromkeys(ocs_worker_list))
        for node_item in final_list:
            if node_item in worker_list:
                worker_list.remove(node_item)
        if worker_list:
            helpers.label_worker_node(node_list=worker_list,
                                      label_key="scale-label",
                                      label_value="app-scale")
        return True
    elif (config.ENV_DATA["deployment_type"] == "upi"
          and config.ENV_DATA["platform"].lower() == "vsphere"):
        log.info("Running scale test on existing worker nodes.")
    elif (config.ENV_DATA["deployment_type"] == "upi"
          and config.ENV_DATA["platform"].lower() == "baremetal"):
        log.info("Running scale test on existing worker nodes.")
    elif (config.ENV_DATA["deployment_type"] == "upi"
          and config.ENV_DATA["platform"].lower() == "azure"):
        raise UnsupportedPlatformError("Unsupported Platform")
コード例 #15
0
def add_worker_node(instance_type=None):
    """
    Add app worker nodes to the cluster and label them for scale workloads.

    On AWS IPI clusters this creates app machinesets (one per zone when the
    cluster has three machinesets, otherwise a single one in zone 'a'), waits
    for the new nodes to become ready and labels all non-OCS workers with
    'scale-label=app-scale'. On vSphere/baremetal UPI clusters the existing
    workers are reused as-is; Azure UPI is unsupported.

    Args:
        instance_type (str): AWS instance type for the new machinesets.
            Defaults to 'm5.4xlarge'.

    Returns:
        bool: True when new AWS worker nodes were added; None on the
            UPI vSphere/baremetal paths.

    Raises:
        UnsupportedPlatformError: For UPI deployments on Azure.
    """
    # ms_name stays global so callers/teardown can find and delete the
    # machinesets created here.
    global ms_name
    ms_name = list()
    worker_list = helpers.get_worker_nodes()
    ocs_worker_list = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    scale_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
    if config.RUN.get('use_ocs_worker_for_scale'):
        # Scale pods may share the OCS workers: label every worker node.
        if not scale_worker:
            helpers.label_worker_node(node_list=worker_list,
                                      label_key='scale-label',
                                      label_value='app-scale')
    else:
        # Keep OCS workers dedicated: label only the non-OCS workers.
        if not scale_worker:
            for node_item in ocs_worker_list:
                # Guard the removal: an OCS-labeled node that is not in the
                # worker list would otherwise raise ValueError (the analogous
                # loop further below already guards this way).
                if node_item in worker_list:
                    worker_list.remove(node_item)
            if worker_list:
                helpers.label_worker_node(node_list=worker_list,
                                          label_key='scale-label',
                                          label_value='app-scale')
    scale_worker_list = machine.get_labeled_nodes(constants.SCALE_LABEL)
    # Use the module logger consistently (was logging.info).
    log.info(f"Print existing scale worker {scale_worker_list}")

    if config.ENV_DATA['deployment_type'] == 'ipi' and config.ENV_DATA[
            'platform'].lower() == 'aws':
        log.info("Adding worker nodes on the current cluster")
        # Reuse any machinesets already created for app workers.
        for obj in machine.get_machineset_objs():
            if 'app' in obj.name:
                ms_name.append(obj.name)
        # Collapse the former no-op "instance_type = instance_type" branch.
        if instance_type is None:
            instance_type = 'm5.4xlarge'
        if not ms_name:
            # Three machinesets implies a 3-zone cluster: create one app
            # machineset per zone; otherwise a single one in zone 'a'.
            if len(machine.get_machineset_objs()) == 3:
                for zone in ['a', 'b', 'c']:
                    ms_name.append(
                        machine.create_custom_machineset(
                            instance_type=instance_type, zone=zone))
            else:
                ms_name.append(
                    machine.create_custom_machineset(
                        instance_type=instance_type, zone='a'))
            for ms in ms_name:
                machine.wait_for_new_node_to_be_ready(ms)

        # Label the freshly added workers: everything that is neither an
        # OCS worker nor already carrying the scale label.
        worker_list = helpers.get_worker_nodes()
        ocs_worker_list = machine.get_labeled_nodes(
            constants.OPERATOR_NODE_LABEL)
        scale_label_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
        ocs_worker_list.extend(scale_label_worker)
        # dict.fromkeys de-duplicates while preserving order.
        final_list = list(dict.fromkeys(ocs_worker_list))
        for node_item in final_list:
            if node_item in worker_list:
                worker_list.remove(node_item)
        if worker_list:
            helpers.label_worker_node(node_list=worker_list,
                                      label_key='scale-label',
                                      label_value='app-scale')
        return True
    elif config.ENV_DATA['deployment_type'] == 'upi' and config.ENV_DATA[
            'platform'].lower() == 'vsphere':
        log.info('Running pgsql on existing worker nodes')
    elif config.ENV_DATA['deployment_type'] == 'upi' and config.ENV_DATA[
            'platform'].lower() == 'baremetal':
        log.info('Running pgsql on existing worker nodes')
    elif config.ENV_DATA['deployment_type'] == 'upi' and config.ENV_DATA[
            'platform'].lower() == 'azure':
        raise UnsupportedPlatformError("Unsupported Platform")