Example 1
    def deploy_ocp(self, log_cli_level='DEBUG'):
        """
        Deployment specific to OCP cluster on this platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")
        """
        if not config.DEPLOYMENT.get('force_deploy_multiple_clusters'):
            cluster_name = config.ENV_DATA['cluster_name']
            cluster_name_parts = cluster_name.split("-")
            prefix = cluster_name_parts[0]
            if prefix.lower() in CLUSTER_PREFIXES_SPECIAL_RULES:
                # if the prefix is a cleanup special rule, use the next part of
                # the cluster name as the prefix
                prefix = cluster_name_parts[1]
            prefix += "*"

            if self.check_cluster_existence(prefix):
                raise exceptions.SameNamePrefixClusterAlreadyExistsException(
                    f"Cluster with name prefix {prefix} already exists. "
                    f"Please destroy the existing cluster for a new cluster "
                    f"deployment")
        super(AWSIPI, self).deploy_ocp(log_cli_level)
        if config.DEPLOYMENT.get('host_network'):
            self.host_network_update()
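
The constant CLUSTER_PREFIXES_SPECIAL_RULES itself is not shown; the membership check above only requires its keys to be lowercase name prefixes that carry special cleanup rules. A hypothetical sketch of its shape (the prefixes and rule descriptions below are placeholders, not the real values):

    # Hypothetical shape of CLUSTER_PREFIXES_SPECIAL_RULES: lowercase cluster
    # name prefixes with special cleanup rules. When a cluster name starts
    # with one of these, the next dash-separated part is used as the
    # effective prefix instead. Keys and values are placeholders.
    CLUSTER_PREFIXES_SPECIAL_RULES = {
        "jnk": "short-lived junk clusters, aggressive cleanup",
        "dnd": "do-not-delete clusters, skipped by cleanup",
    }
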
Example 2
    def deploy_ocp(self, log_cli_level="DEBUG"):
        """
        Deployment specific to OCP cluster on vSphere platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")

        """
        cluster_name_parts = config.ENV_DATA.get("cluster_name").split("-")
        prefix = cluster_name_parts[0]
        if not (
            prefix.startswith(tuple(constants.PRODUCTION_JOBS_PREFIX))
            or config.DEPLOYMENT.get("force_deploy_multiple_clusters")
        ):
            if self.check_cluster_existence(prefix):
                raise exceptions.SameNamePrefixClusterAlreadyExistsException(
                    f"Cluster with name prefix {prefix} already exists. "
                    f"Please destroy the existing cluster for a new cluster "
                    f"deployment"
                )
        super(VSPHEREUPI, self).deploy_ocp(log_cli_level)
        if config.ENV_DATA.get("scale_up"):
            logger.info("Adding extra nodes to cluster")
            self.add_nodes()

        # remove RHCOS compute nodes
        if config.ENV_DATA.get("scale_up") and not config.ENV_DATA.get("mixed_cluster"):
            rhcos_nodes = get_typed_worker_nodes()
            logger.info(
                f"RHCOS compute nodes to delete: "
                f"{[node.name for node in rhcos_nodes]}"
            )
            logger.info("Removing RHCOS compute nodes from a cluster")
            remove_nodes(rhcos_nodes)

        if config.DEPLOYMENT.get("thick_sc"):
            sc_data = templating.load_yaml(constants.VSPHERE_THICK_STORAGECLASS_YAML)
            sc_data_yaml = tempfile.NamedTemporaryFile(
                mode="w+", prefix="storageclass", delete=False
            )
            if config.DEPLOYMENT.get("eager_zeroed_thick_sc"):
                sc_data["parameters"]["diskformat"] = "eagerzeroedthick"
            else:
                sc_data["parameters"]["diskformat"] = "zeroedthick"
            templating.dump_data_to_temp_yaml(sc_data, sc_data_yaml.name)
            run_cmd(f"oc create -f {sc_data_yaml.name}")
            self.DEFAULT_STORAGECLASS = "thick"
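
The template behind constants.VSPHERE_THICK_STORAGECLASS_YAML is not included in the example; the code only relies on it providing a parameters.diskformat key. A minimal sketch of the structure templating.load_yaml() might return for it (field values other than diskformat are assumptions):

    # Hypothetical structure of the loaded thick StorageClass template. The
    # example above only modifies sc_data["parameters"]["diskformat"]; the
    # remaining fields are assumed for illustration.
    sc_data = {
        "apiVersion": "storage.k8s.io/v1",
        "kind": "StorageClass",
        "metadata": {"name": "thick"},
        "provisioner": "kubernetes.io/vsphere-volume",
        "parameters": {"diskformat": "zeroedthick"},
    }
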
Example 3
    def deploy_ocp(self, log_cli_level="DEBUG"):
        """
        Deployment specific to OCP cluster on vSphere platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")

        """
        cluster_name_parts = config.ENV_DATA.get("cluster_name").split("-")
        prefix = cluster_name_parts[0]
        if not (
            prefix.startswith(tuple(constants.PRODUCTION_JOBS_PREFIX))
            or config.DEPLOYMENT.get("force_deploy_multiple_clusters")
        ):
            if self.check_cluster_existence(prefix):
                raise exceptions.SameNamePrefixClusterAlreadyExistsException(
                    f"Cluster with name prefix {prefix} already exists. "
                    f"Please destroy the existing cluster for a new cluster "
                    f"deployment"
                )
        super(VSPHEREUPI, self).deploy_ocp(log_cli_level)
        if config.ENV_DATA.get("scale_up"):
            logger.info("Adding extra nodes to cluster")
            self.add_nodes()

        # remove RHCOS compute nodes
        if config.ENV_DATA.get("scale_up") and not config.ENV_DATA.get("mixed_cluster"):
            rhcos_nodes = get_typed_worker_nodes()
            logger.info(
                f"RHCOS compute nodes to delete: "
                f"{[node.name for node in rhcos_nodes]}"
            )
            logger.info("Removing RHCOS compute nodes from a cluster")
            remove_nodes(rhcos_nodes)

        # get datastore type and configure chrony for all nodes ONLY if
        # the datastore type is not VMFS (e.g. vSAN)
        datastore_type = self.vsphere.get_datastore_type_by_name(
            self.datastore, self.datacenter
        )
        if datastore_type != constants.VMFS:
            configure_chrony_and_wait_for_machineconfig_status(
                node_type="all", timeout=1800
            )
Example 4
    def deploy_ocp(self, log_cli_level='DEBUG'):
        """
        Deployment specific to OCP cluster on this platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")
        """
        if not config.DEPLOYMENT.get('force_deploy_multiple_clusters'):
            cluster_name = config.ENV_DATA['cluster_name']
            prefix = cluster_name.split("-")[0] + '*'
            if self.check_cluster_existence(prefix):
                raise exceptions.SameNamePrefixClusterAlreadyExistsException(
                    f"Cluster with name prefix {prefix} already exists. "
                    f"Please destroy the existing cluster for a new cluster "
                    f"deployment")
        super(AWSIPI, self).deploy_ocp(log_cli_level)
        if config.DEPLOYMENT.get('host_network'):
            self.host_network_update()
Example 5
    def deploy_ocp(self, log_cli_level="DEBUG"):
        """
        Deployment specific to OCP cluster on a cloud platform.

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")

        """
        if not config.DEPLOYMENT.get("force_deploy_multiple_clusters"):
            prefix = get_cluster_prefix(
                self.cluster_name, self.cluster_prefixes_special_rules
            )
            if self.check_cluster_existence(prefix):
                raise exceptions.SameNamePrefixClusterAlreadyExistsException(
                    f"Cluster with name prefix {prefix} already exists. "
                    f"Please destroy the existing cluster for a new cluster "
                    f"deployment"
                )
        super(CloudDeploymentBase, self).deploy_ocp(log_cli_level)
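
get_cluster_prefix is not shown in this example, but Examples 1 and 7 inline equivalent logic. A sketch of such a helper inferred from that inline code (the real function may differ in location and details):

    def get_cluster_prefix(cluster_name, special_rules):
        """
        Sketch inferred from the inline logic in Examples 1 and 7: take the
        first dash-separated part of the cluster name; if it matches a
        special cleanup rule, use the next part as the effective prefix.
        """
        cluster_name_parts = cluster_name.split("-")
        prefix = cluster_name_parts[0]
        if prefix.lower() in special_rules:
            prefix = cluster_name_parts[1]
        return prefix
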
Example 6
    def deploy_ocp(self, log_cli_level="DEBUG"):
        """
        Deployment specific to OCP cluster in on-premise platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")

        """
        cluster_name_parts = config.ENV_DATA.get("cluster_name").split("-")
        prefix = cluster_name_parts[0]
        if not (
            prefix.startswith(tuple(constants.PRODUCTION_JOBS_PREFIX))
            or config.DEPLOYMENT.get("force_deploy_multiple_clusters")
        ):
            if self.check_cluster_existence(prefix):
                raise exceptions.SameNamePrefixClusterAlreadyExistsException(
                    f"Cluster with name prefix {prefix} already exists. "
                    f"Please destroy the existing cluster for a new cluster "
                    f"deployment"
                )
        super(OnPremDeploymentBase, self).deploy_ocp(log_cli_level)
Example 7
    def deploy_ocp(self, log_cli_level='DEBUG'):
        """
        Deployment specific to OCP cluster on a cloud platform.

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")

        """
        if not config.DEPLOYMENT.get('force_deploy_multiple_clusters'):
            cluster_name_parts = self.cluster_name.split("-")
            prefix = cluster_name_parts[0]
            if prefix.lower() in self.cluster_prefixes_special_rules:
                # if the prefix is a cleanup special rule, use the next part of
                # the cluster name as the prefix
                prefix = cluster_name_parts[1]
            if self.check_cluster_existence(prefix):
                raise exceptions.SameNamePrefixClusterAlreadyExistsException(
                    f"Cluster with name prefix {prefix} already exists. "
                    f"Please destroy the existing cluster for a new cluster "
                    f"deployment")
        super(CloudDeploymentBase, self).deploy_ocp(log_cli_level)
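
Every variant raises SameNamePrefixClusterAlreadyExistsException when a cluster with the same name prefix is found and force_deploy_multiple_clusters is not set. A hypothetical caller-side sketch of handling that failure ("deployment" stands in for any of the platform classes above and is not defined in these examples):

    # Hypothetical caller-side handling of the prefix collision raised by the
    # deploy_ocp() variants above; "deployment" and "logger" are assumed to
    # exist in the calling context.
    try:
        deployment.deploy_ocp(log_cli_level="DEBUG")
    except exceptions.SameNamePrefixClusterAlreadyExistsException:
        logger.error(
            "A cluster with the same name prefix already exists; destroy it "
            "or set DEPLOYMENT['force_deploy_multiple_clusters'] to deploy "
            "another one."
        )
        raise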