Example #1
    def create_nodes(self, node_conf, node_type, num_nodes):
        """
        Create AWS instances for the requested nodes.

        Args:
            node_conf (dict): Node configuration
            node_type (str): Type of node to be created: RHCOS or RHEL
            num_nodes (int): Number of node instances to be created

        Returns:
            list: of AWSUPINode/AWSIPINode objects

        """
        node_list = []
        node_cls = self.nodes_map[
            f'{self.platform.upper()}{self.deployment_type.upper()}Node'
        ]

        rhel_cnt = len(get_typed_worker_nodes('rhel'))
        rhcos_cnt = len(get_typed_worker_nodes('rhcos'))
        for i in range(num_nodes):
            node_id = rhel_cnt + rhcos_cnt + i
            node_list.append(node_cls(node_conf, node_type))
            node_list[i]._prepare_node(node_id)

        return node_list
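
Every example in this section relies on get_typed_worker_nodes, which is not
shown here. The sketch below is a minimal reconstruction consistent with its
call sites (a default os_id of "rhcos", an os_id="rhel" variant, and OCS node
objects as return values); the real ocs-ci helper may differ:

def get_typed_worker_nodes(os_id="rhcos"):
    """
    Sketch: return worker nodes whose OS matches os_id ("rhcos" or "rhel").
    Assumes an existing get_nodes() helper returning OCS node objects.
    """
    worker_nodes = get_nodes(node_type="worker")
    return [
        node for node in worker_nodes
        # node.openshift.io/os_id is the standard OpenShift node label
        if node.get()["metadata"]["labels"].get("node.openshift.io/os_id") == os_id
    ]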
Example #2
    def deploy_ocp(self, log_cli_level='DEBUG'):
        """
        Deployment specific to OCP cluster on vSphere platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")

        """
        super(VSPHEREUPI, self).deploy_ocp(log_cli_level)
        if config.ENV_DATA.get('scale_up'):
            logger.info("Adding extra nodes to cluster")
            self.add_nodes()

        # remove RHCOS compute nodes
        if (
                config.ENV_DATA.get('scale_up')
                and not config.ENV_DATA.get('mixed_cluster')
        ):
            rhcos_nodes = get_typed_worker_nodes()
            logger.info(
                f"RHCOS compute nodes to delete: "
                f"{[node.name for node in rhcos_nodes]}"
            )
            logger.info("Removing RHCOS compute nodes from a cluster")
            remove_nodes(rhcos_nodes)
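
remove_nodes is likewise external to these snippets. On OpenShift, removing
compute nodes generally means draining each node so its workloads reschedule
and then deleting it; a hedged sketch of that flow (illustrative only, not the
actual remove_nodes implementation):

def remove_nodes_sketch(nodes):
    # Drain first so pods are evicted gracefully, then delete the node object.
    for node in nodes:
        run_cmd(
            f"oc adm drain {node.name} --force "
            f"--delete-emptydir-data --ignore-daemonsets"
        )
        run_cmd(f"oc delete node {node.name}")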
Example #3
    def deploy_ocp(self, log_cli_level='DEBUG'):
        """
        Deployment specific to OCP cluster on vSphere platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")

        """
        super(VSPHEREUPI, self).deploy_ocp(log_cli_level)
        if config.ENV_DATA.get('scale_up'):
            logger.info("Adding extra nodes to cluster")
            self.add_nodes()

        # remove RHCOS compute nodes
        if (config.ENV_DATA.get('scale_up')
                and not config.ENV_DATA.get('mixed_cluster')):
            rhcos_nodes = get_typed_worker_nodes()
            logger.info(f"RHCOS compute nodes to delete: "
                        f"{[node.name for node in rhcos_nodes]}")
            logger.info("Removing RHCOS compute nodes from a cluster")
            remove_nodes(rhcos_nodes)

        # get datastore type and configure chrony for all nodes ONLY if
        # the datastore type is not VMFS (e.g. vSAN)
        datastore_type = self.vsphere.get_datastore_type_by_name(
            self.datastore, self.datacenter)
        if datastore_type != constants.VMFS:
            configure_chrony_and_wait_for_machineconfig_status(node_type="all",
                                                               timeout=1800)
Example #4
    def get_default_gateway_node(self):
        """
        Return the default node to be used as submariner gateway

        Returns:
            str: Name of the gateway node

        """
        # Always return the first worker node
        return get_typed_worker_nodes()[0]
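
For context, Submariner picks its gateway by the submariner.io/gateway=true
node label. A hedged sketch of how the node returned above might be consumed
(the label is the standard Submariner convention; the deployer instance and
run_cmd helper here are assumptions):

# Assuming `deployer` is an instance of the class defining the method above:
gateway_node = deployer.get_default_gateway_node()
# Label the chosen worker so Submariner schedules its gateway pod there.
run_cmd(f"oc label node {gateway_node.name} submariner.io/gateway=true --overwrite")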
Example #5
    def deploy_ocp(self, log_cli_level="DEBUG"):
        """
        Deployment specific to OCP cluster on vSphere platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")

        """
        cluster_name_parts = config.ENV_DATA.get("cluster_name").split("-")
        prefix = cluster_name_parts[0]
        if not (
            prefix.startswith(tuple(constants.PRODUCTION_JOBS_PREFIX))
            or config.DEPLOYMENT.get("force_deploy_multiple_clusters")
        ):
            if self.check_cluster_existence(prefix):
                raise exceptions.SameNamePrefixClusterAlreadyExistsException(
                    f"Cluster with name prefix {prefix} already exists. "
                    f"Please destroy the existing cluster for a new cluster "
                    f"deployment"
                )
        super(VSPHEREUPI, self).deploy_ocp(log_cli_level)
        if config.ENV_DATA.get("scale_up"):
            logger.info("Adding extra nodes to cluster")
            self.add_nodes()

        # remove RHCOS compute nodes
        if config.ENV_DATA.get("scale_up") and not config.ENV_DATA.get("mixed_cluster"):
            rhcos_nodes = get_typed_worker_nodes()
            logger.info(
                f"RHCOS compute nodes to delete: "
                f"{[node.name for node in rhcos_nodes]}"
            )
            logger.info("Removing RHCOS compute nodes from a cluster")
            remove_nodes(rhcos_nodes)

        if config.DEPLOYMENT.get("thick_sc"):
            sc_data = templating.load_yaml(constants.VSPHERE_THICK_STORAGECLASS_YAML)
            sc_data_yaml = tempfile.NamedTemporaryFile(
                mode="w+", prefix="storageclass", delete=False
            )
            if config.DEPLOYMENT.get("eager_zeroed_thick_sc"):
                sc_data["parameters"]["diskformat"] = "eagerzeroedthick"
            else:
                sc_data["parameters"]["diskformat"] = "zeroedthick"
            templating.dump_data_to_temp_yaml(sc_data, sc_data_yaml.name)
            run_cmd(f"oc create -f {sc_data_yaml.name}")
            self.DEFAULT_STORAGECLASS = "thick"
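
The VSPHERE_THICK_STORAGECLASS_YAML template itself is not shown. A minimal
sketch of what the loaded sc_data likely looks like, following the standard
kubernetes.io/vsphere-volume provisioner schema (the exact template contents
are an assumption):

sc_data = {
    "apiVersion": "storage.k8s.io/v1",
    "kind": "StorageClass",
    "metadata": {"name": "thick"},
    "provisioner": "kubernetes.io/vsphere-volume",
    # "diskformat" is the key deploy_ocp() overrides above:
    # "zeroedthick" by default, "eagerzeroedthick" when requested.
    "parameters": {"diskformat": "zeroedthick"},
}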
Example #6
    def deploy_ocp(self, log_cli_level="DEBUG"):
        """
        Deployment specific to OCP cluster on vSphere platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")

        """
        cluster_name_parts = config.ENV_DATA.get("cluster_name").split("-")
        prefix = cluster_name_parts[0]
        if not (
            prefix.startswith(tuple(constants.PRODUCTION_JOBS_PREFIX))
            or config.DEPLOYMENT.get("force_deploy_multiple_clusters")
        ):
            if self.check_cluster_existence(prefix):
                raise exceptions.SameNamePrefixClusterAlreadyExistsException(
                    f"Cluster with name prefix {prefix} already exists. "
                    f"Please destroy the existing cluster for a new cluster "
                    f"deployment"
                )
        super(VSPHEREUPI, self).deploy_ocp(log_cli_level)
        if config.ENV_DATA.get("scale_up"):
            logger.info("Adding extra nodes to cluster")
            self.add_nodes()

        # remove RHCOS compute nodes
        if config.ENV_DATA.get("scale_up") and not config.ENV_DATA.get("mixed_cluster"):
            rhcos_nodes = get_typed_worker_nodes()
            logger.info(
                f"RHCOS compute nodes to delete: "
                f"{[node.name for node in rhcos_nodes]}"
            )
            logger.info("Removing RHCOS compute nodes from a cluster")
            remove_nodes(rhcos_nodes)

        # get datastore type and configure chrony for all nodes ONLY if
        # the datastore type is not VMFS (e.g. vSAN)
        datastore_type = self.vsphere.get_datastore_type_by_name(
            self.datastore, self.datacenter
        )
        if datastore_type != constants.VMFS:
            configure_chrony_and_wait_for_machineconfig_status(
                node_type="all", timeout=1800
            )
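
configure_chrony_and_wait_for_machineconfig_status is also not shown. On
OpenShift, configuring chrony typically means applying a MachineConfig that
writes /etc/chrony.conf via Ignition, then waiting for the MachineConfigPools
to finish rolling it out. A hedged sketch of such a MachineConfig (the name,
role, and chrony.conf content are illustrative, not the helper's actual
internals):

import base64

chrony_conf = (
    "pool 0.rhel.pool.ntp.org iburst\n"
    "driftfile /var/lib/chrony/drift\n"
    "makestep 1.0 3\n"
    "rtcsync\n"
)
machine_config = {
    "apiVersion": "machineconfiguration.openshift.io/v1",
    "kind": "MachineConfig",
    "metadata": {
        "name": "99-worker-chrony",
        "labels": {"machineconfiguration.openshift.io/role": "worker"},
    },
    "spec": {
        "config": {
            "ignition": {"version": "3.2.0"},
            "storage": {
                "files": [
                    {
                        "path": "/etc/chrony.conf",
                        "mode": 420,  # octal 0644
                        "overwrite": True,
                        "contents": {
                            "source": "data:text/plain;charset=utf-8;base64,"
                            + base64.b64encode(chrony_conf.encode()).decode()
                        },
                    }
                ]
            },
        }
    },
}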
Example #7
    def post_ocp_deploy(self):
        """
        Run the post-OCP-deployment steps we need to perform.
        """
        # Workaround for #1777384 - enable container_use_cephfs on RHEL workers
        # Ticket: RHSTOR-787, see more details in the issue: #1151
        logger.info("Running WA for ticket: RHSTOR-787")
        ocp_obj = ocp.OCP()
        cmd = ['/usr/sbin/setsebool -P container_use_cephfs on']
        workers = get_typed_worker_nodes(os_id="rhel")
        for worker in workers:
            cmd_list = cmd.copy()
            node = worker.get().get('metadata').get('name')
            logger.info(
                f"{node} is a RHEL based worker - applying '{cmd_list}'")
            # We have occasionally seen failures spawning the debug RHEL pod,
            # so use the retry decorator to keep CI more stable.
            retry(CommandFailed)(ocp_obj.exec_oc_debug_cmd)(
                node=node, cmd_list=cmd_list
            )
        # end of workaround
        self.add_stage_cert()
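
The retry(...) call above is a decorator factory applied inline rather than
with @-syntax. A minimal sketch of that pattern, assuming a signature of
retry(exceptions, tries, delay, backoff); the real ocs-ci utility may differ
in defaults and logging:

import functools
import time

def retry(exceptions, tries=4, delay=3, backoff=2):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            wait = delay
            for _ in range(tries - 1):
                try:
                    return func(*args, **kwargs)
                except exceptions:
                    time.sleep(wait)
                    wait *= backoff
            # Final attempt: let any exception propagate to the caller.
            return func(*args, **kwargs)
        return wrapper
    return decorator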
Example #8
def cluster(request, log_cli_level):
    """
    This fixture initiates deployment for both OCP and OCS clusters.
    Platform-specific deployment classes handle the fine details of the
    deployment.
    """
    log.info(f"All logs located at {ocsci_log_path()}")

    teardown = config.RUN['cli_params']['teardown']
    deploy = config.RUN['cli_params']['deploy']
    factory = dep_factory.DeploymentFactory()
    deployer = factory.get_deployment()

    # Add a finalizer to teardown the cluster after test execution is finished
    if teardown:

        def cluster_teardown_finalizer():
            deployer.destroy_cluster(log_cli_level)

        request.addfinalizer(cluster_teardown_finalizer)
        log.info("Will teardown cluster because --teardown was provided")

    # Download client
    force_download = (config.RUN['cli_params'].get('deploy')
                      and config.DEPLOYMENT['force_download_client'])
    get_openshift_client(force_download=force_download)

    if deploy:
        # Deploy cluster
        deployer.deploy_cluster(log_cli_level)
        # Workaround for #1777384 - enable container_use_cephfs on RHEL workers
        ocp_obj = ocp.OCP()
        cmd = ['/usr/sbin/setsebool -P container_use_cephfs on']
        workers = get_typed_worker_nodes(os_id="rhel")
        for worker in workers:
            cmd_list = cmd.copy()
            node = worker.get().get('metadata').get('name')
            log.info(f"{node} is a RHEL based worker - applying '{cmd_list}'")
            ocp_obj.exec_oc_debug_cmd(node=node, cmd_list=cmd_list)
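
The cluster() function above receives pytest's request object and registers a
finalizer, which marks it as a pytest fixture; its decorator is not visible in
the snippet. A typical registration would look like the sketch below (the
scope and autouse settings are assumptions):

import pytest

@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level):
    ...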