Code Example #1
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster for AWS UPI

        Args:
            log_level (str): log level for openshift-installer (
                default:DEBUG)

        """
        cluster_name = get_cluster_name(self.cluster_path)
        if config.ENV_DATA.get('rhel_workers'):
            terminate_rhel_workers(get_rhel_worker_instances(
                self.cluster_path))

        # Destroy extra volumes
        destroy_volumes(cluster_name)

        # Delete master, bootstrap, security group, and worker stacks
        suffixes = ['ma', 'bs', 'sg']

        num_workers = config.ENV_DATA['worker_replicas']
        for i in range(num_workers - 1, -1, -1):
            suffixes.insert(0, f'no{i}')

        stack_names = [f'{cluster_name}-{suffix}' for suffix in suffixes]
        self.aws.delete_cloudformation_stacks(stack_names)

        # Call openshift-installer destroy cluster
        super(AWSUPI, self).destroy_cluster(log_level)

        # Delete inf and vpc stacks
        suffixes = ['inf', 'vpc']
        stack_names = [f'{cluster_name}-{suffix}' for suffix in suffixes]
        self.aws.delete_cloudformation_stacks(stack_names)
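
All of these snippets resolve the cluster name with get_cluster_name(cluster_path). The helper itself is not shown on this page; the sketch below illustrates what such a lookup might look like, assuming the installer leaves a metadata.json file with a "clusterName" key in the cluster directory. Both the file name and the key are assumptions, although the FileNotFoundError handling in Code Examples #8 and #9 is consistent with reading a file from that path.

import json
import os


def get_cluster_name(cluster_path):
    """
    Sketch only: return the cluster name recorded in the cluster directory.

    Assumes the openshift-installer metadata file (metadata.json) lives in
    cluster_path and carries a "clusterName" key; a missing file raises
    FileNotFoundError, which callers such as Code Examples #8 and #9 catch.
    """
    metadata_file = os.path.join(cluster_path, "metadata.json")
    with open(metadata_file) as metadata_fd:
        metadata = json.load(metadata_fd)
    return metadata["clusterName"]
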
Code Example #2
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster for AWS UPI

        Args:
            log_level (str): log level for openshift-installer (
                default:DEBUG)

        """
        cluster_name = get_cluster_name(self.cluster_path)
        if config.ENV_DATA.get('rhel_workers'):
            terminate_rhel_workers(get_rhel_worker_instances(
                self.cluster_path))

        # Destroy extra volumes
        destroy_volumes(cluster_name)

        # Delete master, bootstrap, security group, and worker stacks
        suffixes = ['ma', 'bs', 'sg']

        stack_names = self.aws.get_worker_stacks()
        stack_names.sort()
        stack_names.reverse()
        stack_names.extend([f'{cluster_name}-{s}' for s in suffixes])
        logger.info(f"Deleting stacks: {stack_names}")
        self.aws.delete_cloudformation_stacks(stack_names)

        # Call openshift-installer destroy cluster
        super(AWSUPI, self).destroy_cluster(log_level)

        # Delete inf and vpc stacks
        suffixes = ['inf', 'vpc']
        stack_names = [f'{cluster_name}-{suffix}' for suffix in suffixes]
        self.aws.delete_cloudformation_stacks(stack_names)
Code Example #3
 def __init__(self):
     """
     Base class for both IPI and UPI deployments
     """
     super(AWSBase, self).__init__()
     self.region = config.ENV_DATA['region']
     self.aws = AWSUtil(self.region)
     if config.ENV_DATA.get('cluster_name'):
         self.cluster_name = config.ENV_DATA['cluster_name']
     else:
         self.cluster_name = get_cluster_name(self.cluster_path)
Code Example #4
File: platform_nodes.py Project: ekuric/ocs-ci
 def __init__(self, node_conf, node_type):
     super(AWSUPINode, self).__init__()
     self.node_conf = node_conf
     #  RHEL/RHCOS
     self.node_type = node_type
     #  This variable will hold the AWS instance object
     self.aws_instance_obj = None
     self.region = config.ENV_DATA['region']
     self.cluster_name = get_cluster_name(self.cluster_path)
     self.client = boto3.client('ec2', region_name=self.region)
     # cloudformation
     self.cf = boto3.client('cloudformation', region_name=self.region)
Code Example #5
 def __init__(self):
     """
     Any cloud platform deployment requires region and cluster name.
     """
     super(CloudDeploymentBase, self).__init__()
     self.region = config.ENV_DATA["region"]
     if config.ENV_DATA.get("cluster_name"):
         self.cluster_name = config.ENV_DATA["cluster_name"]
     else:
         self.cluster_name = get_cluster_name(self.cluster_path)
     # dict of cluster prefixes with special handling rules (for existence
     # check or during a cluster cleanup)
     self.cluster_prefixes_special_rules = {}
Code Example #6
File: aws.py Project: AaruniAggarwal/ocs-ci
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster for AWS UPI

        Args:
            log_level (str): log level for openshift-installer (
                default:DEBUG)

        """
        cluster_name = get_cluster_name(self.cluster_path)
        if config.ENV_DATA.get("rhel_workers"):
            terminate_rhel_workers(get_rhel_worker_instances(
                self.cluster_path))

        # Destroy extra volumes
        destroy_volumes(cluster_name)

        # Destroy buckets
        delete_cluster_buckets(self.cluster_name)

        self.aws.delete_apps_record_set()

        # Delete master, bootstrap, security group, and worker stacks
        suffixes = ["ma", "bs", "sg"]

        stack_names = self.aws.get_worker_stacks()
        stack_names.sort()
        stack_names.reverse()
        stack_names.extend([f"{cluster_name}-{s}" for s in suffixes])
        logger.info(f"Deleting stacks: {stack_names}")
        self.aws.delete_cloudformation_stacks(stack_names)

        # Call openshift-installer destroy cluster
        super(AWSUPI, self).destroy_cluster(log_level)

        # Delete inf and vpc stacks
        suffixes = ["inf", "vpc"]
        stack_names = [f"{cluster_name}-{suffix}" for suffix in suffixes]
        self.aws.delete_cloudformation_stacks(stack_names)
Code Example #7
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster for AWS UPI

        Args:
            log_level (str): log level for openshift-installer (
                default:DEBUG)
        """
        cluster_name = get_cluster_name(self.cluster_path)
        if config.ENV_DATA.get('rhel_workers'):
            self.terminate_rhel_workers(self.get_rhel_worker_instances())
        # Destroy extra volumes
        self.destroy_volumes()

        # Create cloudformation client
        cf = boto3.client('cloudformation', region_name=self.region)

        # Delete master, bootstrap, security group, and worker stacks
        suffixes = ['ma', 'bs', 'sg']
        # TODO: read in num_workers in a better way
        num_workers = int(os.environ.get('num_workers', 3))
        for i in range(num_workers - 1, -1, -1):
            suffixes.insert(0, f'no{i}')
        stack_names = [f'{cluster_name}-{suffix}' for suffix in suffixes]
        for stack_name in stack_names:
            logger.info("Destroying stack: %s", stack_name)
            cf.delete_stack(StackName=stack_name)
            verify_stack_deleted(stack_name)

        # Call openshift-installer destroy cluster
        super(AWSUPI, self).destroy_cluster(log_level)

        # Delete inf and vpc stacks
        suffixes = ['inf', 'vpc']
        stack_names = [f'{cluster_name}-{suffix}' for suffix in suffixes]
        for stack_name in stack_names:
            logger.info("Destroying stack: %s", stack_name)
            cf.delete_stack(StackName=stack_name)
            verify_stack_deleted(stack_name)
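
Code Example #7 waits for each stack to disappear through a verify_stack_deleted helper that is not part of the snippet. The following is a minimal sketch of such a check, assuming boto3's built-in stack_delete_complete waiter is an acceptable implementation; the project's actual helper may use custom polling instead.

import boto3
from botocore.exceptions import WaiterError


def verify_stack_deleted(stack_name, region_name=None):
    """
    Sketch only: block until CloudFormation reports the stack as deleted.

    Uses boto3's built-in 'stack_delete_complete' waiter, which polls
    DescribeStacks until the stack reaches DELETE_COMPLETE or no longer
    exists. The region_name parameter is an assumption here; when omitted,
    boto3 falls back to the default region from the environment.
    """
    cf = boto3.client('cloudformation', region_name=region_name)
    waiter = cf.get_waiter('stack_delete_complete')
    try:
        waiter.wait(StackName=stack_name)
    except WaiterError as err:
        raise RuntimeError(f"Stack {stack_name} was not deleted") from err
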
Code Example #8
File: ocscilib.py Project: sidhant-agrawal/ocs-ci
def process_cluster_cli_params(config):
    """
    Process cluster related cli parameters

    Args:
        config (pytest.config): Pytest config object

    Raises:
        ClusterPathNotProvidedError: If a cluster path is missing
        ClusterNameNotProvidedError: If a cluster name is missing
        ClusterNameLengthError: If a cluster name is too short or too long
    """
    suffix = ocsci_config.cur_index + 1 if ocsci_config.multicluster else ""
    cluster_path = get_cli_param(config, f"cluster_path{suffix}")
    if not cluster_path:
        raise ClusterPathNotProvidedError()
    cluster_path = os.path.expanduser(cluster_path)
    if not os.path.exists(cluster_path):
        os.makedirs(cluster_path)
    # Import here because, by the time this function is invoked, the config
    # is already loaded, so it is safe to import at this point.
    from ocs_ci.ocs.openshift_ops import OCP

    OCP.set_kubeconfig(
        os.path.join(cluster_path, ocsci_config.RUN["kubeconfig_location"])
    )
    ocsci_config.RUN["kubeconfig"] = os.path.join(
        cluster_path, ocsci_config.RUN["kubeconfig_location"]
    )
    cluster_name = get_cli_param(config, f"cluster_name{suffix}")
    ocsci_config.RUN["cli_params"]["teardown"] = get_cli_param(
        config, "teardown", default=False
    )
    ocsci_config.RUN["cli_params"]["deploy"] = get_cli_param(
        config, "deploy", default=False
    )
    live_deployment = get_cli_param(
        config, "live_deploy", default=False
    ) or ocsci_config.DEPLOYMENT.get("live_deployment", False)
    ocsci_config.DEPLOYMENT["live_deployment"] = live_deployment
    if live_deployment:
        update_live_must_gather_image()
    io_in_bg = get_cli_param(config, "io_in_bg")
    if io_in_bg:
        ocsci_config.RUN["io_in_bg"] = True
        io_load = get_cli_param(config, "io_load")
        if io_load:
            ocsci_config.RUN["io_load"] = io_load
    log_utilization = get_cli_param(config, "log_cluster_utilization")
    if log_utilization:
        ocsci_config.RUN["log_utilization"] = True
    upgrade_ocs_version = get_cli_param(config, "upgrade_ocs_version")
    if upgrade_ocs_version:
        ocsci_config.UPGRADE["upgrade_ocs_version"] = upgrade_ocs_version
    ocs_registry_image = get_cli_param(config, f"ocs_registry_image{suffix}")
    if ocs_registry_image:
        ocsci_config.DEPLOYMENT["ocs_registry_image"] = ocs_registry_image
    upgrade_ocs_registry_image = get_cli_param(config, "upgrade_ocs_registry_image")
    if upgrade_ocs_registry_image:
        ocsci_config.UPGRADE["upgrade_ocs_registry_image"] = upgrade_ocs_registry_image
    ocsci_config.ENV_DATA["cluster_name"] = cluster_name
    ocsci_config.ENV_DATA["cluster_path"] = cluster_path
    get_cli_param(config, "collect-logs")
    if ocsci_config.RUN.get("cli_params").get("deploy"):
        if not cluster_name:
            raise ClusterNameNotProvidedError()
        if (
            len(cluster_name) < CLUSTER_NAME_MIN_CHARACTERS
            or len(cluster_name) > CLUSTER_NAME_MAX_CHARACTERS
        ):
            raise ClusterNameLengthError(cluster_name)
    elif not cluster_name:
        try:
            ocsci_config.ENV_DATA["cluster_name"] = get_cluster_name(cluster_path)
        except FileNotFoundError:
            raise ClusterNameNotProvidedError()
    if get_cli_param(config, "email") and not get_cli_param(config, "--html"):
        pytest.exit("--html option must be provided to send email reports")
    get_cli_param(config, "squad_analysis")
    get_cli_param(config, "-m")
    osd_size = get_cli_param(config, "--osd-size")
    if osd_size:
        ocsci_config.ENV_DATA["device_size"] = osd_size
    ocp_version = get_cli_param(config, "--ocp-version")
    if ocp_version:
        version_config_file = f"ocp-{ocp_version}-config.yaml"
        version_config_file_path = os.path.join(
            OCP_VERSION_CONF_DIR, version_config_file
        )
        load_config_file(version_config_file_path)
    upgrade_ocp_version = get_cli_param(config, "--upgrade-ocp-version")
    if upgrade_ocp_version:
        version_config_file = f"ocp-{upgrade_ocp_version}-upgrade.yaml"
        version_config_file_path = os.path.join(
            OCP_VERSION_CONF_DIR, version_config_file
        )
        load_config_file(version_config_file_path)
    upgrade_ocp_image = get_cli_param(config, "--upgrade-ocp-image")
    if upgrade_ocp_image:
        ocp_image = upgrade_ocp_image.rsplit(":", 1)
        ocsci_config.UPGRADE["ocp_upgrade_path"] = ocp_image[0]
        ocsci_config.UPGRADE["ocp_upgrade_version"] = ocp_image[1]
    ocp_installer_version = get_cli_param(config, "--ocp-installer-version")
    if ocp_installer_version:
        ocsci_config.DEPLOYMENT["installer_version"] = ocp_installer_version
        ocsci_config.RUN["client_version"] = ocp_installer_version
    csv_change = get_cli_param(config, "--csv-change")
    if csv_change:
        csv_change = csv_change.split("::")
        ocsci_config.DEPLOYMENT["csv_change_from"] = csv_change[0]
        ocsci_config.DEPLOYMENT["csv_change_to"] = csv_change[1]
    collect_logs_on_success_run = get_cli_param(config, "collect_logs_on_success_run")
    if collect_logs_on_success_run:
        ocsci_config.REPORTING["collect_logs_on_success_run"] = True
    get_cli_param(config, "dev_mode")
    ceph_debug = get_cli_param(config, "ceph_debug")
    if ceph_debug:
        ocsci_config.DEPLOYMENT["ceph_debug"] = True
    skip_download_client = get_cli_param(config, "skip_download_client")
    if skip_download_client:
        ocsci_config.DEPLOYMENT["skip_download_client"] = True
    re_trigger_failed_tests = get_cli_param(config, "--re-trigger-failed-tests")
    if re_trigger_failed_tests:
        ocsci_config.RUN["re_trigger_failed_tests"] = os.path.expanduser(
            re_trigger_failed_tests
        )
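
Code Examples #8 and #9 read every command-line option through get_cli_param, whose implementation is not shown here. The sketch below assumes the helper is a thin wrapper around pytest's Config.getoption with a fallback default; the real helper in ocs-ci may also consult its own configuration sources.

def get_cli_param(config, name, default=None):
    """
    Sketch only: return a pytest CLI option value, or default when unset.

    pytest's Config.getoption accepts either an option's dest name
    ("cluster_path") or its flag form ("--osd-size") and returns the
    supplied default when the option is not present.
    """
    value = config.getoption(name, default=default)
    return default if value is None else value
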
Code Example #9
File: ocscilib.py Project: xenolinux/ocs-ci
def process_cluster_cli_params(config):
    """
    Process cluster related cli parameters

    Args:
        config (pytest.config): Pytest config object

    Raises:
        ClusterPathNotProvidedError: If a cluster path is missing
        ClusterNameNotProvidedError: If a cluster name is missing
        ClusterNameLengthError: If a cluster name is too short or too long
    """
    cluster_path = get_cli_param(config, 'cluster_path')
    if not cluster_path:
        raise ClusterPathNotProvidedError()
    cluster_path = os.path.expanduser(cluster_path)
    if not os.path.exists(cluster_path):
        os.makedirs(cluster_path)
    # Import here because, by the time this function is invoked, the config
    # is already loaded, so it is safe to import at this point.
    from ocs_ci.ocs.openshift_ops import OCP
    OCP.set_kubeconfig(
        os.path.join(cluster_path, ocsci_config.RUN['kubeconfig_location']))
    cluster_name = get_cli_param(config, 'cluster_name')
    ocsci_config.RUN['cli_params']['teardown'] = get_cli_param(config,
                                                               "teardown",
                                                               default=False)
    ocsci_config.RUN['cli_params']['deploy'] = get_cli_param(config,
                                                             "deploy",
                                                             default=False)
    live_deployment = get_cli_param(config, "live_deploy", default=False)
    ocsci_config.DEPLOYMENT['live_deployment'] = live_deployment or (
        ocsci_config.DEPLOYMENT.get('live_deployment', False))
    io_in_bg = get_cli_param(config, 'io_in_bg')
    if io_in_bg:
        ocsci_config.RUN['io_in_bg'] = True
        io_load = get_cli_param(config, 'io_load')
        if io_load:
            ocsci_config.RUN['io_load'] = io_load
    log_utilization = get_cli_param(config, 'log_cluster_utilization')
    if log_utilization:
        ocsci_config.RUN['log_utilization'] = True
    upgrade_ocs_version = get_cli_param(config, "upgrade_ocs_version")
    if upgrade_ocs_version:
        ocsci_config.UPGRADE['upgrade_ocs_version'] = upgrade_ocs_version
    ocs_registry_image = get_cli_param(config, "ocs_registry_image")
    if ocs_registry_image:
        ocsci_config.DEPLOYMENT['ocs_registry_image'] = ocs_registry_image
    upgrade_ocs_registry_image = get_cli_param(config,
                                               "upgrade_ocs_registry_image")
    if upgrade_ocs_registry_image:
        ocsci_config.UPGRADE[
            'upgrade_ocs_registry_image'] = upgrade_ocs_registry_image
    ocsci_config.ENV_DATA['cluster_name'] = cluster_name
    ocsci_config.ENV_DATA['cluster_path'] = cluster_path
    get_cli_param(config, 'collect-logs')
    if ocsci_config.RUN.get("cli_params").get("deploy"):
        if not cluster_name:
            raise ClusterNameNotProvidedError()
        if (len(cluster_name) < CLUSTER_NAME_MIN_CHARACTERS
                or len(cluster_name) > CLUSTER_NAME_MAX_CHARACTERS):
            raise ClusterNameLengthError(cluster_name)
    elif not cluster_name:
        try:
            ocsci_config.ENV_DATA['cluster_name'] = get_cluster_name(
                cluster_path)
        except FileNotFoundError:
            raise ClusterNameNotProvidedError()
    if get_cli_param(config, 'email') and not get_cli_param(config, '--html'):
        pytest.exit("--html option must be provided to send email reports")
    get_cli_param(config, 'squad_analysis')
    get_cli_param(config, '-m')
    osd_size = get_cli_param(config, '--osd-size')
    if osd_size:
        ocsci_config.ENV_DATA['device_size'] = osd_size
    ocp_version = get_cli_param(config, '--ocp-version')
    if ocp_version:
        version_config_file = f"ocp-{ocp_version}-config.yaml"
        version_config_file_path = os.path.join(OCP_VERSION_CONF_DIR,
                                                version_config_file)
        load_config_file(version_config_file_path)
    ocp_installer_version = get_cli_param(config, '--ocp-installer-version')
    if ocp_installer_version:
        ocsci_config.DEPLOYMENT['installer_version'] = ocp_installer_version
        ocsci_config.RUN['client_version'] = ocp_installer_version
    csv_change = get_cli_param(config, '--csv-change')
    if csv_change:
        csv_change = csv_change.split("::")
        ocsci_config.DEPLOYMENT['csv_change_from'] = csv_change[0]
        ocsci_config.DEPLOYMENT['csv_change_to'] = csv_change[1]
    collect_logs_on_success_run = get_cli_param(config,
                                                'collect_logs_on_success_run')
    if collect_logs_on_success_run:
        ocsci_config.REPORTING['collect_logs_on_success_run'] = True
Code Example #10
File: on_prem.py Project: yosibsh/ocs-ci
 def __init__(self):
     super(OnPremDeploymentBase, self).__init__()
     if config.ENV_DATA.get("cluster_name"):
         self.cluster_name = config.ENV_DATA["cluster_name"]
     else:
         self.cluster_name = get_cluster_name(self.cluster_path)