Example #1
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to AWS UPI Flexy

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        # Destroy extra volumes
        destroy_volumes(self.cluster_name)
        # Delete apps records
        self.aws.delete_apps_record_set()
        self.aws.delete_apps_record_set(from_base_domain=True)

        super(AWSUPIFlexy, self).destroy_cluster(log_level)

        # Delete master, bootstrap, security group, and worker stacks
        suffixes = ["ma", "bs", "sg"]
        stack_names = self.aws.get_worker_stacks()
        stack_names.sort()
        stack_names.reverse()
        stack_names.extend([f"{self.cluster_name}-{s}" for s in suffixes])
        logger.info(f"Deleting stacks: {stack_names}")
        self.aws.delete_cloudformation_stacks(stack_names)

        # WORKAROUND for Flexy issue:
        # https://issues.redhat.com/browse/OCPQE-1521
        self.aws.delete_cf_stack_including_dependencies(
            f"{self.cluster_name}-vpc")
        # cleanup related S3 buckets
        delete_cluster_buckets(self.cluster_name)
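
The destroy_volumes helper referenced above is external to these snippets. A minimal boto3 sketch, assuming the cluster's leftover EBS volumes carry the standard kubernetes.io/cluster/<name> ownership tag (an assumption, not confirmed by the source; destroy_volumes_sketch and the region default are hypothetical):

import boto3

def destroy_volumes_sketch(cluster_name, region_name="us-east-2"):
    """Delete leftover EBS volumes tagged as owned by the cluster (sketch)."""
    ec2 = boto3.resource("ec2", region_name=region_name)
    # Assumption: volumes are tagged kubernetes.io/cluster/<cluster_name>=owned.
    volumes = ec2.volumes.filter(
        Filters=[
            {
                "Name": f"tag:kubernetes.io/cluster/{cluster_name}",
                "Values": ["owned"],
            },
            # Only "available" volumes can be deleted directly; attached
            # volumes go away with their instances.
            {"Name": "status", "Values": ["available"]},
        ]
    )
    for volume in volumes:
        volume.delete()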
Example #2
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to AWS IPI

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)
        """
        destroy_volumes(self.cluster_name)
        delete_cluster_buckets(self.cluster_name)
        super(AWSIPI, self).destroy_cluster(log_level)
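
delete_cluster_buckets is likewise not shown in these snippets. A sketch using boto3's S3 resource, assuming cluster buckets are named with the cluster name as a prefix (a hypothetical naming convention; delete_cluster_buckets_sketch is not the ocs-ci implementation):

import boto3

def delete_cluster_buckets_sketch(cluster_name):
    """Empty and delete S3 buckets belonging to the cluster (sketch)."""
    s3 = boto3.resource("s3")
    for bucket in s3.buckets.all():
        # Assumption: cluster buckets use the cluster name as a prefix.
        if bucket.name.startswith(cluster_name):
            # A bucket must be emptied (all object versions included)
            # before it can be deleted.
            bucket.object_versions.delete()
            bucket.delete()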
Example #3
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to AWS UPI Flexy

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        # Destroy extra volumes
        destroy_volumes(self.cluster_name)
        # Delete apps records
        self.aws.delete_apps_record_set()
        self.aws.delete_apps_record_set(from_base_domain=True)

        stack_names = self.aws.get_worker_stacks()
        stack_names.sort()

        # add additional worker stacks to cf_stack_list2 (if there are any)
        cf_stack_list2_file_path = os.path.join(
            self.cluster_path,
            constants.FLEXY_HOST_DIR,
            constants.FLEXY_RELATIVE_CLUSTER_DIR,
            "cf_stack_list2",
        )
        if os.path.exists(cf_stack_list2_file_path):
            with open(cf_stack_list2_file_path, "r+") as f:
                lines = f.readlines()
                for stack_name in stack_names:
                    if f"{stack_name}\n" not in lines:
                        f.write(f"{stack_name}\n")
        else:
            logger.warning(f"File {cf_stack_list2_file_path} doesn't exists!")

        super(AWSUPIFlexy, self).destroy_cluster(log_level)

        # Delete master, bootstrap, security group, and worker stacks
        suffixes = ["ma", "bs", "sg"]
        stack_names.reverse()
        stack_names.extend([f"{self.cluster_name}-{s}" for s in suffixes])
        logger.info(f"Deleting stacks: {stack_names}")
        self.aws.delete_cloudformation_stacks(stack_names)

        # WORKAROUND for Flexy issue:
        # https://issues.redhat.com/browse/OCPQE-1521
        self.aws.delete_cf_stack_including_dependencies(
            f"{self.cluster_name}-vpc")
        # cleanup related S3 buckets
        delete_cluster_buckets(self.cluster_name)
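
The deletion order above (worker stacks first, in reverse sorted order, then the master, bootstrap, and security-group stacks) matters because CloudFormation stacks can depend on one another. A hedged sketch of what delete_cloudformation_stacks might do with boto3, using delete_stack plus a completion waiter (the real ocs-ci implementation is not reproduced here):

import boto3

def delete_cloudformation_stacks_sketch(stack_names, region_name="us-east-2"):
    """Request deletion of each stack, then wait until all are gone (sketch)."""
    cf = boto3.client("cloudformation", region_name=region_name)
    for name in stack_names:
        cf.delete_stack(StackName=name)
    # Block until every deletion finishes, so stacks deleted afterwards
    # (e.g. the VPC stack) do not fail on lingering dependent resources.
    waiter = cf.get_waiter("stack_delete_complete")
    for name in stack_names:
        waiter.wait(StackName=name)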
Example #4
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster for AWS UPI

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        cluster_name = get_cluster_name(self.cluster_path)
        if config.ENV_DATA.get("rhel_workers"):
            terminate_rhel_workers(get_rhel_worker_instances(
                self.cluster_path))

        # Destroy extra volumes
        destroy_volumes(cluster_name)

        # Destroy buckets
        delete_cluster_buckets(self.cluster_name)

        self.aws.delete_apps_record_set()

        # Delete master, bootstrap, security group, and worker stacks
        suffixes = ["ma", "bs", "sg"]

        stack_names = self.aws.get_worker_stacks()
        stack_names.sort()
        stack_names.reverse()
        stack_names.extend([f"{cluster_name}-{s}" for s in suffixes])
        logger.info(f"Deleting stacks: {stack_names}")
        self.aws.delete_cloudformation_stacks(stack_names)

        # Call openshift-installer destroy cluster
        super(AWSUPI, self).destroy_cluster(log_level)

        # Delete inf and vpc stacks
        suffixes = ["inf", "vpc"]
        stack_names = [f"{cluster_name}-{suffix}" for suffix in suffixes]
        self.aws.delete_cloudformation_stacks(stack_names)
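
Example #5 below probes worker stacks by the naming pattern {cluster_name}-no{index}. Borrowing that pattern, a sketch of how get_worker_stacks could enumerate them with boto3 (get_worker_stacks_sketch is hypothetical; the pattern is taken from Example #5, not confirmed for get_worker_stacks itself):

import boto3

def get_worker_stacks_sketch(cluster_name, region_name="us-east-2"):
    """Return names of existing worker stacks: <cluster>-no0, -no1, ... (sketch)."""
    cf = boto3.client("cloudformation", region_name=region_name)
    stacks = []
    index = 0
    while True:
        try:
            # describe_stacks raises ClientError when the stack does not exist.
            response = cf.describe_stacks(StackName=f"{cluster_name}-no{index}")
        except cf.exceptions.ClientError:
            break
        stacks.append(response["Stacks"][0]["StackName"])
        index += 1
    return stacks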
Example #5
def cleanup(cluster_name, cluster_id, upi=False, failed_deletions=None):
    """
    Clean up an existing cluster in AWS

    Args:
        cluster_name (str): Name of the cluster
        cluster_id (str): Cluster id to cleanup
        upi (bool): True for UPI cluster, False otherwise
        failed_deletions (list): list of clusters we failed to delete, used
            for reporting purposes

    """
    data = {'cluster_name': cluster_name, 'cluster_id': cluster_id}
    template = templating.Templating(base_path=TEMPLATE_CLEANUP_DIR)
    cleanup_template = template.render_template(CLEANUP_YAML, data)
    cleanup_path = tempfile.mkdtemp(prefix='cleanup_')
    cleanup_file = os.path.join(cleanup_path, 'metadata.json')
    with open(cleanup_file, "w") as temp:
        temp.write(cleanup_template)
    bin_dir = os.path.expanduser(config.RUN['bin_dir'])
    oc_bin = os.path.join(bin_dir, "openshift-install")

    if upi:
        aws = AWS()
        rhel_workers = get_rhel_worker_instances(cleanup_path)
        logger.info(f"{cluster_name}'s RHEL workers: {rhel_workers}")
        if rhel_workers:
            terminate_rhel_workers(rhel_workers)
        # Destroy extra volumes
        destroy_volumes(cluster_name)

        stack_names = list()
        # Get master, bootstrap and security group stacks
        for stack_type in ['ma', 'bs', 'sg']:
            try:
                stack_names.append(
                    aws.get_cloudformation_stacks(
                        pattern=f"{cluster_name}-{stack_type}")[0]
                    ['StackName'])
            except ClientError:
                continue

        # Get the worker stacks
        worker_index = 0
        worker_stack_exists = True
        while worker_stack_exists:
            try:
                stack_names.append(
                    aws.get_cloudformation_stacks(
                        pattern=f"{cluster_name}-no{worker_index}")[0]
                    ['StackName'])
                worker_index += 1
            except ClientError:
                worker_stack_exists = False

        logger.info(f"Deleting stacks: {stack_names}")
        aws.delete_cloudformation_stacks(stack_names)

        # Destroy the cluster
        logger.info(f"cleaning up {cluster_id}")
        destroy_cluster(installer=oc_bin, cluster_path=cleanup_path)

        for stack_type in ['inf', 'vpc']:
            try:
                stack_names.append(
                    aws.get_cloudformation_stacks(
                        pattern=f"{cluster_name}-{stack_type}")[0]
                    ['StackName'])
            except ClientError:
                continue
        try:
            aws.delete_cloudformation_stacks(stack_names)
        except StackStatusError:
            logger.error('Failed to fully destroy cluster %s', cluster_name)
            if failed_deletions is not None:
                failed_deletions.append(cluster_name)
            raise
    else:
        logger.info(f"cleaning up {cluster_id}")
        try:
            destroy_cluster(installer=oc_bin, cluster_path=cleanup_path)
        except CommandFailed:
            logger.error('Failed to fully destroy cluster %s', cluster_name)
            if failed_deletions is not None:
                failed_deletions.append(cluster_name)
            raise

    delete_cluster_buckets(cluster_name)
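
cleanup works by rendering a metadata.json into a temporary directory so openshift-install destroy can identify the cluster's resources. A minimal sketch of that rendering step using Jinja2 directly (the real CLEANUP_YAML template and Templating wrapper live in ocs-ci and are not reproduced here; the template body below is a hypothetical stand-in):

import os
import tempfile
from jinja2 import Template

# Hypothetical stand-in for CLEANUP_YAML; the real template ships with ocs-ci.
METADATA_TEMPLATE = (
    '{"clusterName": "{{ cluster_name }}", "clusterID": "{{ cluster_id }}"}'
)

def render_cleanup_metadata(cluster_name, cluster_id):
    """Write a minimal metadata.json that openshift-install destroy can read."""
    content = Template(METADATA_TEMPLATE).render(
        cluster_name=cluster_name, cluster_id=cluster_id
    )
    cleanup_path = tempfile.mkdtemp(prefix="cleanup_")
    with open(os.path.join(cleanup_path, "metadata.json"), "w") as f:
        f.write(content)
    return cleanup_path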