def destroy(self, log_level="DEBUG"):
    """
    Destroy OCP cluster specific

    Args:
        log_level (str): log level openshift-installer (default: DEBUG)

    """
    # Load the deploy-time metadata so the installer can locate and tear
    # down the matching cloud resources.
    metadata_path = os.path.join(self.cluster_path, "metadata.json")
    with open(metadata_path) as metadata_fd:
        self.metadata = json.load(metadata_fd)
    utils.destroy_cluster(
        installer=self.installer,
        cluster_path=self.cluster_path,
        log_level=log_level,
    )
def test_destroy_cluster(self, log_cli_level):
    """Destroy the OCS test cluster located at the configured cluster path."""
    log.info("Running OCS cluster destroy")
    cluster_path = config.ENV_DATA['cluster_path']
    destroy_cluster(cluster_path, log_cli_level)
def cleanup(cluster_name, cluster_id, upi=False, failed_deletions=None):
    """
    Cleanup existing cluster in AWS

    Args:
        cluster_name (str): Name of the cluster
        cluster_id (str): Cluster id to cleanup
        upi (bool): True for UPI cluster, False otherwise
        failed_deletions (list): list of clusters we failed to delete, used
            for reporting purposes

    """
    # Render a minimal metadata.json into a temp dir so openshift-install
    # can identify which cloud resources belong to this cluster.
    data = {'cluster_name': cluster_name, 'cluster_id': cluster_id}
    template = templating.Templating(base_path=TEMPLATE_CLEANUP_DIR)
    cleanup_template = template.render_template(CLEANUP_YAML, data)
    cleanup_path = tempfile.mkdtemp(prefix='cleanup_')
    cleanup_file = os.path.join(cleanup_path, 'metadata.json')
    with open(cleanup_file, "w") as temp:
        temp.write(cleanup_template)
    bin_dir = os.path.expanduser(config.RUN['bin_dir'])
    oc_bin = os.path.join(bin_dir, "openshift-install")
    if upi:
        # UPI clusters were created from CloudFormation stacks, so the
        # installer alone cannot remove everything — delete workers,
        # extra volumes and the stacks explicitly.
        aws = AWS()
        rhel_workers = get_rhel_worker_instances(cleanup_path)
        logger.info(f"{cluster_name}'s RHEL workers: {rhel_workers}")
        if rhel_workers:
            terminate_rhel_workers(rhel_workers)
        # Destroy extra volumes
        destroy_volumes(cluster_name)
        stack_names = list()
        # Get master, bootstrap and security group stacks
        for stack_type in ['ma', 'bs', 'sg']:
            try:
                stack_names.append(
                    aws.get_cloudformation_stacks(
                        pattern=f"{cluster_name}-{stack_type}")[0]
                    ['StackName'])
            except ClientError:
                # Stack of this type not found — skip it
                continue
        # Get the worker stacks: probe -no0, -no1, ... until a lookup fails
        worker_index = 0
        worker_stack_exists = True
        while worker_stack_exists:
            try:
                stack_names.append(
                    aws.get_cloudformation_stacks(
                        pattern=f"{cluster_name}-no{worker_index}")[0]
                    ['StackName'])
                worker_index += 1
            except ClientError:
                worker_stack_exists = False
        logger.info(f"Deleting stacks: {stack_names}")
        aws.delete_cloudformation_stacks(stack_names)
        # Destroy the cluster
        logger.info(f"cleaning up {cluster_id}")
        destroy_cluster(installer=oc_bin, cluster_path=cleanup_path)
        # Infra and VPC stacks can only go after the installer-driven
        # destroy has released their resources.
        for stack_type in ['inf', 'vpc']:
            try:
                stack_names.append(
                    aws.get_cloudformation_stacks(
                        pattern=f"{cluster_name}-{stack_type}")[0]
                    ['StackName'])
            except ClientError:
                continue
        # NOTE(review): stack_names still contains the stacks deleted
        # above, so they are passed to delete again here — presumably
        # delete_cloudformation_stacks tolerates already-deleted stacks;
        # confirm before changing.
        try:
            aws.delete_cloudformation_stacks(stack_names)
        except StackStatusError:
            logger.error('Failed to fully destroy cluster %s', cluster_name)
            # BUGFIX: compare against None, not truthiness — a caller
            # passing an empty list to collect failures would otherwise
            # never see this cluster recorded.
            if failed_deletions is not None:
                failed_deletions.append(cluster_name)
            raise
    else:
        # IPI cluster: openshift-install destroy handles everything.
        logger.info(f"cleaning up {cluster_id}")
        try:
            destroy_cluster(installer=oc_bin, cluster_path=cleanup_path)
        except CommandFailed:
            logger.error('Failed to fully destroy cluster %s', cluster_name)
            # BUGFIX: same None-check as above so an empty reporting list
            # still receives the failed cluster name.
            if failed_deletions is not None:
                failed_deletions.append(cluster_name)
            raise
def cluster_teardown():
    """Destroy the cluster deployed for this test run."""
    log.info("Destroying the test cluster")
    cluster_path = config.ENV_DATA['cluster_path']
    destroy_cluster(cluster_path)
    log.info("Destroying the test cluster complete")
def cluster_teardown(log_level="DEBUG"):
    """Destroy the cluster deployed for this test run.

    Args:
        log_level (str): log level for openshift-installer (default: DEBUG)
    """
    log.info("Destroying the test cluster")
    cluster_path = config.ENV_DATA['cluster_path']
    destroy_cluster(cluster_path, log_level)
    log.info("Destroying the test cluster complete")