Example #1
File: update.py Project: jonahjon/eksboot
 def post(self, name):
     '''
     Create s3 bucket for stack # Maybe we can have people pass this in as an input?
     Jinja template out our cluster creation file using the post input
     Update the CDK json context with name/crud call and invoke the CDK
     Create some sort of data structure to pass back for auth into the cluster
     '''
     aws_logger.info('/update/ POST')
     args = create_parser.parse_args()
     create_addon_args = create_addon_parser.parse_args(req=create_parser)
     chdir = os.getcwd()
     aws_logger.info(args)
     # template_dict = args['addons']
     # template_dict.update({'name':args['name']})
     #
     s3 = S3(aws_logger)
     write_jinja_file(aws_logger,
                      d=args,
                      i_filename='cluster.yaml.j2',
                      o_filename='cluster.yaml',
                      path=f"{chdir}/codebuild/")
     write_jinja_file(aws_logger,
                      d=args,
                      i_filename='buildspec_create.yml.j2',
                      o_filename='buildspec.yml',
                      path=f"{chdir}/codebuild/")
     zipped = zip_function_upload(aws_logger,
                                  zip_file_name='buildspec.yml.zip',
                                  path=f"{chdir}/codebuild/")
     aws_logger.info(
         f"Create zipfile {zipped}.... Uploading to bucket: {args['s3bucket']}"
     )
     s3.upload_file(bucket=args['s3bucket'],
                    file_name=f"{chdir}/codebuild/buildspec.yml.zip",
                    file_obj=zipped)
     create_cdk_json(
         {
             'name': args['name'],
             's3_bucket': args['s3bucket'],
             'zipfile': 'buildspec.yml.zip',
             'iamrole': args['iamrole']
         }, f"{chdir}/cdk/", aws_logger)
     aws_logger.info('created the cdk.json file for the CDK params')
     streaming_output(["cdk", "deploy", "--require-approval", "never"],
                      f"{chdir}/cdk/", aws_logger)
     try:
         passback = {'name': args['name']}
         return passback
     except KeyError as e:
         print(e)
         api.abort(500,
                   e.__doc__,
                   status="Could not save information",
                   statusCode="500")
     except Exception as e:
         print(e)
         api.abort(400,
                   e.__doc__,
                   status="Could not save information",
                   statusCode="400")
Example #2
def get_manifest_file_path(artifact_name, temp_dir):
    try:
        logger.info("Downloading the artifact from Pipeline S3 Artifact bucket: {}".format(artifact_name))
        artifact_bucket_creds = pipeline.get_credentials()
        artifact_bucket, artifact_key = pipeline.get_artifact_location(artifact_name)

        temp_zip_file = os.path.join(temp_dir, "lz-config.zip")
        s3 = S3(logger, credentials=artifact_bucket_creds)
        s3.download_file(artifact_bucket, artifact_key, temp_zip_file)

        with zipfile.ZipFile(temp_zip_file, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)

        mf_file_path = os.path.join(temp_dir, MANIFEST_FILE_NAME)

        if os.path.isfile(mf_file_path):
            return mf_file_path
        else:
            mf_file_path = os.path.join(temp_dir, "aws-landing-zone-configuration", MANIFEST_FILE_NAME)
            if os.path.isfile(mf_file_path):
                return mf_file_path
            else:
                raise Exception("manifest.yaml does not exist at the root level of aws-landing-zone-configuration.zip or inside aws-landing-zone-configuration folder, please check the ZIP file.")
    except Exception as e:
        message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
        logger.exception(message)
        raise
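
A hedged usage sketch for the function above; the artifact name is hypothetical, and the scratch directory comes from the standard library:

import tempfile

temp_dir = tempfile.mkdtemp()
manifest_path = get_manifest_file_path("lz-config", temp_dir)  # "lz-config" is an assumed artifact name
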
Example #3

    def upload_console_files(self):
        try:
            s3 = S3(self.logger)
            self.logger.info("Executing: " + self.__class__.__name__ + "/" +
                             inspect.stack()[0][3])

            file_path = os.path.join(os.path.dirname(__file__),
                                     "console-manifest.json")
            if os.path.exists(file_path):
                with open(file_path, 'r') as json_data:
                    data = json.load(json_data)

                destination_bucket = self.params.get('ConsoleBucket')
                source_bucket = self.params.get('SrcBucket')
                key_prefix = self.params.get('SrcPath') + '/'

                for file in data["files"]:
                    key = 'console/' + file
                    s3.copy_object(source_bucket, key_prefix, key,
                                   destination_bucket)
        except Exception as e:
            message = {
                'FILE': __file__.split('/')[-1],
                'CLASS': self.__class__.__name__,
                'METHOD': inspect.stack()[0][3],
                'EXCEPTION': str(e)
            }
            self.logger.exception(message)
            raise
Example #4
 def __init__(self, logger):
     self.logger = logger
     self.org = Organizations(logger)
     self.s3 = S3(logger)
     self.param_handler = CFNParamsHandler(logger)
     self.manifest = Manifest(os.environ.get('MANIFEST_FILE_PATH'))
     self.manifest_folder = os.environ.get('MANIFEST_FOLDER')
Example #5
 def __init__(self, logger, sm_input_list):
     self.logger = logger
     self.sm_input_list = sm_input_list
     self.list_sm_exec_arns = []
     self.stack_set_exist = True
     self.solution_metrics = Metrics(logger)
     self.param_handler = CFNParamsHandler(logger)
     self.state_machine = StateMachine(logger)
     self.stack_set = StackSet(logger)
     self.s3 = S3(logger)
     self.wait_time = os.environ.get('WAIT_TIME')
     self.execution_mode = os.environ.get('EXECUTION_MODE')
Example #6

def download_remote_file(logger, remote_s3_path):
    try:
        _file = tempfile.mkstemp()[1]
        t = remote_s3_path.split("/", 3) # s3://bucket-name/key
        remote_bucket = t[2] # Bucket name
        remote_key = t[3] # Key
        logger.info("Downloading {}/{} from S3 to {}".format(remote_bucket, remote_key, _file))
        s3 = S3(logger)
        s3.download_file(remote_bucket, remote_key, _file)
        return _file
    except Exception as e:
        message = {'FILE': __file__.split('/')[-1],
                   'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
        logger.exception(message)
        raise
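
To illustrate the parsing above: splitting an s3:// URI on "/" with maxsplit=3 leaves the bucket name at index 2 and the full object key at index 3.

parts = "s3://my-bucket/path/to/manifest.yaml".split("/", 3)
assert parts == ['s3:', '', 'my-bucket', 'path/to/manifest.yaml']
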
Example #7

 def __init__(self, logger, wait_time, manifest_file_path, sm_arn_scp,
              staging_bucket):
     self.state_machine = StateMachine(logger)
     self.s3 = S3(logger)
     self.send = Metrics(logger)
     self.param_handler = ParamsHandler(logger)
     self.logger = logger
     self.manifest_file_path = manifest_file_path
     self.manifest_folder = manifest_file_path[:-len(MANIFEST_FILE_NAME)]
     self.wait_time = wait_time
     self.sm_arn_scp = sm_arn_scp
     self.manifest = None
     self.list_sm_exec_arns = []
     self.nested_ou_delimiter = ""
     self.staging_bucket = staging_bucket
     self.root_id = None
Example #8
def get_result_set(region, bucket, table, base):
    """get result set from storage object
    """
    connection = Connection(base, region)
    base = base.lower()
    storage_obj = None

    if base == "s3":
        storage_obj = S3(connection=connection.new_connection())
        storage_obj.set_storage_set_name(bucket)
    elif base == "dynamodb":
        storage_obj = DynamoDB(connection=connection.new_connection())
        storage_obj.set_storage_set_name(table)
    storage_set = storage_obj.get_storage_set()
    result_set = storage_obj.list(storage_set) if storage_set else None

    return result_set
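
A short usage sketch for get_result_set; the region, bucket, and table names are hypothetical:

result_set = get_result_set("us-east-1", "my-results-bucket", "my-results-table", "s3")
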
Example #9

 def __init__(self, logger, sm_arns_map, staging_bucket, manifest_file_path,
              pipeline_stage, token, execution_mode, primary_account_id):
     self.state_machine = StateMachine(logger)
     self.ssm = SSM(logger)
     self.s3 = S3(logger)
     self.send = Metrics(logger)
     self.param_handler = ParamsHandler(logger)
     self.logger = logger
     self.sm_arns_map = sm_arns_map
     self.manifest = None
     self.staging_bucket = staging_bucket
     self.manifest_file_path = manifest_file_path
     self.token = token
     self.pipeline_stage = pipeline_stage
     self.manifest_folder = manifest_file_path[:-len(MANIFEST_FILE_NAME)]
     if execution_mode.lower() == 'sequential':
         self.isSequential = True
     else:
         self.isSequential = False
     self.index = 100
     self.primary_account_id = primary_account_id
Example #10

 def __init__(self, logger, wait_time, manifest_file_path, sm_arn_stackset,
              staging_bucket, execution_mode):
     self.state_machine = StateMachine(logger)
     self.ssm = SSM(logger)
     self.s3 = S3(logger)
     self.send = Metrics(logger)
     self.param_handler = ParamsHandler(logger)
     self.logger = logger
     self.manifest_file_path = manifest_file_path
     self.manifest_folder = manifest_file_path[:-len(MANIFEST_FILE_NAME)]
     self.wait_time = wait_time
     self.sm_arn_stackset = sm_arn_stackset
     self.manifest = None
     self.list_sm_exec_arns = []
     self.staging_bucket = staging_bucket
     self.root_id = None
     self.uuid = uuid4()
     self.state_machine_event = {}
     self.logger.info("Running {} mode".format(execution_mode))
     self.sequential_flag = execution_mode.lower() == 'sequential'
Example #11
 def post(self, name):
     '''
     Create s3 bucket for stack # Maybe we can have people pass this in as an input?
     Jinja template out our cluster creation file using the post input
     Update the CDK json context with id/crud call and invoke the CDK
     Create some sort of data structure to pass back for auth into the cluster
     '''
     aws_logger.info(f"/configof/{name} POST")
     args = config_parser.parse_args()
     s3 = S3(aws_logger, region=region)
     config = s3.download_dict(f"{name}.json", args['s3bucket'])
     config['clicommand'] = f"aws eks update-kubeconfig --name {name}"
     config['cloudformation_cp'] = f"eksctl-{name}-cluster"
     config['cloudformation_ng'] = f"eksctl-{name}-nodegroup-{name}-ng"
     s3.upload_dict(f"{name}.json", config, args['s3bucket'])
     aws_logger.info(config)
     try:
         return config
     except KeyError as e:
         print(e)
         api.abort(500, e.__doc__, status="Could not save information", statusCode="500")
     except Exception as e:
         print(e)
         api.abort(400, e.__doc__, status="Could not save information", statusCode="400")
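
A hedged sketch of what an S3.download_dict-style wrapper might do, assuming it JSON-decodes the object body and returns None when the object is missing (the project's actual method is not shown):

import json
import boto3
from botocore.exceptions import ClientError

def download_dict(key, bucket):
    try:
        body = boto3.client("s3").get_object(Bucket=bucket, Key=key)["Body"].read()
    except ClientError:
        return None  # treated as "already deleted" by callers such as Example #16 below
    return json.loads(body)
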
Example #12

import os
import inspect
import zipfile
from hashlib import md5
from lib.crhelper import cfn_handler
from uuid import uuid4
from lib.helper import get_available_regions
# The snippet uses Logger, KMS, SSM, and S3 below; their import paths are
# assumed to follow the lib/ pattern of the imports above.
from lib.logger import Logger
from lib.kms import KMS
from lib.ssm import SSM
from lib.s3 import S3

# initialize logger
log_level = os.environ.get('log_level')
logger = Logger(loglevel=log_level)
init_failed = False

# instantiate classes from lib
kms = KMS(logger)
ssm = SSM(logger)
s3 = S3(logger)


def unique_email_validator(email_list):
    result = set([x for x in email_list if email_list.count(x) > 1])
    duplicate_list = list(result)
    logger.info("Duplicate Emails: {}".format(duplicate_list))
    if not duplicate_list:
        logger.info("Duplicate emails not found")
    else:
        raise Exception("Found duplicate email(s) {} in the parameters.".format(duplicate_list))


def unzip_function(zip_file_name, function_path, output_path):
    try:
        orig_path = os.getcwd()
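
The snippet is cut off here. A minimal sketch of how such an unzip helper might continue, assuming it extracts function_path/zip_file_name into output_path and restores the working directory afterwards (the helper's real body is not shown, so this is an assumption):

import os
import zipfile

def unzip_function_sketch(zip_file_name, function_path, output_path):
    orig_path = os.getcwd()
    os.chdir(function_path)
    try:
        with zipfile.ZipFile(zip_file_name, 'r') as zip_ref:
            zip_ref.extractall(output_path)
    finally:
        os.chdir(orig_path)
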
Example #13
                        "--base",
                        type=str,
                        help="s3 | dynamodb",
                        default="s3",
                        required=False)

    # logging
    logging.config.fileConfig('logging.ini',
                              disable_existing_loggers=False,
                              defaults={'logfilename': LOG_PATH})
    logger = logging.getLogger(__name__)

    # FIXME - file logging
    fh = logging.FileHandler(LOG_PATH)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    # parse args
    args = parser.parse_args()
    db_connection = Connection("dynamodb", args.region)
    db_table = args.table
    db = DynamoDB(connection=db_connection.new_connection())
    s3_connection = Connection("s3", args.region)
    s3_bucket = args.bucket
    s3 = S3(connection=s3_connection.new_connection())
    if db and s3:
        main(args.region, args.bucket, args.table, args.base, args.threadcount)
    else:
        raise ImportError("No DB connection")
Example #14
def config_deployer(event, RequestType='Create'):
    try:
        s3 = S3(logger)
        base_path = '/tmp/lz'

        # set variables
        source_bucket_name = event.get('bucket_config',
                                       {}).get('source_bucket_name')
        source_key_name = event.get('bucket_config', {}).get('source_s3_key')
        destination_bucket_name = event.get('bucket_config',
                                            {}).get('destination_bucket_name')
        destination_key_name = event.get('bucket_config',
                                         {}).get('destination_s3_key')
        add_on_zip_file_name = source_key_name.split(
            "/")[-1] if "/" in source_key_name else source_key_name
        add_on_file_path = base_path + "/" + add_on_zip_file_name
        lzconfig_file_path = base_path + "/" + destination_key_name
        add_on_extract_path = base_path + "/" + 'add_on_extract'
        lzconfig_extract_path = base_path + "/" + 'lzconfig_extract'
        lzconfig_add_on_path = lzconfig_extract_path + "/" + "add-on"
        output_path = base_path + "/" + 'out'
        merge_add_on_flag = event.get('bucket_config', {}).get('merge_add_on')

        logger.info("add_on_zip_file_name: {}".format(add_on_zip_file_name))
        logger.info("destination_key_name: {}".format(destination_key_name))
        logger.info("merge_add_on_flag: {}".format(merge_add_on_flag))

        if RequestType == 'Create':
            # Download the Add-On ZIP from Solutions S3 bucket
            make_dir(base_path)
            s3.download_file(source_bucket_name, source_key_name,
                             add_on_file_path)

            # Unzip the Add-On ZIP file
            unzip_function(add_on_zip_file_name, base_path,
                           add_on_extract_path)

            # Find and replace the variable in user-input.yaml
            for item in event.get('find_replace'):
                f = item.get('file_name')
                filename, file_extension = os.path.splitext(f)
                destination_file_path = add_on_extract_path + "/" + filename if file_extension == '.j2' else add_on_extract_path + "/" + f
                find_replace(add_on_extract_path, f, destination_file_path,
                             item.get('parameters'))

            # Zip the contents
            make_dir(output_path)
            zip_function(add_on_zip_file_name, add_on_extract_path,
                         output_path)

            if merge_add_on_flag.upper() == 'YES':
                try:
                    # Download the LZ Configuration ZIP from Customer's S3 bucket
                    s3.download_file(destination_bucket_name,
                                     destination_key_name, lzconfig_file_path)
                except Exception as e:
                    message = {
                        'FILE': __file__.split('/')[-1],
                        'METHOD': inspect.stack()[0][3],
                        'EXCEPTION': str(e)
                    }
                    error_message = "Check the S3 Bucket name: {} and Bucket Permissions. " \
                                    "Check if the file: {} exists inside the bucket.".format(destination_bucket_name,
                                                                                             destination_key_name)
                    logger.exception(message)
                    raise Exception(error_message)

                # Unzip the LZ Configuration ZIP file
                unzip_function(destination_key_name, base_path,
                               lzconfig_extract_path)
                make_dir(lzconfig_add_on_path)
                shutil.copyfile(
                    output_path + "/" + add_on_zip_file_name,
                    lzconfig_add_on_path + "/" + add_on_zip_file_name)

                zip_function(destination_key_name, lzconfig_extract_path,
                             output_path)
                # Upload the file in the customer S3 bucket
                local_file = output_path + "/" + destination_key_name
                remote_file = destination_key_name
                s3.upload_file(destination_bucket_name, local_file,
                               remote_file)
            else:
                # Upload the file in the customer S3 bucket
                local_file = output_path + "/" + add_on_zip_file_name
                remote_file = add_on_zip_file_name
                s3.upload_file(destination_bucket_name, local_file,
                               remote_file)
        elif RequestType == 'Delete':
            if merge_add_on_flag.upper() == 'YES':
                try:
                    make_dir(base_path)
                    # Download the LZ Configuration ZIP from Customer's S3 bucket
                    s3.download_file(destination_bucket_name,
                                     destination_key_name, lzconfig_file_path)
                except Exception as e:
                    message = {
                        'FILE': __file__.split('/')[-1],
                        'METHOD': inspect.stack()[0][3],
                        'EXCEPTION': str(e)
                    }
                    error_message = "Check the S3 Bucket name: {} and Bucket Permissions. " \
                                    "Check if the file: {} exists inside the bucket.".format(destination_bucket_name,
                                                                                             destination_key_name)
                    logger.exception(message)
                    raise Exception(error_message)

                # Unzip the LZ Configuration ZIP file
                unzip_function(destination_key_name, base_path,
                               lzconfig_extract_path)

                my_file = Path(lzconfig_add_on_path + "/" +
                               add_on_zip_file_name)
                if my_file.is_file():
                    os.remove(lzconfig_add_on_path + "/" +
                              add_on_zip_file_name)

                make_dir(output_path)
                zip_function(destination_key_name, lzconfig_extract_path,
                             output_path)

                # Upload the file in the customer S3 bucket
                local_file = output_path + "/" + destination_key_name
                s3.upload_file(destination_bucket_name, local_file,
                               destination_key_name)

        return None
    except Exception as e:
        message = {
            'FILE': __file__.split('/')[-1],
            'METHOD': inspect.stack()[0][3],
            'EXCEPTION': str(e)
        }
        logger.exception(message)
        raise
    finally:
        try:
            shutil.rmtree('/tmp/lz')  # delete directory
        except OSError as exc:
            if exc.errno != errno.ENOENT:  # ENOENT - no such file or directory
                raise
Example #15

def config_deployer(event, previous_event, RequestType='Create'):
    try:
        s3 = S3(logger)
        base_path = '/tmp/lz'

        # set variables
        source_bucket_name = event.get('bucket_config', {}).get('source_bucket_name')
        source_key_name = event.get('bucket_config', {}).get('source_s3_key')
        destination_bucket_name = event.get('bucket_config', {}).get('destination_bucket_name')
        destination_key_name = event.get('bucket_config', {}).get('destination_s3_key')
        add_on_zip_file_name = source_key_name.split("/")[-1] if "/" in source_key_name else source_key_name
        add_on_file_path = base_path + "/" + add_on_zip_file_name
        lzconfig_file_path = base_path + "/" + destination_key_name
        add_on_extract_path = base_path + "/" + 'add_on_extract'
        lzconfig_extract_path = base_path + "/" + 'lzconfig_extract'
        output_path = base_path + "/" + 'out'
        merge_add_on_flag = event.get('bucket_config', {}).get('merge_add_on')

        logger.info("add_on_zip_file_name: {}".format(add_on_zip_file_name))
        logger.info("destination_key_name: {}".format(destination_key_name))
        logger.info("merge_add_on_flag: {}".format(merge_add_on_flag))

        if RequestType == 'Create':
            # Download the Add-On ZIP from Solutions S3 bucket
            make_dir(base_path)
            s3.download_file(source_bucket_name, source_key_name, add_on_file_path)

            # Unzip the Add-On ZIP file
            unzip_function(add_on_zip_file_name, base_path, add_on_extract_path)

            # Find and replace the variable in user-input.yaml
            for item in event.get('find_replace'):
                f = item.get('file_name')
                filename, file_extension = os.path.splitext(f)
                destination_file_path = add_on_extract_path + "/" + filename if file_extension == '.j2' else add_on_extract_path + "/" + f
                find_replace(add_on_extract_path, f, destination_file_path, item.get('parameters'))

            # Zip the contents
            make_dir(output_path)
            zip_function(add_on_zip_file_name, add_on_extract_path, output_path)

            if merge_add_on_flag.upper() == 'YES':
                try:
                    # Download the LZ Configuration ZIP from Customer's S3 bucket
                    s3.download_file(destination_bucket_name, destination_key_name, lzconfig_file_path)
                except Exception as e:
                    message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
                    error_message = "Check the S3 Bucket name: {} and Bucket Permissions. " \
                                    "Check if the file: {} exists inside the bucket.".format(destination_bucket_name,
                                                                                             destination_key_name)
                    logger.exception(message)
                    raise Exception(error_message)

                # Unzip the LZ Configuration ZIP file
                unzip_function(destination_key_name, base_path, lzconfig_extract_path)

                # Check if manifest.yaml exists at the root level
                if os.path.isfile(os.path.join(lzconfig_extract_path, 'manifest.yaml')):
                    lzconfig_add_on_path = lzconfig_extract_path + "/" + "add-on"
                # OR inside aws-landing-zone-configuration/manifest.yaml
                elif os.path.isfile(os.path.join(lzconfig_extract_path, 'aws-landing-zone-configuration', 'manifest.yaml')):
                    lzconfig_add_on_path = lzconfig_extract_path + "/" + "aws-landing-zone-configuration/add-on"
                else:
                    raise Exception("manifest.yaml not found at the root level or inside the "
                                    "aws-landing-zone-configuration folder of the LZ configuration ZIP.")

                make_dir(lzconfig_add_on_path)

                # if previous_event exists - delete the old zip file from the landing zone config zip
                if previous_event is not None:
                    # old event variables - for update path
                    previous_source_key_name = previous_event.get('bucket_config', {}).get('source_s3_key')
                    previous_add_on_zip_file_name = previous_source_key_name.split("/")[-1] \
                        if "/" in previous_source_key_name else previous_source_key_name
                    logger.info("Found old resource properties in the CFN event. Printing old resource properties.")
                    logger.info(previous_event)
                    my_file = Path(lzconfig_add_on_path + "/" + previous_add_on_zip_file_name)
                    logger.info("Searching for {} in the ALZ config zip contents".format(my_file))
                    if my_file.is_file():
                        logger.info("Found the old add-on zip file in the ALZ config zip, deleting the file")
                        os.remove(lzconfig_add_on_path + "/" + previous_add_on_zip_file_name)

                # copy the latest add-on zip into the ALZ config
                shutil.copyfile(output_path + "/" + add_on_zip_file_name,
                                lzconfig_add_on_path + "/" + add_on_zip_file_name)

                zip_function(destination_key_name, lzconfig_extract_path, output_path)
                # Upload the file in the customer S3 bucket
                local_file = output_path + "/" + destination_key_name
                remote_file = destination_key_name
                s3.upload_file(destination_bucket_name, local_file, remote_file)
            else:
                # Upload the file in the customer S3 bucket
                local_file = output_path + "/" + add_on_zip_file_name
                remote_file = add_on_zip_file_name
                s3.upload_file(destination_bucket_name, local_file, remote_file)
        elif RequestType == 'Delete':
            if merge_add_on_flag.upper() == 'YES':
                try:
                    make_dir(base_path)
                    # Download the LZ Configuration ZIP from Customer's S3 bucket
                    s3.download_file(destination_bucket_name, destination_key_name, lzconfig_file_path)
                except Exception as e:
                    message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
                    error_message = "Check the S3 Bucket name: {} and Bucket Permissions. " \
                                    "Check if the file: {} exists inside the bucket.".format(destination_bucket_name,
                                                                                             destination_key_name)
                    logger.exception(message)
                    raise Exception(error_message)

                # Unzip the LZ Configuration ZIP file
                unzip_function(destination_key_name, base_path, lzconfig_extract_path)

                # Check if manifest.yaml exists at the root level
                if os.path.isfile(os.path.join(lzconfig_extract_path, 'manifest.yaml')):
                    lzconfig_add_on_path = lzconfig_extract_path + "/" + "add-on"
                # OR inside aws-landing-zone-configuration/manifest.yaml
                elif os.path.isfile(os.path.join(lzconfig_extract_path, 'aws-landing-zone-configuration', 'manifest.yaml')):
                    lzconfig_add_on_path = lzconfig_extract_path + "/" + "aws-landing-zone-configuration/add-on"
                else:
                    raise Exception("manifest.yaml not found at the root level or inside the "
                                    "aws-landing-zone-configuration folder of the LZ configuration ZIP.")

                my_file = Path(lzconfig_add_on_path + "/" + add_on_zip_file_name)
                if my_file.is_file():
                    os.remove(lzconfig_add_on_path + "/" + add_on_zip_file_name)

                make_dir(output_path)
                zip_function(destination_key_name, lzconfig_extract_path, output_path)

                # Upload the file in the customer S3 bucket
                local_file = output_path + "/" + destination_key_name
                s3.upload_file(destination_bucket_name, local_file, destination_key_name)

        return None
    except Exception as e:
        message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
        logger.exception(message)
        raise
    finally:
        try:
            shutil.rmtree('/tmp/lz')  # delete directory
        except OSError as exc:
            if exc.errno != errno.ENOENT:  # ENOENT - no such file or directory
                raise
Example #16
File: delete.py Project: jonahjon/eksboot
    def post(self, name):
        '''
        1. Delete EKS cluster cloudformation
        2. Delete CDK stack
        3. Delete Items from s3 bucket
        '''
        aws_logger.info('/delete/{} POST'.format(name))
        args = delete_parser.parse_args()
        aws_logger.info(args)
        s3r = S3(aws_logger, region=region)
        config = s3r.download_dict(f"{name}.json", args['s3bucket'])
        if config is None:
            return f"already deleted stack {name}"
        aws_logger.info(config)
        cf = Cloudformation(aws_logger, region=region)
        if config['addons']['togca']:
            try:
                cfoutput = cf.describe_stack(
                    StackName=config['cloudformation_ng'])
                iam_role_arn = None
                for keys in cfoutput['Stacks'][0]['Outputs']:
                    if keys['OutputKey'] == 'InstanceRoleARN':
                        iam_role_arn = keys['OutputValue']
                        iam_role_arn = iam_role_arn.split("/")[-1]
                if iam_role_arn:
                    iam = IAM(aws_logger, region=region)
                    aws_logger.info(
                        f"trying to delete 'ASG-Policy-For-Worker' from role {iam_role_arn}"
                    )
                    iam.delete_role_policy(RoleName=iam_role_arn,
                                           PolicyName='ASG-Policy-For-Worker')
            except Exception as e:
                aws_logger.info(
                    f"error removing ASG policy from worker nodes, consider manually removing policy:  {e}"
                )

        cf.delete_stack(StackName=config['cloudformation_ng'])
        for _ in range(60):
            check = cf.describe_stack(StackName=config['cloudformation_ng'])
            if check:
                sleep(4)
            else:
                aws_logger.info("NodeGroup Stack Deleted")
                break
        cf.delete_stack(StackName=config['cloudformation_cp'])
        for _ in range(120):
            check = cf.describe_stack(StackName=config['cloudformation_cp'])
            if check:
                sleep(4)
            else:
                aws_logger.info("ControlPlane Stack Deleted")
                break
        chdir = os.getcwd()
        streaming_output(["cdk", "destroy", "-f"], f"{chdir}/cdk/", aws_logger)
        s3c = S3Client(aws_logger, region=region)
        s3c.delete_object(bucket_name=args['s3bucket'],
                          key=f"{config['name']}.json")
        try:
            return config
        except KeyError as e:
            print(e)
            api.abort(500,
                      e.__doc__,
                      status="Could not save information",
                      statusCode="500")
        except Exception as e:
            print(e)
            api.abort(400,
                      e.__doc__,
                      status="Could not save information",
                      statusCode="400")