Example 1
def get_deleted_beanstalk_environment_names_and_creation_order(cfg, aws):
    result = []

    try:
        cfg.get_logger().info(f"Getting all terminated BeanStalk environments ...")
        response = aws.get_boto3_client('elasticbeanstalk').describe_environments(
            IncludeDeleted=True,
            IncludedDeletedBackTo=datetime.datetime(2015, 1, 1)
        )
        cfg.get_logger().info(f"Successfully finished getting all BeanStalk environments")
        env_list = response['Environments']
    except NoRegionError as e:
        cfg.get_logger().error(f"No region provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-start:",
            f"No AWS Region provided!!!"
        )
        raise e

    for environment in env_list:
        if environment['Status'] == 'Terminated':
            # Get environment deletion order from s3 bucket
            env_state = get_beanstalk_environment_deletion_order_from_state_bucket(
                cfg, aws, environment['EnvironmentName']
            )
            if env_state is not None:
                result.append(env_state)

    return result
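For reference, a minimal standalone sketch of the underlying boto3 call, assuming credentials and a region are configured in the environment (the project's cfg/aws helpers are not needed here):

import datetime

import boto3

# Hedged sketch: list Elastic Beanstalk environments terminated since 2015.
client = boto3.client('elasticbeanstalk')
response = client.describe_environments(
    IncludeDeleted=True,
    IncludedDeletedBackTo=datetime.datetime(2015, 1, 1)
)
terminated = [env['EnvironmentName']
              for env in response['Environments']
              if env['Status'] == 'Terminated']
print(terminated)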
Example 2
def save_beanstalk_environment_deletion_order_to_state_bucket(
        cfg, aws, client, environment):
    cfg.get_logger().info(
        f"Looking for environment_deletion_order tag and saving it to bucket "
        f"{cfg.get_state_bucket_name(aws.get_region(), aws.get_account_id())}")
    for tag in client.list_tags_for_resource(
            ResourceArn=environment['environment_arn'])['ResourceTags']:
        if tag['Key'] == 'environment_deletion_order':
            try:
                cfg.get_logger().info(
                    f"Tag environment_deletion_order={tag['Value']} found")
                boto3.resource('s3'). \
                    Bucket(cfg.get_state_bucket_name(aws.get_region(), aws.get_account_id())). \
                    put_object(Key=environment['environment_name'],
                               Body=json.dumps(environment))
                cfg.get_logger().info(
                    f"Tag environment_deletion_order successfully written to "
                    f"s3://{cfg.get_state_bucket_name(aws.get_region(), aws.get_account_id())}/{environment['environment_name']}"
                )
            except Exception:
                cfg.get_logger().error(
                    f"Error saving beanstalk environment_deletion_order to bucket"
                )
                Notification.send_notification(
                    f"Account ID {aws.get_account_id()} aws-ass-stop:",
                    f"Error saving beanstalk environment_deletion_order to bucket"
                )
                raise

            break
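This is the save half of the state round trip; Example 19 shows the read half. A self-contained sketch of the round trip, with an illustrative bucket name:

import json

import boto3

s3 = boto3.resource('s3')
state = {"environment_name": "my-env", "environment_deletion_order": 2}

# Save: serialize the dict and store it under the environment name.
s3.Bucket('my-state-bucket').put_object(Key=state['environment_name'],
                                        Body=json.dumps(state))

# Restore: read the object body back and deserialize it.
restored = json.loads(
    s3.Object('my-state-bucket', state['environment_name'])
    .get()['Body'].read().decode('utf-8'))
assert restored == state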
Example 3
def backup_tagged_buckets(cfg, aws):
    backup_bucket_name = cfg.get_backup_bucket_name(aws.get_region(),
                                                    aws.get_account_id())
    aws.create_bucket(backup_bucket_name, True)

    s3_resource = boto3.resource('s3', region_name=aws.get_region())
    try:
        cfg.get_logger().info("Start getting S3-Buckets")
        for bucket in s3_resource.buckets.all():
            bucket_name = bucket.name
            cfg.get_logger().debug(
                f"Checking bucket {bucket_name} for backup-and-empty tags")
            if aws.s3_has_tag(
                    bucket_name,
                    cfg.full_ass_tag("ass:s3:backup-and-empty-bucket-on-stop"),
                    "yes"):
                cfg.get_logger().info(
                    f"Bucket {bucket_name} will be backed up")
                aws.backup_bucket(bucket_name, backup_bucket_name)
    except Exception as e:
        cfg.get_logger().error(
            "An error occurred while taking a backup of the buckets")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"An error occurred while taking a backup of the buckets\n"
            f"Details: \n Account-id: {aws.get_account_id()} \n Traceback: \n{e}"
        )
        raise
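aws.s3_has_tag is a project helper; a hedged sketch of what such a check might look like with plain boto3 (get_bucket_tagging raises a ClientError with code NoSuchTagSet for untagged buckets):

import boto3
from botocore.exceptions import ClientError

def s3_has_tag(bucket_name, key, value):
    # Hypothetical helper, not the project's actual implementation.
    s3_client = boto3.client('s3')
    try:
        tag_set = s3_client.get_bucket_tagging(Bucket=bucket_name)['TagSet']
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchTagSet':
            return False  # bucket carries no tags at all
        raise
    return any(t['Key'] == key and t['Value'] == value for t in tag_set)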
Example 4
def empty_lb_access_log_buckets(cfg, aws):
    lb_client = boto3.client('elbv2', region_name=aws.get_region())

    try:
        cfg.get_logger().info("Start getting LB ARNs")
        response = lb_client.describe_load_balancers()
        lb_list = response['LoadBalancers']
        cfg.get_logger().info("Getting LB ARNs finished successfully")
    except NoRegionError:
        cfg.get_logger().error("No region provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"No region provided!!!")
        raise
    except NoCredentialsError:
        cfg.get_logger().error("No credentials provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()}: aws-ass-stop:",
            f"No credentials provided!!!")
        raise

    for lb in lb_list:
        bucket = get_lb_access_log_bucket(cfg, lb_client,
                                          lb['LoadBalancerArn'], aws)
        disable_lb_access_logs(cfg, lb_client, lb['LoadBalancerArn'], aws)
        if bucket != '':
            cfg.get_logger().info(
                "Disabled LB logs, waiting 30 seconds before emptying the bucket."
            )
            time.sleep(30)
            cfg.get_logger().info("Nice powernap, ready to empty bucket now.")
            empty_bucket(cfg, bucket, aws)
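Note that describe_load_balancers returns paged results, so the single call above only sees the first page. A sketch using the built-in paginator to collect every ARN:

import boto3

lb_client = boto3.client('elbv2')
paginator = lb_client.get_paginator('describe_load_balancers')
lb_arns = [lb['LoadBalancerArn']
           for page in paginator.paginate()
           for lb in page['LoadBalancers']]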
Example 5
def create_deleted_tagged_beanstalk_environments(cfg, aws):
    if os.getenv('ASS_SKIP_ELASTICBEANSTALK', '0') == '1':
        cfg.get_logger().info(f"Skipping Elastic Beanstalk tasks because "
                              f"envvar ASS_SKIP_ELASTICBEANSTALK is set")
        return True

    cfg.get_logger().info(f"Start creation of deleted BeanStalk environments tagged with environment_deletion_order")

    result = get_deleted_beanstalk_environment_names_and_creation_order(cfg, aws)

    for environment in sorted(result, key=lambda k: k['environment_deletion_order'], reverse=True):
        try:
            aws.get_boto3_client('elasticbeanstalk').rebuild_environment(EnvironmentId=environment['environment_id'])
            cfg.get_logger().info(f"Async re-creation of terminated BeanStalk environment "
                                  f"{environment['environment_name']} ended successfully")
            cfg.get_logger().info(f"Please allow a few minutes for the environment to start.")
        except Exception:
            cfg.get_logger().error(f"Async re-creation of terminated BeanStalk environment "
                                   f"{environment['environment_name']} failed")
            Notification.send_notification(
                f"Account ID {aws.get_account_id()} aws-ass-start:",
                f"Async re-creation of terminated BeanStalk environment \n"
                f"{environment['environment_name']} failed")
            raise

    cfg.get_logger().info(f"Creation of terminated BeanStalk environments ended")
Example 6
def restore_s3_backup(cfg, aws):
    s3_client = aws.get_boto3_client('s3')

    try:
        cfg.get_logger().info("Start getting bucket names")
        response = s3_client.list_buckets()
        s3_list = response['Buckets']
        cfg.get_logger().debug(response)
        cfg.get_logger().debug(s3_list)
        cfg.get_logger().info("Getting bucket names finished successfully")
        for bucket in s3_list:
            bucket_name = bucket['Name']
            bucket_arn = f"arn:aws:s3:::{bucket_name}"
            cfg.get_logger().debug(f"Checking bucket {bucket_name} ({bucket_arn})")
            if aws.s3_has_tag(bucket_name, cfg.full_ass_tag("ass:s3:backup-and-empty-bucket-on-stop"), "yes"):
                cfg.get_logger().info(f"Bucket {bucket_name} will be restored")
                aws.restore_bucket(bucket_name, cfg.get_backup_bucket_name(aws.get_region(), aws.get_account_id()))
    except NoRegionError:
        cfg.get_logger().error("No region provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"No region provided!!!"
        )
        raise
    except NoCredentialsError:
        cfg.get_logger().error("No credentials provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"No credentials provided!!!"
        )
        raise
Example 7
def get_lb_access_log_bucket(cfg, lb_client, lb, aws):
    """
    Retrieve and return the name of the bucket used to store the loadbalancer access logs (if any).

    :param cfg:
    :param lb_client:
    :param lb:
    :param aws:
    :return bucket_name:
    """

    try:
        cfg.get_logger().info('Get access log bucket name')
        response = lb_client.describe_load_balancer_attributes(
            LoadBalancerArn=lb)
        bucket = list(
            filter(lambda attr: attr['Key'] == 'access_logs.s3.bucket',
                   response['Attributes']))
        if bucket:
            return bucket[0]['Value']
        return ''
    except Exception:
        cfg.get_logger().error(
            "An error occurred while determining the loadbalancer access log bucket name"
        )
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"An error occurred while determining the loadbalancer access log bucket name"
        )
        raise
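An equivalent lookup without filter/lambda is to flatten the attribute list into a dict first; a sketch, with a placeholder ARN:

import boto3

lb_arn = "<load-balancer-arn>"  # placeholder
lb_client = boto3.client('elbv2')
response = lb_client.describe_load_balancer_attributes(LoadBalancerArn=lb_arn)
attrs = {a['Key']: a['Value'] for a in response['Attributes']}
bucket_name = attrs.get('access_logs.s3.bucket', '')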
Example 8
def main():
    cfg = Config("aws-ass-start")
    aws = AWS(cfg.get_logger())

    try:
        cfg.get_logger().info(f"Region:       {aws.get_region()}")
        cfg.get_logger().info(f"AccountId:    {aws.get_account_id()}")
        cfg.get_logger().info(f"State Bucket: {cfg.get_state_bucket_name(aws.get_region(), aws.get_account_id())}")

        aws.create_bucket(cfg.get_template_bucket_name())

        start_tagged_rds_clusters_and_instances(cfg, aws)
        create_deleted_tagged_cloudformation_stacks(cfg, aws)
        create_deleted_tagged_beanstalk_environments(cfg, aws)
        restore_s3_backup(cfg, aws)
    except Exception as e:
        cfg.get_logger().error("An exception occurred")
        cfg.get_logger().error(e)
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-start:",
            f"An exception occured"
        )
    finally:
        if cfg.get_template_bucket_name():
            aws.remove_bucket(cfg.get_template_bucket_name())
        logging.shutdown()
Example 9
def disable_lb_access_logs(cfg, lb_client, lb, aws):
    try:
        cfg.get_logger().info("Disable access logs for loadbalancer %s" % lb)
        lb_client.modify_load_balancer_attributes(
            LoadBalancerArn=lb,
            Attributes=[
                {
                    'Key': 'access_logs.s3.enabled',
                    'Value': 'false'
                },
            ])
        cfg.get_logger().info(
            f"Access logs for loadbalancer {lb} successfully disabled")
    except Exception:
        cfg.get_logger().error(
            f"An error occurred while disabling the loadbalancer access logs")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"An error occurred while disabling the loadbalancer access logs")
        raise
Example 10
def terminate_beanstalk_environment(cfg, aws, client, environment):
    try:
        cfg.get_logger().info(
            "Start deletion of environment %s (deletion order is %i)" %
            (environment['environment_name'],
             environment['environment_deletion_order']))
        client.terminate_environment(
            EnvironmentName=environment['environment_name'])
    except Exception as e:
        cfg.get_logger().error(
            f"Environment deletion for {environment['environment_name']} has failed, check the logs."
        )
        cfg.get_logger().error(e)
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"Environment deletion for {environment['environment_name']} has failed, check the logs."
        )
        raise

    return True
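terminate_environment only starts the termination. If a caller needs to block until the environment is actually gone, boto3 ships an environment_terminated waiter; a sketch with an illustrative environment name:

import boto3

client = boto3.client('elasticbeanstalk')
client.terminate_environment(EnvironmentName='my-env')
client.get_waiter('environment_terminated').wait(EnvironmentNames=['my-env'])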
Example 11
def get_stack_names_and_deletion_order(cfg, aws, client):
    result = []

    try:
        cfg.get_logger().info('Getting all CloudFormation Stacks ...')
        response = client.describe_stacks()
        cfg.get_logger().info(
            'Successfully finished getting all CloudFormation stacks')
        stack_list = response['Stacks']
    except NoRegionError as e:
        cfg.get_logger().error("No region provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"No region provided.")
        raise e

    for stack in stack_list:
        if 'Tags' in stack:
            for tag in stack['Tags']:
                if (tag['Key'] == 'stack_deletion_order' or tag['Key']
                        == cfg.full_ass_tag('ass:cfn:deletion-order')) and int(
                            tag['Value']) > 0:
                    if not is_nested_stack(stack):
                        if 'Parameters' in stack:
                            parameters = stack['Parameters']
                        else:
                            parameters = []

                        this_stack = {
                            "stack_name": stack['StackName'],
                            "stack_id": stack['StackId'],
                            "stack_deletion_order": int(tag['Value']),
                            "stack_parameters": parameters
                        }
                        save_stack_parameters_to_state_bucket(
                            cfg, aws, this_stack)
                        result.append(this_stack)
                        # Stop after the first matching tag, so a stack carrying
                        # both the legacy and the namespaced tag is added once
                        break
    return result
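As with list_stacks elsewhere in this module, describe_stacks is paginated, so a single call can truncate the result in accounts with many stacks. A sketch with the built-in paginator:

import boto3

client = boto3.client('cloudformation')
stack_list = [stack
              for page in client.get_paginator('describe_stacks').paginate()
              for stack in page['Stacks']]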
Example 12
def save_stack_parameters_to_state_bucket(cfg, aws, stack):
    state_bucket_name = cfg.get_state_bucket_name(aws.get_region(),
                                                  aws.get_account_id())
    cfg.get_logger().info(
        f"Saving stack information for {stack['stack_name']} to bucket {state_bucket_name}"
    )

    try:
        cfg.get_logger().info(f"Writing stack parameters to bucket")
        boto3.resource('s3'). \
            Bucket(state_bucket_name). \
            put_object(Key=stack['stack_name'],
                       Body=json.dumps(stack))
        cfg.get_logger().info(
            f"Stack parameters successfully written to "
            f"s3://{state_bucket_name}/{stack['stack_name']}")
    except Exception:
        cfg.get_logger().error(
            "Error saving stack parameters to bucket")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop: ",
            "Error saving stack parameters to bucket")
        raise
Example 13
def empty_bucket(cfg, bucket, aws):
    try:
        cfg.get_logger().info(f"Connect to bucket {bucket}")
        s3 = boto3.resource('s3')
        bucket = s3.Bucket(bucket)
        cfg.get_logger().info(
            f"Start deletion of all objects in bucket {bucket}")
        bucket.objects.all().delete()
        cfg.get_logger().info(
            f"Finished deletion of all objects in bucket {bucket}")
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchBucket':
            cfg.get_logger().warning(
                f"Bucket ({bucket}) does not exist error when deleting objects, continuing"
            )
    except Exception as e:
        cfg.get_logger().error(
            f"Error occurred while deleting all objects in {bucket}")
        cfg.get_logger().debug(e)
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop: ",
            f"Error occurred while deleting all objects in {bucket}")
        raise
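One caveat: on a versioned bucket, objects.all().delete() removes only the current versions; old versions and delete markers stay behind. A sketch of the versioned variant:

import boto3

bucket = boto3.resource('s3').Bucket('my-bucket')  # illustrative name
bucket.object_versions.all().delete()  # also removes old versions and delete markers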
Example 14
def delete_stack(cfg, client, stack, aws):
    waiter = client.get_waiter('stack_delete_complete')

    try:
        cfg.get_logger().info(
            "Start deletion of stack %s (deletion order is %i)" %
            (stack['stack_name'], stack['stack_deletion_order']))
        client.delete_stack(StackName=stack['stack_name'])
        waiter.wait(StackName=stack['stack_name'])
    except WaiterError as e:
        cfg.get_logger().error(
            f"Stack deletion for {stack['stack_name']} has failed, check the CloudFormation logs."
        )
        cfg.get_logger().error(e)
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"Stack deletion for {stack['stack_name']} has failed, check the CloudFormation logs."
        )
        raise

    return True
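The stack_delete_complete waiter polls on a fixed schedule (30-second delay and 120 attempts by default, so roughly an hour). For stacks that take longer to delete, WaiterConfig can stretch it; a sketch:

import boto3

client = boto3.client('cloudformation')
waiter = client.get_waiter('stack_delete_complete')
waiter.wait(StackName='my-stack',  # illustrative name
            WaiterConfig={'Delay': 30, 'MaxAttempts': 240})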
Example 15
def get_beanstalk_env_names_and_deletion_order(cfg, aws, client):
    result = []

    try:
        cfg.get_logger().info('Getting all BeanStalk environments ...')
        response = client.describe_environments()
        cfg.get_logger().info(
            'Successfully finished getting all BeanStalk environments')
        env_list = response['Environments']
    except NoRegionError as e:
        cfg.get_logger().error("No region provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"No region provided.")
        raise e

    for environment in env_list:
        try:
            for tag in client.list_tags_for_resource(
                    ResourceArn=environment['EnvironmentArn'])['ResourceTags']:
                if tag['Key'] == 'environment_deletion_order' and int(
                        tag['Value']) > 0:
                    result.append({
                        "environment_name": environment['EnvironmentName'],
                        "environment_id": environment['EnvironmentId'],
                        "environment_arn": environment['EnvironmentArn'],
                        "environment_deletion_order": int(tag['Value'])
                    })
        except ClientError:
            cfg.get_logger().error(
                f"Resource {environment['EnvironmentArn']} not found, continuing."
            )
    return result
Example 16
def empty_tagged_s3_buckets(cfg, aws):
    s3client = boto3.client('s3', region_name=aws.get_region())
    try:
        cfg.get_logger().info("Start getting bucket names")
        response = s3client.list_buckets()
        s3_list = response['Buckets']
        cfg.get_logger().debug(response)
        cfg.get_logger().debug(s3_list)
        cfg.get_logger().info("Getting bucket names finished successfully")
    except NoRegionError:
        cfg.get_logger().error("No region provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"No region provided!!!")
        raise
    except NoCredentialsError:
        cfg.get_logger().error("No credentials provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"No credentials provided!!!")
        raise
    except Exception as e:
        cfg.get_logger().error("An error occurred in empty_tagged_s3_buckets!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"An error occurred while deleting content of the S3 buckets\n"
            f"Details: \n Account-id: {aws.get_account_id()} \n Traceback: \n{e}"
        )
        raise

    for bucket in s3_list:
        bucket_name = bucket['Name']
        bucket_arn = f"arn:aws:s3:::{bucket_name}"
        cfg.get_logger().debug(f"Checking bucket {bucket_name} ({bucket_arn})")
        if (aws.s3_has_tag(bucket_name,
                           cfg.full_ass_tag("ass:s3:clean-bucket-on-stop"),
                           "yes")
                or aws.s3_has_tag(
                    bucket_name,
                    cfg.full_ass_tag("ass:s3:backup-and-empty-bucket-on-stop"),
                    "yes")):
            cfg.get_logger().info(f"Bucket {bucket_name} will be cleaned")
            aws.empty_bucket(bucket)
Example 17
def get_stack_names_and_creation_order(cfg, aws):
    stack_list = []
    result = []
    most_recent_only_dict = dict()
    root_stacks_only_dict = dict()
    client = aws.get_boto3_client('cloudformation')

    try:
        cfg.get_logger().info(f"Getting all CloudFormation Stacks ...")
        response = client.list_stacks(StackStatusFilter=['DELETE_COMPLETE'])
        cfg.get_logger().info(f"Successfully finished getting all CloudFormation templates")
        stack_list.extend(response['StackSummaries'])
        while 'NextToken' in response:
            cfg.get_logger().info("Sleeping a second between calls to list_stacks to avoid rate errors")
            time.sleep(1)
            response = client.list_stacks(StackStatusFilter=['DELETE_COMPLETE'], NextToken=response['NextToken'])
            stack_list.extend(response['StackSummaries'])

        cfg.get_logger().info(f"Retrieve the most recently deleted stacks per stack name")
        for stack in stack_list:
            stack_name = stack['StackName']
            if stack_name in most_recent_only_dict:
                if stack['DeletionTime'] > most_recent_only_dict[stack_name]['DeletionTime']:
                    most_recent_only_dict[stack_name] = stack
            else:
                most_recent_only_dict[stack_name] = stack
        cfg.get_logger().info(f"{len(most_recent_only_dict)} stacks in most recent only stack dict")

        cfg.get_logger().info(f"Remove nested stack from remaining stack list")
        for stack in most_recent_only_dict.keys():
            if not is_nested_stack(cfg.get_logger(), most_recent_only_dict.keys(), stack):
                root_stacks_only_dict[stack] = most_recent_only_dict[stack]
        cfg.get_logger().info(f"{len(root_stacks_only_dict)} stacks in root only stack dict")

        cfg.get_logger().info(f"Filter remaining stacks on existence of the stack_deletion_order tag")
        for stack_name in root_stacks_only_dict:
            response = client.describe_stacks(StackName=root_stacks_only_dict[stack_name]['StackId'])
            stack = response['Stacks'][0]

            if 'Tags' in stack:
                for tag in stack['Tags']:
                    if tag['Key'] == 'stack_deletion_order' and int(tag['Value']) > 0:
                        result.append({"stack_name": stack['StackName'],
                                       "stack_id": stack['StackId'],
                                       "stack_deletion_order": int(tag['Value']),
                                       "stack_deletion_time": stack['DeletionTime'],
                                       "stack_tags": stack['Tags']
                                       })
                        break

    except NoRegionError:
        cfg.get_logger().error("No AWS Region provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-start:",
            "No AWS Region provided!!!"
        )
        raise
    except ClientError as e:
        cfg.get_logger().error(e.response['Error']['Code'])
        cfg.get_logger().error(e.response['Error']['Message'])
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-start:",
            f"{e.response['Error']['Message']}"
        )
        raise

    return result
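The manual NextToken loop (and the one-second sleep) can be replaced by the built-in paginator, which walks every page transparently while botocore applies its own retry/backoff on throttling; a sketch:

import boto3

client = boto3.client('cloudformation')
paginator = client.get_paginator('list_stacks')
stack_list = [summary
              for page in paginator.paginate(StackStatusFilter=['DELETE_COMPLETE'])
              for summary in page['StackSummaries']]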
Example 18
    def start_rds(rds_type, main_key, identifier_key, arn_key, status_key):

        rds_client = aws.get_boto3_client('rds')

        cfg.get_logger().info(f"Get list of all RDS {rds_type}s")
        try:
            if rds_type == 'instance':
                response = rds_client.describe_db_instances()
            elif rds_type == 'cluster':
                response = rds_client.describe_db_clusters()
            else:
                raise Exception('rds_type must be one of instance or cluster')

            for item in response[main_key]:
                identifier = item[identifier_key]
                arn = item[arn_key]
                status = item[status_key]

                if (aws.resource_has_tag(rds_client, arn, 'stop_or_start_with_cfn_stacks', 'yes') or
                        aws.resource_has_tag(rds_client, arn, cfg.full_ass_tag('ass:rds:include'), 'yes')):
                    cfg.get_logger().info(f"RDS {rds_type} {arn} is tagged with {cfg.full_ass_tag('ass:rds:include')} "
                                          f"and tag value is yes")
                    cfg.get_logger().info(f"Starting RDS {rds_type} {arn}")
                    if status != 'stopped':
                        cfg.get_logger().info(f"RDS {rds_type} {identifier} in state "
                                              f"{status} (!= stopped): Skipping start")
                    elif rds_type == 'instance' and 'DBClusterIdentifier' in item:
                        # Skip instances that are part of a RDS Cluster, they will be processed
                        # in the DBCluster part, when rds_type is 'cluster'
                        cfg.get_logger().info("RDS %s %s is part of RDS Cluster %s: Skipping start".format(
                            rds_type, item['DBInstanceIdentifier'], item['DBClusterIdentifier']
                        ))
                    else:
                        if rds_type == 'instance':
                            rds_client.start_db_instance(DBInstanceIdentifier=item['DBInstanceIdentifier'])
                        elif rds_type == 'cluster':
                            rds_client.start_db_cluster(DBClusterIdentifier=item['DBClusterIdentifier'])

                        if (aws.resource_has_tag(rds_client, arn, 'start_wait_until_available', 'yes') or
                                aws.resource_has_tag(
                                    rds_client, arn, cfg.full_ass_tag('ass:rds:start-wait-until-available'), 'yes')):
                            cfg.get_logger().info(f"RDS {identifier} is tagged with "
                                                  f"{cfg.full_ass_tag('ass:rds:start-wait-until-available')} "
                                                  f"and tag value is yes")
                            if rds_type == 'cluster':
                                cfg.get_logger().warning("No waiters in boto3 for Aurora Clusters (yet).")
                                cfg.get_logger().warning("Cluster start will continue in parallel.")
                            elif rds_type == 'instance':
                                cfg.get_logger().info("Waiting until instance {} is available".format(identifier))
                                rds_client.get_waiter('db_instance_available').wait(DBInstanceIdentifier=identifier)
                                cfg.get_logger().info("Instance {} is available now".format(identifier))
                            else:
                                raise ValueError('rds_type must be one of instance or cluster')

                        else:
                            cfg.get_logger().info(f"Starting RDS {rds_type} {arn} successfully triggered")
                else:
                    cfg.get_logger().info("RDS {} {} is not tagged with {} or tag value is not yes".format(
                        rds_type, arn, 'stop_or_start_with_cfn_stacks'))

        except NoRegionError:
            cfg.get_logger().error("No region provided!!!")
            Notification.send_notification(
                f"Account ID {aws.get_account_id()} aws-ass-start:",
                f"No region provided."
            )
            raise
        except NoCredentialsError:
            cfg.get_logger().error("No credentials provided!!!")
            Notification.send_notification(
                f"Account ID {aws.get_account_id()} aws-ass-start:",
                f"No credentials provided."
            )
            raise

        cfg.get_logger().info(f"Finished getting list of all RDS {rds_type}s")
Example 19
def get_stack_template_and_create_template(cfg, aws, stack):
    waiter = aws.get_boto3_client('cloudformation').get_waiter('stack_create_complete')
    s3_client = aws.get_boto3_client('s3')
    state_bucket_name = cfg.get_state_bucket_name(aws.get_region(), aws.get_account_id())
    stack_dict = {}
    retries = 3

    try:
        # First check if stack with same name already exists
        if not aws.cfn_stack_exists(stack['stack_name']):
            # Get parameters from state_bucket_name
            try:
                cfg.get_logger().info(f"Get saved state for {stack['stack_name']} from S3 bucket {state_bucket_name}")
                stack_dict = json.loads(
                    boto3.resource('s3').
                        Object(state_bucket_name, stack['stack_name']).
                        get()['Body'].
                        read().
                        decode('utf-8')
                )
                cfg.get_logger().info("Saved data is: %s " % stack_dict)
            except Exception as e:
                cfg.get_logger().debug(e)
                cfg.get_logger().warning("An error occurred retrieving stack information from the S3 state bucket")
                cfg.get_logger().warning("Continuing without restoring data from S3")
                Notification.send_notification(
                    f"Account ID {aws.get_account_id()} aws-ass-start:",
                    f"An error occurred retrieving stack information from the S3 state bucket"
                )
                stack_dict['stack_parameters'] = []

            cfg.get_logger().info("Get template string for template %s" % stack['stack_name'])
            response = aws.get_boto3_client('cloudformation').get_template(
                StackName=stack['stack_id'], TemplateStage='Processed'
            )
            cfg.get_logger().info("Copy the template to the template bucket %s" % cfg.get_template_bucket_name())
            s3_client.put_object(
                Bucket=cfg.get_template_bucket_name(),
                Body=response['TemplateBody'],
                Key=stack['stack_name'],
                ServerSideEncryption='AES256'
            )
            template_url = 'https://s3.amazonaws.com/' + cfg.get_template_bucket_name() + '/' + stack['stack_name']

            for counter in range(0, retries):
                cfg.get_logger().info("Create the CloudFormation stack from the template of the deleted stack")
                aws.get_boto3_client('cloudformation').create_stack(
                    StackName=stack['stack_name'],
                    TemplateURL=template_url,
                    Parameters=stack_dict['stack_parameters'],
                    Capabilities=['CAPABILITY_NAMED_IAM'],
                    Tags=stack['stack_tags']
                )

                cfg.get_logger().info(f"Wait for stack creation to finish, iteration {counter + 1} out of {retries}")
                try:
                    waiter.wait(StackName=stack['stack_name'])
                    cfg.get_logger().info("Stack creation finished in  iteration %i out of %i" % (counter + 1, retries))
                    # Leave the loop upon success
                    break
                except botocore.exceptions.WaiterError as e:
                    if counter == retries - 1:
                        cfg.get_logger().error(
                            f"Stack re-creation for {stack['stack_name']} has failed, check the CloudFormation logs."
                        )
                        cfg.get_logger().error(e)
                        Notification.send_notification(
                            f"Account ID {aws.get_account_id()} aws-ass-start:",
                            f"Stack re-creation for {stack['stack_name']} has failed, check the CloudFormation logs."
                        )
                        raise
                    else:
                        cfg.get_logger().warning("Stack creation failed, retrying after deletion ...")
                        cfg.get_logger().info("Start deletion of stack %s" % stack['stack_name'])
                        try:
                            aws.get_boto3_client('cloudformation').delete_stack(StackName=stack['stack_name'])
                            aws.get_boto3_client('cloudformation').get_waiter('stack_delete_complete') \
                                .wait(StackName=stack['stack_name'])
                            cfg.get_logger().info("Deletion of stack %s was successful" % stack['stack_name'])
                        except Exception:
                            cfg.get_logger().error("An error occurred while deleting stack {stack['stack_name']}")
                            cfg.get_logger().error("No use to retry when stack already exists (in a failed state).")
                            Notification.send_notification(
                                f"Account ID {aws.get_account_id()} aws-ass-start:",
                                f"An error occurred while deleting stack {stack['stack_name']}."
                            )
                            raise

        else:
            cfg.get_logger().warning("Skipping creation of stack %s because stack with same name already exists" %
                                     stack['stack_name'])

    except ClientError as e:
        if e.response['Error']['Code'] == 'AlreadyExistsException':
            cfg.get_logger().warning(
                "The stack already exists and probably is in a ROLLBACK_COMPLETE state and needs manual removal")
        raise
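For templates below CloudFormation's 51,200-byte TemplateBody limit, the intermediate template bucket could be skipped entirely. A sketch with illustrative stack names, keeping in mind that get_template returns JSON templates already deserialized into a dict:

import json

import boto3

cfn = boto3.client('cloudformation')
body = cfn.get_template(StackName='my-stack',
                        TemplateStage='Processed')['TemplateBody']
if not isinstance(body, str):
    body = json.dumps(body)  # re-serialize deserialized JSON templates
cfn.create_stack(StackName='my-stack-copy',
                 TemplateBody=body,
                 Capabilities=['CAPABILITY_NAMED_IAM'])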
Example 20
def empty_cloudfront_access_log_buckets(cfg, aws):
    s3_client = boto3.client('s3', region_name=aws.get_region())
    cloudfront_client = boto3.client('cloudfront',
                                     region_name=aws.get_region())

    try:
        distribution_list = cloudfront_client.list_distributions()['DistributionList']
        if 'Items' in distribution_list:
            cfg.get_logger().info("Cloudfront distribution found")
            cf_distribution_items = distribution_list['Items']

            for distro in cf_distribution_items:
                if (int(
                        aws.resource_has_tag(cloudfront_client, distro['ARN'],
                                             'stack_deletion_order')) > 0
                        or int(
                            aws.resource_has_tag(
                                cloudfront_client, distro['ARN'],
                                cfg.full_ass_tag('ass:cfn:deletion-order'))) >
                        0):
                    # Distro Id
                    distrib_id = distro['Id']
                    distrib_info = cloudfront_client.get_distribution(
                        Id=distrib_id)
                    # Distro etag (required for updating cloudfront distro)
                    distrib_etag = distrib_info['ResponseMetadata'][
                        'HTTPHeaders']['etag']
                    distrib_config = distrib_info['Distribution'][
                        'DistributionConfig']
                    # Getting the bucket name
                    cfg.get_logger().info("Looking for Cloudfront S3 Bucket")
                    distrib_log_bucket = distrib_info['Distribution'][
                        'DistributionConfig']['Logging']['Bucket']
                    distrib_log_bucket = str(distrib_log_bucket)

                    if ".s3.amazonaws.com" in distrib_log_bucket:
                        bucket = s3_client.list_objects_v2(
                            Bucket=distrib_log_bucket[:-17])
                        cfg.get_logger().info(
                            f"Found the Bucket {bucket['Name']}")

                        if distrib_config['Logging']['Enabled'] is True:
                            cfg.get_logger().info(
                                f"Disable Cloudfront logging ID: {distrib_id}")
                            distrib_config['Logging']['Enabled'] = False
                            response = cloudfront_client.update_distribution(
                                Id=distrib_id,
                                DistributionConfig=distrib_config,
                                IfMatch=distrib_etag)
                            if response['ResponseMetadata'][
                                    'HTTPStatusCode'] == 200:
                                if 'Contents' in bucket:
                                    aws.empty_bucket(bucket)
                                else:
                                    cfg.get_logger().info(
                                        f"Bucket already empty: {bucket['Name']}"
                                    )
                            else:
                                cfg.get_logger().warning(
                                    f"Error during disabling cloudfront logging ID: {distrib_id}"
                                )
                    else:
                        cfg.get_logger().info(
                            f"Cloudfront logging disabled ID: {distrib_id}")
                        cfg.get_logger().info(
                            "No Cloudfront logging bucket found!")
        else:
            cfg.get_logger().info("No Cloudfront distribution")
    except NoRegionError:
        cfg.get_logger().error("No region provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"No region provided!!!")
        raise
    except NoCredentialsError:
        cfg.get_logger().error("No credentials provided!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"No credentials provided!!!")
        raise
    except cloudfront_client.exceptions.IllegalUpdate:
        cfg.get_logger().error("Error during cloudfront update!!!")
        Notification.send_notification(
            f"Account ID {aws.get_account_id()} aws-ass-stop:",
            f"Illegal Update while disabling Cloudfront logging!!!")
        raise
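A smaller point on the ETag handling: get_distribution returns the ETag as a top-level response field, so digging through the raw HTTP headers is not necessary. A sketch with a placeholder distribution id:

import boto3

cloudfront = boto3.client('cloudfront')
distrib_info = cloudfront.get_distribution(Id='EDFDVBD6EXAMPLE')  # placeholder id
distrib_etag = distrib_info['ETag']
distrib_config = distrib_info['Distribution']['DistributionConfig']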
Example 21
    def stop_rds(rds_type, main_key, identifier_key, arn_key, status_key):
        rds_client = boto3.client('rds', region_name=aws.get_region())

        cfg.get_logger().info(f"Get list of all RDS {rds_type}s")
        try:
            if rds_type == 'instance':
                response = rds_client.describe_db_instances()
            elif rds_type == 'cluster':
                response = rds_client.describe_db_clusters()
            else:
                raise Exception(
                    'rds_type should be one of instance or cluster')

            for item in response[main_key]:
                identifier = item[identifier_key]
                arn = item[arn_key]
                status = item[status_key]

                if (aws.resource_has_tag(
                        rds_client, arn, 'stop_or_start_with_cfn_stacks',
                        'yes') or aws.resource_has_tag(
                            rds_client, arn,
                            cfg.full_ass_tag('ass:rds:include'), 'yes')):
                    cfg.get_logger().info(
                        f"RDS {rds_type} {arn} is tagged with {cfg.full_ass_tag('ass:rds:include')} "
                        f"and tag value is yes")
                    cfg.get_logger().info(f"Stopping RDS {rds_type} {arn}")
                    if status != 'available':
                        cfg.get_logger().info(
                            f"RDS {rds_type} {identifier} is in state {status} "
                            f"( != available ): Skipping stop")
                    elif rds_type == 'instance' and 'DBClusterIdentifier' in item:
                        # Skip instances that are part of a RDS Cluster, they will be processed
                        # in the DBCluster part, when rds_type is 'cluster'
                        cfg.get_logger().info(
                            f"RDS {rds_type} {item['DBInstanceIdentifier']} is part of RDS Cluster "
                            f"{item['DBClusterIdentifier']}: Skipping stop")
                    else:
                        if rds_type == 'instance':
                            rds_client.stop_db_instance(
                                DBInstanceIdentifier=identifier)
                        elif rds_type == 'cluster':
                            rds_client.stop_db_cluster(
                                DBClusterIdentifier=identifier)
                        else:
                            raise Exception(
                                'rds_type should be one of instance or cluster')

                        cfg.get_logger().info(
                            f"Stopping RDS {rds_type} {arn} successfully triggered"
                        )
                else:
                    cfg.get_logger().info(
                        f"RDS {rds_type} {arn} is not tagged with "
                        f"{cfg.full_ass_tag('ass:rds:include')}, or tag value is not yes"
                    )

        except NoRegionError:
            cfg.get_logger().error("No region provided!!!")
            Notification.send_notification(
                f"Account ID {aws.get_account_id()} aws-ass-stop:",
                f"No region provided!!!")
            raise
        except NoCredentialsError:
            cfg.get_logger().error("No credentials provided!!!")
            Notification.send_notification(
                f"Account ID {aws.get_account_id()} aws-ass-stop:",
                f"No credentials provided!!!")
            raise

        cfg.get_logger().info(f"Finished getting list of all RDS {rds_type}s")