Example #1
    def _bucket_policy_changed(self, region, bucket):
        client = boto3.client('s3', region_name=region)
        current_policy = client.get_bucket_policy(Bucket=bucket)['Policy']
        shelvery_bucket_policy = AwsHelper.get_shelvery_bucket_policy(
            self.account_id, RuntimeConfig.get_share_with_accounts(self),
            bucket)
        return current_policy != shelvery_bucket_policy
Example #2
    def do_copy_backup(self, map_args={}, **kwargs):
        """
        Copy backup to another region, actual implementation
        """

        kwargs.update(map_args)

        # if the backup is not yet available, exit and rely on a recursive
        # lambda call to retry the copy; in non-lambda mode this should never happen
        if not self.wait_backup_available(backup_region=kwargs['OriginRegion'],
                                          backup_id=kwargs['BackupId'],
                                          lambda_method='do_copy_backup',
                                          lambda_args=kwargs):
            return

        self.logger.info(
            f"Do copy backup {kwargs['BackupId']} ({kwargs['OriginRegion']}) to region {kwargs['Region']}"
        )

        # copy backup
        src_region = kwargs['OriginRegion']
        dst_region = kwargs['Region']
        regional_backup_id = self.copy_backup_to_region(
            kwargs['BackupId'], dst_region)

        # create tags on backup copy
        original_backup_id = kwargs['BackupId']
        original_backup = self.get_backup_resource(src_region,
                                                   original_backup_id)
        resource_copy = BackupResource(None, None, True)
        resource_copy.backup_id = regional_backup_id
        resource_copy.region = kwargs['Region']
        resource_copy.tags = original_backup.tags.copy()

        # add metadata to dr copy and original
        dr_copies_tag_key = f"{RuntimeConfig.get_tag_prefix()}:dr_copies"
        resource_copy.tags[
            f"{RuntimeConfig.get_tag_prefix()}:region"] = dst_region
        resource_copy.tags[
            f"{RuntimeConfig.get_tag_prefix()}:dr_copy"] = 'true'
        resource_copy.tags[
            f"{RuntimeConfig.get_tag_prefix()}:dr_source_backup"] = f"{src_region}:{original_backup_id}"

        if dr_copies_tag_key not in original_backup.tags:
            original_backup.tags[dr_copies_tag_key] = ''
        original_backup.tags[dr_copies_tag_key] = original_backup.tags[
            dr_copies_tag_key] + f"{dst_region}:{regional_backup_id} "

        self.tag_backup_resource(resource_copy)
        self.tag_backup_resource(original_backup)

        # share the backup copy with the same accounts
        for shared_account_id in RuntimeConfig.get_share_with_accounts(self):
            backup_resource = BackupResource(None, None, True)
            backup_resource.backup_id = regional_backup_id
            backup_resource.region = kwargs['Region']
            self.share_backup(backup_resource, shared_account_id)
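The dr_copies tag written above accumulates space-separated "<region>:<backup_id>" entries. A minimal sketch of reading that tag back, assuming only the format produced by the code above (parse_dr_copies is a hypothetical helper, not part of Shelvery):

    def parse_dr_copies(tag_value: str) -> list:
        """Split a dr_copies value like 'us-west-2:ami-123 eu-central-1:ami-456 '
        into (region, backup_id) pairs."""
        return [tuple(entry.split(':', 1)) for entry in tag_value.split()]

    # parse_dr_copies('us-west-2:ami-123 ') == [('us-west-2', 'ami-123')]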
Example #3
    def create_data_buckets(self):
        regions = [self.region]
        regions.extend(RuntimeConfig.get_dr_regions(None, self))
        for region in regions:
            bucket = self._get_data_bucket(region)
            AwsHelper.boto3_client('s3', region_name=region).put_bucket_policy(
                Bucket=bucket.name,
                Policy=AwsHelper.get_shelvery_bucket_policy(
                    self.account_id,
                    RuntimeConfig.get_share_with_accounts(self), bucket.name))
Example #4
    def create_backups(self) -> List[BackupResource]:
        """Create backups from all collected entities marked for backup by using specific tag"""

        # collect resources to be backed up
        resource_type = self.get_resource_type()
        self.logger.info(
            f"Collecting entities of type {resource_type} tagged with "
            f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")
        resources = self.get_entities_to_backup(
            f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")

        # allows user to select single entity to be backed up
        if RuntimeConfig.get_shelvery_select_entity(self) is not None:
            entity_id = RuntimeConfig.get_shelvery_select_entity(self)
            self.logger.info(f"Creating backups only for entity {entity_id}")
            resources = list(
                filter(lambda x: x.resource_id == entity_id, resources))

        self.logger.info(
            f"{len(resources)} resources of type {resource_type} collected for backup"
        )

        # create and collect backups
        backup_resources = []
        for r in resources:
            backup_resource = BackupResource(
                tag_prefix=RuntimeConfig.get_tag_prefix(), entity_resource=r)
            self.logger.info(
                f"Processing {resource_type} with id {r.resource_id}")
            self.logger.info(f"Creating backup {backup_resource.name}")
            try:
                self.backup_resource(backup_resource)
                self.tag_backup_resource(backup_resource)
                self.logger.info(
                    f"Created backup of type {resource_type} for entity {backup_resource.entity_id} "
                    f"with id {backup_resource.backup_id}")
                backup_resources.append(backup_resource)
            except Exception as e:
                self.logger.exception(
                    f"Failed to create backup {backup_resource.name}:{e}")

        # copy backups to the disaster recovery regions
        for br in backup_resources:
            self.copy_backup(
                br, RuntimeConfig.get_dr_regions(br.entity_resource.tags,
                                                 self))

        for aws_account_id in RuntimeConfig.get_share_with_accounts(self):
            for br in backup_resources:
                self.share_backup(br, aws_account_id)

        return backup_resources
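get_entities_to_backup collects resources carrying the "<tag_prefix>:<BACKUP_RESOURCE_TAG>" tag. A minimal sketch of marking an EC2 instance for backup, assuming the default "shelvery" prefix and the "create_backup" marker from Shelvery's documentation (the instance id is hypothetical):

    import boto3

    ec2 = boto3.client('ec2', region_name='us-east-1')
    ec2.create_tags(
        Resources=['i-0123456789abcdef0'],  # hypothetical instance id
        Tags=[{'Key': 'shelvery:create_backup', 'Value': 'true'}])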
Example #5
    def _bucket_policy_changed(self, region, bucket):
        client = boto3.client('s3', region_name=region)

        try:
            current_policy = client.get_bucket_policy(Bucket=bucket)['Policy']
        except ClientError as error:
            if error.response["Error"]["Code"] == "NoSuchBucketPolicy":
                current_policy = None
            else:
                raise

        shelvery_bucket_policy = AwsHelper.get_shelvery_bucket_policy(
            self.account_id, RuntimeConfig.get_share_with_accounts(self),
            bucket)
        return current_policy != shelvery_bucket_policy
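Unlike Example #1, this version treats a missing bucket policy as None instead of letting the call raise. A standalone sketch of the same pattern, assuming only the boto3 S3 API (get_policy_or_none is a hypothetical helper):

    import boto3
    from botocore.exceptions import ClientError

    def get_policy_or_none(bucket_name: str, region: str):
        """Return the bucket policy document, or None if no policy is attached."""
        client = boto3.client('s3', region_name=region)
        try:
            return client.get_bucket_policy(Bucket=bucket_name)['Policy']
        except ClientError as error:
            if error.response['Error']['Code'] == 'NoSuchBucketPolicy':
                return None
            raise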
Example #6
    def _get_data_bucket(self, region=None):
        bucket_name = self.get_local_bucket_name(region)
        if region is None:
            loc_constraint = boto3.session.Session().region_name
        else:
            loc_constraint = region

        s3 = boto3.resource('s3')
        try:
            AwsHelper.boto3_client('s3').head_bucket(Bucket=bucket_name)
            bucket = s3.Bucket(bucket_name)
            AwsHelper.boto3_client('s3').put_public_access_block(
                Bucket=bucket_name,
                PublicAccessBlockConfiguration={
                    'BlockPublicAcls': True,
                    'IgnorePublicAcls': True,
                    'BlockPublicPolicy': True,
                    'RestrictPublicBuckets': True
                },
            )

        except ClientError as e:
            if e.response['Error']['Code'] == '404':
                client_region = loc_constraint
                s3client = AwsHelper.boto3_client('s3',
                                                  region_name=client_region)
                if loc_constraint == "us-east-1":
                    bucket = s3client.create_bucket(Bucket=bucket_name)
                else:
                    if loc_constraint == "eu-west-1":
                        loc_constraint = "EU"

                    bucket = s3client.create_bucket(Bucket=bucket_name,
                                                    CreateBucketConfiguration={
                                                        'LocationConstraint':
                                                        loc_constraint
                                                    })

                # store the bucket policy, so the bucket can be accessed from other accounts
                # that backups are shared with
                s3client.put_bucket_policy(
                    Bucket=bucket_name,
                    Policy=AwsHelper.get_shelvery_bucket_policy(
                        self.account_id,
                        RuntimeConfig.get_share_with_accounts(self),
                        bucket_name))
                return s3.Bucket(bucket_name)
            else:
                raise e
        return bucket
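The branching around loc_constraint exists because S3 rejects a LocationConstraint of "us-east-1"; the configuration must be omitted in that region ("EU" is the legacy alias S3 accepts for eu-west-1). A minimal standalone sketch of that rule (create_bucket_in_region is a hypothetical helper, not part of Shelvery):

    import boto3

    def create_bucket_in_region(bucket_name: str, region: str):
        """Create an S3 bucket, omitting LocationConstraint for us-east-1."""
        s3 = boto3.client('s3', region_name=region)
        if region == 'us-east-1':
            return s3.create_bucket(Bucket=bucket_name)
        return s3.create_bucket(
            Bucket=bucket_name,
            CreateBucketConfiguration={'LocationConstraint': region})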
Example #7
    def clean_backups(self):
        # collect backups
        existing_backups = self.get_existing_backups(RuntimeConfig.get_tag_prefix())

        # allows user to select single entity backups to be cleaned
        if RuntimeConfig.get_shelvery_select_entity(self) is not None:
            entity_id = RuntimeConfig.get_shelvery_select_entity(self)
            self.logger.info(f"Checking only for backups of entity {entity_id}")
            existing_backups = list(
                filter(
                    lambda x: x.entity_id == entity_id,
                    existing_backups)
            )

        self.logger.info(f"Collected {len(existing_backups)} backups to be checked for expiry date")
        self.logger.info(f"""Using following retention settings from runtime environment (resource overrides enabled):
                            Keeping last {RuntimeConfig.get_keep_daily(None, self)} daily backups
                            Keeping last {RuntimeConfig.get_keep_weekly(None, self)} weekly backups
                            Keeping last {RuntimeConfig.get_keep_monthly(None, self)} monthly backups
                            Keeping last {RuntimeConfig.get_keep_yearly(None, self)} yearly backups""")

        # check backups for expire date, delete if necessary
        for backup in existing_backups:
            self.logger.info(f"Checking backup {backup.backup_id}")
            try:
                if backup.is_stale(self, RuntimeConfig.get_custom_retention_types(self)):
                    self.logger.info(
                        f"{backup.retention_type} backup {backup.name} has expired on {backup.expire_date}, cleaning up")
                    self.delete_backup(backup)
                    backup.date_deleted = datetime.utcnow()
                    self._archive_backup_metadata(backup, self._get_data_bucket(), RuntimeConfig.get_share_with_accounts(self))
                    self.snspublisher.notify({
                        'Operation': 'DeleteBackup',
                        'Status': 'OK',
                        'BackupType': self.get_engine_type(),
                        'BackupName': backup.name,
                    })
                else:
                    self.logger.info(f"{backup.retention_type} backup {backup.name} is valid "
                                     f"until {backup.expire_date}, keeping this backup")
            except Exception as e:
                self.snspublisher_error.notify({
                    'Operation': 'DeleteBackup',
                    'Status': 'ERROR',
                    'ExceptionInfo': e.__dict__,
                    'BackupType': self.get_engine_type(),
                    'BackupName': backup.name,
                })
                self.logger.exception(f"Error checking backup {backup.backup_id} for cleanup: {e}")
Example #8
    def create_data_buckets(self):
        regions = [self.region]
        regions.extend(RuntimeConfig.get_dr_regions(None, self))
        for region in regions:
            bucket = self._get_data_bucket(region)

            if self._bucket_policy_changed(region, bucket.name):
                policy = AwsHelper.get_shelvery_bucket_policy(
                    self.account_id,
                    RuntimeConfig.get_share_with_accounts(self), bucket.name)
                self.logger.info(
                    f"Bucket policy has changed, updating policy to {policy}")
                AwsHelper.boto3_client('s3',
                                       region_name=region).put_bucket_policy(
                                           Bucket=bucket.name, Policy=policy)
            else:
                self.logger.info(f"Bucket policy hasn't changed")
Example #9
    def create_backups(self):
        """Create backups from all collected entities marked for backup by using specific tag"""

        # collect resources to be backed up
        resource_type = self.get_resource_type()
        self.logger.info(
            f"Collecting entities of type {resource_type} tagged with "
            f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")
        resources = self.get_entities_to_backup(
            f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")
        self.logger.info(
            f"{len(resources)} resources of type {resource_type} collected for backup"
        )

        # create and collect backups
        backup_resources = []
        for r in resources:
            backup_resource = BackupResource(
                tag_prefix=RuntimeConfig.get_tag_prefix(), entity_resource=r)
            self.logger.info(
                f"Processing {resource_type} with id {r.resource_id}")
            self.logger.info(f"Creating backup {backup_resource.name}")
            self.backup_resource(backup_resource)
            self.tag_backup_resource(backup_resource)
            self.logger.info(
                f"Created backup of type {resource_type} for entity {backup_resource.entity_id} "
                f"with id {backup_resource.backup_id}")
            backup_resources.append(backup_resource)

        # copy backups to the disaster recovery regions
        for br in backup_resources:
            self.copy_backup(
                br, RuntimeConfig.get_dr_regions(br.entity_resource.tags,
                                                 self))

        for aws_account_id in RuntimeConfig.get_share_with_accounts(self):
            for br in backup_resources:
                self.share_backup(br, aws_account_id)
Example #10
    def do_copy_backup(self, map_args={}, **kwargs):
        """
        Copy backup to another region, actual implementation
        """

        kwargs.update(map_args)
        backup_id = kwargs['BackupId']
        origin_region = kwargs['OriginRegion']
        backup_resource = self.get_backup_resource(origin_region, backup_id)
        # if the backup is not yet available, exit and rely on a recursive
        # lambda call to retry the copy; in non-lambda mode this should never happen
        if RuntimeConfig.is_offload_queueing(self):
            if not self.is_backup_available(origin_region, backup_id):
                # re-queue the copy and exit rather than wait synchronously
                self.copy_backup(backup_resource,
                                 RuntimeConfig.get_dr_regions(backup_resource.entity_resource.tags, self))
                return
        else:
            if not self.wait_backup_available(backup_region=origin_region,
                                              backup_id=backup_id,
                                              lambda_method='do_copy_backup',
                                              lambda_args=kwargs):
                return

        self.logger.info(f"Do copy backup {kwargs['BackupId']} ({kwargs['OriginRegion']}) to region {kwargs['Region']}")

        # copy backup
        try:
            src_region = kwargs['OriginRegion']
            dst_region = kwargs['Region']
            regional_backup_id = self.copy_backup_to_region(kwargs['BackupId'], dst_region)

            # create tags on backup copy
            original_backup_id = kwargs['BackupId']
            original_backup = self.get_backup_resource(src_region, original_backup_id)
            resource_copy = BackupResource(None, None, True)
            resource_copy.backup_id = regional_backup_id
            resource_copy.region = kwargs['Region']
            resource_copy.tags = original_backup.tags.copy()

            # add metadata to dr copy and original
            dr_copies_tag_key = f"{RuntimeConfig.get_tag_prefix()}:dr_copies"
            resource_copy.tags[f"{RuntimeConfig.get_tag_prefix()}:region"] = dst_region
            resource_copy.tags[f"{RuntimeConfig.get_tag_prefix()}:dr_copy"] = 'true'
            resource_copy.tags[
                f"{RuntimeConfig.get_tag_prefix()}:dr_source_backup"] = f"{src_region}:{original_backup_id}"

            if dr_copies_tag_key not in original_backup.tags:
                original_backup.tags[dr_copies_tag_key] = ''
            original_backup.tags[dr_copies_tag_key] = original_backup.tags[
                                                          dr_copies_tag_key] + f"{dst_region}:{regional_backup_id} "

            self.tag_backup_resource(resource_copy)
            self.tag_backup_resource(original_backup)
            self.snspublisher.notify({
                'Operation': 'CopyBackupToRegion',
                'Status': 'OK',
                'DestinationRegion': kwargs['Region'],
                'BackupType': self.get_engine_type(),
                'BackupId': kwargs['BackupId'],
            })
            self.store_backup_data(resource_copy)
        except Exception as e:
            self.snspublisher_error.notify({
                'Operation': 'CopyBackupToRegion',
                'Status': 'ERROR',
                'ExceptionInfo': e.__dict__,
                'DestinationRegion': kwargs['Region'],
                'BackupType': self.get_engine_type(),
                'BackupId': kwargs['BackupId'],
            })
            self.logger.exception(f"Error copying backup {kwargs['BackupId']} to {dst_region}")

        # share the backup copy with the same accounts
        for shared_account_id in RuntimeConfig.get_share_with_accounts(self):
            backup_resource = BackupResource(None, None, True)
            backup_resource.backup_id = regional_backup_id
            backup_resource.region = kwargs['Region']
            try:
                self.share_backup(backup_resource, shared_account_id)
                self.snspublisher.notify({
                    'Operation': 'ShareRegionalBackupCopy',
                    'Status': 'OK',
                    'DestinationAccount': shared_account_id,
                    'DestinationRegion': kwargs['Region'],
                    'BackupType': self.get_engine_type(),
                    'BackupId': kwargs['BackupId'],
                })
            except Exception as e:
                self.snspublisher_error.notify({
                    'Operation': 'ShareRegionalBackupCopy',
                    'Status': 'ERROR',
                    'DestinationAccount': shared_account_id,
                    'DestinationRegion': kwargs['Region'],
                    'ExceptionInfo': e.__dict__,
                    'BackupType': self.get_engine_type(),
                    'BackupId': kwargs['BackupId'],
                })
                self.logger.exception(f"Error sharing copied backup {kwargs['BackupId']} to {dst_region}")
Example #11
    def create_backups(self) -> List[BackupResource]:
        """Create backups from all collected entities marked for backup by using specific tag"""

        # collect resources to be backed up
        resource_type = self.get_resource_type()
        self.logger.info(f"Collecting entities of type {resource_type} tagged with "
                         f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")
        resources = self.get_entities_to_backup(f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")

        # allows user to select single entity to be backed up
        if RuntimeConfig.get_shelvery_select_entity(self) is not None:
            entity_id = RuntimeConfig.get_shelvery_select_entity(self)
            self.logger.info(f"Creating backups only for entity {entity_id}")
            resources = list(
                filter(
                    lambda x: x.resource_id == entity_id,
                    resources)
            )

        self.logger.info(f"{len(resources)} resources of type {resource_type} collected for backup")

        # create and collect backups
        backup_resources = []
        current_retention_type = RuntimeConfig.get_current_retention_type(self)
        for r in resources:
            backup_resource = BackupResource(
                tag_prefix=RuntimeConfig.get_tag_prefix(),
                entity_resource=r,
                copy_resource_tags=RuntimeConfig.copy_resource_tags(self),
                exluded_resource_tag_keys=RuntimeConfig.get_exluded_resource_tag_keys(self)
            )
            # if retention is explicitly given by runtime environment
            if current_retention_type is not None:
                backup_resource.set_retention_type(current_retention_type)

            dr_regions = RuntimeConfig.get_dr_regions(backup_resource.entity_resource.tags, self)
            backup_resource.tags[f"{RuntimeConfig.get_tag_prefix()}:dr_regions"] = ','.join(dr_regions)
            self.logger.info(f"Processing {resource_type} with id {r.resource_id}")
            self.logger.info(f"Creating backup {backup_resource.name}")
            try:
                self.backup_resource(backup_resource)
                self.tag_backup_resource(backup_resource)
                self.logger.info(f"Created backup of type {resource_type} for entity {backup_resource.entity_id} "
                                 f"with id {backup_resource.backup_id}")
                backup_resources.append(backup_resource)
                self.store_backup_data(backup_resource)
                self.snspublisher.notify({
                    'Operation': 'CreateBackup',
                    'Status': 'OK',
                    'BackupType': self.get_engine_type(),
                    'BackupName': backup_resource.name,
                    'EntityId': backup_resource.entity_id
                })
            except Exception as e:
                self.snspublisher_error.notify({
                    'Operation': 'CreateBackup',
                    'Status': 'ERROR',
                    'ExceptionInfo': e.__dict__,
                    'BackupType': self.get_engine_type(),
                    'BackupName': backup_resource.name,
                    'EntityId': backup_resource.entity_id
                })
                self.logger.exception(f"Failed to create backup {backup_resource.name}:{e}")

        # copy backups to the disaster recovery regions
        for br in backup_resources:
            self.copy_backup(br, RuntimeConfig.get_dr_regions(br.entity_resource.tags, self))

        for aws_account_id in RuntimeConfig.get_share_with_accounts(self):
            for br in backup_resources:
                self.share_backup(br, aws_account_id)

        return backup_resources
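The single-entity filter in this and Example #4 is driven by RuntimeConfig.get_shelvery_select_entity. A sketch of restricting a run to one resource, assuming the shelvery_select_entity environment variable named in Shelvery's documentation (verify against your version; the resource id is hypothetical):

    import os

    os.environ['shelvery_select_entity'] = 'i-0123456789abcdef0'  # hypothetical id
    # Subsequent create_backups() / clean_backups() calls will now only
    # consider the resource with that id.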