Code Example #1
    def backup_from_latest_automated(self, backup_resource: BackupResource):
        rds_client = AwsHelper.boto3_client('rds',
                                            arn=self.role_arn,
                                            external_id=self.role_external_id)
        auto_snapshots = rds_client.describe_db_cluster_snapshots(
            DBClusterIdentifier=backup_resource.entity_id,
            SnapshotType='automated',
            # API always returns in date descending order, and we only need last one
            MaxRecords=20)
        auto_snapshots = sorted(auto_snapshots['DBClusterSnapshots'],
                                key=lambda k: k['SnapshotCreateTime'],
                                reverse=True)

        if len(auto_snapshots) == 0:
            self.logger.info(
                f"There is no latest automated backup for cluster {backup_resource.entity_id},"
                f" fallback to RDS_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
            )
            return self.backup_from_cluster(backup_resource)

        automated_snapshot_id = auto_snapshots[0]['DBClusterSnapshotIdentifier']
        response = rds_client.copy_db_cluster_snapshot(
            SourceDBClusterSnapshotIdentifier=automated_snapshot_id,
            TargetDBClusterSnapshotIdentifier=backup_resource.name,
            CopyTags=False)
        backup_resource.resource_properties = response['DBClusterSnapshot']
        backup_resource.backup_id = backup_resource.name
        return backup_resource
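
For reference, the newest-automated-snapshot lookup above can be sketched with plain boto3, dropping shelvery's AwsHelper cross-account helper; the call and response shape are the same as in the example (a minimal sketch, not shelvery code):

    import boto3

    def latest_automated_cluster_snapshot(cluster_id):
        """Return the newest automated snapshot for an Aurora cluster, or None."""
        rds = boto3.client('rds')
        page = rds.describe_db_cluster_snapshots(
            DBClusterIdentifier=cluster_id,
            SnapshotType='automated',
            MaxRecords=20)
        snapshots = sorted(page['DBClusterSnapshots'],
                           key=lambda s: s['SnapshotCreateTime'],
                           reverse=True)
        return snapshots[0] if snapshots else None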
Code Example #2
    def get_existing_backups(self,
                             backup_tag_prefix: str) -> List[BackupResource]:
        """
		Collect existing backups on system of given type, marked with given tag
		"""
        local_region = boto3.session.Session().region_name
        marker_tag = f"{backup_tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}"
        response = self.redshift_client.describe_cluster_snapshots(
            SnapshotType='manual',
            TagKeys=[marker_tag],
            TagValues=SHELVERY_DO_BACKUP_TAGS)

        snapshots = response['Snapshots']
        backups = []

        for snap in snapshots:
            cluster_id = snap['ClusterIdentifier']
            d_tags = BackupResource.dict_from_boto3_tags(snap['Tags'])
            create_time = snap['ClusterCreateTime']
            redshift_entity = EntityResource(cluster_id, local_region,
                                             create_time, d_tags)
            backup_id = f"arn:aws:redshift:{local_region}:{snap['OwnerAccount']}"
            backup_id = f"{backup_id}:snapshot:{snap['ClusterIdentifier']}/{snap['SnapshotIdentifier']}"
            backup_resource = BackupResource.construct(backup_tag_prefix,
                                                       backup_id, d_tags)
            backup_resource.entity_resource = redshift_entity
            backup_resource.entity_id = redshift_entity.resource_id

            backups.append(backup_resource)

        return backups
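
BackupResource.dict_from_boto3_tags converts the boto3 tag list ([{'Key': ..., 'Value': ...}]) into a plain dict. Judging from the dict(map(...)) pattern used in other examples in this listing, it is roughly equivalent to:

    def dict_from_boto3_tags(tag_list):
        # boto3 returns tags as [{'Key': 'k', 'Value': 'v'}, ...]
        return {tag['Key']: tag['Value'] for tag in tag_list}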
Code Example #3
    def backup_from_latest_automated(self, backup_resource: BackupResource):
        rds_client = AwsHelper.boto3_client('rds',
                                            arn=self.role_arn,
                                            external_id=self.role_external_id)
        response = rds_client.describe_db_snapshots(
            DBInstanceIdentifier=backup_resource.entity_id,
            SnapshotType='automated',
            # API always returns in date descending order, and we only need last one
            MaxRecords=20)
        # filter out any snapshots that could be in progress
        available_snapshots = [
            snap for snap in response['DBSnapshots']
            if snap['Status'] == 'available'
        ]
        auto_snapshots = sorted(available_snapshots,
                                key=lambda k: k['SnapshotCreateTime'],
                                reverse=True)

        if len(auto_snapshots) == 0:
            self.logger.info(
                f"There is no latest automated backup for cluster {backup_resource.entity_id},"
                f" fallback to RDS_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
            )
            return self.backup_from_instance(backup_resource)

        automated_snapshot_id = auto_snapshots[0]['DBSnapshotIdentifier']
        response = rds_client.copy_db_snapshot(
            SourceDBSnapshotIdentifier=automated_snapshot_id,
            TargetDBSnapshotIdentifier=backup_resource.name,
            CopyTags=False)
        backup_resource.resource_properties = response['DBSnapshot']
        backup_resource.backup_id = backup_resource.name
        return backup_resource
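
The Status == 'available' filter exists because copying a snapshot that is still being created fails. If blocking until the snapshot is usable were acceptable, boto3's built-in waiter could be used instead (a sketch with a hypothetical snapshot identifier):

    import boto3

    rds = boto3.client('rds')
    # polls describe_db_snapshots until the snapshot reaches 'available'
    # (raises WaiterError on failure or timeout)
    rds.get_waiter('db_snapshot_available').wait(
        DBSnapshotIdentifier='my-snapshot-id')  # hypothetical identifier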
Code Example #4
    def do_copy_backup(self, map_args={}, **kwargs):
        """
        Copy backup to another region, actual implementation
        """

        kwargs.update(map_args)

        # if the backup is not yet available, exit and rely on a recursive lambda
        # call to retry the copy; in non-lambda mode this should never happen
        if not self.wait_backup_available(backup_region=kwargs['OriginRegion'],
                                          backup_id=kwargs['BackupId'],
                                          lambda_method='do_copy_backup',
                                          lambda_args=kwargs):
            return

        self.logger.info(
            f"Do copy backup {kwargs['BackupId']} ({kwargs['OriginRegion']}) to region {kwargs['Region']}"
        )

        # copy backup
        src_region = kwargs['OriginRegion']
        dst_region = kwargs['Region']
        regional_backup_id = self.copy_backup_to_region(
            kwargs['BackupId'], dst_region)

        # create tags on backup copy
        original_backup_id = kwargs['BackupId']
        original_backup = self.get_backup_resource(src_region,
                                                   original_backup_id)
        resource_copy = BackupResource(None, None, True)
        resource_copy.backup_id = regional_backup_id
        resource_copy.region = kwargs['Region']
        resource_copy.tags = original_backup.tags.copy()

        # add metadata to dr copy and original
        dr_copies_tag_key = f"{RuntimeConfig.get_tag_prefix()}:dr_copies"
        resource_copy.tags[
            f"{RuntimeConfig.get_tag_prefix()}:region"] = dst_region
        resource_copy.tags[
            f"{RuntimeConfig.get_tag_prefix()}:dr_copy"] = 'true'
        resource_copy.tags[
            f"{RuntimeConfig.get_tag_prefix()}:dr_source_backup"] = f"{src_region}:{original_backup_id}"

        if dr_copies_tag_key not in original_backup.tags:
            original_backup.tags[dr_copies_tag_key] = ''
        original_backup.tags[dr_copies_tag_key] = original_backup.tags[
            dr_copies_tag_key] + f"{dst_region}:{regional_backup_id} "

        self.tag_backup_resource(resource_copy)
        self.tag_backup_resource(original_backup)

        # share the backup copy with the same accounts
        for shared_account_id in RuntimeConfig.get_share_with_accounts(self):
            backup_resource = BackupResource(None, None, True)
            backup_resource.backup_id = regional_backup_id
            backup_resource.region = kwargs['Region']
            self.share_backup(backup_resource, shared_account_id)
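
The method is driven entirely by its keyword arguments. Assuming engine is an instance of a concrete engine class, a direct invocation outside the Lambda fan-out would look roughly like this (placeholder values):

    engine.do_copy_backup(map_args={
        'BackupId': 'shelvery-example-backup',  # placeholder backup id
        'OriginRegion': 'us-east-1',
        'Region': 'us-west-2',
    })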
Code Example #5
    def backup_from_latest_automated(self, backup_resource: BackupResource):
        auto_snapshots = self.redshift_client.describe_cluster_snapshots(
            ClusterIdentifier=backup_resource.entity_id,
            SnapshotType='automated',
            # API always returns in date descending order, and we only need last one
            MaxRecords=20)
        auto_snapshots = sorted(auto_snapshots['Snapshots'],
                                key=lambda k: k['SnapshotCreateTime'],
                                reverse=True)

        if len(auto_snapshots) == 0:
            self.logger.error(
                f"There is no latest automated backup for cluster {backup_resource.entity_id},"
                f" fallback to REDSHIFT_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
            )
            return self.backup_from_cluster(backup_resource)

        snapshot = self.redshift_client.copy_cluster_snapshot(
            SourceSnapshotIdentifier=auto_snapshots[0]['SnapshotIdentifier'],
            SourceSnapshotClusterIdentifier=auto_snapshots[0]['ClusterIdentifier'],
            TargetSnapshotIdentifier=backup_resource.name)['Snapshot']
            TargetSnapshotIdentifier=backup_resource.name)['Snapshot']
        backup_resource.backup_id = f"arn:aws:redshift:{backup_resource.region}:{backup_resource.account_id}"
        backup_resource.backup_id = f"{backup_resource.backup_id}:snapshot:{snapshot['ClusterIdentifier']}/{snapshot['SnapshotIdentifier']}"
        return backup_resource
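
Both Redshift examples assemble the backup id as a snapshot ARN by hand. The format used above could be factored into a small helper:

    def redshift_snapshot_arn(region, account_id, cluster_id, snapshot_id):
        # arn:aws:redshift:<region>:<account>:snapshot:<cluster>/<snapshot>
        return (f"arn:aws:redshift:{region}:{account_id}"
                f":snapshot:{cluster_id}/{snapshot_id}")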
Code Example #6
    def backup_from_latest_automated(self, backup_resource: BackupResource):
        rds_client = boto3.client('rds')
        auto_snapshots = rds_client.describe_db_snapshots(
            DBInstanceIdentifier=backup_resource.entity_id,
            SnapshotType='automated',
            # API always returns in date descending order, and we only need last one
            MaxRecords=20)
        auto_snapshots = sorted(auto_snapshots['DBSnapshots'],
                                key=lambda k: k['SnapshotCreateTime'],
                                reverse=True)

        if len(auto_snapshots) == 0:
            self.logger.error(
                f"There is no latest automated backup for cluster {backup_resource.entity_id},"
                f" fallback to RDS_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
            )
            return self.backup_from_instance(backup_resource)

        automated_snapshot_id = auto_snapshots[0]['DBSnapshotIdentifier']
        rds_client.copy_db_snapshot(
            SourceDBSnapshotIdentifier=automated_snapshot_id,
            TargetDBSnapshotIdentifier=backup_resource.name,
            CopyTags=False)
        backup_resource.backup_id = backup_resource.name
        return backup_resource
Code Example #7
    def get_entities_to_backup(self, tag_name: str) -> List[EntityResource]:
        """Get all instances that contain `tag_name` as a tag."""
        clusters = self.collect_clusters(tag_name)

        # TODO: To get the cluster's creation time, we need to query the "events" with the
        # cluster ID.

        entities = []
        for cluster in clusters:
            if cluster['ClusterStatus'] != 'available':
                self.logger.info(
                    f"Skipping cluster '{cluster['ClusterIdentifier']}' as its status is '{cluster['ClusterStatus']}'."
                )
                continue

            d_tags = BackupResource.dict_from_boto3_tags(cluster['Tags'])

            entity = EntityResource(
                resource_id=cluster['ClusterIdentifier'],
                resource_region=self.region,
                date_created=f"{datetime.datetime.utcnow():%Y-%m-%d %H:%M:%S}",
                tags=d_tags)
            entities.append(entity)

        return entities
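
collect_clusters is not shown in this listing. A plausible sketch (an assumption, not the project's actual implementation) using the Redshift API's native tag filtering:

    import boto3

    def collect_clusters(tag_name):
        """Hypothetical sketch: list Redshift clusters carrying the given tag key."""
        redshift = boto3.client('redshift')
        clusters = []
        for page in redshift.get_paginator('describe_clusters').paginate(
                TagKeys=[tag_name]):
            clusters.extend(page['Clusters'])
        return clusters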
Code Example #8
 def get_backup_resource(self, region: str,
                         backup_id: str) -> BackupResource:
     ec2 = boto3.session.Session(region_name=region).resource('ec2')
     snapshot = ec2.Snapshot(backup_id)
     d_tags = dict(map(lambda t: (t['Key'], t['Value']), snapshot.tags))
     return BackupResource.construct(d_tags['shelvery:tag_name'], backup_id,
                                     d_tags)
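
One caveat with the resource API used here: snapshot.tags is None rather than an empty list when the snapshot has no tags, so the map call would raise TypeError. A defensive variant of the conversion line:

     d_tags = dict(map(lambda t: (t['Key'], t['Value']), snapshot.tags or []))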
Code Example #9
    def get_existing_backups(self, tag_prefix: str) -> List[BackupResource]:
        ec2client = AwsHelper.boto3_client('ec2',
                                           arn=self.role_arn,
                                           external_id=self.role_external_id)
        # lookup snapshots by tags
        snapshots = ec2client.describe_snapshots(Filters=[{
            'Name': f"tag:{tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}",
            'Values': ['true']
        }])
        backups = []

        # create backup resource objects
        for snap in snapshots['Snapshots']:
            snap_tags = dict(
                map(lambda t: (t['Key'], t['Value']), snap['Tags']))
            if f"{tag_prefix}:ami_id" in snap_tags:
                self.logger.info(
                    f"EBS snapshot {snap['SnapshotId']} created by AMI shelvery backup, skiping..."
                )
                continue

            backup = BackupResource.construct(tag_prefix=tag_prefix,
                                              backup_id=snap['SnapshotId'],
                                              tags=snap_tags)
            # legacy code - entity id should be picked up from tags
            if backup.entity_id is None:
                self.logger.info(
                    f"SnapshotId is None, using VolumeId {snap['VolumeId']}")
                backup.entity_id = snap['VolumeId']
            backups.append(backup)

        self.populate_volume_information(backups)

        return backups
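
describe_snapshots is a paginated API, so a single call can return a NextToken once results exceed one page. A sketch of the same tag lookup using boto3's paginator (with OwnerIds=['self'] to scope results to the calling account):

    paginator = ec2client.get_paginator('describe_snapshots')
    marker_filter = [{
        'Name': f"tag:{tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}",
        'Values': ['true']
    }]
    all_snapshots = []
    for page in paginator.paginate(Filters=marker_filter, OwnerIds=['self']):
        all_snapshots.extend(page['Snapshots'])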
Code Example #10
 def get_backup_resource(self, backup_region: str, backup_id: str) -> BackupResource:
     rds_client = boto3.client('rds', region_name=backup_region)
     snapshots = rds_client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=backup_id)
     snapshot = snapshots['DBClusterSnapshots'][0]
     tags = rds_client.list_tags_for_resource(ResourceName=snapshot['DBClusterSnapshotArn'])['TagList']
     d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))
     return BackupResource.construct(d_tags['shelvery:tag_name'], backup_id, d_tags)
Code Example #11
    def get_shelvery_backups_only(self, all_snapshots, backup_tag_prefix,
                                  rds_client):
        """
        :param all_snapshots: all snapshots within region
        :param backup_tag_prefix:  prefix of shelvery backup system
        :param rds_client:  amazon boto3 rds client
        :return: snapshots created using shelvery
        """
        all_backups = []
        marker_tag = f"{backup_tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}"
        for snap in all_snapshots:
            tags = snap['TagList']
            self.logger.info(
                f"Checking RDS Snap {snap['DBClusterSnapshotIdentifier']}")
            d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))
            if marker_tag in d_tags:
                if d_tags[marker_tag] in SHELVERY_DO_BACKUP_TAGS:
                    backup_resource = BackupResource.construct(
                        backup_tag_prefix, snap['DBClusterSnapshotIdentifier'],
                        d_tags)
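                    # 'EntityResource' is not part of the boto3 response; the
                    # caller is expected to attach it to each snapshot beforehand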
                    backup_resource.entity_resource = snap['EntityResource']
                    backup_resource.entity_id = snap[
                        'EntityResource'].resource_id

                    all_backups.append(backup_resource)

        return all_backups
Code Example #12
 def backup_from_cluster(self, backup_resource: BackupResource):
     snapshot = self.redshift_client.create_cluster_snapshot(
         SnapshotIdentifier=backup_resource.name,
         ClusterIdentifier=backup_resource.entity_id,
     )['Snapshot']
     backup_resource.backup_id = f"arn:aws:redshift:{backup_resource.region}:{backup_resource.account_id}"
     backup_resource.backup_id = f"{backup_resource.backup_id}:snapshot:{snapshot['ClusterIdentifier']}/{snapshot['SnapshotIdentifier']}"
     return backup_resource
Code Example #13
 def backup_resource(self,
                     backup_resource: BackupResource) -> BackupResource:
     # create snapshot
     snap = self.ec2client.create_snapshot(
         VolumeId=backup_resource.entity_id,
         Description=backup_resource.name)
     backup_resource.backup_id = snap['SnapshotId']
     return backup_resource
Code Example #14
    def get_backup_resource(self, backup_region: str,
                            backup_id: str) -> BackupResource:
        """
		Get Backup Resource within region, identified by its backup_id
		"""
        redshift_client = AwsHelper.boto3_client(
            'redshift',
            region_name=backup_region,
            arn=self.role_arn,
            external_id=self.role_external_id)
        snapshot_id = backup_id.split(":")[-1].split("/")[1]
        snapshots = redshift_client.describe_cluster_snapshots(
            SnapshotIdentifier=snapshot_id)
        snapshot = snapshots['Snapshots'][0]
        d_tags = BackupResource.dict_from_boto3_tags(snapshot['Tags'])
        return BackupResource.construct(d_tags['shelvery:tag_name'], backup_id,
                                        d_tags)
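
The snapshot_id extraction assumes the backup id is the snapshot ARN built by the other Redshift examples, for instance:

    backup_id = "arn:aws:redshift:us-east-1:123456789012:snapshot:my-cluster/my-snapshot"
    snapshot_id = backup_id.split(":")[-1].split("/")[1]  # -> "my-snapshot"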
Code Example #15
    def get_backup_resource(self, region: str,
                            backup_id: str) -> BackupResource:
        ami = self.ec2client.describe_images(ImageIds=[backup_id])['Images'][0]

        d_tags = dict(map(lambda x: (x['Key'], x['Value']), ami['Tags']))
        backup_tag_prefix = d_tags['shelvery:tag_name']

        backup = BackupResource.construct(backup_tag_prefix, backup_id, d_tags)
        return backup
Code Example #16
    def get_backup_resource(self, region: str, backup_id: str) -> BackupResource:
        ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id)
        ami = ec2client.describe_images(ImageIds=[backup_id])['Images'][0]

        d_tags = dict(map(lambda x: (x['Key'], x['Value']), ami['Tags']))
        backup_tag_prefix = d_tags['shelvery:tag_name']

        backup = BackupResource.construct(backup_tag_prefix, backup_id, d_tags)
        return backup
Code Example #17
 def get_backup_resource(self, backup_region: str, backup_id: str) -> BackupResource:
     docdb_client = AwsHelper.boto3_client('docdb', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id)
     snapshots = docdb_client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=backup_id)
     snapshot = snapshots['DBClusterSnapshots'][0]
     tags = docdb_client.list_tags_for_resource(ResourceName=snapshot['DBClusterSnapshotArn'])['TagList']
     d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))
     resource = BackupResource.construct(d_tags['shelvery:tag_name'], backup_id, d_tags)
     resource.resource_properties = snapshot
     return resource
Code Example #18
 def get_backup_resource(self, region: str,
                         backup_id: str) -> BackupResource:
     ec2 = AwsHelper.boto3_session('ec2',
                                   region_name=region,
                                   arn=self.role_arn,
                                   external_id=self.role_external_id)
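     # despite the name, AwsHelper.boto3_session is assumed to return a boto3
     # resource here, since the resource-style Snapshot(...) accessor is used below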
     snapshot = ec2.Snapshot(backup_id)
     d_tags = dict(map(lambda t: (t['Key'], t['Value']), snapshot.tags))
     return BackupResource.construct(d_tags['shelvery:tag_name'], backup_id,
                                     d_tags)
Code Example #19
 def backup_resource(self,
                     backup_resource: BackupResource) -> BackupResource:
     ec2client = AwsHelper.boto3_client('ec2',
                                        arn=self.role_arn,
                                        external_id=self.role_external_id)
     # create snapshot
     snap = ec2client.create_snapshot(VolumeId=backup_resource.entity_id,
                                      Description=backup_resource.name)
     backup_resource.backup_id = snap['SnapshotId']
     return backup_resource
Code Example #20
    def backup_resource(self, backup_resource: BackupResource):
        regional_client = AwsHelper.boto3_client('ec2', region_name=backup_resource.region, arn=self.role_arn, external_id=self.role_external_id)
        ami = regional_client.create_image(
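            # NoReboot=True keeps the instance running, so the resulting AMI is
            # only crash-consistent (the filesystem is not quiesced first)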
            NoReboot=True,
            Name=backup_resource.name,
            Description=f"Shelvery created backup for {backup_resource.entity_id}",
            InstanceId=backup_resource.entity_id,
        )
        backup_resource.backup_id = ami['ImageId']
        return backup_resource
Code Example #21
 def backup_resource(self, backup_resource: BackupResource):
     regional_client = boto3.client('ec2',
                                    region_name=backup_resource.region)
     ami = regional_client.create_image(
         NoReboot=True,
         Name=backup_resource.name,
         Description=f"Shelvery created backup for {backup_resource.entity_id}",
         InstanceId=backup_resource.entity_id,
     )
     backup_resource.backup_id = ami['ImageId']
     return backup_resource
Code Example #22
    def create_backups(self) -> List[BackupResource]:
        """Create backups from all collected entities marked for backup by using specific tag"""

        # collect resources to be backed up
        resource_type = self.get_resource_type()
        self.logger.info(
            f"Collecting entities of type {resource_type} tagged with "
            f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")
        resources = self.get_entities_to_backup(
            f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")

        # allows user to select single entity to be backed up
        if RuntimeConfig.get_shelvery_select_entity(self) is not None:
            entity_id = RuntimeConfig.get_shelvery_select_entity(self)
            self.logger.info(f"Creating backups only for entity {entity_id}")
            resources = list(
                filter(lambda x: x.resource_id == entity_id, resources))

        self.logger.info(
            f"{len(resources)} resources of type {resource_type} collected for backup"
        )

        # create and collect backups
        backup_resources = []
        for r in resources:
            backup_resource = BackupResource(
                tag_prefix=RuntimeConfig.get_tag_prefix(), entity_resource=r)
            self.logger.info(
                f"Processing {resource_type} with id {r.resource_id}")
            self.logger.info(f"Creating backup {backup_resource.name}")
            try:
                self.backup_resource(backup_resource)
                self.tag_backup_resource(backup_resource)
                self.logger.info(
                    f"Created backup of type {resource_type} for entity {backup_resource.entity_id} "
                    f"with id {backup_resource.backup_id}")
                backup_resources.append(backup_resource)
            except Exception as e:
                self.logger.exception(
                    f"Failed to create backup {backup_resource.name}:{e}")

        # copy backups to the disaster recovery regions
        for br in backup_resources:
            self.copy_backup(
                br, RuntimeConfig.get_dr_regions(br.entity_resource.tags,
                                                 self))

        for aws_account_id in RuntimeConfig.get_share_with_accounts(self):
            for br in backup_resources:
                self.share_backup(br, aws_account_id)

        return backup_resources
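
A usage sketch, assuming ShelveryEBSBackup as a concrete engine subclass (the class name is an assumption, not confirmed by this listing):

    engine = ShelveryEBSBackup()  # hypothetical concrete engine subclass
    for backup in engine.create_backups():
        print(backup.backup_id)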
Code Example #23
    def backup_from_latest_automated(self, backup_resource: BackupResource):
        rds_client = boto3.client('rds')
        auto_snapshots = rds_client.describe_db_snapshots(
            DBInstanceIdentifier=backup_resource.entity_id,
            SnapshotType='automated',
            # API always returns in date descending order, and we only need last one
            MaxRecords=20)
        auto_snapshots = sorted(auto_snapshots['DBSnapshots'],
                                key=lambda k: k['SnapshotCreateTime'],
                                reverse=True)

        # guard: there may be no automated backups yet (e.g. a freshly created instance)
        if len(auto_snapshots) == 0:
            raise Exception(
                f"There is no automated backup for instance {backup_resource.entity_id}")

        automated_snapshot_id = auto_snapshots[0]['DBSnapshotIdentifier']
        rds_client.copy_db_snapshot(
            SourceDBSnapshotIdentifier=automated_snapshot_id,
            TargetDBSnapshotIdentifier=backup_resource.name,
            CopyTags=False)
        backup_resource.backup_id = backup_resource.name
        return backup_resource
Code Example #24
    def get_existing_backups(self, tag_prefix: str) -> List[BackupResource]:
        # lookup snapshots by tags
        snapshots = self.ec2client.describe_snapshots(
            Filters=[{
                'Name': f"tag:{tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}",
                'Values': ['true']
            }])
        backups = []

        # create backup resource objects
        for snap in snapshots['Snapshots']:
            backup = BackupResource.construct(
                tag_prefix=tag_prefix,
                backup_id=snap['SnapshotId'],
                tags=dict(map(lambda t: (t['Key'], t['Value']), snap['Tags'])))
            backup.entity_id = snap['VolumeId']
            backups.append(backup)

        self.populate_volume_information(backups)

        return backups
Code Example #25
    def get_existing_backups(self,
                             backup_tag_prefix: str) -> List[BackupResource]:
        amis = self.ec2client.describe_images(Filters=[{
            'Name':
            f"tag:{backup_tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}",
            'Values': ['true']
        }])['Images']
        backups = []
        instances = dict(
            map(lambda x: (x.resource_id, x), self._get_all_entities()))
        for ami in amis:
            backup = BackupResource.construct(
                backup_tag_prefix, ami['ImageId'],
                dict(map(lambda x: (x['Key'], x['Value']), ami['Tags'])))

            if backup.entity_id in instances:
                backup.entity_resource = instances[backup.entity_id]

            backups.append(backup)

        return backups
Code Example #26
File: engine.py, Project: conare/shelvery
    def create_backups(self):
        """Create backups from all collected entities marked for backup by using specific tag"""

        # collect resources to be backed up
        resource_type = self.get_resource_type()
        self.logger.info(
            f"Collecting entities of type {resource_type} tagged with "
            f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")
        resources = self.get_entities_to_backup(
            f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")
        self.logger.info(
            f"{len(resources)} resources of type {resource_type} collected for backup"
        )

        # create and collect backups
        backup_resources = []
        for r in resources:
            backup_resource = BackupResource(
                tag_prefix=RuntimeConfig.get_tag_prefix(), entity_resource=r)
            self.logger.info(
                f"Processing {resource_type} with id {r.resource_id}")
            self.logger.info(f"Creating backup {backup_resource.name}")
            self.backup_resource(backup_resource)
            self.tag_backup_resource(backup_resource)
            self.logger.info(
                f"Created backup of type {resource_type} for entity {backup_resource.entity_id} "
                f"with id {backup_resource.backup_id}")
            backup_resources.append(backup_resource)

        # copy backups to the disaster recovery regions
        for br in backup_resources:
            self.copy_backup(
                br, RuntimeConfig.get_dr_regions(br.entity_resource.tags,
                                                 self))

        for aws_account_id in RuntimeConfig.get_share_with_accounts(self):
            for br in backup_resources:
                self.share_backup(br, aws_account_id)
Code Example #27
    def create_backups(self) -> List[BackupResource]:
        """Create backups from all collected entities marked for backup by using specific tag"""

        # collect resources to be backed up
        resource_type = self.get_resource_type()
        self.logger.info(f"Collecting entities of type {resource_type} tagged with "
                         f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")
        resources = self.get_entities_to_backup(f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}")

        # allows user to select single entity to be backed up
        if RuntimeConfig.get_shelvery_select_entity(self) is not None:
            entity_id = RuntimeConfig.get_shelvery_select_entity(self)
            self.logger.info(f"Creating backups only for entity {entity_id}")
            resources = list(
                filter(
                    lambda x: x.resource_id == entity_id,
                    resources)
            )

        self.logger.info(f"{len(resources)} resources of type {resource_type} collected for backup")

        # create and collect backups
        backup_resources = []
        current_retention_type = RuntimeConfig.get_current_retention_type(self)
        for r in resources:
            backup_resource = BackupResource(
                tag_prefix=RuntimeConfig.get_tag_prefix(),
                entity_resource=r,
                copy_resource_tags=RuntimeConfig.copy_resource_tags(self),
                exluded_resource_tag_keys=RuntimeConfig.get_exluded_resource_tag_keys(self)
            )
            # if retention is explicitly given by runtime environment
            if current_retention_type is not None:
                backup_resource.set_retention_type(current_retention_type)

            dr_regions = RuntimeConfig.get_dr_regions(backup_resource.entity_resource.tags, self)
            backup_resource.tags[f"{RuntimeConfig.get_tag_prefix()}:dr_regions"] = ','.join(dr_regions)
            self.logger.info(f"Processing {resource_type} with id {r.resource_id}")
            self.logger.info(f"Creating backup {backup_resource.name}")
            try:
                self.backup_resource(backup_resource)
                self.tag_backup_resource(backup_resource)
                self.logger.info(f"Created backup of type {resource_type} for entity {backup_resource.entity_id} "
                                 f"with id {backup_resource.backup_id}")
                backup_resources.append(backup_resource)
                self.store_backup_data(backup_resource)
                self.snspublisher.notify({
                    'Operation': 'CreateBackup',
                    'Status': 'OK',
                    'BackupType': self.get_engine_type(),
                    'BackupName': backup_resource.name,
                    'EntityId': backup_resource.entity_id
                })
            except Exception as e:
                self.snspublisher_error.notify({
                    'Operation': 'CreateBackup',
                    'Status': 'ERROR',
                    'ExceptionInfo': e.__dict__,
                    'BackupType': self.get_engine_type(),
                    'BackupName': backup_resource.name,
                    'EntityId': backup_resource.entity_id
                })
                self.logger.exception(f"Failed to create backup {backup_resource.name}:{e}")

        # copy backups to the disaster recovery regions
        for br in backup_resources:
            self.copy_backup(br, RuntimeConfig.get_dr_regions(br.entity_resource.tags, self))

        for aws_account_id in RuntimeConfig.get_share_with_accounts(self):
            for br in backup_resources:
                self.share_backup(br, aws_account_id)

        return backup_resources
Code Example #28
    def do_copy_backup(self, map_args={}, **kwargs):
        """
        Copy backup to another region, actual implementation
        """

        kwargs.update(map_args)
        backup_id = kwargs['BackupId']
        origin_region = kwargs['OriginRegion']
        backup_resource = self.get_backup_resource(origin_region, backup_id)
        # if the backup is not yet available, re-queue the copy (offload mode) or
        # wait and rely on a recursive lambda call; in non-lambda mode the backup
        # should always become available
        if RuntimeConfig.is_offload_queueing(self):
            if not self.is_backup_available(origin_region, backup_id):
                self.copy_backup(backup_resource, RuntimeConfig.get_dr_regions(backup_resource.entity_resource.tags, self))
                return
        else:
            if not self.wait_backup_available(backup_region=origin_region,
                                              backup_id=backup_id,
                                              lambda_method='do_copy_backup',
                                              lambda_args=kwargs):
                return

        self.logger.info(f"Do copy backup {kwargs['BackupId']} ({kwargs['OriginRegion']}) to region {kwargs['Region']}")

        # copy backup
        try:
            src_region = kwargs['OriginRegion']
            dst_region = kwargs['Region']
            regional_backup_id = self.copy_backup_to_region(kwargs['BackupId'], dst_region)

            # create tags on backup copy
            original_backup_id = kwargs['BackupId']
            original_backup = self.get_backup_resource(src_region, original_backup_id)
            resource_copy = BackupResource(None, None, True)
            resource_copy.backup_id = regional_backup_id
            resource_copy.region = kwargs['Region']
            resource_copy.tags = original_backup.tags.copy()

            # add metadata to dr copy and original
            dr_copies_tag_key = f"{RuntimeConfig.get_tag_prefix()}:dr_copies"
            resource_copy.tags[f"{RuntimeConfig.get_tag_prefix()}:region"] = dst_region
            resource_copy.tags[f"{RuntimeConfig.get_tag_prefix()}:dr_copy"] = 'true'
            resource_copy.tags[
                f"{RuntimeConfig.get_tag_prefix()}:dr_source_backup"] = f"{src_region}:{original_backup_id}"

            if dr_copies_tag_key not in original_backup.tags:
                original_backup.tags[dr_copies_tag_key] = ''
            original_backup.tags[dr_copies_tag_key] = original_backup.tags[
                                                          dr_copies_tag_key] + f"{dst_region}:{regional_backup_id} "

            self.tag_backup_resource(resource_copy)
            self.tag_backup_resource(original_backup)
            self.snspublisher.notify({
                'Operation': 'CopyBackupToRegion',
                'Status': 'OK',
                'DestinationRegion': kwargs['Region'],
                'BackupType': self.get_engine_type(),
                'BackupId': kwargs['BackupId'],
            })
            self.store_backup_data(resource_copy)
        except Exception as e:
            self.snspublisher_error.notify({
                'Operation': 'CopyBackupToRegion',
                'Status': 'ERROR',
                'ExceptionInfo': e.__dict__,
                'DestinationRegion': kwargs['Region'],
                'BackupType': self.get_engine_type(),
                'BackupId': kwargs['BackupId'],
            })
            self.logger.exception(f"Error copying backup {kwargs['BackupId']} to {dst_region}")

        # share the backup copy with the same accounts
        for shared_account_id in RuntimeConfig.get_share_with_accounts(self):
            backup_resource = BackupResource(None, None, True)
            backup_resource.backup_id = regional_backup_id
            backup_resource.region = kwargs['Region']
            try:
                self.share_backup(backup_resource, shared_account_id)
                self.snspublisher.notify({
                    'Operation': 'ShareRegionalBackupCopy',
                    'Status': 'OK',
                    'DestinationAccount': shared_account_id,
                    'DestinationRegion': kwargs['Region'],
                    'BackupType': self.get_engine_type(),
                    'BackupId': kwargs['BackupId'],
                })
            except Exception as e:
                self.snspublisher_error.notify({
                    'Operation': 'ShareRegionalBackupCopy',
                    'Status': 'ERROR',
                    'DestinationAccount': shared_account_id,
                    'DestinationRegion': kwargs['Region'],
                    'ExceptionInfo': e.__dict__,
                    'BackupType': self.get_engine_type(),
                    'BackupId': kwargs['BackupId'],
                })
                self.logger.exception(f"Error sharing copied backup {kwargs['BackupId']} to {dst_region}")