def do_copy_backup(self, map_args=None, **kwargs):
    """
    Copy backup to another region, actual implementation.

    Expects the merged kwargs to contain 'BackupId', 'OriginRegion' and
    'Region' (the copy destination). Waits until the source backup is
    available, copies it, tags both the copy and the original with
    disaster-recovery metadata, and shares the regional copy with any
    configured accounts.
    """
    # default was the mutable literal {}; use None to avoid the
    # shared-mutable-default-argument pitfall
    kwargs.update(map_args or {})

    # if backup is not available, exit and rely on recursive lambda call copy backup
    # in non lambda mode this should never happen
    if not self.wait_backup_available(backup_region=kwargs['OriginRegion'],
                                      backup_id=kwargs['BackupId'],
                                      lambda_method='do_copy_backup',
                                      lambda_args=kwargs):
        return

    self.logger.info(
        f"Do copy backup {kwargs['BackupId']} ({kwargs['OriginRegion']}) to region {kwargs['Region']}"
    )

    # copy backup
    src_region = kwargs['OriginRegion']
    dst_region = kwargs['Region']
    regional_backup_id = self.copy_backup_to_region(kwargs['BackupId'], dst_region)

    # create tags on backup copy
    original_backup_id = kwargs['BackupId']
    original_backup = self.get_backup_resource(src_region, original_backup_id)
    resource_copy = BackupResource(None, None, True)
    resource_copy.backup_id = regional_backup_id
    resource_copy.region = kwargs['Region']
    resource_copy.tags = original_backup.tags.copy()

    # add metadata to dr copy and original
    dr_copies_tag_key = f"{RuntimeConfig.get_tag_prefix()}:dr_copies"
    resource_copy.tags[f"{RuntimeConfig.get_tag_prefix()}:region"] = dst_region
    resource_copy.tags[f"{RuntimeConfig.get_tag_prefix()}:dr_copy"] = 'true'
    resource_copy.tags[
        f"{RuntimeConfig.get_tag_prefix()}:dr_source_backup"] = f"{src_region}:{original_backup_id}"

    # the original keeps a space-separated list of all its dr copies
    if dr_copies_tag_key not in original_backup.tags:
        original_backup.tags[dr_copies_tag_key] = ''
    original_backup.tags[dr_copies_tag_key] = original_backup.tags[
        dr_copies_tag_key] + f"{dst_region}:{regional_backup_id} "

    self.tag_backup_resource(resource_copy)
    self.tag_backup_resource(original_backup)

    # share regional backup copy with the configured accounts
    for shared_account_id in RuntimeConfig.get_share_with_accounts(self):
        backup_resource = BackupResource(None, None, True)
        backup_resource.backup_id = regional_backup_id
        backup_resource.region = kwargs['Region']
        self.share_backup(backup_resource, shared_account_id)
def backup_from_latest_automated(self, backup_resource: BackupResource):
    """
    Create a manual cluster snapshot by copying the most recent
    *available* automated cluster snapshot.

    Falls back to creating a snapshot directly on the cluster
    (RDS_CREATE_SNAPSHOT mode) when no automated snapshot is available.

    :param backup_resource: backup descriptor; entity_id is the DB cluster id
    :return: the updated backup_resource with backup_id set
    """
    rds_client = AwsHelper.boto3_client('rds',
                                        arn=self.role_arn,
                                        external_id=self.role_external_id)
    response = rds_client.describe_db_cluster_snapshots(
        DBClusterIdentifier=backup_resource.entity_id,
        SnapshotType='automated',
        # API always returns in date descending order, and we only need last one
        MaxRecords=20)
    # filter out any snapshots that could be in progress; copying a
    # snapshot that is not yet 'available' would fail
    available_snapshots = [
        snap for snap in response['DBClusterSnapshots']
        if snap['Status'] == 'available'
    ]
    auto_snapshots = sorted(available_snapshots,
                            key=lambda k: k['SnapshotCreateTime'],
                            reverse=True)
    if len(auto_snapshots) == 0:
        self.logger.info(
            f"There is no latest automated backup for cluster {backup_resource.entity_id},"
            f" fallback to RDS_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
        )
        return self.backup_from_cluster(backup_resource)

    automated_snapshot_id = auto_snapshots[0]['DBClusterSnapshotIdentifier']
    response = rds_client.copy_db_cluster_snapshot(
        SourceDBClusterSnapshotIdentifier=automated_snapshot_id,
        TargetDBClusterSnapshotIdentifier=backup_resource.name,
        CopyTags=False)
    backup_resource.resource_properties = response['DBClusterSnapshot']
    backup_resource.backup_id = backup_resource.name
    return backup_resource
def backup_from_latest_automated(self, backup_resource: BackupResource):
    """
    Create a manual snapshot by copying the newest available automated
    snapshot of the instance; fall back to snapshotting the instance
    directly when none exists.
    """
    rds_client = AwsHelper.boto3_client('rds',
                                        arn=self.role_arn,
                                        external_id=self.role_external_id)
    described = rds_client.describe_db_snapshots(
        DBInstanceIdentifier=backup_resource.entity_id,
        SnapshotType='automated',
        # API always returns in date descending order, and we only need last one
        MaxRecords=20)
    # skip any snapshots that could still be in progress
    candidates = [s for s in described['DBSnapshots'] if s['Status'] == 'available']
    if not candidates:
        self.logger.info(
            f"There is no latest automated backup for cluster {backup_resource.entity_id},"
            f" fallback to RDS_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
        )
        return self.backup_from_instance(backup_resource)

    newest = max(candidates, key=lambda s: s['SnapshotCreateTime'])
    copy_response = rds_client.copy_db_snapshot(
        SourceDBSnapshotIdentifier=newest['DBSnapshotIdentifier'],
        TargetDBSnapshotIdentifier=backup_resource.name,
        CopyTags=False)
    backup_resource.resource_properties = copy_response['DBSnapshot']
    backup_resource.backup_id = backup_resource.name
    return backup_resource
def backup_from_latest_automated(self, backup_resource: BackupResource):
    """
    Create a manual Redshift snapshot by copying the newest automated
    cluster snapshot; fall back to snapshotting the cluster directly
    when no automated snapshot exists. The resulting backup_id is the
    full snapshot ARN.
    """
    listing = self.redshift_client.describe_cluster_snapshots(
        ClusterIdentifier=backup_resource.entity_id,
        SnapshotType='automated',
        # API always returns in date descending order, and we only need last one
        MaxRecords=20)
    candidates = listing['Snapshots']
    if not candidates:
        self.logger.error(
            f"There is no latest automated backup for cluster {backup_resource.entity_id},"
            f" fallback to REDSHIFT_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
        )
        return self.backup_from_cluster(backup_resource)

    newest = max(candidates, key=lambda s: s['SnapshotCreateTime'])
    snapshot = self.redshift_client.copy_cluster_snapshot(
        SourceSnapshotIdentifier=newest['SnapshotIdentifier'],
        SourceSnapshotClusterIdentifier=newest['ClusterIdentifier'],
        TargetSnapshotIdentifier=backup_resource.name)['Snapshot']
    # store the full snapshot ARN rather than the bare identifier
    arn_prefix = f"arn:aws:redshift:{backup_resource.region}:{backup_resource.account_id}"
    backup_resource.backup_id = (
        f"{arn_prefix}:snapshot:{snapshot['ClusterIdentifier']}/{snapshot['SnapshotIdentifier']}"
    )
    return backup_resource
def backup_from_latest_automated(self, backup_resource: BackupResource):
    """
    Create a manual snapshot by copying the most recent *available*
    automated snapshot of the instance.

    Falls back to creating a snapshot directly on the instance
    (RDS_CREATE_SNAPSHOT mode) when no automated snapshot is available.

    :param backup_resource: backup descriptor; entity_id is the DB instance id
    :return: the updated backup_resource with backup_id set
    """
    rds_client = boto3.client('rds')
    response = rds_client.describe_db_snapshots(
        DBInstanceIdentifier=backup_resource.entity_id,
        SnapshotType='automated',
        # API always returns in date descending order, and we only need last one
        MaxRecords=20)
    # only consider snapshots that finished creating; copying a
    # snapshot that is still in progress would fail
    available_snapshots = [
        snap for snap in response['DBSnapshots']
        if snap['Status'] == 'available'
    ]
    auto_snapshots = sorted(available_snapshots,
                            key=lambda k: k['SnapshotCreateTime'],
                            reverse=True)
    if len(auto_snapshots) == 0:
        self.logger.error(
            f"There is no latest automated backup for cluster {backup_resource.entity_id},"
            f" fallback to RDS_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
        )
        return self.backup_from_instance(backup_resource)

    automated_snapshot_id = auto_snapshots[0]['DBSnapshotIdentifier']
    rds_client.copy_db_snapshot(
        SourceDBSnapshotIdentifier=automated_snapshot_id,
        TargetDBSnapshotIdentifier=backup_resource.name,
        CopyTags=False)
    backup_resource.backup_id = backup_resource.name
    return backup_resource
def backup_resource(self, backup_resource: BackupResource) -> BackupResource:
    """Create an EBS snapshot of the volume and record its id on the backup."""
    snapshot = self.ec2client.create_snapshot(
        VolumeId=backup_resource.entity_id,
        Description=backup_resource.name)
    backup_resource.backup_id = snapshot['SnapshotId']
    return backup_resource
def backup_from_cluster(self, backup_resource: BackupResource):
    """
    Take a manual snapshot directly on the Redshift cluster and record
    the full snapshot ARN as the backup id.
    """
    snapshot = self.redshift_client.create_cluster_snapshot(
        SnapshotIdentifier=backup_resource.name,
        ClusterIdentifier=backup_resource.entity_id,
    )['Snapshot']
    # store the full snapshot ARN rather than the bare identifier
    arn_prefix = f"arn:aws:redshift:{backup_resource.region}:{backup_resource.account_id}"
    backup_resource.backup_id = (
        f"{arn_prefix}:snapshot:{snapshot['ClusterIdentifier']}/{snapshot['SnapshotIdentifier']}"
    )
    return backup_resource
def backup_resource(self, backup_resource: BackupResource) -> BackupResource:
    """
    Create an EBS snapshot of the volume, using a client that may
    assume a role (role_arn / role_external_id), and record its id.
    """
    client = AwsHelper.boto3_client('ec2',
                                    arn=self.role_arn,
                                    external_id=self.role_external_id)
    snapshot = client.create_snapshot(
        VolumeId=backup_resource.entity_id,
        Description=backup_resource.name)
    backup_resource.backup_id = snapshot['SnapshotId']
    return backup_resource
def backup_resource(self, backup_resource: BackupResource):
    """
    Create an AMI of the instance without rebooting it, using a
    region-scoped client that may assume a role, and record the image
    id as the backup id.
    """
    ec2 = AwsHelper.boto3_client('ec2',
                                 region_name=backup_resource.region,
                                 arn=self.role_arn,
                                 external_id=self.role_external_id)
    image = ec2.create_image(
        NoReboot=True,
        Name=backup_resource.name,
        Description=f"Shelvery created backup for {backup_resource.entity_id}",
        InstanceId=backup_resource.entity_id,
    )
    backup_resource.backup_id = image['ImageId']
    return backup_resource
def backup_resource(self, backup_resource: BackupResource):
    """
    Create an AMI of the instance without rebooting it and record the
    image id as the backup id.
    """
    ec2 = boto3.client('ec2', region_name=backup_resource.region)
    image = ec2.create_image(
        NoReboot=True,
        Name=backup_resource.name,
        Description=f"Shelvery created backup for {backup_resource.entity_id}",
        InstanceId=backup_resource.entity_id,
    )
    backup_resource.backup_id = image['ImageId']
    return backup_resource
def backup_from_latest_automated(self, backup_resource: BackupResource):
    """
    Create a manual snapshot by copying the most recent automated
    snapshot of the instance.

    Falls back to creating a snapshot directly on the instance
    (RDS_CREATE_SNAPSHOT mode) when there are no automated snapshots
    (previously a TODO: indexing auto_snapshots[0] on an empty list
    raised IndexError).

    :param backup_resource: backup descriptor; entity_id is the DB instance id
    :return: the updated backup_resource with backup_id set
    """
    rds_client = boto3.client('rds')
    auto_snapshots = rds_client.describe_db_snapshots(
        DBInstanceIdentifier=backup_resource.entity_id,
        SnapshotType='automated',
        # API always returns in date descending order, and we only need last one
        MaxRecords=20)
    auto_snapshots = sorted(auto_snapshots['DBSnapshots'],
                            key=lambda k: k['SnapshotCreateTime'],
                            reverse=True)
    # guard against instances with no automated backups yet
    if len(auto_snapshots) == 0:
        self.logger.error(
            f"There is no latest automated backup for cluster {backup_resource.entity_id},"
            f" fallback to RDS_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
        )
        return self.backup_from_instance(backup_resource)

    automated_snapshot_id = auto_snapshots[0]['DBSnapshotIdentifier']
    rds_client.copy_db_snapshot(
        SourceDBSnapshotIdentifier=automated_snapshot_id,
        TargetDBSnapshotIdentifier=backup_resource.name,
        CopyTags=False)
    backup_resource.backup_id = backup_resource.name
    return backup_resource
def do_copy_backup(self, map_args=None, **kwargs):
    """
    Copy backup to another region, actual implementation.

    Expects the merged kwargs to contain 'BackupId', 'OriginRegion' and
    'Region' (the copy destination). Publishes success/error events to
    SNS, stores backup data for the copy, and shares the regional copy
    with any configured accounts.
    """
    # default was the mutable literal {}; use None to avoid the
    # shared-mutable-default-argument pitfall
    kwargs.update(map_args or {})
    backup_id = kwargs['BackupId']
    origin_region = kwargs['OriginRegion']
    backup_resource = self.get_backup_resource(origin_region, backup_id)

    # if backup is not available, exit and rely on recursive lambda call copy backup
    # in non lambda mode this should never happen
    if RuntimeConfig.is_offload_queueing(self):
        if not self.is_backup_available(origin_region, backup_id):
            # NOTE(review): get_backup_resource is called elsewhere as
            # (region, backup_id); passing (backup_resource, dr_regions)
            # here looks suspicious — confirm against copy_backup's and
            # get_backup_resource's signatures before changing.
            self.copy_backup(
                self.get_backup_resource(
                    backup_resource,
                    RuntimeConfig.get_dr_regions(
                        backup_resource.entity_resource.tags, self)))
    else:
        if not self.wait_backup_available(backup_region=origin_region,
                                          backup_id=backup_id,
                                          lambda_method='do_copy_backup',
                                          lambda_args=kwargs):
            return

    self.logger.info(f"Do copy backup {kwargs['BackupId']} ({kwargs['OriginRegion']}) to region {kwargs['Region']}")

    # copy backup
    try:
        src_region = kwargs['OriginRegion']
        dst_region = kwargs['Region']
        regional_backup_id = self.copy_backup_to_region(kwargs['BackupId'], dst_region)

        # create tags on backup copy
        original_backup_id = kwargs['BackupId']
        original_backup = self.get_backup_resource(src_region, original_backup_id)
        resource_copy = BackupResource(None, None, True)
        resource_copy.backup_id = regional_backup_id
        resource_copy.region = kwargs['Region']
        resource_copy.tags = original_backup.tags.copy()

        # add metadata to dr copy and original
        dr_copies_tag_key = f"{RuntimeConfig.get_tag_prefix()}:dr_copies"
        resource_copy.tags[f"{RuntimeConfig.get_tag_prefix()}:region"] = dst_region
        resource_copy.tags[f"{RuntimeConfig.get_tag_prefix()}:dr_copy"] = 'true'
        resource_copy.tags[
            f"{RuntimeConfig.get_tag_prefix()}:dr_source_backup"] = f"{src_region}:{original_backup_id}"

        # the original keeps a space-separated list of all its dr copies
        if dr_copies_tag_key not in original_backup.tags:
            original_backup.tags[dr_copies_tag_key] = ''
        original_backup.tags[dr_copies_tag_key] = original_backup.tags[
            dr_copies_tag_key] + f"{dst_region}:{regional_backup_id} "

        self.tag_backup_resource(resource_copy)
        self.tag_backup_resource(original_backup)
        self.snspublisher.notify({
            'Operation': 'CopyBackupToRegion',
            'Status': 'OK',
            'DestinationRegion': kwargs['Region'],
            'BackupType': self.get_engine_type(),
            'BackupId': kwargs['BackupId'],
        })
        self.store_backup_data(resource_copy)
    except Exception as e:
        self.snspublisher_error.notify({
            'Operation': 'CopyBackupToRegion',
            'Status': 'ERROR',
            'ExceptionInfo': e.__dict__,
            'DestinationRegion': kwargs['Region'],
            'BackupType': self.get_engine_type(),
            'BackupId': kwargs['BackupId'],
        })
        self.logger.exception(f"Error copying backup {kwargs['BackupId']} to {dst_region}")
        # bail out: without a successful copy there is no
        # regional_backup_id to share below (previously execution fell
        # through and raised NameError in the share loop)
        return

    # shared backup copy with same accounts
    for shared_account_id in RuntimeConfig.get_share_with_accounts(self):
        backup_resource = BackupResource(None, None, True)
        backup_resource.backup_id = regional_backup_id
        backup_resource.region = kwargs['Region']
        try:
            self.share_backup(backup_resource, shared_account_id)
            self.snspublisher.notify({
                'Operation': 'ShareRegionalBackupCopy',
                'Status': 'OK',
                'DestinationAccount': shared_account_id,
                'DestinationRegion': kwargs['Region'],
                'BackupType': self.get_engine_type(),
                'BackupId': kwargs['BackupId'],
            })
        except Exception as e:
            self.snspublisher_error.notify({
                'Operation': 'ShareRegionalBackupCopy',
                'Status': 'ERROR',
                'DestinationAccount': shared_account_id,
                'DestinationRegion': kwargs['Region'],
                'ExceptionInfo': e.__dict__,
                'BackupType': self.get_engine_type(),
                'BackupId': kwargs['BackupId'],
            })
            self.logger.exception(f"Error sharing copied backup {kwargs['BackupId']} to {dst_region}")