Example #1
    def test_CreateDataBucket(self):

        s3client = AwsHelper.boto3_client('s3', region_name='ap-southeast-2')
        sts = AwsHelper.boto3_client('sts')
        id = str(sts.get_caller_identity()['Account'])
        os.environ['SHELVERY_MONO_THREAD'] = '1'

        share_with_id = destination_account
        os.environ["shelvery_share_aws_account_ids"] = str(share_with_id)

        engine = ShelveryEngine()

        print("Creating Data Buckets")
        engine.create_data_buckets()

        bucket_name = f"shelvery.data.{id}-ap-southeast-2.base2tools"

        response = s3client.get_bucket_policy(Bucket=bucket_name)['Policy']

        policy = json.loads(response)['Statement']

        print("Policy: " + str(policy))

        valid = False

        #Add other checks on policy? (see the sketch after this test)

        for statement in policy:
            if statement['Effect'] == "Allow" and str(
                    share_with_id) in statement['Principal']['AWS']:
                valid = True

        self.assertTrue(valid)
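The commented question in the test above can be tightened into an explicit action check. A minimal sketch of such a check, as a hypothetical helper; the action name and the Principal/Action shapes handled here are assumptions, not something the test asserts today:

def policy_allows_account(statements, account_id, action='s3:GetObject'):
    # Return True if any Allow statement grants `action` to `account_id`.
    # Principal.AWS and Action may each be a string or a list; handle both.
    for statement in statements:
        if statement.get('Effect') != 'Allow':
            continue
        principals = statement.get('Principal', {}).get('AWS', [])
        if isinstance(principals, str):
            principals = [principals]
        actions = statement.get('Action', [])
        if isinstance(actions, str):
            actions = [actions]
        if any(str(account_id) in p for p in principals) and action in actions:
            return True
    return False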
Example #2
 def create_data_buckets(self):
     regions = [self.region]
     regions.extend(RuntimeConfig.get_dr_regions(None, self))
     for region in regions:
         bucket = self._get_data_bucket(region)
         AwsHelper.boto3_client('s3', region_name=region).put_bucket_policy(
             Bucket=bucket.name,
             Policy=AwsHelper.get_shelvery_bucket_policy(
                 self.account_id,
                 RuntimeConfig.get_share_with_accounts(self), bucket.name))
Example #3
def cleanEC2Snapshots():
    # Clean up EC2 AMI and EBS snapshots created by the tests
    ec2client = AwsHelper.boto3_client('ec2', region_name='ap-southeast-2')
    sts = AwsHelper.boto3_client('sts')
    id = str(sts.get_caller_identity()['Account'])

    snapshots = ec2client.describe_snapshots(
        OwnerIds=[id] 
    )['Snapshots']

    for snapshot in snapshots:
        snapid = snapshot['SnapshotId']
        if 'Tags' in snapshot:
            tags = snapshot['Tags']
            try:
                name = [tag['Value'] for tag in tags if tag['Key'] == 'Name'][0]
                if 'shelvery-test-ec2' in name:
                    print("Cleaning up EC2 AMI Snapshots")
                    # collect the tag first: indexing [0] on a missing tag raises IndexError
                    ami_ids = [tag['Value'] for tag in tags if tag['Key'] == 'shelvery:ami_id']
                    if ami_ids:
                        print(f"De-registering image: {ami_ids[0]}")
                        ec2client.deregister_image(ImageId=ami_ids[0])
                    ec2client.delete_snapshot(SnapshotId=snapid)
                    print(f'Deleting EC2 snapshot: {snapid}')
                if 'shelvery-test-ebs' in name:
                    print("Cleaning up EBS Snapshots")
                    print(f'Deleting EBS snapshot: {snapid}')
                    ec2client.delete_snapshot(SnapshotId=snapid)
            except Exception as e:
                print(f"Failed to delete {snapid}:{str(e)}")
        
        else:
            print('Deleting untagged EC2 snapshots')
            if snapshot['VolumeId'] == 'vol-ffffffff' and 'Copied for' in snapshot['Description']:
                # one dict per condition: duplicate 'Name'/'Values' keys in a
                # single dict would silently keep only the last pair
                search_filter = [{'Name': 'block-device-mapping.snapshot-id',
                                  'Values': [snapid]},
                                 {'Name': 'tag:ResourceName',
                                  'Values': ['shelvery-test-ec2']}]
                ami_id = ec2client.describe_images(
                    Filters=search_filter
                )['Images'][0]['ImageId']
                try:
                    print(f"De-registering image: {ami_id}")
                    print(f'Deleting EC2 snapshot: {snapid}')
                    ec2client.deregister_image(ImageId=ami_id)
                    ec2client.delete_snapshot(SnapshotId=snapid)
                except Exception as e:
                    print(f"Failed to delete {snapid}:{str(e)}")
 def copy_backup_to_region(self, backup_id: str, region: str) -> str:
     local_region = boto3.session.Session().region_name
     local_client = AwsHelper.boto3_client('ec2', region_name=local_region, arn=self.role_arn, external_id=self.role_external_id)
     regional_client = AwsHelper.boto3_client('ec2', region_name=region, arn=self.role_arn, external_id=self.role_external_id)
     ami = local_client.describe_images(ImageIds=[backup_id])['Images'][0]
     idempotency_token = f"shelverycopy{backup_id.replace('-','')}to{region.replace('-','')}"
     return regional_client.copy_image(Name=ami['Name'],
                                       ClientToken=idempotency_token,
                                       Description=f"Shelvery copy of {backup_id} to {region} from {local_region}",
                                       SourceImageId=backup_id,
                                       SourceRegion=local_region
                                       )['ImageId']
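A detail worth calling out: ClientToken makes copy_image idempotent, so a retried invocation returns the in-flight copy's ImageId instead of starting a second copy. A self-contained sketch of the same pattern (the function name and plain boto3 client here are illustrative assumptions):

import boto3

def copy_ami_idempotent(backup_id: str, source_region: str, dest_region: str) -> str:
    # The deterministic token means calling this twice with the same
    # arguments reuses the existing copy rather than duplicating it.
    token = f"shelverycopy{backup_id.replace('-', '')}to{dest_region.replace('-', '')}"
    ec2 = boto3.client('ec2', region_name=dest_region)
    return ec2.copy_image(Name=f"copy-of-{backup_id}",
                          ClientToken=token,
                          SourceImageId=backup_id,
                          SourceRegion=source_region)['ImageId']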
Example #5
 def copy_backup_to_region(self, backup_id: str, region: str) -> str:
     local_region = boto3.session.Session().region_name
     client_local = AwsHelper.boto3_client('docdb', arn=self.role_arn, external_id=self.role_external_id)
     docdb_client = AwsHelper.boto3_client('docdb', region_name=region)
     snapshots = client_local.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=backup_id)
     snapshot = snapshots['DBClusterSnapshots'][0]
     docdb_client.copy_db_cluster_snapshot(
         SourceDBClusterSnapshotIdentifier=snapshot['DBClusterSnapshotArn'],
         TargetDBClusterSnapshotIdentifier=backup_id,
         SourceRegion=local_region,
         # tags are created explicitly
         CopyTags=False
     )
     return backup_id
Example #6
    def _get_data_bucket(self, region=None):
        bucket_name = self.get_local_bucket_name(region)
        if region is None:
            loc_constraint = boto3.session.Session().region_name
        else:
            loc_constraint = region

        s3 = boto3.resource('s3')
        try:
            AwsHelper.boto3_client('s3').head_bucket(Bucket=bucket_name)
            bucket = s3.Bucket(bucket_name)
            AwsHelper.boto3_client('s3').put_public_access_block(
                Bucket=bucket_name,
                PublicAccessBlockConfiguration={
                    'BlockPublicAcls': True,
                    'IgnorePublicAcls': True,
                    'BlockPublicPolicy': True,
                    'RestrictPublicBuckets': True
                },
            )

        except ClientError as e:
            if e.response['Error']['Code'] == '404':
                client_region = loc_constraint
                s3client = AwsHelper.boto3_client('s3',
                                                  region_name=client_region)
                if loc_constraint == "us-east-1":
                    bucket = s3client.create_bucket(Bucket=bucket_name)
                else:
                    if loc_constraint == "eu-west-1":
                        loc_constraint = "EU"

                    bucket = s3client.create_bucket(Bucket=bucket_name,
                                                    CreateBucketConfiguration={
                                                        'LocationConstraint':
                                                        loc_constraint
                                                    })

                # store the bucket policy, so the bucket can be accessed from other accounts
                # that backups are shared with
                s3client.put_bucket_policy(
                    Bucket=bucket_name,
                    Policy=AwsHelper.get_shelvery_bucket_policy(
                        self.account_id,
                        RuntimeConfig.get_share_with_accounts(self),
                        bucket_name))
                return s3.Bucket(bucket_name)
            else:
                raise e
        return bucket
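The two branches above reflect S3's CreateBucket quirks: us-east-1 must be created without a CreateBucketConfiguration, and this code maps eu-west-1 to the legacy 'EU' location constraint. The branching could be isolated in a small helper; a sketch with a hypothetical name:

def create_bucket_in_region(s3client, bucket_name, region):
    # us-east-1 rejects an explicit LocationConstraint; 'EU' is the
    # legacy constraint value used here for eu-west-1
    if region == 'us-east-1':
        return s3client.create_bucket(Bucket=bucket_name)
    constraint = 'EU' if region == 'eu-west-1' else region
    return s3client.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={'LocationConstraint': constraint})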
Example #7

    def test_PullRdsClusterBackup(self):
        
        os.environ['SHELVERY_MONO_THREAD'] = '1'
        cleanRdsClusterSnapshots()

        source_aws_id = source_account
        os.environ["shelvery_source_aws_account_ids"] = str(source_aws_id)

        print(f"rds cluster - Running pull shared backups test")
    
        rds_cluster_client = AwsHelper.boto3_client('rds', region_name='ap-southeast-2')
        rds_cluster_backup_engine = ShelveryRDSClusterBackup()

        
        print("Pulling shared backups")
        rds_cluster_backup_engine.pull_shared_backups()

        #Get post-pull snapshot count
        pulled_snapshot = rds_cluster_client.describe_db_cluster_snapshots(
            DBClusterIdentifier='shelvery-test-rds-cluster',
            SnapshotType='Manual'
        )
       
        print("PULLED:" + str(pulled_snapshot))

        self.assertTrue(len(pulled_snapshot['DBClusterSnapshots']) == 1)
Example #8
    def is_backup_available(self, backup_region: str, backup_id: str) -> bool:
        """
		Determine whether backup has completed and is available to be copied
		to other regions and shared with other AWS accounts
		"""
        redshift_client = AwsHelper.boto3_client(
            'redshift',
            region_name=backup_region,
            arn=self.role_arn,
            external_id=self.role_external_id)
        snapshot_id = backup_id.split(":")[-1].split("/")[1]
        snapshots = None
        try:
            snapshots = redshift_client.describe_cluster_snapshots(
                SnapshotIdentifier=snapshot_id)
        except ClientError as e:
            self.logger.warning(f"Backup {backup_id} not found")
            print(e.response)
            if e.response['Error']['Code'] == '404':
                return False
            else:
                self.logger.exception(
                    f"Problem waiting for {backup_id} availability")
                raise e
        return snapshots['Snapshots'][0]['Status'] == 'available'
Example #9
    def test_NameTransformed(self):
        ebs_backups_engine = ShelveryEBSBackup()
        try:
            backups = ebs_backups_engine.create_backups()
        except Exception as e:
            print(e)
            print(f"Failed with {e}")
            traceback.print_exc(file=sys.stdout)
            raise e
        ec2client = AwsHelper.boto3_client('ec2')
        
        valid = False
        # validate a backup was created for the test volume
        for backup in backups:
            if backup.entity_id == self.volume['VolumeId']:
                snapshot_id = backup.backup_id
                self.created_snapshots.append(snapshot_id)
                
                # wait for snapshot to become available
                snapshots = ec2client.describe_snapshots(SnapshotIds=[snapshot_id])['Snapshots']
                self.assertTrue(len(snapshots) == 1)
                self.assertTrue(snapshots[0]['VolumeId'] == self.volume['VolumeId'])
                d_tags = dict(map(lambda x: (x['Key'], x['Value']), snapshots[0]['Tags']))

                self.assertTrue(d_tags['Name'].startswith(NAME_TRANSFORMED))
                print(f"required: {backup.date_created.strftime(BackupResource.TIMESTAMP_FORMAT)}-{backup.retention_type}")
                print(f"actual: {d_tags['Name']}")
                self.assertTrue(d_tags['Name'].endswith(f"{backup.date_created.strftime(BackupResource.TIMESTAMP_FORMAT)}-{backup.retention_type}"))
                
                valid = True
        
        self.assertTrue(valid)
Example #10
    def backup_from_latest_automated(self, backup_resource: BackupResource):
        rds_client = AwsHelper.boto3_client('rds',
                                            arn=self.role_arn,
                                            external_id=self.role_external_id)
        response = rds_client.describe_db_snapshots(
            DBInstanceIdentifier=backup_resource.entity_id,
            SnapshotType='automated',
            # API always returns in date descending order, and we only need last one
            MaxRecords=20)
        # filter out any snapshots that could be in progress
        available_snapshots = [
            snap for snap in response['DBSnapshots']
            if snap['Status'] == 'available'
        ]
        auto_snapshots = sorted(available_snapshots,
                                key=lambda k: k['SnapshotCreateTime'],
                                reverse=True)

        if len(auto_snapshots) == 0:
            self.logger.info(
                f"There is no latest automated backup for cluster {backup_resource.entity_id},"
                f" fallback to RDS_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
            )
            return self.backup_from_instance(backup_resource)

        automated_snapshot_id = auto_snapshots[0]['DBSnapshotIdentifier']
        response = rds_client.copy_db_snapshot(
            SourceDBSnapshotIdentifier=automated_snapshot_id,
            TargetDBSnapshotIdentifier=backup_resource.name,
            CopyTags=False)
        backup_resource.resource_properties = response['DBSnapshot']
        backup_resource.backup_id = backup_resource.name
        return backup_resource
Example #11
    def tag_backup_resource(self, backup_resource: BackupResource):
        regional_client = AwsHelper.boto3_client(
            'ec2',
            region_name=backup_resource.region,
            arn=self.role_arn,
            external_id=self.role_external_id)
        regional_client.create_tags(
            Resources=[backup_resource.backup_id],
            Tags=list(
                map(lambda k: {
                    'Key': k,
                    'Value': backup_resource.tags[k]
                }, backup_resource.tags)))

        attempts = 0
        while attempts < 10:
            snapshots = self._get_snapshots_from_ami(backup_resource)
            if snapshots:
                break
            sleep(2)
            attempts += 1

        # tag all snapshots associated with the ami
        backup_resource.tags[
            f"{backup_resource.tags['shelvery:tag_name']}:ami_id"] = backup_resource.backup_id
        for snapshot in snapshots:
            regional_client.create_tags(
                Resources=[snapshot],
                Tags=list(
                    map(lambda k: {
                        'Key': k,
                        'Value': backup_resource.tags[k]
                    }, backup_resource.tags)))
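_get_snapshots_from_ami is not shown in this example. A plausible reconstruction, offered only as a sketch of what the retry loop above is waiting on (the real helper may differ): it reads snapshot ids out of the AMI's EBS block device mappings, which stay empty until AMI registration progresses.

    def _get_snapshots_from_ami(self, backup_resource):
        # Collect EBS snapshot ids referenced by the AMI's block device mappings
        ec2client = AwsHelper.boto3_client(
            'ec2',
            region_name=backup_resource.region,
            arn=self.role_arn,
            external_id=self.role_external_id)
        images = ec2client.describe_images(
            ImageIds=[backup_resource.backup_id])['Images']
        snapshot_ids = []
        if images:
            for mapping in images[0].get('BlockDeviceMappings', []):
                if 'Ebs' in mapping and 'SnapshotId' in mapping['Ebs']:
                    snapshot_ids.append(mapping['Ebs']['SnapshotId'])
        return snapshot_ids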
Example #12
def instanceShareBackups(self, backup):
    print(f"BackupId={backup.backup_id}")
    print(f"Accountd={backup.account_id}")

    snapshot_id = backup.backup_id
    print(
        f"Testing if snapshot {snapshot_id} is shared with {self.share_with_id}"
    )

    source_client = AwsHelper.boto3_client('rds')

    #Get source snapshot
    source_snapshot = source_client.describe_db_snapshots(
        DBInstanceIdentifier=backup.entity_id,
        DBSnapshotIdentifier=snapshot_id)

    attributes = source_client.describe_db_snapshot_attributes(
        DBSnapshotIdentifier=snapshot_id
    )['DBSnapshotAttributesResult']['DBSnapshotAttributes']

    #The 'restore' attribute lists accounts allowed to restore the snapshot
    restore_attribute = [
        attr for attr in attributes if attr['AttributeName'] == 'restore'
    ][0]['AttributeValues']

    print("Attributes: " + str(restore_attribute))

    #Assert Snapshot(s) exist
    self.assertTrue(len(source_snapshot['DBSnapshots']) == 1)

    #Assert that snapshot is shared with dest account
    self.assertTrue(destination_account in restore_attribute)

    return True
Example #13
    def copy_shared_backup(self, source_account: str,
                           source_backup: BackupResource):
        rds_client = AwsHelper.boto3_client('rds',
                                            arn=self.role_arn,
                                            external_id=self.role_external_id)
        # copying of tags happens outside this method
        source_arn = f"arn:aws:rds:{source_backup.region}:{source_backup.account_id}:snapshot:{source_backup.backup_id}"

        params = {
            'SourceDBSnapshotIdentifier': source_arn,
            'SourceRegion': source_backup.region,
            'CopyTags': False,
            'TargetDBSnapshotIdentifier': source_backup.backup_id
        }

        # If the backup is encrypted, include the KMS key ID in the request.
        # We have to check the attribute to support our previous YAML file format for backup data stored in S3
        if hasattr(source_backup, 'resource_properties'
                   ) and source_backup.resource_properties['Encrypted']:
            kms_key = source_backup.resource_properties['KmsKeyId']
            self.logger.info(
                f"Snapshot {source_backup.backup_id} is encrypted. Copying backup with KMS key {kms_key} ..."
            )
            params['KmsKeyId'] = kms_key

        snap = rds_client.copy_db_snapshot(**params)
        return snap['DBSnapshot']['DBSnapshotIdentifier']
Example #14
 def tag_backup_resource(self, backup_resource: BackupResource):
     regional_client = AwsHelper.boto3_client(
         'ec2',
         region_name=backup_resource.region,
         arn=self.role_arn,
         external_id=self.role_external_id)
     regional_client.create_tags(
         Resources=[backup_resource.backup_id],
         Tags=list(
             map(lambda k: {
                 'Key': k,
                 'Value': backup_resource.tags[k]
             }, backup_resource.tags)))
     snapshots = self._get_snapshots_from_ami(backup_resource)
     # tag all snapshots associated with the ami
     backup_resource.tags[
         f"{backup_resource.tags['shelvery:tag_name']}:ami_id"] = backup_resource.backup_id
     self.logger.info(
         f"Tagging {len(snapshots)} AMI snapshots: {snapshots}")
     regional_client.create_tags(
         Resources=snapshots,
         Tags=list(
             map(lambda k: {
                 'Key': k,
                 'Value': backup_resource.tags[k]
             }, backup_resource.tags)))
Example #15
 def share_backup_with_account(self, backup_region: str, backup_id: str, aws_account_id: str):
     docdb_client = AwsHelper.boto3_client('docdb', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id)
     docdb_client.modify_db_cluster_snapshot_attribute(
         DBClusterSnapshotIdentifier=backup_id,
         AttributeName='restore',
         ValuesToAdd=[aws_account_id]
     )
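The inverse operation is symmetric: modify_db_cluster_snapshot_attribute also accepts ValuesToRemove, so revoking access is the same call with the other parameter. A hypothetical companion method, sketched to mirror the one above:

 def unshare_backup_with_account(self, backup_region: str, backup_id: str, aws_account_id: str):
     # Revoke a previously shared snapshot by removing the account id
     # from the 'restore' attribute
     docdb_client = AwsHelper.boto3_client('docdb', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id)
     docdb_client.modify_db_cluster_snapshot_attribute(
         DBClusterSnapshotIdentifier=backup_id,
         AttributeName='restore',
         ValuesToRemove=[aws_account_id]
     )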
Example #16
    def copy_shared_backup(self, source_account: str, source_backup: BackupResource):
        docdb_client = AwsHelper.boto3_client('docdb', arn=self.role_arn, external_id=self.role_external_id)
        # copying of tags happens outside this method
        source_arn = f"arn:aws:rds:{source_backup.region}:{source_backup.account_id}:cluster-snapshot:{source_backup.backup_id}"

        params = {
            'SourceDBClusterSnapshotIdentifier': source_arn,
            'SourceRegion': source_backup.region,
            'CopyTags': False,
            'TargetDBClusterSnapshotIdentifier': source_backup.backup_id
        }

        # If the backup is encrypted, include the KMS key ID in the request.
        if source_backup.resource_properties['StorageEncrypted']:
            kms_key = source_backup.resource_properties['KmsKeyId']
            self.logger.info(f"Snapshot {source_backup.backup_id} is encrypted with the kms key {kms_key}")
            
            copy_kms_key = RuntimeConfig.get_copy_kms_key_id(source_backup.entity_resource.tags, self)
            # if a new key is provided by config, encrypt the copy with the new kms key
            if copy_kms_key is not None:
                self.logger.info(f"Snapshot {source_backup.backup_id} will be copied and encrypted with the kms key {copy_kms_key}")
                kms_key = copy_kms_key
                
            params['KmsKeyId'] = kms_key
        else:
            # if the backup is not encrypted and encrypt_copy is enabled, encrypt the copy with the provided kms key
            if RuntimeConfig.get_encrypt_copy(source_backup.entity_resource.tags, self):
                kms_key = RuntimeConfig.get_copy_kms_key_id(source_backup.entity_resource.tags, self)
                if kms_key is not None:
                    self.logger.info(f"Snapshot {source_backup.backup_id} is not encrypted. Encrypting the copy with KMS key {kms_key}")
                    params['KmsKeyId'] = kms_key

        snap = docdb_client.copy_db_cluster_snapshot(**params)
        return snap['DBClusterSnapshot']['DBClusterSnapshotIdentifier']
Example #17
    def delete_backup(self, backup_resource: BackupResource):
        """
		Remove given backup from system
		"""
        redshift_client = AwsHelper.boto3_client(
            'redshift',
            region_name=backup_resource.region,
            arn=self.role_arn,
            external_id=self.role_external_id)
        cluster_id = backup_resource.backup_id.split(":")[-1].split("/")[0]
        snapshot_id = backup_resource.backup_id.split(":")[-1].split("/")[1]
        try:
            redshift_client.delete_cluster_snapshot(
                SnapshotIdentifier=snapshot_id,
                SnapshotClusterIdentifier=cluster_id)
        except ClientError as ex:
            if 'other accounts still have access to it' in ex.response[
                    'Error']['Message']:
                self.logger.exception(
                    f"Could not delete {backup_resource.backup_id} as"
                    f"other accounts still have access to this snapshot")
                return
            else:
                self.logger.error(ex.response)
                self.logger.exception(
                    f"Could not delete {backup_resource.backup_id}")
Example #18
    def populate_volume_information(self, backups):
        volume_ids = []
        volumes = {}
        ec2client = AwsHelper.boto3_client('ec2',
                                           arn=self.role_arn,
                                           external_id=self.role_external_id)
        local_region = boto3.session.Session().region_name

        # create list of all volume ids
        for backup in backups:
            if backup.entity_id not in volume_ids:
                volume_ids.append(backup.entity_id)

        # populate map volumeid->volume if present
        for volume_id in volume_ids:
            try:
                volume = ec2client.describe_volumes(
                    VolumeIds=[volume_id])['Volumes'][0]
                d_tags = dict(
                    map(lambda t: (t['Key'], t['Value']), volume['Tags']))
                volumes[volume_id] = EntityResource(volume_id, local_region,
                                                    volume['CreateTime'],
                                                    d_tags)
            except ClientError as e:
                if 'InvalidVolume.NotFound' in str(e):
                    volumes[volume_id] = EntityResource.empty()
                    volumes[volume_id].resource_id = volume_id
                else:
                    raise e

        # add info to backup resource objects
        for backup in backups:
            if backup.entity_id in volumes:
                backup.entity_resource = volumes[backup.entity_id]
Example #19
    def get_existing_backups(self, tag_prefix: str) -> List[BackupResource]:
        ec2client = AwsHelper.boto3_client('ec2',
                                           arn=self.role_arn,
                                           external_id=self.role_external_id)
        # lookup snapshots by tags
        snapshots = ec2client.describe_snapshots(Filters=[{
            'Name': f"tag:{tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}",
            'Values': ['true']
        }])
        backups = []

        # create backup resource objects
        for snap in snapshots['Snapshots']:
            snap_tags = dict(
                map(lambda t: (t['Key'], t['Value']), snap['Tags']))
            if f"{tag_prefix}:ami_id" in snap_tags:
                self.logger.info(
                    f"EBS snapshot {snap['SnapshotId']} created by AMI shelvery backup, skiping..."
                )
                continue

            backup = BackupResource.construct(tag_prefix=tag_prefix,
                                              backup_id=snap['SnapshotId'],
                                              tags=snap_tags)
            # legacy code - entity id should be picked up from tags
            if backup.entity_id is None:
                self.logger.info(
                    f"Entity id missing from tags, using VolumeId {snap['VolumeId']}")
                backup.entity_id = snap['VolumeId']
            backups.append(backup)

        self.populate_volume_information(backups)

        return backups
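One caveat: describe_snapshots returns at most 1,000 matches per call, so on accounts with many backups the lookup above only sees the first page. A paginated variant using boto3's standard paginator, as a sketch (the helper name is hypothetical):

    def _all_marked_snapshots(self, ec2client, tag_prefix: str):
        # Collect every snapshot carrying the backup marker tag,
        # letting the paginator follow NextToken across pages
        paginator = ec2client.get_paginator('describe_snapshots')
        pages = paginator.paginate(Filters=[{
            'Name': f"tag:{tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}",
            'Values': ['true']
        }])
        return [snap for page in pages for snap in page['Snapshots']]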
Example #20
    def populate_snap_entity_resource(self, all_snapshots):
        instance_ids = []
        for snap in all_snapshots:
            if snap['DBInstanceIdentifier'] not in instance_ids:
                instance_ids.append(snap['DBInstanceIdentifier'])
        entities = {}
        rds_client = AwsHelper.boto3_client('rds',
                                            arn=self.role_arn,
                                            external_id=self.role_external_id)
        local_region = boto3.session.Session().region_name

        for instance_id in instance_ids:
            try:
                rds_instance = rds_client.describe_db_instances(
                    DBInstanceIdentifier=instance_id)['DBInstances'][0]
                tags = rds_client.list_tags_for_resource(
                    ResourceName=rds_instance['DBInstanceArn'])['TagList']
                d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))
                rds_entity = EntityResource(instance_id, local_region,
                                            rds_instance['InstanceCreateTime'],
                                            d_tags)
                entities[instance_id] = rds_entity
            except ClientError as e:
                if 'DBInstanceNotFoundFault' in str(type(e)):
                    entities[instance_id] = EntityResource.empty()
                    entities[instance_id].resource_id = instance_id
                else:
                    raise e

        for snap in all_snapshots:
            if snap['DBInstanceIdentifier'] in entities:
                snap['EntityResource'] = entities[snap['DBInstanceIdentifier']]
Example #21
    def is_backup_available(self, backup_region: str, backup_id: str) -> bool:
        regional_client = AwsHelper.boto3_client('ec2', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id)
        ami = regional_client.describe_images(ImageIds=[backup_id])
        if len(ami['Images']) > 0:
            return ami['Images'][0]['State'] == 'available'

        return False
Example #22
    def test_CreateBackups(self):
        ebs_backups_engine = ShelveryEBSBackup()
        try:
            backups = ebs_backups_engine.create_backups()
        except Exception as e:
            print(e)
            print(f"Failed with {e}")
            traceback.print_exc(file=sys.stdout)
            raise e
        ec2client = AwsHelper.boto3_client('ec2')

        valid = False
        # validate a backup was created for the test volume
        for backup in backups:
            if backup.entity_id == self.volume['VolumeId']:
                snapshot_id = backup.backup_id
                self.created_snapshots.append(snapshot_id)
                snapshots = ec2client.describe_snapshots(
                    SnapshotIds=[snapshot_id])['Snapshots']
                self.assertTrue(len(snapshots) == 1)
                self.assertTrue(
                    snapshots[0]['VolumeId'] == self.volume['VolumeId'])
                d_tags = dict(
                    map(lambda x: (x['Key'], x['Value']),
                        snapshots[0]['Tags']))
                marker_tag = f"{RuntimeConfig.get_tag_prefix()}:{BackupResource.BACKUP_MARKER_TAG}"
                self.assertTrue(marker_tag in d_tags)
                self.assertTrue(
                    f"{RuntimeConfig.get_tag_prefix()}:entity_id" in d_tags)
                self.assertTrue(
                    d_tags[f"{RuntimeConfig.get_tag_prefix()}:entity_id"] ==
                    self.volume['VolumeId'])
                valid = True

        self.assertTrue(valid)
Example #23
    def backup_from_latest_automated(self, backup_resource: BackupResource):
        rds_client = AwsHelper.boto3_client('rds',
                                            arn=self.role_arn,
                                            external_id=self.role_external_id)
        auto_snapshots = rds_client.describe_db_cluster_snapshots(
            DBClusterIdentifier=backup_resource.entity_id,
            SnapshotType='automated',
            # API always returns in date descending order, and we only need last one
            MaxRecords=20)
        auto_snapshots = sorted(auto_snapshots['DBClusterSnapshots'],
                                key=lambda k: k['SnapshotCreateTime'],
                                reverse=True)

        if len(auto_snapshots) == 0:
            self.logger.info(
                f"There is no latest automated backup for cluster {backup_resource.entity_id},"
                f" fallback to RDS_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster..."
            )
            return self.backup_from_cluster(backup_resource)

        automated_snapshot_id = auto_snapshots[0][
            'DBClusterSnapshotIdentifier']
        response = rds_client.copy_db_cluster_snapshot(
            SourceDBClusterSnapshotIdentifier=automated_snapshot_id,
            TargetDBClusterSnapshotIdentifier=backup_resource.name,
            CopyTags=False)
        backup_resource.resource_properties = response['DBClusterSnapshot']
        backup_resource.backup_id = backup_resource.name
        return backup_resource
Example #24
    def populate_snap_entity_resource(self, all_snapshots):
        cluster_ids = []

        for snap in all_snapshots:
            if snap['DBClusterIdentifier'] not in cluster_ids:
                cluster_ids.append(snap['DBClusterIdentifier'])

        entities = {}
        rds_client = AwsHelper.boto3_client('rds',
                                            arn=self.role_arn,
                                            external_id=self.role_external_id)
        local_region = boto3.session.Session().region_name

        for cluster_id in cluster_ids:
            try:
                self.logger.info(
                    f"Collecting tags from DB cluster {cluster_id} ...")
                rds_instance = rds_client.describe_db_clusters(
                    DBClusterIdentifier=cluster_id)['DBClusters'][0]
                tags = rds_instance['TagList']
                d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))
                rds_entity = EntityResource(cluster_id, local_region,
                                            rds_instance['ClusterCreateTime'],
                                            d_tags)
                entities[cluster_id] = rds_entity
            except ClientError as e:
                if 'DBClusterNotFoundFault' in str(type(e)):
                    entities[cluster_id] = EntityResource.empty()
                    entities[cluster_id].resource_id = cluster_id
                else:
                    raise e

        for snap in all_snapshots:
            if snap['DBClusterIdentifier'] in entities:
                snap['EntityResource'] = entities[snap['DBClusterIdentifier']]
Example #25
 def get_backup_resource(self, backup_region: str, backup_id: str) -> BackupResource:
     rds_client = AwsHelper.boto3_client('rds', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id)
     snapshots = rds_client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=backup_id)
     snapshot = snapshots['DBClusterSnapshots'][0]
     tags = rds_client.list_tags_for_resource(ResourceName=snapshot['DBClusterSnapshotArn'])['TagList']
     d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))
     return BackupResource.construct(d_tags['shelvery:tag_name'], backup_id, d_tags)
Example #26
    def get_entities_to_backup(self, tag_name: str) -> List[EntityResource]:
        # region and api client
        local_region = boto3.session.Session().region_name
        rds_client = AwsHelper.boto3_client('rds',
                                            arn=self.role_arn,
                                            external_id=self.role_external_id)

        # list of models returned from api
        db_cluster_entities = []

        db_clusters = self.get_all_clusters(rds_client)

        # collect tags and check if the cluster is tagged with the marker tag

        for instance in db_clusters:
            tags = instance['TagList']

            # convert api response to dictionary
            d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))

            # check if marker tag is present
            if tag_name in d_tags and d_tags[
                    tag_name] in SHELVERY_DO_BACKUP_TAGS:
                resource = EntityResource(instance['DBClusterIdentifier'],
                                          local_region,
                                          instance['ClusterCreateTime'],
                                          d_tags)
                db_cluster_entities.append(resource)

        return db_cluster_entities
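SHELVERY_DO_BACKUP_TAGS is imported from elsewhere in the project and enumerates the tag values that opt a resource into backups. Its exact contents are an assumption here, but plausibly along these lines:

# Assumed definition, for illustration only: tag values treated as
# "yes, back this resource up"
SHELVERY_DO_BACKUP_TAGS = ['true', 'True', 'TRUE', '1']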
Example #27
    def test_CreateEc2AmiBackup(self):
        print(f"ec2 ami - Running backup test")

        ec2_ami_backup_engine = ShelveryEC2AMIBackup()
        print(f"ec2 ami - Shelvery backup initialised")

        backups = initCreateBackups(ec2_ami_backup_engine)

        ec2_client = AwsHelper.boto3_client('ec2')

        valid = False
        # validate backups were created for the test instance
        for backup in backups:

            #Get source snapshot
            source_snapshot = ec2_client.describe_snapshots(
                Filters=[{
                    'Name': 'tag:Name',
                    'Values': [backup.name]
                }])

            #Get snapshot id and add to created snapshot list for removal in teardown later
            dest_snapshot_id = source_snapshot['Snapshots'][0]['SnapshotId']
            self.created_snapshots.append(dest_snapshot_id)

            valid = compareBackups(self=self,
                                   backup=backup,
                                   backup_engine=ec2_ami_backup_engine)

        self.assertTrue(valid)
Example #28
    def setUp(self):
        self.created_snapshots = []
        self.regional_snapshots = {'ap-southeast-1': [], 'ap-southeast-2': []}
        os.environ['SHELVERY_MONO_THREAD'] = '1'

        # Create and configure EC2 artefact
        initSetup(self, 'ec2')
        ec2client = AwsHelper.boto3_client('ec2', region_name='ap-southeast-2')

        #Find running ec2 instance; each condition needs its own filter dict,
        #since duplicate keys in a single dict would drop the first condition
        search_filter = [{
            'Name': 'tag:Name',
            'Values': ['shelvery-test-ec2']
        }, {
            'Name': 'instance-state-name',
            'Values': ['running']
        }]

        #Get ec2 instance
        ec2_instance = ec2client.describe_instances(Filters=search_filter)

        #Get instance id
        instance_id = ec2_instance['Reservations'][0]['Instances'][0][
            'InstanceId']
        print("INSTANCE ID: " + str(instance_id))

        createBackupTags(ec2client, [instance_id], "shelvery-test-ec2")

        self.share_with_id = destination_account
Example #29
 def is_backup_available(self, backup_region: str, backup_id: str) -> bool:
     rds_client = AwsHelper.boto3_client('rds',
                                         region_name=backup_region,
                                         arn=self.role_arn,
                                         external_id=self.role_external_id)
     snapshots = rds_client.describe_db_cluster_snapshots(
         DBClusterSnapshotIdentifier=backup_id)
     return snapshots['DBClusterSnapshots'][0]['Status'] == 'available'
Example #30
 def _get_all_entities(self) -> List[EntityResource]:
     ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id)
     response = ec2client.describe_instances()
     # dicts can't be concatenated with '+='; collect each page's
     # reservations instead (assumes _convert_instances_to_entities
     # reads the 'Reservations' key of a describe_instances response)
     reservations = response['Reservations']
     while 'NextToken' in response:
         response = ec2client.describe_instances(NextToken=response['NextToken'])
         reservations.extend(response['Reservations'])
     return self._convert_instances_to_entities({'Reservations': reservations})
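With the pagination fixed, the loop can also lean on boto3's built-in paginator, which follows NextToken internally. A sketch under the same assumption that _convert_instances_to_entities only reads the 'Reservations' key of a describe_instances-shaped response:

 def _get_all_entities(self) -> List[EntityResource]:
     ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id)
     reservations = []
     # one API call per page; the paginator handles NextToken for us
     for page in ec2client.get_paginator('describe_instances').paginate():
         reservations.extend(page['Reservations'])
     return self._convert_instances_to_entities({'Reservations': reservations})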