Example #1
0
    def do_store_backup_data(self, map_args=None, **kwargs):
        """
        Actual logic for writing backup resource data to s3. Waits for backup
        availability.

        :param map_args: optional dict of extra arguments merged into kwargs
                         (its entries take precedence on key collisions)
        :param kwargs: must contain 'BackupId' and 'BackupRegion'
        :return: None
        """
        # map_args defaults to None to avoid the shared-mutable-default pitfall
        kwargs.update(map_args or {})
        backup_id = kwargs['BackupId']
        backup_region = kwargs['BackupRegion']
        backup_resource = self.get_backup_resource(backup_region, backup_id)
        # if backup is not available, exit and rely on recursive lambda call write metadata
        # in non lambda mode this should never happen
        if RuntimeConfig.is_offload_queueing(self):
            if not self.is_backup_available(backup_region, backup_id):
                # re-queue the operation until the backup becomes available
                self.store_backup_data(backup_resource)
        else:
            if not self.wait_backup_available(backup_region=backup_region,
                                              backup_id=backup_id,
                                              lambda_method='do_store_backup_data',
                                              lambda_args=kwargs):
                return

        # backfill the owning account on resources created before that field existed
        if backup_resource.account_id is None:
            backup_resource.account_id = self.account_id
        bucket = self._get_data_bucket(backup_resource.region)
        self._write_backup_data(backup_resource, bucket)
Example #2
0
    def do_share_backup(self, map_args=None, **kwargs):
        """
        Share backup with other AWS account, actual implementation.

        :param map_args: optional dict of extra arguments merged into kwargs
                         (its entries take precedence on key collisions)
        :param kwargs: must contain 'BackupId', 'Region' and 'AwsAccountId'
        :return: None
        """
        # map_args defaults to None to avoid the shared-mutable-default pitfall
        kwargs.update(map_args or {})
        backup_id = kwargs['BackupId']
        backup_region = kwargs['Region']
        destination_account_id = kwargs['AwsAccountId']
        backup_resource = self.get_backup_resource(backup_region, backup_id)
        # if backup is not available, exit and rely on recursive lambda call do share backup
        # in non lambda mode this should never happen
        if RuntimeConfig.is_offload_queueing(self):
            if not self.is_backup_available(backup_region, backup_id):
                # re-queue the share operation until the backup becomes available
                self.share_backup(backup_resource, destination_account_id)
        else:
            if not self.wait_backup_available(backup_region=backup_region,
                                              backup_id=backup_id,
                                              lambda_method='do_share_backup',
                                              lambda_args=kwargs):
                return

        self.logger.info(
            f"Do share backup {backup_id} ({backup_region}) with {destination_account_id}"
        )
        try:
            self.share_backup_with_account(backup_region, backup_id,
                                           destination_account_id)
            # re-read the resource so the freshly applied share state is captured
            backup_resource = self.get_backup_resource(backup_region,
                                                       backup_id)
            self._write_backup_data(backup_resource,
                                    self._get_data_bucket(backup_region),
                                    destination_account_id)
            self.snspublisher.notify({
                'Operation': 'ShareBackup',
                'Status': 'OK',
                'BackupType': self.get_engine_type(),
                'BackupName': backup_resource.name,
                'DestinationAccount': kwargs['AwsAccountId']
            })
        except Exception as e:
            self.snspublisher_error.notify({
                'Operation': 'ShareBackup',
                'Status': 'ERROR',
                'ExceptionInfo': e.__dict__,
                'BackupType': self.get_engine_type(),
                'BackupId': backup_id,
                'DestinationAccount': kwargs['AwsAccountId']
            })
            self.logger.exception(
                f"Failed to share backup {backup_id} ({backup_region}) with account {destination_account_id}"
            )
Example #3
0
    def invoke_shelvery_operation(self, engine, method_name: str, method_arguments: Dict):
        """
        Invokes shelvery engine asynchronously
        If shelvery is running within lambda environment, new lambda function invocation will be made. If running
        on server, it will start new thread and invoke the function
        Function invoke must accept arguments in form of map
        """
        payload_map = {
            'backup_type': engine.get_engine_type(),
            'action': method_name,
            'arguments': method_arguments
        }
        if RuntimeConfig.is_lambda_runtime(engine):
            # propagate any caller-supplied config into the follow-up invocation
            if 'config' in engine.lambda_payload:
                payload_map['config'] = engine.lambda_payload['config']

            if RuntimeConfig.is_offload_queueing(engine):
                # queued mode: hand the operation off to SQS instead of re-invoking
                queue = ShelveryQueue(
                    RuntimeConfig.get_sqs_queue_url(engine),
                    RuntimeConfig.get_sqs_queue_wait_period(engine))
                queue.send(payload_map)
            else:
                # direct mode: fire an async invocation of this same lambda
                payload_map['is_started_internally'] = True
                raw_payload = bytearray(json.dumps(payload_map), 'ascii')
                own_function = os.environ['AWS_LAMBDA_FUNCTION_NAME']
                AwsHelper.boto3_client('lambda').invoke_async(
                    FunctionName=own_function, InvokeArgs=raw_payload)
        else:
            engine_type = engine.get_engine_type()

            def run_operation():
                # build a fresh engine instance inside the worker thread
                from shelvery.factory import ShelveryFactory
                instance = ShelveryFactory.get_shelvery_instance(engine_type)
                getattr(instance, method_name)(method_arguments)

            logging.info(f"Start new thread to execute :{method_name}")
            if os.environ.get('SHELVERY_MONO_THREAD') == "1":
                # debugging aid: run synchronously in the current thread
                run_operation()
            else:
                Thread(target=run_operation).start()
Example #4
0
    def do_copy_backup(self, map_args=None, **kwargs):
        """
        Copy backup to another region, actual implementation.

        :param map_args: optional dict of extra arguments merged into kwargs
                         (its entries take precedence on key collisions)
        :param kwargs: must contain 'BackupId', 'OriginRegion' and 'Region'
                       (the destination region)
        :return: None
        """
        # map_args defaults to None to avoid the shared-mutable-default pitfall
        kwargs.update(map_args or {})
        backup_id = kwargs['BackupId']
        origin_region = kwargs['OriginRegion']
        backup_resource = self.get_backup_resource(origin_region, backup_id)
        # if backup is not available, exit and rely on recursive lambda call copy backup
        # in non lambda mode this should never happen
        if RuntimeConfig.is_offload_queueing(self):
            if not self.is_backup_available(origin_region, backup_id):
                # BUGFIX: the original nested the resource and DR-region list
                # inside get_backup_resource(region, backup_id), which matches
                # neither that signature nor the sibling methods' re-queue
                # pattern. Re-queue the copy operation directly instead.
                self.copy_backup(
                    backup_resource,
                    RuntimeConfig.get_dr_regions(backup_resource.entity_resource.tags, self))
        else:
            if not self.wait_backup_available(backup_region=origin_region,
                                              backup_id=backup_id,
                                              lambda_method='do_copy_backup',
                                              lambda_args=kwargs):
                return

        self.logger.info(f"Do copy backup {kwargs['BackupId']} ({kwargs['OriginRegion']}) to region {kwargs['Region']}")

        # copy backup
        try:
            src_region = kwargs['OriginRegion']
            dst_region = kwargs['Region']
            regional_backup_id = self.copy_backup_to_region(kwargs['BackupId'], dst_region)

            # create tags on backup copy
            original_backup_id = kwargs['BackupId']
            original_backup = self.get_backup_resource(src_region, original_backup_id)
            resource_copy = BackupResource(None, None, True)
            resource_copy.backup_id = regional_backup_id
            resource_copy.region = kwargs['Region']
            resource_copy.tags = original_backup.tags.copy()

            # add metadata to dr copy and original
            dr_copies_tag_key = f"{RuntimeConfig.get_tag_prefix()}:dr_copies"
            resource_copy.tags[f"{RuntimeConfig.get_tag_prefix()}:region"] = dst_region
            resource_copy.tags[f"{RuntimeConfig.get_tag_prefix()}:dr_copy"] = 'true'
            resource_copy.tags[
                f"{RuntimeConfig.get_tag_prefix()}:dr_source_backup"] = f"{src_region}:{original_backup_id}"

            # append this copy to the original's space-separated dr_copies list
            if dr_copies_tag_key not in original_backup.tags:
                original_backup.tags[dr_copies_tag_key] = ''
            original_backup.tags[dr_copies_tag_key] = original_backup.tags[
                                                          dr_copies_tag_key] + f"{dst_region}:{regional_backup_id} "

            self.tag_backup_resource(resource_copy)
            self.tag_backup_resource(original_backup)
            self.snspublisher.notify({
                'Operation': 'CopyBackupToRegion',
                'Status': 'OK',
                'DestinationRegion': kwargs['Region'],
                'BackupType': self.get_engine_type(),
                'BackupId': kwargs['BackupId'],
            })
            self.store_backup_data(resource_copy)
        except Exception as e:
            self.snspublisher_error.notify({
                'Operation': 'CopyBackupToRegion',
                'Status': 'ERROR',
                'ExceptionInfo': e.__dict__,
                'DestinationRegion': kwargs['Region'],
                'BackupType': self.get_engine_type(),
                'BackupId': kwargs['BackupId'],
            })
            self.logger.exception(f"Error copying backup {kwargs['BackupId']} to {kwargs['Region']}")
            # BUGFIX: the copy failed, so there is no regional backup to share;
            # proceeding would raise NameError on regional_backup_id below.
            return

        # shared backup copy with same accounts
        for shared_account_id in RuntimeConfig.get_share_with_accounts(self):
            backup_resource = BackupResource(None, None, True)
            backup_resource.backup_id = regional_backup_id
            backup_resource.region = kwargs['Region']
            try:
                self.share_backup(backup_resource, shared_account_id)
                self.snspublisher.notify({
                    'Operation': 'ShareRegionalBackupCopy',
                    'Status': 'OK',
                    'DestinationAccount': shared_account_id,
                    'DestinationRegion': kwargs['Region'],
                    'BackupType': self.get_engine_type(),
                    'BackupId': kwargs['BackupId'],
                })
            except Exception as e:
                self.snspublisher_error.notify({
                    'Operation': 'ShareRegionalBackupCopy',
                    'Status': 'ERROR',
                    'DestinationAccount': shared_account_id,
                    'DestinationRegion': kwargs['Region'],
                    'ExceptionInfo': e.__dict__,
                    'BackupType': self.get_engine_type(),
                    'BackupId': kwargs['BackupId'],
                })
                self.logger.exception(f"Error sharing copied backup {kwargs['BackupId']} to {dst_region}")
Example #5
0
    def do_share_backup(self, map_args=None, **kwargs):
        """
        Share backup with other AWS account, actual implementation.

        :param map_args: optional dict of extra arguments merged into kwargs
                         (its entries take precedence on key collisions)
        :param kwargs: must contain 'BackupId', 'Region' and 'AwsAccountId'
        :return: None
        """
        # map_args defaults to None to avoid the shared-mutable-default pitfall
        kwargs.update(map_args or {})
        backup_id = kwargs['BackupId']
        backup_region = kwargs['Region']
        destination_account_id = kwargs['AwsAccountId']
        backup_resource = self.get_backup_resource(backup_region, backup_id)
        # if backup is not available, exit and rely on recursive lambda call do share backup
        # in non lambda mode this should never happen
        if RuntimeConfig.is_offload_queueing(self):
            if not self.is_backup_available(backup_region, backup_id):
                # re-queue the share operation until the backup becomes available
                self.share_backup(backup_resource, destination_account_id)
        else:
            if not self.wait_backup_available(backup_region=backup_region,
                                              backup_id=backup_id,
                                              lambda_method='do_share_backup',
                                              lambda_args=kwargs):
                return

        self.logger.info(
            f"Do share backup {backup_id} ({backup_region}) with {destination_account_id}"
        )
        try:
            self.share_backup_with_account(backup_region, backup_id,
                                           destination_account_id)
            # re-read the resource so the freshly applied share state is captured
            backup_resource = self.get_backup_resource(backup_region,
                                                       backup_id)
            self._write_backup_data(backup_resource,
                                    self._get_data_bucket(backup_region),
                                    destination_account_id)
            self.snspublisher.notify({
                'Operation': 'ShareBackup',
                'Status': 'OK',
                'BackupType': self.get_engine_type(),
                'BackupName': backup_resource.name,
                'DestinationAccount': kwargs['AwsAccountId']
            })
        except ClientError as e:
            if e.response['Error']['Code'] == 'InvalidDBSnapshotState':
                # This will occasionally happen due to AWS eventual consistency model
                # (logger.warn is a deprecated alias of logger.warning)
                self.logger.warning(
                    f"Retrying to share backup {backup_id} ({backup_region}) with account {destination_account_id} due to exception InvalidDBSnapshotState"
                )
                self.share_backup(backup_resource, destination_account_id)

            elif e.response['Error']['Code'] == 'InvalidParameterValue':
                # Some backups may fail to be shared due to AWS limitations
                self.logger.warning(
                    f"Attempt to share backup '{backup_id}' in ({backup_region}) with account {destination_account_id} failed: {str(e)}"
                )
            else:
                self.snspublisher_error.notify({
                    'Operation': 'ShareBackup',
                    'Status': 'ERROR',
                    'ExceptionInfo': e.__dict__,
                    'BackupType': self.get_engine_type(),
                    'BackupId': backup_id,
                    'DestinationAccount': kwargs['AwsAccountId']
                })
                self.logger.exception(
                    f"Failed to share backup {backup_id} ({backup_region}) with account {destination_account_id}"
                )