def create_backups(self) -> List[BackupResource]:
    """Create backups from all collected entities marked for backup by using specific tag.

    Collects all resources of this engine's type carrying the
    '<tag_prefix>:<BACKUP_RESOURCE_TAG>' marker tag, optionally narrows the set to a
    single entity selected via runtime configuration, creates and tags one backup per
    resource, then copies each backup to the configured DR regions and shares it with
    the configured AWS accounts.

    Returns:
        List[BackupResource]: the backups that were successfully created; entities
        whose backup failed are logged and skipped.
    """
    # collect resources to be backed up
    resource_type = self.get_resource_type()
    # build the marker tag once instead of rebuilding it for the log line and the query
    backup_tag = f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}"
    self.logger.info(
        f"Collecting entities of type {resource_type} tagged with {backup_tag}")
    resources = self.get_entities_to_backup(backup_tag)

    # allows user to select single entity to be backed up
    # (call the config lookup once rather than twice, as the value cannot change between calls)
    entity_id = RuntimeConfig.get_shelvery_select_entity(self)
    if entity_id is not None:
        self.logger.info(f"Creating backups only for entity {entity_id}")
        resources = [r for r in resources if r.resource_id == entity_id]

    self.logger.info(
        f"{len(resources)} resources of type {resource_type} collected for backup"
    )

    # create and collect backups
    backup_resources = []
    for r in resources:
        backup_resource = BackupResource(
            tag_prefix=RuntimeConfig.get_tag_prefix(),
            entity_resource=r)
        self.logger.info(
            f"Processing {resource_type} with id {r.resource_id}")
        self.logger.info(f"Creating backup {backup_resource.name}")
        try:
            self.backup_resource(backup_resource)
            self.tag_backup_resource(backup_resource)
            self.logger.info(
                f"Created backup of type {resource_type} for entity {backup_resource.entity_id} "
                f"with id {backup_resource.backup_id}")
            backup_resources.append(backup_resource)
        except Exception as e:
            # best-effort: one failed entity must not abort the rest of the run
            self.logger.exception(
                f"Failed to create backup {backup_resource.name}:{e}")

    # create backups and disaster recovery region
    for br in backup_resources:
        self.copy_backup(
            br, RuntimeConfig.get_dr_regions(br.entity_resource.tags, self))
    for aws_account_id in RuntimeConfig.get_share_with_accounts(self):
        for br in backup_resources:
            self.share_backup(br, aws_account_id)
    return backup_resources
def clean_backups(self):
    """Delete expired backups and notify the result via SNS.

    Collects all existing backups for this engine's tag prefix, optionally narrows
    them to a single entity selected via runtime configuration, then checks each
    backup against the retention policy. Stale backups are deleted, their metadata
    archived to the data bucket, and a 'DeleteBackup' SNS notification is published;
    failures are published on the error topic and logged, without aborting the scan.
    """
    # collect backups
    existing_backups = self.get_existing_backups(RuntimeConfig.get_tag_prefix())

    # allows user to select single entity backups to be cleaned
    # (call the config lookup once rather than twice, as the value cannot change between calls)
    entity_id = RuntimeConfig.get_shelvery_select_entity(self)
    if entity_id is not None:
        self.logger.info(f"Checking only for backups of entity {entity_id}")
        existing_backups = [b for b in existing_backups if b.entity_id == entity_id]

    self.logger.info(f"Collected {len(existing_backups)} backups to be checked for expiry date")
    self.logger.info(f"""Using following retention settings from runtime environment (resource overrides enabled):
                        Keeping last {RuntimeConfig.get_keep_daily(None, self)} daily backups
                        Keeping last {RuntimeConfig.get_keep_weekly(None, self)} weekly backups
                        Keeping last {RuntimeConfig.get_keep_monthly(None, self)} monthly backups
                        Keeping last {RuntimeConfig.get_keep_yearly(None, self)} yearly backups""")

    # check backups for expire date, delete if necessary
    for backup in existing_backups:
        self.logger.info(f"Checking backup {backup.backup_id}")
        try:
            if backup.is_stale(self, RuntimeConfig.get_custom_retention_types(self)):
                self.logger.info(
                    f"{backup.retention_type} backup {backup.name} has expired on {backup.expire_date}, cleaning up")
                self.delete_backup(backup)
                # NOTE(review): utcnow() is naive and deprecated in 3.12+; switching to
                # datetime.now(timezone.utc) would change the stored value to an aware
                # datetime — confirm downstream serialization before changing.
                backup.date_deleted = datetime.utcnow()
                # keep a record of the deleted backup's metadata before it disappears
                self._archive_backup_metadata(backup, self._get_data_bucket(),
                                              RuntimeConfig.get_share_with_accounts(self))
                self.snspublisher.notify({
                    'Operation': 'DeleteBackup',
                    'Status': 'OK',
                    'BackupType': self.get_engine_type(),
                    'BackupName': backup.name,
                })
            else:
                self.logger.info(f"{backup.retention_type} backup {backup.name} is valid "
                                 f"until {backup.expire_date}, keeping this backup")
        except Exception as e:
            # publish the failure, then keep scanning the remaining backups
            self.snspublisher_error.notify({
                'Operation': 'DeleteBackup',
                'Status': 'ERROR',
                'ExceptionInfo': e.__dict__,
                'BackupType': self.get_engine_type(),
                'BackupName': backup.name,
            })
            # fix: the f-string was broken across two source lines with a literal
            # newline inside the quotes (invalid syntax) — rejoined into one line
            self.logger.exception(f"Error checking backup {backup.backup_id} for cleanup: {e}")
def clean_backups(self):
    """Scan existing backups and remove any whose retention period has expired.

    Pulls every backup carrying this engine's tag prefix, optionally restricts
    the scan to one entity chosen through runtime configuration, logs the active
    retention settings, and deletes each backup reported as stale. Errors on an
    individual backup are logged and do not stop the scan.
    """
    # collect backups
    existing_backups = self.get_existing_backups(
        RuntimeConfig.get_tag_prefix())

    # allows user to select single entity backups to be cleaned
    if RuntimeConfig.get_shelvery_select_entity(self) is not None:
        entity_id = RuntimeConfig.get_shelvery_select_entity(self)
        self.logger.info(
            f"Checking only for backups of entity {entity_id}")
        existing_backups = [b for b in existing_backups if b.entity_id == entity_id]

    self.logger.info(
        f"Collected {len(existing_backups)} backups to be checked for expiry date"
    )
    self.logger.info(
        f"""Using following retention settings from runtime environment (resource overrides enabled):
            Keeping last {RuntimeConfig.get_keep_daily(None, self)} daily backups
            Keeping last {RuntimeConfig.get_keep_weekly(None, self)} weekly backups
            Keeping last {RuntimeConfig.get_keep_monthly(None, self)} monthly backups
            Keeping last {RuntimeConfig.get_keep_yearly(None, self)} yearly backups"""
    )

    # check backups for expire date, delete if necessary
    for backup in existing_backups:
        self.logger.info(f"Checking backup {backup.backup_id}")
        try:
            stale = backup.is_stale(self)
            if not stale:
                # still within retention — leave it alone
                self.logger.info(
                    f"{backup.retention_type} backup {backup.name} is valid "
                    f"until {backup.expire_date}, keeping this backup")
                continue
            self.logger.info(
                f"{backup.retention_type} backup {backup.name} has expired on {backup.expire_date}, cleaning up"
            )
            self.delete_backup(backup)
        except Exception as ex:
            # TODO notify via SNS
            self.logger.exception(
                f"Error checking backup {backup.backup_id} for cleanup: {ex}"
            )
def create_backups(self) -> List[BackupResource]:
    """Create backups from all collected entities marked for backup by using specific tag.

    Collects all resources of this engine's type carrying the
    '<tag_prefix>:<BACKUP_RESOURCE_TAG>' marker tag, optionally narrows the set to a
    single entity selected via runtime configuration, then creates, tags, stores and
    SNS-announces one backup per resource. Afterwards each backup is copied to its DR
    regions and shared with the configured AWS accounts.

    Returns:
        List[BackupResource]: the backups that were successfully created; entities
        whose backup failed are reported on the error SNS topic and skipped.
    """
    # collect resources to be backed up
    resource_type = self.get_resource_type()
    # build the marker tag once instead of rebuilding it for the log line and the query
    backup_tag = f"{RuntimeConfig.get_tag_prefix()}:{self.BACKUP_RESOURCE_TAG}"
    self.logger.info(
        f"Collecting entities of type {resource_type} tagged with {backup_tag}")
    resources = self.get_entities_to_backup(backup_tag)

    # allows user to select single entity to be backed up
    # (call the config lookup once rather than twice, as the value cannot change between calls)
    entity_id = RuntimeConfig.get_shelvery_select_entity(self)
    if entity_id is not None:
        self.logger.info(f"Creating backups only for entity {entity_id}")
        resources = [r for r in resources if r.resource_id == entity_id]

    self.logger.info(f"{len(resources)} resources of type {resource_type} collected for backup")

    # create and collect backups
    backup_resources = []
    current_retention_type = RuntimeConfig.get_current_retention_type(self)
    for r in resources:
        backup_resource = BackupResource(
            tag_prefix=RuntimeConfig.get_tag_prefix(),
            entity_resource=r,
            copy_resource_tags=RuntimeConfig.copy_resource_tags(self),
            # NOTE(review): 'exluded' misspelling matches the project's own
            # parameter/getter names — keep as-is until renamed project-wide
            exluded_resource_tag_keys=RuntimeConfig.get_exluded_resource_tag_keys(self)
        )

        # if retention is explicitly given by runtime environment
        if current_retention_type is not None:
            backup_resource.set_retention_type(current_retention_type)

        # record the DR targets on the backup itself so downstream copies know them
        dr_regions = RuntimeConfig.get_dr_regions(backup_resource.entity_resource.tags, self)
        backup_resource.tags[f"{RuntimeConfig.get_tag_prefix()}:dr_regions"] = ','.join(dr_regions)
        self.logger.info(f"Processing {resource_type} with id {r.resource_id}")
        self.logger.info(f"Creating backup {backup_resource.name}")
        try:
            self.backup_resource(backup_resource)
            self.tag_backup_resource(backup_resource)
            self.logger.info(f"Created backup of type {resource_type} for entity {backup_resource.entity_id} "
                             f"with id {backup_resource.backup_id}")
            backup_resources.append(backup_resource)
            self.store_backup_data(backup_resource)
            self.snspublisher.notify({
                'Operation': 'CreateBackup',
                'Status': 'OK',
                'BackupType': self.get_engine_type(),
                'BackupName': backup_resource.name,
                'EntityId': backup_resource.entity_id
            })
        except Exception as e:
            # publish the failure, then continue with the remaining entities
            self.snspublisher_error.notify({
                'Operation': 'CreateBackup',
                'Status': 'ERROR',
                'ExceptionInfo': e.__dict__,
                'BackupType': self.get_engine_type(),
                'BackupName': backup_resource.name,
                'EntityId': backup_resource.entity_id
            })
            self.logger.exception(f"Failed to create backup {backup_resource.name}:{e}")

    # create backups and disaster recovery region
    for br in backup_resources:
        self.copy_backup(br, RuntimeConfig.get_dr_regions(br.entity_resource.tags, self))

    for aws_account_id in RuntimeConfig.get_share_with_accounts(self):
        for br in backup_resources:
            self.share_backup(br, aws_account_id)

    return backup_resources