def wrapped_function(*args, **kwargs):
    request_id = bind_new_request_id()
    backup_id = get_requested_backup_id()
    backup_id_str = "(backupId=%s)" % backup_id if backup_id else ""
    start_date = date_utils.date_now()
    queue_size = self._waitress_server.task_dispatcher.queue.qsize()
    logger.info("%s: NEW REQUEST (requestId=%s) %s [%s total requests"
                " queued]" % (request.path, request_id, backup_id_str,
                              queue_size))
    result = f(*args, **kwargs)
    elapsed = date_utils.timedelta_total_seconds(
        date_utils.date_now() - start_date)
    logger.info("%s: FINISHED (requestId=%s) %s in %s seconds" %
                (request.path, request_id, backup_id_str, elapsed))
    return result
def filter_backups_due_for_expiration(self, backups):
    # anything created before (now - max_time) is outside the retention
    # window and therefore due for expiration
    earliest_date_to_keep = date_minus_seconds(date_now(), self.max_time)
    return filter(lambda backup: backup.created_date < earliest_date_to_keep,
                  backups)
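# Minimal, self-contained sketch of the cutoff logic used by
# filter_backups_due_for_expiration above. `SimpleBackup` and the use of the
# standard-library datetime in place of mbs's date_utils helpers are
# assumptions made for illustration only.
from datetime import datetime, timedelta


class SimpleBackup(object):
    def __init__(self, created_date):
        self.created_date = created_date


def backups_due_for_expiration(backups, max_time_seconds):
    # anything created before (now - max_time_seconds) is due for expiration
    earliest_date_to_keep = datetime.utcnow() - timedelta(
        seconds=max_time_seconds)
    return [b for b in backups if b.created_date < earliest_date_to_keep]


# with a 7-day window, only the 10-day-old backup is due
_backups = [SimpleBackup(datetime.utcnow() - timedelta(days=d))
            for d in (1, 5, 10)]
assert len(backups_due_for_expiration(_backups, 7 * 24 * 3600)) == 1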
def do_delete_target_ref(self, backup, target, target_ref):
    if target_ref.preserve:
        logger.info("Skipping deletion for target ref %s (backup '%s')"
                    " because it is preserved" % (target_ref, backup.id))
        return

    try:
        target_ref.deleted_date = date_now()
        # if the target reference is a cloud storage one then make the cloud
        # storage object take care of it
        if isinstance(target_ref, CloudBlockStorageSnapshotReference):
            logger.info("Deleting backup '%s' snapshot" % backup.id)
            return target_ref.cloud_block_storage.delete_snapshot(target_ref)
        else:
            logger.info("Deleting backup '%s' file" % backup.id)
            return target.delete_file(target_ref)
    except Exception as e:
        if self.is_whitelisted_target_delete_error(backup, target,
                                                   target_ref, e):
            msg = ("Caught a whitelisted error while attempting to delete"
                   " backup %s. Marking backup as deleted. Error: %s" %
                   (backup.id, e))
            logger.warn(msg)
            persistence.update_backup(backup,
                                      event_name="WHITELIST_DELETE_ERROR",
                                      message=msg,
                                      event_type=EventType.WARNING)
            return False
        else:
            # raise error
            raise
def wrapped_function(*args, **kwargs):
    request_id = bind_new_request_id()
    backup_id = get_requested_backup_id()
    backup_id_str = "(backupId=%s)" % backup_id if backup_id else ""
    start_date = date_utils.date_now()
    logger.info("%s: NEW REQUEST (requestId=%s) %s" %
                (request.path, request_id, backup_id_str))
    result = f(*args, **kwargs)
    elapsed = date_utils.timedelta_total_seconds(
        date_utils.date_now() - start_date)
    logger.info("%s: FINISHED (requestId=%s) %s in %s seconds" %
                (request.path, request_id, backup_id_str, elapsed))
    return result
def run(self):
    self._setup_logging()
    self._sweep_queue = multiprocessing.JoinableQueue()
    # create an inlined schedule runner
    while True:
        next_occurrence = self.schedule.next_natural_occurrence()
        while date_now() < next_occurrence:
            time.sleep(1)
        self.tick()
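# Illustrative stand-in for the `self.schedule` object consumed by the loop
# above: all the loop needs is a next_natural_occurrence() returning the next
# aligned run time. This is a sketch under that assumption, not mbs's real
# Schedule class.
from datetime import datetime, timedelta


class FixedIntervalSchedule(object):
    def __init__(self, frequency_in_seconds):
        self.frequency_in_seconds = frequency_in_seconds

    def next_natural_occurrence(self):
        # next multiple of the frequency, measured from the unix epoch
        epoch = datetime(1970, 1, 1)
        elapsed = (datetime.utcnow() - epoch).total_seconds()
        intervals = int(elapsed // self.frequency_in_seconds) + 1
        return epoch + timedelta(
            seconds=intervals * self.frequency_in_seconds)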
def wrapped_function(*args, **kwargs):
    request_id = bind_new_request_id()
    backup_id = get_requested_backup_id()
    backup_id_str = "(backupId=%s)" % backup_id if backup_id else ""
    start_date = date_utils.date_now()
    logger.info("%s: NEW REQUEST (requestId=%s) %s" %
                (request.path, request_id, backup_id_str))
    try:
        result = f(*args, **kwargs)
        elapsed = date_utils.timedelta_total_seconds(
            date_utils.date_now() - start_date)
        logger.info("%s: FINISHED (requestId=%s) %s in %s seconds" %
                    (request.path, request_id, backup_id_str, elapsed))
        return result
    except Exception as ex:
        elapsed = date_utils.timedelta_total_seconds(
            date_utils.date_now() - start_date)
        logger.info("%s: FINISHED WITH ERROR (requestId=%s) %s in %s seconds."
                    " Error: %s" %
                    (request.path, request_id, backup_id_str, elapsed, ex))
        raise
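# Hedged sketch of the request-logging decorator pattern that the
# wrapped_function variants above are built around, reduced to its
# essentials. `log_request` and the use of f.__name__ instead of the web
# framework's request.path are illustrative choices, not the real mbs api
# decorator, which also binds request ids and backup ids.
import functools
import logging
import time

logger = logging.getLogger(__name__)


def log_request(f):
    @functools.wraps(f)
    def wrapped_function(*args, **kwargs):
        start = time.time()
        logger.info("%s: NEW REQUEST", f.__name__)
        try:
            result = f(*args, **kwargs)
            logger.info("%s: FINISHED in %s seconds",
                        f.__name__, time.time() - start)
            return result
        except Exception as ex:
            logger.info("%s: FINISHED WITH ERROR in %s seconds. Error: %s",
                        f.__name__, time.time() - start, ex)
            raise

    return wrapped_function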
def robustified_delete_backup(self, backup):
    """
        deletes the backup targets
    """
    # do some validation
    target_ref = backup.target_reference
    if backup.state == State.SUCCEEDED and not target_ref:
        raise BackupSweepError("Cannot delete backup '%s'. "
                               "Backup never uploaded" % backup.id)

    logger.info("Deleting target references for backup '%s'." % backup.id)

    logger.info("Deleting primary target reference for backup '%s'." %
                backup.id)
    # target ref can be None for CANCELED backups
    if target_ref:
        self.do_delete_target_ref(backup, backup.target, target_ref)

    # delete log file
    if backup.log_target_reference:
        logger.info("Deleting log target reference for backup '%s'." %
                    backup.id)
        self.do_delete_target_ref(backup, backup.target,
                                  backup.log_target_reference)

    if backup.secondary_target_references:
        logger.info("Deleting secondary target references for backup '%s'." %
                    backup.id)
        sec_targets = backup.secondary_targets
        sec_target_refs = backup.secondary_target_references
        for (sec_target, sec_tgt_ref) in zip(sec_targets, sec_target_refs):
            logger.info("Deleting secondary target reference %s for backup "
                        "'%s'." % (sec_tgt_ref, backup.id))
            self.do_delete_target_ref(backup, sec_target, sec_tgt_ref)

    # set deleted date
    backup.deleted_date = date_now()
    update_props = ["deletedDate", "targetReference",
                    "secondaryTargetReferences"]
    persistence.update_backup(backup, properties=update_props,
                              event_name="DELETING",
                              message="Deleting target references")

    logger.info("Backup %s target references deleted successfully!" %
                backup.id)
def expire_backup(self, backup, force=False):
    # do some validation
    if backup.state == State.SUCCEEDED and not backup.target_reference:
        raise BackupExpirationError("Cannot expire backup '%s'. "
                                    "Backup never uploaded" % backup.id)

    if not (force or backup.state == State.CANCELED):
        self.validate_backup_expiration(backup)

    if not self.test_mode:
        try:
            logger.info("BackupExpirationManager: Expiring backup '%s'" %
                        backup.id)
            backup.expired_date = date_now()
            persistence.update_backup(backup, properties="expiredDate",
                                      event_name="EXPIRING",
                                      message="Expiring")
        except Exception as e:
            msg = ("Error while attempting to expire backup '%s': %s" %
                   (backup.id, e))
            logger.exception(msg)
def validate_recurring_backup_expiration(self, backup):
    logger.info("Validating if recurring backup '%s' should be "
                "expired now" % backup.id)
    # Ensure we have the latest revision of the backup plan when possible
    plan = persistence.get_backup_plan(backup.plan.id) or backup.plan
    rp = plan.retention_policy
    if not rp:
        raise BackupExpirationError(
            "Bad attempt to expire backup '%s'. "
            "Backup plan does not have a retention policy" % backup.id)
    occurrences_to_retain = \
        rp.get_plan_occurrences_to_retain_as_of(plan, date_now())

    if backup.plan_occurrence in occurrences_to_retain:
        raise BackupExpirationError(
            "Bad attempt to expire backup '%s'. Backup must not be"
            " expired now." % backup.id)
    else:
        logger.info("Backup '%s' can be expired now" % backup.id)
def max_expire_date_to_delete(self):
    return date_minus_seconds(date_now(), self.delete_delay_in_seconds)
def expired_canceled_cutoff_date(self):
    return date_minus_seconds(date_now(),
                              self.expire_canceled_delay_in_seconds)
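# Worked example of the cutoff arithmetic behind the two helpers above,
# assuming date_minus_seconds behaves like plain timedelta subtraction
# (the concrete dates and delay below are made up for illustration).
from datetime import datetime, timedelta

now = datetime(2016, 1, 10, 12, 0, 0)
delete_delay_in_seconds = 5 * 24 * 3600  # e.g. keep expired backups 5 days
cutoff = now - timedelta(seconds=delete_delay_in_seconds)
assert cutoff == datetime(2016, 1, 5, 12, 0, 0)
# only backups whose expired date precedes `cutoff` would be deleted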