def execute(state):
    """Execute state"""
    # Debug config
    _D.DEBUG(__name__, "App execute")

    # Execute runlogr
    err, msg = runlogr.execute(state.states.runlogr)
    if err:
        _D.ERROR(__name__, "Error executing runlogr", "msg", msg)
        return 0, None

    # Check if last run completed
    running = runlogr.get_section_item(state.states.runlogr, "app", "running<bool>")

    # Disallow running if another instance is active
    if running:
        msg = (
            "Oops, exiting. Another instance of Aeroback is still marked as running. "
            "If you believe that's not the case then manually change parameter 'running' "
            "to False in section [app] of file: {}".format(
                runlogr.get_filepath(state.states.runlogr)
            )
        )
        state.add_msg_error(msg)
        _D.ERROR(__name__, "Another instance may still be running", "msg", msg)
        return 0, None

    # Update runlog with app start
    runlogr.set_section(
        state.states.runlogr,
        "app",
        {"last_run<time>": state.model.date, "running<bool>": True},
    )

    # Execute main logic inside try-except because
    # we need to mark execution as finished
    # regardless of exception
    try:
        err, msg = _execute(state)
    except Exception:
        _D.EXCEPTION(__name__, "Exception executing app")
        # Update runlog with app finish, marking the error
        _runlog_update_finish(state, "app", err_bool=True)
        return 0, None

    # Update runlog with app finish
    _runlog_update_finish(state, "app", err_bool=False)

    return 0, None
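
# For illustration only: runlogr manages an .ini-style runlog file, and the
# keys below are the ones written by execute() and _runlog_update_finish().
# The exact formatting and values are hypothetical and determined by the
# runlogr module; a plausible [app] section after a successful run might be:
#
#   [app]
#   last_run<time> = 2014-05-01 03:00:00
#   running<bool> = False
#   err<bool> = False
#   finish<time> = 2014-05-01 03:12:41
#   duration = 0:12:41
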
def _runlog_update_finish(state, section_name, err_bool):
    """
    Update runlog.ini file with finishing parameters
    """
    # TODO count time from the value stored in the section;
    # if the section is absent, count from the current app start
    # ...
    start = runlogr.get_section_item(state.states.runlogr, section_name, "last_run<time>")
    if not start:
        start = state.model.date

    now = datetime.datetime.today()
    duration = now - start
    duration = int(duration.total_seconds())
    duration = str(datetime.timedelta(seconds=duration))

    runlogr.set_section(
        state.states.runlogr,
        section_name,
        {
            "running<bool>": False,
            "err<bool>": err_bool,
            "finish<time>": now,
            "duration": duration,
        },
    )
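
# Illustration (not part of the app flow): how the duration string above is
# produced, using only the standard library. The timestamps are hypothetical.
# Truncating to int seconds drops the fractional part, so the stored value
# reads like '1:05:30' rather than '1:05:30.123456'.
#
#   >>> import datetime
#   >>> start = datetime.datetime(2014, 1, 1, 12, 0, 0)
#   >>> now = datetime.datetime(2014, 1, 1, 13, 5, 30)
#   >>> str(datetime.timedelta(seconds=int((now - start).total_seconds())))
#   '1:05:30'
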
def _exec_job(state, job, job_n, jobs_count):
    # Init & exec each active storage
    err, msg = context.set_param("gsutil", job["gsutil"])
    if err:
        return 1, msg

    # Report errors and keep working:
    # allow the job to run on the healthy storages
    job["storstates"] = []
    for storparams in job["storages"]:
        if storparams["active"]:
            # Create temp storager directory
            dir_temp = tempfile.mkdtemp(dir=state.model.dir_temp)

            # Init storager
            storstate, err, msg = storager.init(
                state.model.date_str, state.model.date_int, dir_temp, storparams
            )
            if err:
                _D.ERROR(__name__, "Skipping storage due to error in init", "msg", msg)
                continue

            # Exec storager
            err, msg = storager.execute(storstate)
            if err:
                _D.ERROR(__name__, "Skipping storage due to error in exec", "msg", msg)
                continue

            # Add to list of storager states
            job["storstates"].append(storstate)

    # Any storagers configured?
    if not job["storstates"]:
        return 1, "No storagers configured, aborting job"

    # ... do backup jobs ...

    # Run each backup type for each storage
    # Report errors and keep working
    for backup in job["backups"]:
        if backup["active"]:
            # Check previous backup finished
            section_name = "{}:{}".format(backup["type"], backup["dirstorage"])
            running = runlogr.get_section_item(state.states.runlogr, section_name, "running<bool>")
            if running:
                msg = (
                    "Previous run of the backup is still marked as running. "
                    "If you believe that's not the case then manually change parameter "
                    "'running' to False in section [{}] of file: {}".format(
                        section_name, runlogr.get_filepath(state.states.runlogr)
                    )
                )
                state.add_msg_error(msg)
                _D.ERROR(
                    __name__,
                    "Previous run of backup not yet finished",
                    "backup type",
                    backup["type"],
                    "dirstorage",
                    backup["dirstorage"],
                    "msg",
                    msg,
                )
                continue

            # Check if it's time to run:
            # _time_to_run(backup_type, last_run, now, period)
            if not _time_to_run(
                backup["type"],
                runlogr.get_section_item(state.states.runlogr, section_name, "last_run<time>"),
                state.model.date,
                backup.get("frequency", None),
            ):
                # Time hasn't come yet, skip this backup
                continue

            # Update runlog with backup start
            runlogr.set_section(
                state.states.runlogr,
                section_name,
                {"last_run<time>": state.model.date, "running<bool>": True},
            )

            # Run backup on each storage
            errs = False
            for storstate in job["storstates"]:
                # Create unique temp directory inside dir_temp
                dir_temp = tempfile.mkdtemp(dir=state.model.dir_temp)

                # Execute
                err, msg = _exec_backup_type(state, storstate, backup, dir_temp)

                # Delete temp directory
                fsutil.remove_dir_tree(dir_temp)

                if err:
                    errs = True
                    _D.ERROR(__name__, "Error executing backup", "msg", msg, "params", backup)
                    continue

            # Update runlog with backup finish
            _runlog_update_finish(state, section_name, err_bool=errs)

    # ... done backup jobs ...

    # Add storage stats to reporting
    for storstate in job["storstates"]:
        cat = "Job {}/{} uploaded".format(job_n, jobs_count)
        state.set_descriptor_category(
            cat, storstate.model.atype, fmtutil.byte_size(storager.get_stored_stats(storstate))
        )

    # Cleanup storagers
    # Delete temp directory
    for storstate in job["storstates"]:
        storager.cleanup(storstate)
        fsutil.remove_dir_tree(storstate.model.dir_temp)

    return 0, None
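
# _time_to_run is defined elsewhere in this module; the commented sketch below
# is only a guess at its contract, based on how it is called above: given the
# backup type (useful for logging), the last recorded run time (or None), the
# current time, and a configured frequency (assumed here to be seconds, or
# None meaning "always run"), it returns True when the backup is due.
#
#   def _time_to_run(backup_type, last_run, now, period):
#       if not last_run or not period:
#           # Never ran before, or no frequency configured: run now
#           return True
#       elapsed = (now - last_run).total_seconds()
#       return elapsed >= period
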