def change_user_status_or_level(id, new_status=None, new_level=None):
    """Update a user's status and/or level; fields left as None are untouched.

    :param id: primary key of the user to modify (name shadows the builtin
        ``id`` but is kept for backward compatibility with callers)
    :param new_status: new status value, or None to leave unchanged
    :param new_level: new level value, or None to leave unchanged
    """
    user = load_user(id)
    # `x is not None` is the idiomatic form of the original `not x is None`
    if new_status is not None:
        user.status = new_status
    if new_level is not None:
        user.level = new_level
    db_commit()
def change_password(id, old_password, new_password, new_rep_password):
    """Validate and apply a password change for the given user.

    Aborts via sys.exit with a user-facing message when any validation
    step fails; otherwise stores the new password hash and commits.
    """
    user = load_user(id)
    # guard clauses: bail out on the first failed validation
    # (sys.exit raises SystemExit, so no elif/else chain is needed)
    if not user.check_password(old_password):
        sys.exit("Old password is not valid.")
    if new_password != new_rep_password:
        sys.exit("New passwords do not match.")
    if not check_format_conformance("password", new_password):
        sys.exit(format_errors["password"])
    user.set_password(new_password)
    db_commit()
def get_run_info(job_id, run_ids, return_pid=False, return_db_request=False):
    """Collect status information for the given runs of a job.

    :param job_id: job whose runs are queried
    :param run_ids: iterable of run identifiers to report on
    :param return_pid: if True, include the background process id per run
    :param return_db_request: if True, include the db entry id per run and
        also return the job-level query object
    :return: dict keyed by run_id, or (dict, query) when return_db_request
    """
    data = {}
    db_job_id_request = query_info_from_db(job_id)
    for run_id in run_ids:
        entry = {}
        run_query = db_job_id_request.filter(Exec.run_id==run_id).distinct()
        if run_query.count() == 0:
            # no db entry yet: report placeholder values
            if return_pid:
                entry["pid"] = None
            if return_db_request:
                entry["db_id"] = None
            entry.update({
                "status": "not started yet",
                "time_started": "-",
                "time_finished": "-",
                "duration": "-",
                "exec_profile": "-",
                "retry_count": "0",
            })
        else:
            # the entry with the highest id is the latest attempt:
            latest_id = max(r.id for r in run_query)
            run_info = run_query.filter(Exec.id==latest_id).first()
            # reap a finished child so it does not linger as a zombie:
            cleanup_zombie_process(run_info.pid)
            # no finish time recorded but the process is gone -> it died
            if not isinstance(run_info.time_finished, datetime) \
                    and run_info.pid != -1 \
                    and not pid_exists(run_info.pid):
                run_info.status = "process ended unexpectedly"
                run_info.time_finished = datetime.now()
                db_commit()
            # for a still-running run, measure duration up to "now":
            time_finished = run_info.time_finished if run_info.time_finished else datetime.now()
            if return_pid:
                entry["pid"] = run_info.pid
            if return_db_request:
                entry["db_id"] = run_info.id
            entry.update({
                "status": run_info.status,
                "time_started": run_info.time_started,
                "time_finished": run_info.time_finished,
                "duration": get_duration(run_info.time_started, time_finished),
                "exec_profile": run_info.exec_profile_name,
                "retry_count": run_info.retry_count,
            })
        data[run_id] = entry
    if return_db_request:
        return data, db_job_id_request
    return data
def add_user(username, email, level, password, status="active"):
    """Create a new user account and commit it to the database.

    Aborts via sys.exit when the username is already taken.
    """
    if check_if_username_exists(username):
        sys.exit("Username already exists.")
    new_user = User(
        username=username,
        email=email,
        level=level,
        status=status,
        date_register=datetime.now(),
        date_last_login=None,
    )
    new_user.set_password(password)
    db.session.add(new_user)
    db_commit()
def check_user_credentials(username, password, return_user_if_valid):
    """Verify a username/password pair.

    When return_user_if_valid is truthy, returns the user object on success
    (updating the last-login timestamp) or None on failure; otherwise
    returns a bool indicating validity.
    """
    user = get_user_by_username(username)
    valid = user is not None and user.check_password(password)
    if not return_user_if_valid:
        return valid
    if not valid:
        return None
    user.date_last_login = datetime.now()
    db_commit()
    return user
def terminate_runs(
    job_id,
    run_ids,
    mode="terminate"  # can be one of terminate, reset, or delete
):
    """Terminate (and optionally clean up) the given runs of a job.

    :param job_id: job the runs belong to
    :param run_ids: run identifiers to act on
    :param mode: "terminate" only kills still-running processes;
        "reset" additionally removes log/output files and db entries;
        "delete" additionally removes the run's yaml file
    :return: tuple (succeeded, could_not_be_terminated, could_not_be_cleaned)
        of run_id lists
    """
    could_not_be_terminated = []
    could_not_be_cleaned = []
    succeeded = []
    run_info, db_request = get_run_info(job_id, run_ids, return_pid=True, return_db_request=True)
    db_changed = False
    for run_id in run_info.keys():
        # only a run that has started but not finished needs terminating:
        if isinstance(run_info[run_id]["time_started"], datetime) and \
            not isinstance(run_info[run_id]["time_finished"], datetime):
            if run_info[run_id]["pid"] != -1:
                is_killed = kill_proc_tree(run_info[run_id]["pid"])
                if not is_killed:
                    could_not_be_terminated.append(run_id)
                    continue
                cleanup_zombie_process(run_info[run_id]["pid"])
            # bug fix: fetch the actual row with .first(); previously the
            # attributes were assigned on the Query object itself, so the
            # status / finish time never reached the database
            db_run_entry = db_request.filter(Exec.id==run_info[run_id]["db_id"]).first()
            if db_run_entry is not None:
                db_run_entry.time_finished = datetime.now()
                db_run_entry.status = "terminated by user"
                db_changed = True
        if mode in ["reset", "delete"]:
            try:
                log_path = get_path("run_log", job_id, run_id)
                if os.path.exists(log_path):
                    os.remove(log_path)
                run_out_dir = get_path("run_out_dir", job_id, run_id)
                if os.path.exists(run_out_dir):
                    rmtree(run_out_dir)
                if isinstance(run_info[run_id]["time_started"], datetime):
                    db_request.filter(Exec.run_id==run_id).delete(synchronize_session=False)
                    db_changed = True
            except Exception:  # narrowed from bare except; cleanup stays best-effort
                could_not_be_cleaned.append(run_id)
                continue
        if mode == "delete":
            try:
                yaml_path = get_path("run_yaml", job_id, run_id)
                if os.path.exists(yaml_path):
                    os.remove(yaml_path)
            except Exception:
                could_not_be_cleaned.append(run_id)
                continue
        succeeded.append(run_id)
    if db_changed:
        db_commit()
    return succeeded, could_not_be_terminated, could_not_be_cleaned
def exec_runs(job_id, run_ids, exec_profile_name, cwl, user_id=None, max_parrallel_exec_user_def=None):
    """Queue the given runs of a job and launch their background processes.

    :param job_id: job the runs belong to
    :param run_ids: run identifiers to start
    :param exec_profile_name: key into app.config["EXEC_PROFILES"]
    :param cwl: cwl target, forwarded to get_path("cwl", ...)
    :param user_id: owner of the runs, or None
    :param max_parrallel_exec_user_def: user-requested cap on parallel
        executions; only honored if the profile allows decreasing it and the
        requested value is lower than the profile default. (Parameter name
        keeps the historical "parrallel" spelling for caller compatibility.)
    :return: tuple (started_runs, already_running_runs) of run_id lists
    """
    from copy import deepcopy  # local import: keeps file-level imports untouched

    # check if runs are already running:
    already_running_runs = []
    db_job_id_request = query_info_from_db(job_id)
    for run_id in run_ids:
        db_run_id_request = db_job_id_request.filter(Exec.run_id==run_id).distinct()
        if db_run_id_request.count() > 0:
            # find latest entry:
            run_info = db_run_id_request.filter(Exec.id==max([r.id for r in db_run_id_request])).first()
            if run_info.time_finished is None or run_info.status == "finished":
                already_running_runs.append(run_id)
    run_ids = sorted(list(set(run_ids) - set(already_running_runs)))

    # create new exec entries in the database:
    # bug fix: operate on a copy of the profile. Previously the dict taken
    # straight from app.config["EXEC_PROFILES"] was mutated below, which
    # permanently lowered max_parallel_exec for every subsequent request.
    exec_profile = deepcopy(app.config["EXEC_PROFILES"][exec_profile_name])
    if max_parrallel_exec_user_def is not None and \
        exec_profile["allow_user_decrease_max_parallel_exec"] and \
        max_parrallel_exec_user_def < exec_profile["max_parallel_exec"]:
        exec_profile["max_parallel_exec"] = max_parrallel_exec_user_def
    exec_db_entry = {}
    for run_id in run_ids:
        exec_db_entry[run_id] = Exec(
            job_id=job_id,
            run_id=run_id,
            cwl=get_path("cwl", cwl_target=cwl),
            yaml=get_path("run_yaml", job_id=job_id, run_id=run_id),
            out_dir=get_path("run_out_dir", job_id=job_id, run_id=run_id),
            global_temp_dir=app.config["TEMP_DIR"],
            log=get_path("run_log", job_id=job_id, run_id=run_id),
            status="queued",
            err_message="",
            retry_count=0,
            time_started=datetime.now(),
            time_finished=None,  #*
            timeout_limit=None,  #*
            pid=-1,  #*
            user_id=user_id,
            exec_profile=exec_profile,
            exec_profile_name=exec_profile_name
        )
        #* will be set by the background process itself
        db.session.add(exec_db_entry[run_id])
    db_commit()

    # start the background processes:
    # each child is detached from the parent and manages its status in the
    # database autonomously; even if this (parent) process is terminated or
    # fails, the child process will continue
    started_runs = []
    for run_id in run_ids:
        create_background_process(
            [
                python_interpreter,
                os.path.join(basedir, "cwlab_bg_exec.py"),
                app.config["SQLALCHEMY_DATABASE_URI"],
                str(exec_db_entry[run_id].id),
                str(app.config["DEBUG"])
            ],
            get_path("debug_run_log", job_id=job_id, run_id=run_id)
        )
        started_runs.append(run_id)
    return started_runs, already_running_runs
def delete_user(id):
    """Remove the user with the given id from the database and commit."""
    db.session.delete(load_user(id))
    db_commit()