def process_task(config: ConfigHolder, task: Task, session: sqlalchemy.orm.Session):
    try:
        task.status = Task.STATUS_OPTIONS.PROCESSING
        session.commit()
        # Recursively process all subtasks that are not already done or failed.
        for subtask in tqdm.tqdm(task.children, desc=f"Task {task.id}: Processing subtasks"):
            if subtask.status not in [Task.STATUS_OPTIONS.ERROR,
                                      Task.STATUS_OPTIONS.INTERRUPTED,
                                      Task.STATUS_OPTIONS.FINISHED]:
                subconfig = ConfigHolder(subtask)
                if config.local:
                    subconfig.local = True
                process_task(subconfig, subtask, session)
        # Only jobs without a solution still need to be solved.
        to_process = [job for job in task.jobs if job.solution is None]
        process_jobs(to_process, config, session)
        task.status = Task.STATUS_OPTIONS.FINISHED
    except Exception as e:
        print(e)
        to_process = [job for job in task.jobs if job.solution is None]
        # The "on_message callback" error is harmless; mark the task as failed
        # only for other errors and only if unsolved jobs remain.
        if str(e).lower() != "Backend does not support on_message callback".lower() and to_process:
            task.status = Task.STATUS_OPTIONS.ERROR
            task.error_message = str(e)
            if is_debug_env():
                raise e
        else:
            task.status = Task.STATUS_OPTIONS.FINISHED
    finally:
        session.commit()
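# Usage sketch (wiring assumed, not part of this module): the database path and
# task id below are placeholders. Processing the root task is enough to drain its
# whole subtree, since process_task calls itself on every unfinished child.
def _demo_process_root_task(database_path="angular.db", root_task_id=1):
    session = get_session(database_path)
    root = session.query(Task).filter(Task.id == root_task_id).one()
    config = ConfigHolder(root)
    config.local = True  # run solvers locally instead of on a backend
    process_task(config, root, session)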
def _create_task(arg_config, session):
    solvers = arg_config.solvers
    solvers_args = arg_config.solvers_args
    assert len(solvers) == len(solvers_args),\
        "The number of solver arguments must match the number of solvers"
    for solver in solvers:
        assert solver in ALL_SOLVER,\
            f"Solver {solver} not found! Please make sure that all solvers are properly named."
    task = Task(task_type="instance_test", status=Task.STATUS_OPTIONS.CREATED, name=arg_config.name)
    config = ConfigHolder.fromNamespace(
        arg_config, task=task,
        ignored_attributes=["url_path", "solvers", "solvers_args", "create_only", "config", "name"])
    jobs = _get_instances(task, config, session)
    # One subtask per solver; every subtask gets its own config holding the solver arguments.
    for solver, solver_args in zip(solvers, solvers_args):
        subtask = Task(parent=task, name=f"{solver}_test", task_type="instance_test",
                       status=Task.STATUS_OPTIONS.CREATED)
        task.children.append(subtask)
        subconfig_namespace = configargparse.Namespace(solver=solver, solver_args=solver_args)
        subconfig = ConfigHolder.fromNamespace(subconfig_namespace, task=subtask)
        add_prev_job = (subconfig.with_start_sol is not None and subconfig.with_start_sol)
        if isinstance(jobs[0], TaskJobs):
            # Instances come from existing jobs; optionally link the previous job
            # so its solution can serve as a start solution.
            for task_job in jobs:
                prev_job = task_job if add_prev_job else None
                for i in range(config.repetitions):
                    subtask.jobs.append(TaskJobs(task=subtask, graph=task_job.graph, prev_job=prev_job))
        else:
            for graph in jobs:
                for i in range(config.repetitions):
                    subtask.jobs.append(TaskJobs(task=subtask, graph=graph))
    session.add(task)
    session.commit()
    return task, config
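# A minimal sketch of the argument namespace _create_task expects. The solver
# names and argument dicts below are placeholders; real names must be keys of
# ALL_SOLVER.
def _demo_create_task(session):
    ns = configargparse.Namespace(
        name="demo_instance_test",
        solvers=["SolverA", "SolverB"],         # placeholders; must exist in ALL_SOLVER
        solvers_args=[{}, {"time_limit": 60}],  # one argument dict per solver
        repetitions=1,                          # read later through config.repetitions
        url_path="angular.db",
        create_only=False,
        config=None,
    )
    # _get_instances may need further attributes on the namespace; they are
    # omitted in this sketch.
    return _create_task(ns, session)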
def _get_task_and_config(session, arg_config):
    task = None
    config = None
    if arg_config.url_path:
        if hasattr(arg_config, "task") and arg_config.task is not None:
            task = session.query(Task).filter(Task.id == arg_config.task).one()
            if arg_config.override_config and \
                    input(f"Are you sure you want to override the config for task {task.id}? (y/N)").lower() in ["y", "yes"]:
                print(f"Overriding config for task {task.id}")
                for task_config in task.configs:
                    session.delete(task_config)
                arg_config.override_config = False
                config = ConfigHolder.fromNamespace(
                    arg_config, task,
                    ["override_config", "url_path", "PreEvolveInteractive", "create_only"])
                session.add(config)
                session.commit()
            else:
                print("Using config from database")
                config = ConfigHolder(task)
        else:
            if input("New Task will be created (Y/n)?").lower() in ["", "yes", "y"]:
                print("Will create a new task.")
                task = Task(task_type="instance_evolver", status=Task.STATUS_OPTIONS.CREATED,
                            name=arg_config.name)
                session.add(task)
                session.commit()
                arg_config.task = task.id
                config = ConfigHolder.fromNamespace(
                    arg_config, task,
                    ignored_attributes=["url_path", "create_only", "name", "override_config"])
                session.add_all(config.database_configs)
                session.commit()
                savepath = input(
                    f"Task ID is {task.id}. Type a filepath to save the ID in a config file (default: skip save): ")
                if savepath:
                    _save_task_file(savepath, config, task)
    else:
        config = arg_config
    return task, config
def process_tasks(url, task_id, check_stuck, local):
    session = get_session(url)
    if check_stuck:
        print("Checking for stuck tasks with more than 24 hours idle time...")
        stuck_tasks = session.query(Task).filter(
            Task.last_updated <= datetime.datetime.now() - datetime.timedelta(hours=24))
        for task in stuck_tasks:
            if input(f"Task {task.id} with type {task.task_type} is still processing. Reset? (y/N)").lower() in ["y", "yes"]:
                task.status = "RESTART"
                session.commit()
    if task_id is None:
        print("No specific task was given. All unprocessed tasks will be processed.")
        tasks = session.query(Task).filter(Task.status.notin_(IGNORE_STATI))\
            .all()  # .filter(Task.parent_id == None)
        # Some subtasks are still processing although their parents are finished.
        # We want the parent task for each of these, but only once.
        task_set = {task if task.parent_id is None else task.parent for task in tasks}
        t_dict = {t.id: t for t in task_set}
        tasks = [t_dict[t_id] for t_id in sorted(t_dict)]
    else:
        tasks = session.query(Task).filter(Task.id == task_id).all()
    while tasks:
        for task in tasks:
            config = ConfigHolder(task)
            config.local = bool(local)
            try:
                PROCESSOR_MAP[task.task_type].process_task(config, task, session)
            except InterruptedError:  # Exception:
                pass
        # If no specific task id was given, look for further tasks to process;
        # otherwise stop after one pass.
        print("All tasks processed. Will look for more tasks...")
        if task_id is None:
            tasks = session.query(Task).filter(Task.status.notin_(IGNORE_STATI))\
                .filter(Task.parent_id == None).all()
        else:
            tasks = []
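# IGNORE_STATI is referenced above but not defined in this excerpt. Judging from
# the inlined status list in the second process_tasks variant further below, it
# plausibly reads:
IGNORE_STATI = [Task.STATUS_OPTIONS.FINISHED, Task.STATUS_OPTIONS.PROCESSING,
                Task.STATUS_OPTIONS.ERROR, Task.STATUS_OPTIONS.ABORTED]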
def main():
    config = _load_config()
    session = get_session(config.url_path)
    graphs = []
    if config.seed is None:
        seed = int(np.random.default_rng(None).integers(np.array([2**63]))[0])
        config.seed = seed
    else:
        seed = int(config.seed)
    gen = np.random.default_rng(seed)
    counter = 0
    # vertex_shape is a flat list of (width, height) pairs.
    assert len(config.vertex_shape) % 2 == 0
    shapes = np.array(config.vertex_shape).reshape(-1, 2)
    l_shapes = len(shapes)
    for n in range(config.min_n, config.max_n):
        size = config.cel_min
        while size <= config.cel_max:
            for i in range(config.repetitions):
                graphs.append(create_random_celest_graph(
                    n, vertex_shape=shapes[counter % l_shapes],
                    celest_bounds=(size, size + config.cel_range), seed=gen))
                counter += 1
            size += config.cel_step
    task = Task(name=config.name, task_type="CelestialGraphInstances",
                status=Task.STATUS_OPTIONS.FINISHED)
    # Convert the namespace config into database config
    task_config_holder = ConfigHolder.fromNamespace(
        config, task=task, ignored_attributes=["url_path", "name", "config"])
    task.jobs = [TaskJobs(graph=graph, task=task) for graph in graphs]
    session.add(task)
    session.commit()
    print(counter, "instances were created. Corresponding task is", task.id)
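# A quick sketch of the vertex_shape convention used above (the example values
# are made up): the flat list is reshaped into (width, height) pairs, which the
# generation loop cycles through via counter % l_shapes.
def _demo_vertex_shapes():
    flat = [1, 1, 2, 1]                      # hypothetical config.vertex_shape
    shapes = np.array(flat).reshape(-1, 2)   # -> [[1, 1], [2, 1]]
    return [tuple(shapes[counter % len(shapes)]) for counter in range(5)]
    # -> [(1, 1), (2, 1), (1, 1), (2, 1), (1, 1)]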
def main():
    if not is_debug_env():
        print("No debugger detected! This file is intended to be run with a debugger. "
              "Proceed without one with care.")
    arguments = _parse_args()
    session = get_session(arguments.url_path)
    error_jobs = session.query(TaskJobs).join(AngularGraphSolution)\
        .filter(AngularGraphSolution.error_message != None).order_by(TaskJobs.task_id).all()
    print(f"{len(error_jobs)} error jobs found. Will now start resolving these instances...")
    prev_task = None
    for job in tqdm.tqdm(error_jobs, desc="Resolving error solutions"):
        # Jobs are ordered by task, so the solver only has to be rebuilt when the task changes.
        if prev_task is None or prev_task != job.task:
            config = ConfigHolder(job.task)
            solver = ALL_SOLVER[config.solver](**config.solver_args)
            prev_task = job.task
        sol = solver.solve(job.graph)
        if sol.error_message is None:
            old_sol = job.solution
            session.delete(old_sol)
            job.solution = sol
            session.commit()
        else:
            print(f"Still error message for job {job} with message: {sol.error_message}")
    session.commit()
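# is_debug_env is used above and in process_task but not defined in this excerpt.
# A minimal sketch of one plausible implementation (an assumption, not
# necessarily the repo's version): a tracing debugger installs a trace function.
def is_debug_env() -> bool:
    import sys
    return sys.gettrace() is not None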
def main():
    config = _load_config()
    session = get_session(config.url_path)
    if config.seed is None:
        seed = int(np.random.default_rng(None).integers(np.array([2**63]))[0])
        config.seed = seed
    else:
        seed = int(config.seed)
    gen = np.random.default_rng(seed)
    if config.fixed_edges:
        graphs = _fixed_generation(config, gen)
    else:
        graphs = _probability_generation(config, gen)
    task = Task(name=config.name, task_type="GeometricGraphInstances",
                status=Task.STATUS_OPTIONS.FINISHED)
    # Convert the namespace config into database config
    task_config_holder = ConfigHolder.fromNamespace(
        config, task=task, ignored_attributes=["url_path", "name", "config"])
    task.jobs = [TaskJobs(graph=graph, task=task) for graph in graphs]
    session.add(task)
    session.commit()
    print(len(task.jobs), "instances were created. Corresponding task is", task.id)
def process_tasks(url, task_id, check_stuck, local):
    session = get_session(url)
    if check_stuck:
        print("Checking for stuck tasks with more than 24 hours idle time...")
        stuck_tasks = session.query(Task).filter(
            Task.last_updated <= datetime.datetime.now() - datetime.timedelta(hours=24))
        for task in stuck_tasks:
            if input(f"Task {task.id} with type {task.task_type} is still processing. Reset? (y/N)").lower() in ["y", "yes"]:
                task.status = "RESTART"
                session.commit()
    if task_id is None:
        print("No specific task was given. All unprocessed tasks will be processed.")
        ignore_stati = [Task.STATUS_OPTIONS.FINISHED, Task.STATUS_OPTIONS.PROCESSING,
                        Task.STATUS_OPTIONS.ERROR, Task.STATUS_OPTIONS.ABORTED]
        tasks = session.query(Task).filter(Task.status.notin_(ignore_stati))\
            .filter(Task.parent_id == None).all()
    else:
        tasks = session.query(Task).filter(Task.id == task_id).all()
    for task in tasks:
        config = ConfigHolder(task)
        if local:
            config.local = True
        try:
            PROCESSOR_MAP[task.task_type].process_task(config, task, session)
        except InterruptedError:  # Exception:
            pass
def _get_task_and_config(session, arg_config):
    if session is not None:
        task = Task(task_type="instance_evolver_greedy", status=Task.STATUS_OPTIONS.CREATED,
                    name=arg_config.name)
        config = ConfigHolder.fromNamespace(
            arg_config, task, ignored_attributes=["create_only", "url_path", "name"])
        session.add(task)
        session.commit()
    else:
        task = None
        config = arg_config
    return task, config
def process_task(config: ConfigHolder, task: Task, session: sqlalchemy.orm.Session):
    try:
        task.status = Task.STATUS_OPTIONS.PROCESSING
        session.commit()
        for subtask in tqdm.tqdm(task.children, desc=f"Task {task.id}: Processing subtasks"):
            if subtask.status not in [Task.STATUS_OPTIONS.ERROR, Task.STATUS_OPTIONS.INTERRUPTED,
                                      Task.STATUS_OPTIONS.PROCESSING, Task.STATUS_OPTIONS.FINISHED]:
                process_task(ConfigHolder(subtask), subtask, session)
        to_process = [job for job in task.jobs if job.solution is None]
        process_jobs(to_process, config, session)
        task.status = Task.STATUS_OPTIONS.FINISHED
        session.commit()
    except Exception as e:
        print(e)
        task.status = Task.STATUS_OPTIONS.ERROR
        task.error_message = str(e)
        session.commit()
def color_solver_check():
    from database import Task, TaskJobs, Config, ConfigHolder, Graph, get_session, \
        CelestialGraph, AngularGraphSolution
    session = get_session("angular.db")
    color_configs = session.query(Config).filter(Config._value_str == '"MscColoringSolver"').all()
    tasks = [c.task for c in color_configs if c.task is not None]
    # A solution is bad if its order does not cover every edge of the graph.
    bad_solution_jobs = [j for t in tqdm.tqdm(tasks) for j in t.jobs
                         if j.solution is not None and len(j.solution.order) != j.graph.edge_amount]
    bad_tasks = {j.task: [] for j in bad_solution_jobs}
    for job in bad_solution_jobs:
        bad_tasks[job.task].append(job)
    for task in tqdm.tqdm(bad_tasks, desc="Processing bad tasks"):
        holder = ConfigHolder(task)
        solver = ALL_SOLVER[holder.solver](**holder.solver_args)
        for job in tqdm.tqdm(bad_tasks[task], desc="Processing bad jobs"):
            sol = solver.solve(job.graph)
            job.solution = sol
            session.commit()