Example #1
import importlib
import logging

# setup_torch_multiprocessing is assumed to be a project-level helper already in scope.
def dry_run(problem, sub_problem, eval_mode, dev_mode, cloud_mode):
    if problem == 'whales':
        setup_torch_multiprocessing()

    # Dynamically load the problem-specific manager module, e.g. minerva.whales.problem_manager.
    pm = importlib.import_module('minerva.{}.problem_manager'.format(problem))
    logging.info('running: {0}'.format(sub_problem))
    pm.dry_run(sub_problem, eval_mode, dev_mode, cloud_mode)
Example #2
import importlib
import logging

# setup_torch_multiprocessing and SUBPROBLEM_INFERENCE are assumed to be project-level helpers in scope.
def dry_run(problem, train_mode, dev_mode):
    if problem == 'whales':
        setup_torch_multiprocessing()

    pm = importlib.import_module('minerva.{}.problem_manager'.format(problem))
    # Run a dry run for every distinct sub-problem the problem defines (None if it defines none).
    sub_problems = SUBPROBLEM_INFERENCE.get(problem, {0: None})
    for sub_problem in list(set(sub_problems.values())):
        if sub_problem:
            logging.info('running: {0}'.format(sub_problem))
        pm.dry_run(sub_problem, train_mode, dev_mode)
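
For orientation, a minimal usage sketch of the variant above, assuming dry_run is importable as shown and that 'whales' is a supported problem name; the flag values are illustrative only and not taken from the source.

# Hypothetical call: the problem name and boolean flags are placeholders for illustration.
dry_run(problem='whales', train_mode=True, dev_mode=True)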
Example #3
import importlib

# setup_torch_multiprocessing and SUBPROBLEM_INFERENCE are assumed to be project-level helpers in scope.
def submit(problem, task_nr, file_path, dev_mode):
    # Default to the notebook shipped with the given task if no file path was provided.
    if file_path is None:
        file_path = 'resources/{}/tasks/task{}.ipynb'.format(problem, task_nr)
    if problem == 'whales':
        setup_torch_multiprocessing()

    # Map the task number to its sub-problem, if the problem defines any.
    sub_problems = SUBPROBLEM_INFERENCE.get(problem, {})
    task_sub_problem = sub_problems.get(task_nr, None)

    pm = importlib.import_module('minerva.{}.problem_manager'.format(problem))
    pm.submit_task(task_sub_problem, task_nr, file_path, dev_mode)
Example #4
import importlib

# setup_torch_multiprocessing is assumed to be a project-level helper in scope.
def submit(problem, sub_problem, task_nr, filepath, dev_mode, cloud_mode):
    if problem == 'whales':
        setup_torch_multiprocessing()
    # Delegate submission to the problem-specific manager module.
    pm = importlib.import_module('minerva.{}.problem_manager'.format(problem))
    pm.submit_task(sub_problem, task_nr, filepath, dev_mode, cloud_mode)
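
Similarly, a hedged usage sketch for the submit variant in Example #4, assuming the function is in scope; the sub-problem name, task number, and notebook path below are placeholders, with the path following the resources/{problem}/tasks/task{n}.ipynb pattern seen in Example #3.

# Hypothetical call: every argument value here is a placeholder for illustration.
submit(problem='whales',
       sub_problem='localization',
       task_nr=1,
       filepath='resources/whales/tasks/task1.ipynb',
       dev_mode=True,
       cloud_mode=False)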