Example #1
def dry_run(sub_problem, train_mode, dev_mode):
    # Resolve the run configuration for the requested sub-problem.
    config, cloud_mode = setup_env(SOLUTION_CONFIG, sub_problem)

    # Validate inputs against the module-level solution pipeline.
    check_inputs(train_mode, config, solution_pipeline)
    trainer = Trainer(solution_pipeline, config, dev_mode)

    if train_mode:
        trainer.train()
    _evaluate(trainer)
    # K is the Keras backend (from keras import backend as K);
    # clearing the session frees the TensorFlow graph between runs.
    K.clear_session()
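For orientation, here is one way this dry_run entry point might be invoked from a command line. The click wiring and the 'fashion_mnist' sub-problem name are illustrative assumptions, not part of the source snippet.

import click

@click.command()
@click.option('--sub_problem', default='fashion_mnist')  # assumed default name
@click.option('--train_mode/--no-train_mode', default=True)
@click.option('--dev_mode/--no-dev_mode', default=False)
def main(sub_problem, train_mode, dev_mode):
    # Delegates straight to the dry_run entry point above.
    dry_run(sub_problem, train_mode, dev_mode)

if __name__ == '__main__':
    main()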
Example #2
def dry_run(sub_problem, train_mode, dev_mode):
    config, cloud_mode = setup_env(SOLUTION_CONFIG, sub_problem)

    # This variant looks the pipeline up per sub-problem instead of using
    # a single module-level pipeline, and forwards cloud_mode to the Trainer.
    pipeline = pipeline_dict[sub_problem]
    check_inputs(train_mode, config, pipeline)

    trainer = Trainer(pipeline, config, dev_mode, cloud_mode, sub_problem)

    if train_mode:
        trainer.train()
    _evaluate(trainer, sub_problem)
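The lookup pipeline = pipeline_dict[sub_problem] implies a registry keyed by sub-problem name. A minimal sketch of that shape follows; the key names and the placeholder pipeline class are assumptions inferred only from how the dict is indexed.

# Hypothetical shape of pipeline_dict; the keys and PlaceholderPipeline
# are illustrative, not taken from the source project.
class PlaceholderPipeline:
    def __init__(self, name):
        self.name = name

pipeline_dict = {
    'fashion_mnist': PlaceholderPipeline('fashion_mnist'),
    'whales': PlaceholderPipeline('whales'),
}

pipeline = pipeline_dict['fashion_mnist']  # same lookup as in dry_run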
Example #3
def submit_task(sub_problem, task_nr, filepath, dev_mode):
    config, _ = setup_env(SOLUTION_CONFIG, sub_problem)

    check_inputs(train_mode=False, config=config, pipeline=solution_pipeline)
    submit_config = submit_setup(config)
    trainer = Trainer(solution_pipeline, submit_config, dev_mode)

    # Load the user's solution from disk and substitute it into the
    # reference trainer via the handler registered for this task number.
    user_task_solution, user_config = _fetch_task_solution(filepath)
    task_handler = registered_tasks[task_nr](trainer)
    new_trainer = task_handler.substitute(user_task_solution, user_config)

    new_trainer.train()
    _evaluate(new_trainer)
    K.clear_session()  # free the Keras/TensorFlow session
    submit_teardown(submit_config)
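In both submit_task variants, registered_tasks[task_nr] is called with the trainer and must return a handler whose substitute() yields a new trainer. A minimal sketch of such a registry, under the assumption that a handler patches the user's solution onto a copy of the trainer; the attribute names used below are hypothetical.

import copy

registered_tasks = {}

def register_task(task_nr):
    # Decorator that records a handler class under its task number.
    def decorator(cls):
        registered_tasks[task_nr] = cls
        return cls
    return decorator

@register_task(1)
class Task1:
    def __init__(self, trainer):
        self.trainer = trainer

    def substitute(self, user_task_solution, user_config):
        # Return a shallow copy of the trainer with the user's solution
        # and config attached; both attribute names are hypothetical.
        new_trainer = copy.copy(self.trainer)
        new_trainer.solution = user_task_solution
        new_trainer.config = user_config
        return new_trainer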
Example #4
def submit_task(sub_problem, task_nr, filepath, dev_mode):
    # Here the user's file is opened through a context manager that parses
    # it and exposes the solution and its config as dictionary entries.
    with TaskSolutionParser(filepath) as task_solution:
        config, cloud_mode = setup_env(SOLUTION_CONFIG, sub_problem)

        pipeline = pipeline_dict[sub_problem]
        check_inputs(train_mode=False, config=config, pipeline=pipeline)

        submit_config = submit_setup(config)

        trainer = Trainer(pipeline, submit_config, dev_mode, cloud_mode,
                          sub_problem)
        user_task_solution = task_solution.get('solution')
        user_config = task_solution.get('CONFIG')
        task_handler = registered_tasks[task_nr](trainer)
        new_trainer = task_handler.substitute(user_task_solution, user_config)
        new_trainer.train()
        _evaluate(new_trainer, sub_problem)

        submit_teardown(submit_config)
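TaskSolutionParser is used as a context manager whose value supports .get('solution') and .get('CONFIG'). A minimal sketch matching that observed interface; loading the user's file as a Python module via importlib is an assumption about the implementation, not the project's actual parser.

import importlib.util

class TaskSolutionParser:
    # Context manager that loads a user's solution file and exposes its
    # objects as a dict; the importlib-based loading is an assumption.
    def __init__(self, filepath):
        self.filepath = filepath

    def __enter__(self):
        spec = importlib.util.spec_from_file_location('task_solution',
                                                      self.filepath)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        # Expose the user's objects under the keys read in submit_task.
        return {'solution': getattr(module, 'solution', None),
                'CONFIG': getattr(module, 'CONFIG', None)}

    def __exit__(self, exc_type, exc_val, exc_tb):
        return False  # do not suppress exceptions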