Example #1
import os

def test_evaluation(database, tmpdir):
    connect_db(database)
    clear_schema()
    init_user()
    working_dir = str(tmpdir.mkdir("sub"))
    config_dir = os.path.join(os.path.dirname(__file__), 'configs/')
    run_evaluation(config_dir, working_dir, 33, database, models=['alexnet'],
                   benchmarks=['dicarlo.MajajHong2015.IT-pls'])
    scores = Score.select().dicts()
    assert len(scores) == 1
    # If comment is None the score was stored successfully; otherwise it would contain an error message
    assert scores[0]['comment'] is None
Example #2
import json
import logging
from pathlib import Path

import pandas as pd

logger = logging.getLogger(__name__)


def run_evaluation(config_dir, work_dir, jenkins_id, db_secret, models=None,
                   benchmarks=None):
    data = []
    try:
        connect_db(db_secret)
        config_file = Path(f'{config_dir}/submission_{jenkins_id}.json').resolve()
        with open(config_file) as file:
            configs = json.load(file)
        configs['config_file'] = str(config_file)
        submission_config = object_decoder(configs, work_dir, config_file.parent, db_secret, jenkins_id)

        logger.info(f'Running with the following configurations: {configs}')
        test_benchmarks = all_benchmarks_list if benchmarks is None or len(benchmarks) == 0 else benchmarks
        if isinstance(submission_config, MultiConfig):
            # We rerun existing models, which may be defined in different submissions
            for submission_entry in submission_config.submission_entries.values():
                repo = None
                try:
                    module, repo = prepare_module(submission_entry, submission_config)
                    logger.info('Successfully installed repository')
                    # Collect the model entries that belong to this submission
                    # (a local name avoids shadowing the `models` parameter)
                    submission_models = [model_entry for model_entry in submission_config.models
                                         if model_entry.submission.id == submission_entry.id]
                    assert len(submission_models) > 0
                    sub_data = run_submission(module, submission_models, test_benchmarks, submission_entry)
                    data = data + sub_data
                    deinstall_project(repo)
                except Exception as e:
                    if repo is not None:
                        deinstall_project(repo)
                    logger.error('Could not install submission because of the following error:')
                    logger.error(e, exc_info=True)
                    raise
        else:
            submission_entry = submission_config.submission
            repo = None
            try:
                module, repo = prepare_module(submission_entry, submission_config)
                logger.info('Successfully installed repository')
                test_models = module.get_model_list() if models is None or len(models) == 0 else models
                assert len(test_models) > 0
                model_entries = []
                logger.info('Creating model instances')
                for model_name in test_models:
                    model_entry, created = Model.get_or_create(name=model_name, owner=submission_entry.submitter,
                                                               defaults={'public': submission_config.public,
                                                                         'submission': submission_entry})
                    if hasattr(module, 'get_bibtex') and created:
                        bibtex_string = module.get_bibtex(model_name)
                        reference = get_reference(bibtex_string)
                        model_entry.reference = reference
                        model_entry.save()
                    model_entries.append(model_entry)
                data = run_submission(module, model_entries, test_benchmarks, submission_entry)
                deinstall_project(repo)
            except Exception as e:
                if repo is not None:
                    deinstall_project(repo)
                submission_entry.status = 'failure'
                submission_entry.save()
                logger.error('Could not install submission because of the following error:')
                logger.error(e, exc_info=True)
                raise
    finally:
        df = pd.DataFrame(data)
        # This is the result file we send to the user after the scoring process is done
        df.to_csv(f'result_{jenkins_id}.csv', index=False, header=True)
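
For reference, Example #1 above drives this function from a test; a minimal direct invocation might look like the sketch below. The config directory, working directory, jenkins id, and database secret are placeholder assumptions, not values taken from the source.

# A minimal invocation sketch mirroring the test call in Example #1.
# 'configs/', 'workdir/', 33 and 'dev-db-secret' are placeholder assumptions.
if __name__ == '__main__':
    run_evaluation(config_dir='configs/', work_dir='workdir/',
                   jenkins_id=33, db_secret='dev-db-secret',
                   models=['alexnet'],
                   benchmarks=['dicarlo.MajajHong2015.IT-pls'])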
Example #3
def setup_class(cls):
    logger.info('Connect to database')
    connect_db(database)
    clear_schema()
Example #4
def setup_class(cls):
    connect_db(database)
    clear_schema()
    init_user()
Example #5
def setup_class(cls):
    logger.info('Connect to database')
    connect_db(TestIntegration.database)
    clear_schema()
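
Examples #3-#5 are fragments of a class-level pytest fixture. Below is a minimal sketch of the surrounding test class, assuming connect_db, clear_schema, and init_user come from the same module as above and that the database secret is a class attribute; the attribute value is a placeholder, not taken from the source.

import logging

logger = logging.getLogger(__name__)


class TestIntegration:
    database = 'test-database-secret'  # placeholder assumption

    @classmethod
    def setup_class(cls):
        # pytest runs this once before any test in the class
        logger.info('Connect to database')
        connect_db(cls.database)
        clear_schema()
        init_user()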