def test_rerun_evaluation(self, tmpdir):
    """A rerun submission (id 34) that references an already-created model via its
    config file re-scores that model and writes the expected results CSV."""
    work_dir = str(tmpdir.mkdir('sub'))
    configs = str(os.path.join(os.path.dirname(__file__), 'configs/'))
    # Seed the database with an existing submission and its model so the rerun
    # config below can reference the model by id.
    submission = Submission.create(id=33, submitter=1, timestamp=datetime.now(),
                                   model_type='BaseModel', status='running')
    model = Model.create(name='alexnet', owner=submission.submitter, public=False,
                         submission=submission)
    # Write the rerun config pointing at the seeded model.
    with open(f'{configs}submission_34.json', 'w') as rerun:
        rerun.write(f"""{{ "model_ids": [{model.id}], "user_id": 1}}""")
    # NOTE(review): 'databse' looks like a typo of 'database' — the attribute is
    # defined on TestIntegration outside this view; confirm before renaming.
    run_evaluation(configs, work_dir, 34, TestIntegration.databse,
                   benchmarks=['dicarlo.Rajalingham2018-i2n'])
    with open('result_34.csv') as results:
        reader = csv.reader(results, delimiter=',')
        next(reader)  # skip header row
        row = next(reader)
    assert row[0] == 'alexnet'
    assert row[1] == 'dicarlo.Rajalingham2018-i2n'
    # Columns 2-4: score, raw score, error — compared with tolerance.
    assert self.compare(float(row[2]), 0.25771746331458695)
    assert self.compare(float(row[3]), 0.3701702418190641)
    assert self.compare(float(row[4]), 0.011129032024657565)
def score_model_console():
    """Console entry point: validate the parsed CLI arguments, then kick off
    an evaluation run via :func:`run_evaluation`."""
    logger.info('Start scoring model process..')
    # NOTE(review): asserts are stripped under `python -O`; explicit raises would
    # be more robust, but changing them would alter the raised exception type.
    assert Path(args.config_dir).is_dir(), 'Configuration directory doesn\'t exist'
    assert Path(args.work_dir).is_dir(), 'Work directory is not a valid directory'
    assert args.db_secret is not None, 'The db connection file doesn\'t exist'
    assert isinstance(args.jenkins_id, int)
    logger.info(f'Benchmarks configured: {args.benchmarks}')
    logger.info(f'Models configured: {args.models}')
    run_evaluation(args.config_dir, args.work_dir, args.jenkins_id,
                   db_secret=args.db_secret, models=args.models,
                   benchmarks=args.benchmarks)
def test_failure_evaluation(self, tmpdir):
    """Running an evaluation for jenkins id 35 (no matching submission config)
    is expected to raise."""
    work_dir = str(tmpdir.mkdir('sub'))
    configs = str(os.path.join(os.path.dirname(__file__), 'configs/'))
    with pytest.raises(Exception):
        # NOTE(review): 'databse' appears to be a typo preserved from the
        # TestIntegration class attribute defined elsewhere — confirm.
        run_evaluation(configs, work_dir, 35, TestIntegration.databse,
                       models=['alexnet'],
                       benchmarks=['dicarlo.Rajalingham2018-i2n'])
def test_evaluation(database, tmpdir):
    """Full evaluation of alexnet on MajajHong2015.IT-pls stores exactly one
    score row with no error comment."""
    connect_db(database)
    clear_schema()
    init_user()
    work_dir = str(tmpdir.mkdir("sub"))
    configs = str(os.path.join(os.path.dirname(__file__), 'configs/'))
    run_evaluation(configs, work_dir, 33, database,
                   models=['alexnet'],
                   benchmarks=['dicarlo.MajajHong2015.IT-pls'])
    scores = Score.select().dicts()
    assert len(scores) == 1
    # A None comment signals a successfully stored score; failures put an
    # error message in the comment field instead.
    assert scores[0]['comment'] is None
def test_model_failure_evaluation(self, tmpdir):
    """When the model fails on a benchmark, the results CSV reports zero scores
    and the stored Score row carries an error message in its comment."""
    # os.environ['RESULTCACHING_DISABLE'] = 'brainscore.score_model,model_tools'
    work_dir = str(tmpdir.mkdir('sub'))
    configs = str(os.path.join(os.path.dirname(__file__), 'configs/'))
    # NOTE(review): 'databse' mirrors the (likely misspelled) attribute on
    # TestIntegration defined outside this view — confirm before renaming.
    run_evaluation(configs, work_dir, 36, TestIntegration.databse,
                   models=['alexnet'],
                   benchmarks=['movshon.FreemanZiemba2013.V1-pls'])
    with open('result_36.csv') as results:
        reader = csv.reader(results, delimiter=',')
        next(reader)  # skip header row
        row = next(reader)
    assert row[0] == 'alexnet'
    assert row[1] == 'movshon.FreemanZiemba2013.V1-pls'
    # A failed run is reported as zero scores in the CSV.
    assert row[2] == '0'
    assert row[3] == '0'
    model = Model.get()
    score = Score.get(model=model)
    # When there's a problem, the comment field contains an error message.
    assert score.comment is not None
def test_evaluation(self, tmpdir):
    """Successful evaluation writes the expected CSV row and stores one score
    whose comment records the layer-to-region commitment."""
    work_dir = str(tmpdir.mkdir('sub'))
    configs = str(os.path.join(os.path.dirname(__file__), 'configs/'))
    # NOTE(review): 'databse' looks like a typo of 'database' — attribute is
    # defined on TestIntegration elsewhere; confirm.
    run_evaluation(configs, work_dir, 33, TestIntegration.databse,
                   models=['alexnet'],
                   benchmarks=['dicarlo.MajajHong2015.IT-pls'])
    with open('result_33.csv') as results:
        reader = csv.reader(results, delimiter=',')
        next(reader)  # skip header row
        row = next(reader)
    assert row[0] == 'alexnet'
    assert row[1] == 'dicarlo.MajajHong2015.IT-pls'
    # Columns 2-4: score, raw score, error — compared with tolerance.
    assert self.compare(float(row[2]), 0.5857491098187586)
    assert self.compare(float(row[3]), 0.5079816726934638)
    assert self.compare(float(row[4]), 0.003155449372125895)
    scores = Score.select()
    assert len(scores) == 1
    # A successful score's comment should state which layers mapped to which regions.
    assert scores[0].comment.startswith("layers:")
def test_evaluation(self, tmpdir):
    """Successful evaluation writes the expected CSV row and stores exactly one
    score with no error comment."""
    work_dir = str(tmpdir.mkdir('sub'))
    configs = str(os.path.join(os.path.dirname(__file__), 'configs/'))
    # NOTE(review): 'databse' looks like a typo of 'database' — attribute is
    # defined on TestIntegration elsewhere; confirm.
    run_evaluation(configs, work_dir, 33, TestIntegration.databse,
                   models=['alexnet'],
                   benchmarks=['dicarlo.MajajHong2015.IT-pls'])
    with open('result_33.csv') as results:
        reader = csv.reader(results, delimiter=',')
        next(reader)  # skip header row
        row = next(reader)
    assert row[0] == 'alexnet'
    assert row[1] == 'dicarlo.MajajHong2015.IT-pls'
    # Columns 2-4: score, raw score, error — compared with tolerance.
    assert self.compare(float(row[2]), 0.5857491098187586)
    assert self.compare(float(row[3]), 0.5079816726934638)
    assert self.compare(float(row[4]), 0.003155449372125895)
    scores = Score.select()
    assert len(scores) == 1
    # A None comment means the score was stored successfully; failures put an
    # error message in the comment field instead.
    assert scores[0].comment is None