Example #1
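A probe that verifies benchmarks request trial repetitions: it implements just enough of the `BrainModel` interface for a benchmark to call `look_at`, asserts that more than one trial is requested, and raises a sentinel exception so the run stops before any real computation.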
import pytest
from typing import List, Tuple

from brainscore.benchmarks import benchmark_pool, evaluation_benchmark_pool
from brainscore.model_interface import BrainModel


class TestNumberOfTrials:
    @pytest.mark.private_access
    @pytest.mark.parametrize('benchmark_identifier',
                             evaluation_benchmark_pool.keys())
    def test_repetitions(self, benchmark_identifier):
        """ Tests that all evaluation benchmarks have repetitions in the stimulus_set """
        benchmark = benchmark_pool[benchmark_identifier]

        class AssertRepeatCandidate(BrainModel):
            class StopException(Exception):
                pass

            def visual_degrees(self):
                return 8

            def look_at(self, stimuli, number_of_trials=1):
                # the benchmark must request repeated presentations of the stimuli
                assert number_of_trials > 1
                raise self.StopException()  # abort once the check has run

            def start_task(self, task: BrainModel.Task, fitting_stimuli):
                pass

            def start_recording(self,
                                recording_target: BrainModel.RecordingTarget,
                                time_bins: List[Tuple[int]] = None):
                pass

        candidate = AssertRepeatCandidate()
        try:
            benchmark(candidate)  # just call to get the stimuli
        except AssertRepeatCandidate.StopException:  # but stop early
            pass
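These tests carry the `private_access` marker, so pytest's marker filter selects them, e.g. `pytest -m private_access` (the test-file path to pass along is repository-specific).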
Example #2
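A regression test pinning the exact contents of `evaluation_benchmark_pool` to six benchmarks: FreemanZiemba2013 V1/V2, MajajHong2015 V4/IT, Kar2019-ost, and Rajalingham2018-i2n.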
    def test_exact_evaluation_pool(self):
        assert set(evaluation_benchmark_pool.keys()) == {
            'movshon.FreemanZiemba2013.V1-pls',
            'movshon.FreemanZiemba2013.V2-pls',
            'dicarlo.MajajHong2015.V4-pls',
            'dicarlo.MajajHong2015.IT-pls',
            'dicarlo.Kar2019-ost',
            'dicarlo.Rajalingham2018-i2n',
        }
Example #3
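A variant of the same test in which the pool also includes the ImageNet top-1 benchmark (`fei-fei.Deng2009-top1`) and the four Hendrycks2019 (ImageNet-C) corruption benchmarks.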
    def test_exact_evaluation_pool(self):
        assert set(evaluation_benchmark_pool.keys()) == {
            'movshon.FreemanZiemba2013.V1-pls',
            'movshon.FreemanZiemba2013.V2-pls',
            'dicarlo.MajajHong2015.V4-pls',
            'dicarlo.MajajHong2015.IT-pls',
            'dicarlo.Kar2019-ost',
            'dicarlo.Rajalingham2018-i2n',
            'fei-fei.Deng2009-top1',
            'dietterich.Hendrycks2019-noise-top1',
            'dietterich.Hendrycks2019-blur-top1',
            'dietterich.Hendrycks2019-weather-top1',
            'dietterich.Hendrycks2019-digital-top1',
        }
Example #4
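The submission pipeline's evaluation entry point. It derives the default benchmark list from `evaluation_benchmark_pool` (excluding `dicarlo.Kar2019-ost`), connects to the results database, and loads a per-submission JSON configuration before scoring.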
import json
import logging
from pathlib import Path

import pandas as pd
from peewee import DoesNotExist
from pybtex.database.input import bibtex

from brainscore import score_model
from brainscore.benchmarks import evaluation_benchmark_pool, benchmark_pool
from brainscore.submission.configuration import object_decoder, MultiConfig
from brainscore.submission.database import connect_db
from brainscore.submission.ml_pool import MLBrainPool, ModelLayers
from brainscore.submission.models import Model, Score, BenchmarkInstance, BenchmarkType, Reference
from brainscore.submission.repository import prepare_module, deinstall_project
from brainscore.utils import LazyLoad

logger = logging.getLogger(__name__)

# every evaluation benchmark except object solution times (Kar2019-ost)
all_benchmarks_list = [benchmark for benchmark in evaluation_benchmark_pool.keys()
                       if benchmark not in ['dicarlo.Kar2019-ost']]

SCORE_COMMENT_MAX_LENGTH = 1000


def run_evaluation(config_dir, work_dir, jenkins_id, db_secret, models=None,
                   benchmarks=None):
    data = []
    try:
        connect_db(db_secret)
        config_file = Path(f'{config_dir}/submission_{jenkins_id}.json').resolve()
        with open(config_file) as file:
            configs = json.load(file)
        configs['config_file'] = str(config_file)
        submission_config = object_decoder(configs, work_dir, config_file.parent, db_secret, jenkins_id)
Example #5
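A variant of the evaluation runner that takes the configuration file path directly and additionally excludes `fei-fei.Deng2009-top1` from its default benchmark list.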
import json
import logging
import os
import subprocess
import sys
from pathlib import Path

import pandas as pd

from brainscore import score_model
from brainscore.benchmarks import evaluation_benchmark_pool
from brainscore.submission.database import store_score
from brainscore.submission.ml_pool import MLBrainPool, ModelLayers
from brainscore.utils import LazyLoad

logger = logging.getLogger(__name__)

# evaluation benchmarks minus object solution times (Kar2019-ost) and ImageNet top-1 (Deng2009)
all_benchmarks_list = [
    benchmark for benchmark in evaluation_benchmark_pool.keys()
    if benchmark not in ['dicarlo.Kar2019-ost', 'fei-fei.Deng2009-top1']
]


def run_evaluation(config_file,
                   work_dir,
                   jenkins_id,
                   db_secret,
                   models=None,
                   benchmarks=None):
    config_file = Path(config_file).resolve()
    work_dir = Path(work_dir).resolve()
    with open(config_file) as file:
        configs = json.load(file)
    logger.info(f'Running with the following configuration: {configs}')
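Example #5 cuts off before the scoring itself. Judging from the imports, the rest of `run_evaluation` presumably builds an `MLBrainPool` and feeds each model/benchmark pair through `score_model`. A minimal sketch of that loop, assuming the `score_model` keyword names and the pool lookup (both are inferences, not repository code):

# Hypothetical sketch only: the keyword names for score_model and the
# ml_brain_pool mapping are assumptions inferred from the imports above.
def score_all(models, benchmarks, ml_brain_pool):
    for model_id in models:
        for benchmark_id in (benchmarks or all_benchmarks_list):
            score = score_model(model_identifier=model_id,
                                benchmark_identifier=benchmark_id,
                                model=ml_brain_pool[model_id])  # assumed lookup
            logger.info(f'{model_id} on {benchmark_id}: {score}')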