Example #1
0
 def __init__(self):
     """Top-1 ImageNet-2012 classification benchmark.

     Loads stimulus metadata from the bundled ``imagenet2012.csv``, wraps it
     in a :class:`StimulusSet` with an ``image_id -> filepath`` lookup, and
     scores models with an :class:`Accuracy` metric. The ceiling is a fixed
     perfect score of 1 with an undefined error estimate.
     """
     stimulus_set = pd.read_csv(
         os.path.join(os.path.dirname(__file__), 'imagenet2012.csv'))
     stimulus_set = StimulusSet(stimulus_set)
     # Map each image_id to its on-disk path so stimuli can be located later.
     stimulus_set.image_paths = {
         row.image_id: row.filepath
         for row in stimulus_set.itertuples()
     }
     self._stimulus_set = stimulus_set
     self._similarity_metric = Accuracy()
     # Ceiling: center 1, error NaN — accuracy is scored absolutely,
     # so no empirical ceiling estimate is used.
     ceiling = Score([1, np.nan],
                     coords={'aggregation': ['center', 'error']},
                     dims=['aggregation'])
     super(Imagenet2012, self).__init__(identifier='fei-fei.Deng2009-top1',
                                        version=1,
                                        ceiling_func=lambda: ceiling,
                                        parent='ImageNet',
                                        bibtex="""@INPROCEEDINGS{5206848,  
                                             author={J. {Deng} and W. {Dong} and R. {Socher} and L. {Li} and  {Kai Li} and  {Li Fei-Fei}},  
                                             booktitle={2009 IEEE Conference on Computer Vision and Pattern Recognition},   
                                             title={ImageNet: A large-scale hierarchical image database},   
                                             year={2009},  
                                             volume={},  
                                             number={},  
                                             pages={248-255},
                                             url = {https://ieeexplore.ieee.org/document/5206848}
                                         }""")
Example #2
0
 def __init__(self, identifier_suffix, noise_type):
     """Top-1 benchmark for one ImageNet-C (Hendrycks 2019) noise condition.

     :param identifier_suffix: suffix appended to 'dietterich.Hendrycks2019.'
         to select the stimulus set to load
     :param noise_type: noise family name; determines the parent benchmark
     """
     benchmark_id = f'dietterich.Hendrycks2019.{identifier_suffix}'
     # Ceiling is a fixed perfect score of 1 with an undefined error estimate.
     perfect_score = Score([1, np.nan],
                           coords={'aggregation': ['center', 'error']},
                           dims=['aggregation'])
     self._stimulus_set = brainscore.get_stimulus_set(benchmark_id)
     self._similarity_metric = Accuracy()
     self._benchmark_name = benchmark_id
     self._noise_type = noise_type
     super(Imagenet_C_Individual, self).__init__(
         identifier=f"{benchmark_id}-top1",
         version=1,
         ceiling_func=lambda: perfect_score,
         parent=f'dietterich.Hendrycks2019-{noise_type}-top1',
         bibtex=BIBTEX)
Example #3
0
 def __init__(self, stimulus_set, noise_level, noise_type, noise_category):
     """Top-1 benchmark for one ImageNet-C condition at a single noise level.

     :param stimulus_set: full stimulus set; only rows matching `noise_level`
         are kept for this benchmark
     :param noise_level: severity level used to filter the stimuli
     :param noise_type: noise family name (part of the identifiers)
     :param noise_category: noise category name (part of the identifiers)
     """
     # Restrict the stimuli to the requested severity level.
     level_mask = stimulus_set['noise_level'] == noise_level
     self.stimulus_set = stimulus_set[level_mask]
     self.noise_level = noise_level
     self.noise_type = noise_type
     self.benchmark_name = (f'dietterich.Hendrycks2019-{noise_category}'
                            f'-{noise_type}-{noise_level}-top1')
     self._similarity_metric = Accuracy()
     # Ceiling is a fixed perfect score of 1 with an undefined error estimate.
     perfect_score = Score([1, np.nan],
                           coords={'aggregation': ['center', 'error']},
                           dims=['aggregation'])
     super(Imagenet_C_Individual, self).__init__(
         identifier=self.benchmark_name,
         version=2,
         ceiling_func=lambda: perfect_score,
         parent=f'dietterich.Hendrycks2019-{noise_category}-{noise_type}-top1',
         bibtex=BIBTEX)
Example #4
0
 def __init__(self):
     """Top-1 ImageNet-2012 classification benchmark.

     Reads stimulus metadata from the bundled ``imagenet2012.csv``, builds a
     :class:`StimulusSet` with an ``image_id -> filepath`` lookup, and scores
     models with :class:`Accuracy`. The ceiling is a fixed perfect score of 1
     with an undefined error estimate.
     """
     csv_path = os.path.join(os.path.dirname(__file__), 'imagenet2012.csv')
     stimuli = StimulusSet(pd.read_csv(csv_path))
     # Build the image_id -> filepath lookup used to locate stimuli on disk.
     path_lookup = {}
     for record in stimuli.itertuples():
         path_lookup[record.image_id] = record.filepath
     stimuli.image_paths = path_lookup
     self._stimulus_set = stimuli
     self._similarity_metric = Accuracy()
     perfect_score = Score([1, np.nan],
                           coords={'aggregation': ['center', 'error']},
                           dims=['aggregation'])
     super(Imagenet2012, self).__init__(
         identifier='fei-fei.Deng2009-top1',
         version=1,
         ceiling_func=lambda: perfect_score,
         parent='ImageNet',
         paper_link="https://ieeexplore.ieee.org/abstract/document/5206848")
Example #5
0
from collections import OrderedDict

import numpy as np
from brainio_base.assemblies import walk_coords, array_is_element, BehavioralAssembly
from numpy.random.mtrand import RandomState
from tqdm import tqdm

from brainscore.metrics import Score
from brainscore.metrics.accuracy import Accuracy
from brainscore.metrics.transformations import CrossValidationSingle
from brainscore.utils import fullname
from neural_nlp.analyze.data.ceiling import average_subregions
from neural_nlp.neural_data.fmri import load_voxels
from result_caching import store

# Shared module-level Accuracy metric instance.
accuracy = Accuracy()


def fit_decode(train, test):
    """Fit a story decoder on `train` and report accuracy on both splits.

    Both assemblies are first reordered to ('presentation', 'neuroid').
    A TFProbabilitiesClassifier is fit to predict the 'story' coordinate of
    the training split; accuracies on the train and test splits are merged
    into one Score along a new 'train_test' dimension labeled
    'train' / 'test'.

    :param train: assembly used both to fit and to compute train accuracy
    :param test: held-out assembly used to compute test accuracy
    :return: merged Score with a 'train_test' dimension
    """
    train = train.transpose('presentation', 'neuroid')
    test = test.transpose('presentation', 'neuroid')
    decoder = TFProbabilitiesClassifier()
    decoder.fit(train, train['story'])
    # Evaluate each split, tag it with its label, then merge train + test.
    split_scores = []
    for label, split in (('train', train), ('test', test)):
        split_score = evaluate(decoder, split).expand_dims('train_test')
        split_score['train_test'] = [label]
        split_scores.append(split_score)
    return Score.merge(*split_scores)