def test_model(self, model_identifier):
    """Smoke-test that a pooled base model produces layer activations on a small stimulus subset.

    :param model_identifier: key into `model_layers` / `base_model_pool`
    """
    layers = model_layers[model_identifier]
    activations_model = base_model_pool[model_identifier]
    stimulus_set = get_stimulus_set('dicarlo.hvm')
    stimulus_set = stimulus_set[:100]  # keep the test fast: only the first 100 stimuli
    # Use `identifier` (not `name`), consistent with the sibling test_model in this file;
    # a distinct identifier keeps the truncated set from being confused with the full
    # 'dicarlo.hvm' set (e.g. in activation caches).
    stimulus_set.identifier = 'dicarlo.hvm-min'
    activations = activations_model(stimulus_set, layers=layers)
    assert activations is not None
    assert hasattr(activations, 'layer')
def __init__(self):
    """Set up the Rajalingham et al. 2018 behavioral benchmark, scored with the I2n metric."""
    self._metric = I2n()
    self._fitting_stimuli = brainscore.get_stimulus_set('dicarlo.objectome.public')
    # defer loading the private assembly until it is actually needed
    self._assembly = LazyLoad(lambda: load_assembly('private'))
    self._visual_degrees = 8
    self._number_of_trials = 2
    super().__init__(
        identifier='dicarlo.Rajalingham2018-i2n',
        version=2,
        ceiling_func=lambda: self._metric.ceiling(self._assembly),
        parent='behavior',
        bibtex=BIBTEX,
    )
def __init__(self):
    """Set up the Rajalingham et al. 2018 behavioral benchmark (I2n metric, paper_link variant)."""
    self._metric = I2n()
    self._fitting_stimuli = brainscore.get_stimulus_set('dicarlo.objectome.public')
    # the private assembly is loaded lazily on first access
    self._assembly = LazyLoad(lambda: load_assembly('private'))
    super().__init__(
        identifier='dicarlo.Rajalingham2018-i2n',
        version=2,
        ceiling_func=lambda: self._metric.ceiling(self._assembly),
        parent='behavior',
        paper_link='https://www.biorxiv.org/content/early/2018/02/12/240614',
    )
def record_from_model(model: BrainModel, stimulus_identifier, number_of_trials):
    """Present the named stimulus set to `model` and return its recorded activations.

    :param model: the BrainModel to record from
    :param stimulus_identifier: identifier of the stimulus set to fetch
    :param number_of_trials: how many trials to present each stimulus for
    :return: the model's activations, with any singleton time dimension removed
    """
    stimuli = get_stimulus_set(stimulus_identifier)
    stimuli = place_on_screen(stimuli, target_visual_degrees=model.visual_degrees())
    recordings = model.look_at(stimuli, number_of_trials)
    # these benchmarks are static: collapse the time dimension if the model reports one
    if 'time_bin' in recordings.dims:
        recordings = recordings.squeeze('time_bin')
    # downstream code mutates the values in place; make sure the buffer allows it
    if not recordings.values.flags['WRITEABLE']:
        recordings.values.setflags(write=1)
    return recordings
def __init__(self, identifier_suffix, noise_type):
    """Top-1 ImageNet-C accuracy benchmark for a single noise condition.

    :param identifier_suffix: suffix naming the Hendrycks2019 stimulus set
    :param noise_type: noise family this condition belongs to (used for the parent identifier)
    """
    identifier = f'dietterich.Hendrycks2019.{identifier_suffix}'
    self._stimulus_set = brainscore.get_stimulus_set(identifier)
    self._similarity_metric = Accuracy()
    self._benchmark_name = identifier
    self._noise_type = noise_type
    # no empirical ceiling is available; assume perfect accuracy with unknown error
    ceiling = Score([1, np.nan],
                    coords={'aggregation': ['center', 'error']},
                    dims=['aggregation'])
    super().__init__(
        identifier=f"{identifier}-top1",
        version=1,
        ceiling_func=lambda: ceiling,
        parent=f'dietterich.Hendrycks2019-{noise_type}-top1',
        bibtex=BIBTEX,
    )
def test_model(self, model_identifier):
    """Smoke-test a keras/TF base model: activations come back with a `layer` coordinate."""
    # reset keras/TF state so variable names are reproducible across tests
    import keras
    keras.backend.clear_session()
    import tensorflow as tf
    tf.compat.v1.reset_default_graph()
    layers = model_layers[model_identifier]
    activations_model = base_model_pool[model_identifier]
    # only run on the first 100 hvm stimuli to keep the test fast
    stimulus_set = get_stimulus_set('dicarlo.hvm')[:100]
    stimulus_set.identifier = 'dicarlo.hvm-min'
    activations = activations_model(stimulus_set, layers=layers)
    assert activations is not None
    assert hasattr(activations, 'layer')
def __init__(self, noise_category, sampling_factor=10):
    """Top-1 ImageNet-C accuracy benchmark aggregated over one noise category.

    :param noise_category: Hendrycks2019 noise category (e.g. noise/blur/weather/digital)
    :param sampling_factor: keep every n-th image of the full stimulus set
    """
    self.noise_category = noise_category
    self.stimulus_set_name = f'dietterich.Hendrycks2019.{noise_category}'
    # subsample the (large) stimulus set: take every nth image, n=sampling_factor.
    full_set = brainscore.get_stimulus_set(self.stimulus_set_name)
    self.stimulus_set = full_set[::sampling_factor]
    self.noise_types = self.noise_category_map[noise_category]
    # no empirical ceiling is available; assume perfect accuracy with unknown error
    ceiling = Score([1, np.nan],
                    coords={'aggregation': ['center', 'error']},
                    dims=['aggregation'])
    super().__init__(
        identifier=f'dietterich.Hendrycks2019-{noise_category}-top1',
        version=2,
        ceiling_func=lambda: ceiling,
        parent='dietterich.Hendrycks2019-top1',
        bibtex=BIBTEX,
    )
def run_test_properties(self, benchmark, files, expected):
    """Run a Marques2020 property benchmark against precomputed features and check its score.

    :param benchmark: identifier to look up in `benchmark_pool`
    :param files: mapping from stimulus-set identifier to a precomputed-features file
                  (path relative to this test file)
    :param expected: expected value of the raw score's 'center' aggregation
    """
    benchmark = benchmark_pool[benchmark]
    from brainscore import get_stimulus_set
    # the property benchmarks use these three auxiliary stimulus sets plus the
    # benchmark's own assembly stimulus set; np.unique drops duplicates
    stimulus_identifiers = np.unique(
        np.array([
            'dicarlo.Marques2020_blank', 'dicarlo.Marques2020_receptive_field',
            'dicarlo.Marques2020_orientation',
            benchmark._assembly.stimulus_set.identifier
        ]))
    # build one BehavioralAssembly of precomputed features per stimulus set
    precomputed_features = {}
    for current_stimulus in stimulus_identifiers:
        stimulus_set = get_stimulus_set(current_stimulus)
        # load the precomputed features stored next to this test file
        precomputed_features[current_stimulus] = Path(
            __file__).parent / files[current_stimulus]
        precomputed_features[current_stimulus] = BehavioralAssembly(
            xr.load_dataarray(precomputed_features[current_stimulus]))
        precomputed_features[current_stimulus] = \
            precomputed_features[current_stimulus].stack(presentation=['stimulus_path'])
        precomputed_paths = list(
            map(
                lambda f: Path(f).name, precomputed_features[current_stimulus]
                ['stimulus_path'].values))
        # attach stimulus set meta
        # sanity-check that the precomputed features cover exactly the stimuli
        # of this stimulus set (compared by file name)
        expected_stimulus_paths = [
            stimulus_set.get_image(image_id) for image_id in stimulus_set['image_id']
        ]
        expected_stimulus_paths = list(
            map(lambda f: Path(f).name, expected_stimulus_paths))
        assert set(precomputed_paths) == set(expected_stimulus_paths)
        # copy every stimulus-set column onto the presentation dimension
        for column in stimulus_set.columns:
            precomputed_features[current_stimulus][
                column] = 'presentation', stimulus_set[column].values
    # NOTE: the dict is rebound here — from here on `precomputed_features` is the
    # model-like PrecomputedFeatures wrapper, not the per-stimulus mapping
    precomputed_features = PrecomputedFeatures(precomputed_features,
                                               visual_degrees=8)
    # score
    score = benchmark(precomputed_features).raw
    assert score.sel(aggregation='center') == expected
def load_stimulus_set(self):
    """
    ImageNet-C is quite large, and thus cumbersome to download each time the
    benchmark is run. Here we try loading a local copy first, before proceeding
    to download the AWS copy.
    """
    try:
        _logger.debug(f'Loading local Imagenet-C {self.noise_category}')
        category_path = os.path.join(
            LOCAL_STIMULUS_DIRECTORY,
            f'image_dietterich_Hendrycks2019_{self.noise_category}')
        csv_path = os.path.join(
            category_path,
            f'image_dietterich_Hendrycks2019_{self.noise_category}.csv')
        local_loader = SampledStimulusSetLoader(
            csv_path=csv_path,
            stimuli_directory=category_path,
            sampling_factor=self.sampling_factor)
        return local_loader.load()
    except OSError as error:
        # local copy missing or unreadable — fall back to the remote copy
        _logger.debug(
            f'Excepted {error}. Attempting to access {self.stimulus_set_name} through Brainscore.'
        )
        return brainscore.get_stimulus_set(self.stimulus_set_name)
def get_activations(model, layers, stimulus_set):
    """Fetch the named stimulus set and run `model` over it for the given layers."""
    stimuli = brainscore.get_stimulus_set(stimulus_set)
    activations = model(stimuli, layers=layers)
    return activations