def __init__(self):
    """Benchmark on object solution times (OST) for the Kar et al. 2019 IT recordings."""
    # Ceiling center of .79 with unknown error (NaN) — value taken from a
    # private conversation with Kohitij Kar (dataset author).
    ceiling = Score([.79, np.nan],
                    coords={'aggregation': ['center', 'error']},
                    dims=['aggregation'])
    super(DicarloKar2019OST, self).__init__(
        identifier='dicarlo.Kar2019-ost', version=2,
        ceiling_func=lambda: ceiling,
        parent='IT-temporal',
        paper_link='https://www.nature.com/articles/s41593-019-0392-5')
    assembly = brainscore.get_assembly('dicarlo.Kar2019')
    # drop duplicate images: keep the first presentation index per unique image_id
    _, index = np.unique(assembly['image_id'], return_index=True)
    assembly = assembly.isel(presentation=index)
    # keep the attached stimulus set consistent with the de-duplicated assembly
    assembly.attrs['stimulus_set'] = assembly.stimulus_set.drop_duplicates('image_id')
    assembly = assembly.sel(decoder='svm')
    self._assembly = assembly
    # expose labels under the 'truth' key expected downstream
    self._assembly['truth'] = self._assembly['image_label']
    self._assembly.stimulus_set['truth'] = self._assembly.stimulus_set['image_label']
    self._similarity_metric = OSTCorrelation()
    self._visual_degrees = VISUAL_DEGREES
    self._number_of_trials = 44  # presumably repetitions per image in this dataset — TODO confirm
def test_model(self):
    """End-to-end check of the visual-search model on the Zhang 2018 object-array set."""
    # 28px network encodes the search target, 224px network encodes the stimuli
    target_net = pytorch_custom(28)
    stimuli_net = pytorch_custom(224)
    search_target_model_param = {
        'target_model': target_net,
        'target_layer': 'relu1',
        'target_img_size': 28,
    }
    search_stimuli_model_param = {
        'stimuli_model': stimuli_net,
        'stimuli_layer': 'relu1',
        'search_image_size': 224,
    }
    model = ModelCommitment(identifier=stimuli_net.identifier,
                            activations_model=None,
                            layers=['relu1'],
                            search_target_model_param=search_target_model_param,
                            search_stimuli_model_param=search_stimuli_model_param)
    assemblies = brainscore.get_assembly('klab.Zhang2018search_obj_array')
    stimuli = assemblies.stimulus_set
    # initial fixation followed by the six array positions (x, y in pixels)
    fix = [[640, 512], [365, 988], [90, 512], [365, 36],
           [915, 36], [1190, 512], [915, 988]]
    max_fix = 6
    data_len = 300
    model.start_task(BrainModel.Task.visual_search_obj_arr,
                     fix=fix, max_fix=max_fix, data_len=data_len)
    cumm_perf, saccades = model.look_at(stimuli)
    assert saccades.shape == (300, 8, 2)
    assert cumm_perf.shape == (7, 2)
def load_hvm(group=lambda hvm: hvm.multi_groupby(['object_name', 'image_id'])):
    """Load the MajajHong2015 (HVM) assembly at variation 6, averaged over presentations.

    :param group: grouping applied before averaging; defaults to per-object, per-image
    :return: transposed assembly with the singleton time_bin removed
    """
    hvm = brainscore.get_assembly(name="dicarlo.MajajHong2015").sel(variation=6)
    hvm.load()
    grouped = group(hvm)
    return grouped.mean(dim="presentation").squeeze("time_bin").T
def load_assembly(average_repetitions):
    """Load the Kuzovkin et al. 2018 assembly, optionally averaging over repetitions."""
    assembly = brainscore.get_assembly(name='aru.Kuzovkin2018')
    # assembly.reset_index('time', drop=True, inplace=True)
    return average_repetition(assembly) if average_repetitions else assembly
def load_assembly(average_repetitions):
    """Load the Cichy et al. 2019 assembly, optionally averaging over repetitions."""
    assembly = brainscore.get_assembly(name='aru.Cichy2019')
    # NOTE(review): an off-by-one in image_id was noted here previously;
    # the correction (`assembly['image_id'] + 1`) remains disabled — verify upstream.
    return average_repetition(assembly) if average_repetitions else assembly
def load_assembly(average_repetitions, region, access='private'):
    """Load Majaj et al. 2015 recordings for one region as (presentation, neuroid).

    :param average_repetitions: when True, average responses over repeated presentations
    :param region: brain region to select
    :param access: data access level, part of the assembly identifier
    """
    identifier = f'dicarlo.Majaj2015.{access}'
    assembly = brainscore.get_assembly(name=identifier).sel(region=region)
    # re-attach the region as a neuroid-level coordinate after selection
    assembly['region'] = 'neuroid', [region] * len(assembly['neuroid'])
    assembly = assembly.squeeze("time_bin")
    assembly.load()
    assembly = assembly.transpose('presentation', 'neuroid')
    return average_repetition(assembly) if average_repetitions else assembly
def __call__(self, average_repetition=True):
    """Load the Cadena et al. 2017 V1 assembly as (presentation, neuroid)."""
    assembly = brainscore.get_assembly(name='tolias.Cadena2017')
    # re-stack the neuroid dimension to work around xarray multi-index handling
    assembly = assembly.rename({'neuroid': 'neuroid_id'}).stack(neuroid=['neuroid_id'])
    assembly.load()
    # all neuroids in this dataset are V1
    assembly['region'] = 'neuroid', ['V1'] * len(assembly['neuroid'])
    assembly = assembly.squeeze("time_bin").transpose('presentation', 'neuroid')
    if average_repetition:
        assembly = self.average_repetition(assembly)
    return assembly
def _MarquesSchiller1976V1Property(property_name):
    """Build a Schiller et al. 1976 V1 property benchmark for `property_name`."""
    assembly = brainscore.get_assembly(ASSEMBLY_NAME)
    similarity_metric = BootstrapDistributionSimilarity(similarity_func=ks_similarity,
                                                        property_name=property_name)
    # reuse the same metric for the ceiling instead of constructing a duplicate —
    # consistent with the other Marques* benchmark factories in this file
    ceil_func = NeuronalPropertyCeiling(similarity_metric)
    return PropertiesBenchmark(identifier=f'dicarlo.Marques_schiller1976-{property_name}',
                               assembly=assembly,
                               neuronal_property=schiller1976_properties,
                               similarity_metric=similarity_metric,
                               timebins=TIMEBINS,
                               parent=PARENT,
                               ceiling_func=ceil_func,
                               bibtex=BIBTEX, version=1)
def MarquesDeValois1982V1PeakSpatialFrequency():
    """Benchmark for the peak-spatial-frequency property from De Valois et al. 1982."""
    property_name = 'peak_spatial_frequency'
    assembly = brainscore.get_assembly(ASSEMBLY_NAME)
    metric = BootstrapDistributionSimilarity(similarity_func=ks_similarity,
                                             property_name=property_name)
    return PropertiesBenchmark(identifier=f'dicarlo.Marques_devalois1982-{property_name}',
                               assembly=assembly,
                               neuronal_property=devalois1982b_properties,
                               similarity_metric=metric,
                               timebins=TIMEBINS,
                               parent=PARENT,
                               ceiling_func=NeuronalPropertyCeiling(metric),
                               bibtex=BIBTEX, version=1)
def load_assembly(average_repetitions, region):
    """Load Rajalingham et al. 2020 recordings for one region.

    :param average_repetitions: when True, average responses over repeated presentations
    :param region: brain region to select
    """
    # plain string: the identifier has no interpolated fields (was a pointless f-string)
    assembly = brainscore.get_assembly(name='dicarlo.Rajalingham2020')
    assembly = assembly.sel(region=region)
    # re-attach the region as a neuroid-level coordinate after selection
    assembly['region'] = 'neuroid', [region] * len(assembly['neuroid'])
    assembly.load()
    assembly = assembly.squeeze('time_bin')
    # sanity check: repetition count must match the benchmark's expectation
    assert NUMBER_OF_TRIALS == len(np.unique(assembly.coords['repetition']))
    if average_repetitions:
        assembly = average_repetition(assembly)
    return assembly
def load_assembly(average_repetitions, region):
    """Load Sanghavi 2020 recordings for one region at the 70-170ms time bin.

    :param average_repetitions: when True, average responses over repeated presentations
    :param region: brain region to select
    """
    # plain string: the identifier has no interpolated fields (was a pointless f-string)
    assembly = brainscore.get_assembly(name='dicarlo.Sanghavi2020')
    assembly = assembly.sel(region=region)
    # re-attach the region as a neuroid-level coordinate after selection
    assembly['region'] = 'neuroid', [region] * len(assembly['neuroid'])
    assembly.load()
    assembly = assembly.sel(time_bin_id=0)  # 70-170ms
    assembly = assembly.squeeze('time_bin')
    # sanity checks: repetition count and presentation size must match expectations
    assert NUMBER_OF_TRIALS == len(np.unique(assembly.coords['repetition']))
    assert VISUAL_DEGREES == assembly.attrs['image_size_degree']
    if average_repetitions:
        assembly = average_repetition(assembly)
    return assembly
def _MarquesFreemanZiemba2013V1Property(property_name, parent):
    """Build a Freeman & Ziemba 2013 V1 property benchmark for `property_name`."""
    assembly = brainscore.get_assembly(ASSEMBLY_NAME)
    metric = BootstrapDistributionSimilarity(similarity_func=ks_similarity,
                                             property_name=property_name)
    # the ceiling reuses the same distribution-similarity metric
    return PropertiesBenchmark(
        identifier=f'dicarlo.Marques_freemanziemba2013-{property_name}',
        assembly=assembly,
        neuronal_property=freemanziemba2013_properties,
        similarity_metric=metric,
        timebins=TIMEBINS,
        parent=parent,
        ceiling_func=NeuronalPropertyCeiling(metric),
        bibtex=BIBTEX, version=1)
def __init__(self):
    """Benchmark on object solution times (OST) for the Kar et al. 2019 IT recordings."""
    # Ceiling center of .79 with unknown error (NaN) — value taken from a
    # private conversation with Kohitij Kar (dataset author).
    ceiling = Score([.79, np.nan],
                    coords={'aggregation': ['center', 'error']},
                    dims=['aggregation'])
    super(DicarloKar2019OST, self).__init__(
        identifier='dicarlo.Kar2019-ost', version=2,
        ceiling_func=lambda: ceiling,
        parent='IT-temporal',
        bibtex="""@Article{Kar2019, author={Kar, Kohitij and Kubilius, Jonas and Schmidt, Kailyn and Issa, Elias B. and DiCarlo, James J.}, title={Evidence that recurrent circuits are critical to the ventral stream's execution of core object recognition behavior}, journal={Nature Neuroscience}, year={2019}, month={Jun}, day={01}, volume={22}, number={6}, pages={974-983}, abstract={Non-recurrent deep convolutional neural networks (CNNs) are currently the best at modeling core object recognition, a behavior that is supported by the densely recurrent primate ventral stream, culminating in the inferior temporal (IT) cortex. If recurrence is critical to this behavior, then primates should outperform feedforward-only deep CNNs for images that require additional recurrent processing beyond the feedforward IT response. Here we first used behavioral methods to discover hundreds of these `challenge' images. Second, using large-scale electrophysiology, we observed that behaviorally sufficient object identity solutions emerged {\textasciitilde}30{\thinspace}ms later in the IT cortex for challenge images compared with primate performance-matched `control' images. Third, these behaviorally critical late-phase IT response patterns were poorly predicted by feedforward deep CNN activations. Notably, very-deep CNNs and shallower recurrent CNNs better predicted these late IT responses, suggesting that there is a functional equivalence between additional nonlinear transformations and recurrence. 
Beyond arguing that recurrent circuits are critical for rapid object identification, our results provide strong constraints for future recurrent model development.}, issn={1546-1726}, doi={10.1038/s41593-019-0392-5}, url={https://doi.org/10.1038/s41593-019-0392-5} }""")
    assembly = brainscore.get_assembly('dicarlo.Kar2019')
    # drop duplicate images: keep the first presentation index per unique image_id
    _, index = np.unique(assembly['image_id'], return_index=True)
    assembly = assembly.isel(presentation=index)
    # keep the attached stimulus set consistent with the de-duplicated assembly
    assembly.attrs['stimulus_set'] = assembly.stimulus_set.drop_duplicates('image_id')
    assembly = assembly.sel(decoder='svm')
    self._assembly = assembly
    # expose labels under the 'truth' key expected downstream
    self._assembly['truth'] = self._assembly['image_label']
    self._assembly.stimulus_set['truth'] = self._assembly.stimulus_set['image_label']
    self._similarity_metric = OSTCorrelation()
    self._visual_degrees = VISUAL_DEGREES
    self._number_of_trials = 44  # presumably repetitions per image in this dataset — TODO confirm
def __init__(self, ceil_score=None, assembly_name=None, identifier_suffix=""):
    """Object-search benchmark on the Zhang et al. 2018 dataset.

    :param ceil_score: human ceiling score (center); its error is unknown (NaN)
    :param assembly_name: name of the assembly to load via brainscore
    :param identifier_suffix: appended to the benchmark identifier
    """
    self.human_score = Score([ceil_score, np.nan],
                             coords={'aggregation': ['center', 'error']},
                             dims=['aggregation'])
    self._version = 1
    self._identifier = 'klab.Zhang2018.ObjSearch-' + identifier_suffix
    # bug fix: a stray trailing comma previously made `parent` a 1-tuple
    # ('visual_search',) instead of a plain string as in the sibling benchmark
    self.parent = 'visual_search'
    self.paper_link = 'https://doi.org/10.1038/s41467-018-06217-x'
    self._assemblies = brainscore.get_assembly(assembly_name)
    self._stimuli = self._assemblies.stimulus_set
    # initial fixation followed by the six object-array positions (x, y in pixels)
    self.fix = [[640, 512], [365, 988], [90, 512], [365, 36],
                [915, 36], [1190, 512], [915, 988]]
    self.max_fix = 6
    self.data_len = 300
    self._logger = logging.getLogger(fullname(self))
def __init__(self, ceil_score=None, assembly_name=None, identifier_suffix=""):
    """Visual-search benchmark on the Zhang et al. 2018 dataset.

    :param ceil_score: human ceiling score (center); its error is unknown (NaN)
    :param assembly_name: name of the assembly to load via brainscore
    :param identifier_suffix: appended to the benchmark identifier
    """
    self._logger = logging.getLogger(fullname(self))
    self.human_score = Score([ceil_score, np.nan],
                             coords={'aggregation': ['center', 'error']},
                             dims=['aggregation'])
    self._version = 1
    self._identifier = 'klab.Zhang2018.VisualSearch-' + identifier_suffix
    self.parent = 'visual_search'
    self.paper_link = 'https://doi.org/10.1038/s41467-018-06217-x'
    self._assemblies = brainscore.get_assembly(assembly_name)
    self._stimuli = self._assemblies.stimulus_set
    # presumably two fixation entries are bookkeeping (start/end) — verify against dataset
    self.max_fix = self._assemblies.fixation.values.shape[0] - 2
    self.num_sub = np.max(self._assemblies.subjects.values)
    # presentations per subject
    self.data_len = int(self._assemblies.presentation.values.shape[0] / self.num_sub)
    self.ior_size = 100  # inhibition-of-return size; presumably pixels — TODO confirm
def load_assembly(average_repetitions, region, access='private'):
    """Load Freeman & Ziemba 2013 recordings for one region, averaged over 50-200ms.

    :param average_repetitions: when True, average responses over repeated presentations
    :param region: brain region to select
    :param access: data access level, part of the assembly identifier
    :return: assembly transposed to (presentation, neuroid)
    """
    assembly = brainscore.get_assembly(f'movshon.FreemanZiemba2013.{access}')
    assembly = assembly.sel(region=region)
    assembly = assembly.stack(
        neuroid=['neuroid_id'])  # work around xarray multiindex issues
    # re-attach the region as a neuroid-level coordinate after selection
    assembly['region'] = 'neuroid', [region] * len(assembly['neuroid'])
    assembly.load()
    # average the response over the 50-200ms window (selected as 1ms bins)
    time_window = (50, 200)
    assembly = assembly.sel(time_bin=[(t, t + 1) for t in range(*time_window)])
    assembly = assembly.mean(dim='time_bin', keep_attrs=True)
    # re-introduce a single time_bin coordinate spanning the averaged window,
    # then squeeze it away so downstream code sees a 2-d assembly
    assembly = assembly.expand_dims('time_bin_start').expand_dims('time_bin_end')
    assembly['time_bin_start'], assembly['time_bin_end'] = [time_window[0]], [time_window[1]]
    assembly = assembly.stack(time_bin=['time_bin_start', 'time_bin_end'])
    assembly = assembly.squeeze('time_bin')
    assembly = assembly.transpose('presentation', 'neuroid')
    if average_repetitions:
        assembly = average_repetition(assembly)
    return assembly
def load_assembly(access='private'):
    """Load Rajalingham et al. 2018 behavioral data with a per-trial correctness flag."""
    assembly = brainscore.get_assembly(f'dicarlo.Rajalingham2018.{access}')
    # a trial counts as correct when the chosen object matches the sample object
    assembly['correct'] = assembly['choice'] == assembly['sample_obj']
    return assembly
def package(features_path='/braintree/data2/active/users/qbilius/computed/hvm/ait'):
    """Package precomputed basenet V4/pIT/aIT activations into NeuroidAssembly pickles.

    Aligns each model's saved .npy activations to the HVM assembly's image order,
    concatenates the three layer blocks along the neuroid axis, and writes one
    pickle per model into the candidate_models output directory.

    :param features_path: directory containing the basenets_hvm_feats* subdirectories
    """
    assert os.path.isdir(features_path)
    features_paths = [os.path.join(features_path, 'basenets_hvm_feats_V4'),
                      os.path.join(features_path, 'basenets_hvm_feats_pIT'),
                      os.path.join(features_path, 'basenets_hvm_feats')]
    # alignment: map meta image ids onto the HVM assembly's image order
    meta = pd.read_pickle(os.path.join(os.path.dirname(__file__), 'basenets-meta.pkl'))
    meta = meta[meta['var'] == 6]
    meta_ids = meta['id'].values.tolist()
    hvm = brainscore.get_assembly('dicarlo.Majaj2015') \
        .sel(variation=6) \
        .multi_groupby(['category_name', 'object_name', 'image_id']) \
        .mean(dim="presentation") \
        .squeeze("time_bin")
    hvm_ids = hvm['image_id'].values.tolist()
    assert len(hvm_ids) == len(meta_ids)
    indexes = [meta_ids.index(id) for id in hvm_ids]
    basenets = []
    for activations_path_v4 in glob.glob(os.path.join(features_paths[0], '*.npy')):
        activations_path_pit = os.path.abspath(
            os.path.join(features_paths[1], os.path.basename(activations_path_v4)))
        activations_path_ait = os.path.abspath(
            os.path.join(features_paths[2], os.path.basename(activations_path_v4)))
        assert os.path.isfile(activations_path_pit)
        assert os.path.isfile(activations_path_ait)
        print(activations_path_v4, activations_path_pit, activations_path_ait, end='')
        activations_v4 = np.load(activations_path_v4)
        activations_pit = np.load(activations_path_pit)
        activations_ait = np.load(activations_path_ait)
        assert activations_v4.shape[0] == activations_pit.shape[0] \
               == activations_ait.shape[0] == len(indexes)
        # reorder every layer's rows to the HVM image order
        activations_v4 = activations_v4[indexes, :]
        # bug fix: pIT rows were previously taken from the aIT array
        # (copy-paste error), silently duplicating aIT activations
        activations_pit = activations_pit[indexes, :]
        activations_ait = activations_ait[indexes, :]
        # carry over presentation-level coords from the HVM assembly
        coords = {coord: (dims, values)
                  for coord, dims, values in walk_coords(hvm)
                  if array_is_element(dims, 'presentation')}
        coords['neuroid_id'] = 'neuroid', list(range(3000))
        # 1000 units per layer, concatenated V4 | pIT | aIT
        coords['layer'] = 'neuroid', np.concatenate([np.repeat('basenet-layer_v4', 1000),
                                                     np.repeat('basenet-layer_pit', 1000),
                                                     np.repeat('basenet-layer_ait', 1000)])
        activations = np.concatenate([activations_v4, activations_pit, activations_ait],
                                     axis=1)
        print(activations.shape, end='')
        assert activations.shape[0] == len(indexes)
        assembly = NeuroidAssembly(activations, coords=coords,
                                   dims=['presentation', 'neuroid'])
        model_name = os.path.splitext(os.path.basename(activations_path_pit))[0]
        basenets.append(model_name)
        target_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), '..', '..', '..',
            'output/candidate_models.models.model_activations',
            'model={},stimulus_set=dicarlo.hvm,weights=imagenet,image_size=224,pca_components=1000.pkl'
            .format(model_name)))
        print("-->", target_path)
        with open(target_path, 'wb') as target_file:
            pickle.dump({'data': assembly}, target_file)
    print(" ".join(basenets))