def get_stimulus_set(name):
    """Reconstruct the StimulusSet called *name* from the database.

    Downloads/locates the image files for the set, rebuilds the image table
    from the image↔set mapping, then attaches one column per distinct
    attribute (via a left-merge of that attribute's values), and finally
    wraps everything in a StimulusSet with its image paths and name set.
    """
    stimulus_set_model = StimulusSetModel.get(StimulusSetModel.name == name)
    image_paths = fetch_stimulus_set(stimulus_set_model)

    images_query = (ImageModel.select()
                    .join(StimulusSetImageMap)
                    .join(StimulusSetModel)
                    .where(StimulusSetModel.name == name))
    reconstructed = pd.DataFrame(list(images_query.dicts()))

    attributes_query = (AttributeModel.select()
                        .join(ImageMetaModel)
                        .join(ImageModel)
                        .join(StimulusSetImageMap)
                        .join(StimulusSetModel)
                        .where(StimulusSetModel.name == name)
                        .distinct())
    for attribute in attributes_query:
        values_query = (AttributeModel.select(ImageModel.image_id, ImageMetaModel.value)
                        .join(ImageMetaModel)
                        .join(ImageModel)
                        .join(StimulusSetImageMap)
                        .join(StimulusSetModel)
                        .where((StimulusSetModel.name == name)
                               & (AttributeModel.name == attribute.name)))
        attribute_frame = pd.DataFrame(list(values_query.dicts()))
        # how="left" keeps the row order of `reconstructed`, so the merged
        # "value" column can be assigned back positionally
        merged = reconstructed.merge(attribute_frame, on="image_id",
                                     how="left", suffixes=("orig_", ""))
        reconstructed[attribute.name] = merged["value"].astype(attribute.type)

    stimulus_set = StimulusSet(reconstructed)
    stimulus_set.image_paths = image_paths
    stimulus_set.name = name
    return stimulus_set
def get_assembly():
    """Build a small synthetic NeuroidAssembly for tests.

    5 neuroids x 40 presentations (20 images x 2 repetitions) x 1 time bin,
    with a matching 20-image StimulusSet named 'test' attached; returns the
    assembly squeezed over time and transposed to (presentation, neuroid).
    """
    image_names = [f'images/{i}.png' for i in range(1, 21)]
    # deterministic ramp plus noise, shaped (neuroid, presentation, time_bin)
    values = (np.arange(40 * 5) + np.random.standard_normal(40 * 5)).reshape((5, 40, 1))
    assembly = NeuroidAssembly(
        values,
        coords={
            'image_id': ('presentation', image_names * 2),
            'object_name': ('presentation', ['a'] * 40),
            'repetition': ('presentation', ([1] * 20 + [2] * 20)),
            'neuroid_id': ('neuroid', np.arange(5)),
            'region': ('neuroid', ['IT'] * 5),
            'time_bin_start': ('time_bin', [70]),
            'time_bin_end': ('time_bin', [170]),
        },
        dims=['neuroid', 'presentation', 'time_bin'])

    labels = ['a'] * 10 + ['b'] * 10
    rows = [{'image_id': image_names[i], 'object_name': 'a', 'image_label': labels[i]}
            for i in range(20)]
    stimulus_set = StimulusSet(rows)
    stimulus_set.image_paths = {image_name: os.path.join(os.path.dirname(__file__), image_name)
                                for image_name in image_names}
    stimulus_set.name = 'test'

    assembly.attrs['stimulus_set'] = stimulus_set
    assembly.attrs['stimulus_set_name'] = stimulus_set.name
    assembly = assembly.squeeze("time_bin")
    return assembly.transpose('presentation', 'neuroid')
def convert_stimuli(stimulus_set_existing, stimulus_set_name_new, image_dir_new):
    """Apply a cosine aperture to every image of an existing stimulus set.

    Writes converted images into *image_dir_new*, re-keys every image by the
    sha1 of its converted file, and returns a new StimulusSet (named
    *stimulus_set_name_new*) carrying the old ids in
    'image_id_without_aperture' plus an `id_mapping` old->new attribute.
    """
    Path(image_dir_new).mkdir(parents=True, exist_ok=True)
    image_converter = ApplyCosineAperture(target_dir=image_dir_new)
    converted_image_paths = {}
    converted_image_ids = {}
    for image_id in tqdm(stimulus_set_existing['image_id'],
                         total=len(stimulus_set_existing),
                         desc='apply cosine aperture'):
        converted_image_path = image_converter.convert_image(
            image_path=stimulus_set_existing.get_image(image_id))
        # the converted file's sha1 becomes the new image id
        converted_image_id = kf(converted_image_path).sha1
        converted_image_ids[image_id] = converted_image_id
        converted_image_paths[converted_image_id] = converted_image_path
        _logger.debug(
            f"{image_id} -> {converted_image_id}: {converted_image_path}")

    converted_stimuli = StimulusSet(stimulus_set_existing.copy(deep=True))
    converted_stimuli["image_id_without_aperture"] = converted_stimuli["image_id"]
    converted_stimuli["image_id"] = converted_stimuli["image_id"].map(converted_image_ids)
    converted_stimuli["image_file_sha1"] = converted_stimuli["image_id"]
    converted_stimuli.image_paths = converted_image_paths
    converted_stimuli.name = stimulus_set_name_new
    converted_stimuli.id_mapping = converted_image_ids
    return converted_stimuli
def test_creates_probabilities(self):
    """A probabilities readout fit on two labeled images yields a
    (presentation x choice) array of probabilities that sum to 1 per image
    and are symmetric across the matched image/label pairs."""
    activations_model = pytorch_custom()
    brain_model = ModelCommitment(identifier=activations_model.identifier,
                                  activations_model=activations_model,
                                  layers=None,
                                  behavioral_readout_layer='relu2')
    fitting_stimuli = StimulusSet({
        'image_id': ['rgb1', 'rgb2'],
        'image_label': ['label1', 'label2']
    })
    here = os.path.dirname(__file__)
    fitting_stimuli.image_paths = {
        'rgb1': os.path.join(here, 'rgb1.jpg'),
        'rgb2': os.path.join(here, 'rgb2.jpg')
    }
    fitting_stimuli.name = 'test_probabilities_mapping.creates_probabilities'
    brain_model.start_task(BrainModel.Task.probabilities, fitting_stimuli)
    probabilities = brain_model.look_at(fitting_stimuli)

    np.testing.assert_array_equal(probabilities.dims, ['presentation', 'choice'])
    np.testing.assert_array_equal(probabilities.shape, [2, 2])
    # p(label1 | rgb1) should match p(label2 | rgb2) for this symmetric fit
    np.testing.assert_array_almost_equal(
        probabilities.sel(image_id='rgb1', choice='label1').values,
        probabilities.sel(image_id='rgb2', choice='label2').values)
    total = probabilities.sel(image_id='rgb1', choice='label1') + \
        probabilities.sel(image_id='rgb1', choice='label2')
    assert total == approx(1)
def test_creates_synset(self, model_ctr):
    """The label task on 'imagenet' returns one synset id (n-prefixed)
    per input image."""
    np.random.seed(0)
    activations_model = model_ctr()
    # behavioral_readout_layer is irrelevant for the label task
    brain_model = ModelCommitment(identifier=activations_model.identifier,
                                  activations_model=activations_model,
                                  layers=None,
                                  behavioral_readout_layer='dummy')
    stimuli = StimulusSet({'image_id': ['abc123']})
    stimuli.image_paths = {
        'abc123': os.path.join(os.path.dirname(__file__), 'rgb1.jpg')}
    stimuli.name = 'test_logits_behavior.creates_synset'
    brain_model.start_task(BrainModel.Task.label, 'imagenet')
    synsets = brain_model.look_at(stimuli)
    assert len(synsets) == 1
    assert synsets[0].startswith('n')
def test_commit(self, model_ctr, layers, region):
    """Committing layer(s) to a region makes look_at record from exactly
    that region and exactly those layer(s).

    Bug fix: the final assertion was
        assert set(...) == {layers} if isinstance(layers, str) else set(layers)
    which Python parses as `assert ((set(...) == {layers}) if cond else
    set(layers))` — so for list-typed `layers` it only asserted that
    `set(layers)` is truthy and never checked the predictions at all.
    The expected set is now computed first and compared explicitly.
    """
    activations_model = model_ctr()
    layer_model = LayerMappedModel(identifier=activations_model.identifier,
                                   activations_model=activations_model)
    layer_model.commit(region, layers)
    layer_model.start_recording(region)
    stimulus_set = StimulusSet([{'image_id': 'test'}])
    stimulus_set.image_paths = {
        'test': os.path.join(os.path.dirname(__file__), 'rgb1.jpg')
    }
    stimulus_set.name = self.__class__.__name__
    predictions = layer_model.look_at(stimulus_set)
    assert set(predictions['region'].values) == {region}
    # a single layer may be passed as a bare string or as a list of names
    expected_layers = {layers} if isinstance(layers, str) else set(layers)
    assert set(predictions['layer'].values) == expected_layers
def load_assemblies(self, subset=None, fname='dataset.nc'):
    """Load per-run DataAssemblies from disk and cache them on self.

    For each selected run directory, opens `fname`, rebuilds a StimulusSet
    from the assembly's presentation coordinates, names it after the run's
    xent/recon loss weights, and attaches set and weights as attrs.

    :param subset: indices into self.runs (default: all runs)
    :param fname: netCDF file name inside each run directory
    :return: the list of loaded DataAssemblies (also stored in self.assemblies)
    """
    if subset is None:
        subset = np.arange(len(self.runs))
    loaded = [xarray.open_dataarray(os.path.join(run_dir, fname))
              for run_dir in self.runs[subset]]
    loss_w = self.experiment_set[['xent', 'recon']]
    self.assemblies = []
    for da, xent, recon in zip(loaded, loss_w.xent[subset], loss_w.recon[subset]):
        # presentation coords -> flat dataframe (drop the index column)
        stim_df = (da.presentation.to_dataframe(name='stimulus_set')
                   .reset_index()
                   .drop(columns=['presentation']))
        stim_set = StimulusSet(stim_df)
        stim_set.name = "lg.xent{}.recon{}".format(xent, recon)
        assembly = DataAssembly(da.assign_attrs({'stimulus_set': stim_set}))
        assembly = assembly.assign_attrs({'xent': xent, 'recon': recon})
        self.assemblies.append(assembly)
    return self.assemblies
def __call__(self, stimuli):
    """Convert every image in *stimuli* to the target degrees/pixels.

    Writes converted images into a per-stimulus-set target directory and
    returns a StimulusSet with updated image_paths, a uniform 'degrees'
    column, and an `original_paths` mapping (converted path -> source path).
    """
    target_dir = os.path.join(
        self._directory, stimuli.name,
        f"target_{self.target_degrees}deg_{self.target_pixels}pix")
    os.makedirs(target_dir, exist_ok=True)

    image_paths = {}
    for image_id, degrees in zip(stimuli['image_id'], stimuli['degrees']):
        image_paths[image_id] = self.convert_image(stimuli.get_image(image_id),
                                                   image_degrees=degrees,
                                                   target_dir=target_dir)

    # .copy() for some reason keeps the link to the old metadata
    converted_stimuli = StimulusSet(stimuli)
    converted_stimuli.name = f"{stimuli.name}-{self.target_degrees}degrees_{self.target_pixels}"
    converted_stimuli['degrees'] = self.target_degrees
    converted_stimuli.image_paths = image_paths
    converted_stimuli.original_paths = {
        converted_stimuli.image_paths[image_id]: stimuli.image_paths[image_id]
        for image_id in stimuli['image_id']
    }
    return converted_stimuli