def get_assembly():
    """Build a synthetic NeuroidAssembly fixture: 40 presentations (20 images
    x 2 repetitions) by 5 IT neuroids, with an attached 20-image StimulusSet.
    """
    image_names = [f'images/{num}.png' for num in range(1, 21)]
    values = (np.arange(40 * 5) + np.random.standard_normal(40 * 5)).reshape((5, 40, 1))
    assembly = NeuroidAssembly(
        values,
        coords={
            'image_id': ('presentation', image_names * 2),
            'object_name': ('presentation', ['a'] * 40),
            'repetition': ('presentation', ([1] * 20 + [2] * 20)),
            'neuroid_id': ('neuroid', np.arange(5)),
            'region': ('neuroid', ['IT'] * 5),
            'time_bin_start': ('time_bin', [70]),
            'time_bin_end': ('time_bin', [170])
        },
        dims=['neuroid', 'presentation', 'time_bin'])
    labels = ['a'] * 10 + ['b'] * 10
    rows = [{'image_id': name, 'object_name': 'a', 'image_label': label}
            for name, label in zip(image_names, labels)]
    stimulus_set = StimulusSet(rows)
    base_dir = os.path.dirname(__file__)
    stimulus_set.image_paths = {name: os.path.join(base_dir, name)
                                for name in image_names}
    stimulus_set.identifier = 'test'
    assembly.attrs['stimulus_set'] = stimulus_set
    assembly.attrs['stimulus_set_name'] = stimulus_set.identifier
    # collapse the singleton time_bin dimension and order dims for consumers
    assembly = assembly.squeeze("time_bin")
    return assembly.transpose('presentation', 'neuroid')
 def test_creates_probabilities(self):
     """Fit a probabilities readout on two labeled stimuli, then check that the
     returned assembly has the expected dims/shape, is symmetric across the two
     image/label pairings, and that per-image probabilities sum to 1."""
     activations_model = pytorch_custom()
     brain_model = ModelCommitment(identifier=activations_model.identifier,
                                   activations_model=activations_model,
                                   layers=None,
                                   behavioral_readout_layer='relu2')
     fitting_stimuli = StimulusSet({
         'image_id': ['rgb1', 'rgb2'],
         'image_label': ['label1', 'label2']
     })
     fitting_stimuli.image_paths = {
         'rgb1': os.path.join(os.path.dirname(__file__), 'rgb1.jpg'),
         'rgb2': os.path.join(os.path.dirname(__file__), 'rgb2.jpg')
     }
     fitting_stimuli.identifier = 'test_probabilities_mapping.creates_probabilities'
     fitting_stimuli = place_on_screen(
         fitting_stimuli,
         target_visual_degrees=brain_model.visual_degrees(),
         source_visual_degrees=8)
     brain_model.start_task(BrainModel.Task.probabilities, fitting_stimuli)
     probabilities = brain_model.look_at(fitting_stimuli)
     np.testing.assert_array_equal(probabilities.dims, ['presentation', 'choice'])
     np.testing.assert_array_equal(probabilities.shape, [2, 2])
     # matching image/label pairs should receive (nearly) identical probability
     np.testing.assert_array_almost_equal(
         probabilities.sel(image_id='rgb1', choice='label1').values,
         probabilities.sel(image_id='rgb2', choice='label2').values)
     total = probabilities.sel(image_id='rgb1', choice='label1') + \
             probabilities.sel(image_id='rgb1', choice='label2')
     assert total == approx(1)
def _build_stimulus_set(image_names):
    """Create a StimulusSet from `image_names` where each entry's 'some_meta'
    is the image name reversed, with paths resolved next to this file."""
    records = []
    for name in image_names:
        records.append({'image_id': name, 'some_meta': name[::-1]})
    stimulus_set = StimulusSet(records)
    base_dir = os.path.dirname(__file__)
    stimulus_set.image_paths = {name: os.path.join(base_dir, name)
                                for name in image_names}
    return stimulus_set
Beispiel #4
0
    def test_commit(self, model_ctr, layers, region):
        """Map `layers` onto `region` in a LayerMappedModel, record from that
        region for a single stimulus, and verify the predictions carry the
        expected region and layer coordinates.
        """
        activations_model = model_ctr()
        layer_model = LayerMappedModel(identifier=activations_model.identifier, activations_model=activations_model,
                                       region_layer_map={region: layers})

        layer_model.start_recording(region)
        stimulus_set = StimulusSet([{'image_id': 'test'}])
        stimulus_set.image_paths = {'test': os.path.join(os.path.dirname(__file__), 'rgb1.jpg')}
        stimulus_set.identifier = self.__class__.__name__
        predictions = layer_model.look_at(stimulus_set)
        assert set(predictions['region'].values) == {region}
        # Fix: the conditional expression must be parenthesized. Previously the
        # `if/else` applied to the entire assert statement, so for a list of
        # layers the check degenerated to `assert set(layers)` (mere truthiness)
        # and the equality against the predicted layers was never verified.
        expected_layers = {layers} if isinstance(layers, str) else set(layers)
        assert set(predictions['layer'].values) == expected_layers
Beispiel #5
0
 def load(self):
     """Read the stimulus csv (subsampled by `self.sampling_factor`), wrap it
     in a StimulusSet, resolve each image_id to a file under
     `self.stimuli_directory`, and verify every resolved file exists."""
     frame = pd.read_csv(self.csv_path)[::self.sampling_factor]
     stimulus_set = StimulusSet(frame)
     image_paths = {}
     for _, row in stimulus_set.iterrows():
         image_paths[row['image_id']] = os.path.join(self.stimuli_directory,
                                                     row['filename'])
     stimulus_set.image_paths = image_paths
     # fail fast if any referenced image is missing on disk
     assert all(os.path.isfile(path) for path in stimulus_set.image_paths.values())
     return stimulus_set
def visual_degree_samples(visual_degrees_samples=(8, 4, 12), base_degrees=8):
    """Convert the base image from `base_degrees` to each target size in
    `visual_degrees_samples` and save each result into the static directory
    as ``{base_degrees}_to_{target}.png``.

    :param visual_degrees_samples: target visual degrees to generate samples for
    :param base_degrees: visual degrees the base image spans (source size)
    """
    base_image = Path(__file__).parent / 'base.png'
    stimulus_set = StimulusSet([{'image_id': 'base'}])
    stimulus_set.image_paths = {'base': base_image}
    stimulus_set.identifier = 'visual_degrees_base'
    for visual_degrees in visual_degrees_samples:
        converted_stimulus_set = place_on_screen(
            stimulus_set,
            # Fix: was hard-coded to 8, silently ignoring the `base_degrees`
            # parameter even though the output filename claims `base_degrees`
            # as the source size. Default of 8 preserves previous behavior.
            source_visual_degrees=base_degrees,
            target_visual_degrees=visual_degrees)
        converted_path = converted_stimulus_set.get_image('base')
        target_path = static_directory / 'visual_degrees' / f"{base_degrees}_to_{visual_degrees}.png"
        converted_image = Image.open(converted_path)
        converted_image.save(target_path)
Beispiel #7
0
def _place_on_screen(stimuli_identifier: str,
                     stimulus_set: StimulusSet,
                     target_visual_degrees: int,
                     source_visual_degrees: int = None):
    """Rescale every image in `stimulus_set` from its source visual degrees to
    `target_visual_degrees`, writing the converted images into a fresh
    directory and returning a new StimulusSet that points at them.

    :param stimuli_identifier: base identifier used to name the converted set
    :param stimulus_set: the stimuli whose images are to be converted
    :param target_visual_degrees: visual degrees each image should span
    :param source_visual_degrees: source visual degrees; resolved per-image via
        `_determine_visual_degrees` (which also handles the None case)
    :return: a converted StimulusSet with new `image_paths`, a `degrees`
        column, a new `identifier`, and the originals kept in `original_paths`
    """
    # NOTE(review): the identifier is built from the raw argument *before*
    # resolution, so passing None yields "...--sourceNone" — confirm intended.
    converted_stimuli_id = f"{stimuli_identifier}--target{target_visual_degrees}--source{source_visual_degrees}"
    # after this call, source_visual_degrees is a per-image iterable (see zip below)
    source_visual_degrees = _determine_visual_degrees(source_visual_degrees,
                                                      stimulus_set)

    # exist_ok=False: refuse to silently overwrite a previously converted set
    target_dir = root_path / converted_stimuli_id
    target_dir.mkdir(parents=True, exist_ok=False)
    image_converter = ImageConverter(target_dir=target_dir)

    converted_image_paths = {}
    for image_id, image_degrees in tqdm(zip(stimulus_set['image_id'],
                                            source_visual_degrees),
                                        total=len(stimulus_set),
                                        desc='convert image degrees'):
        converted_image_path = image_converter.convert_image(
            image_path=stimulus_set.get_image(image_id),
            source_degrees=image_degrees,
            target_degrees=target_visual_degrees)
        converted_image_paths[image_id] = converted_image_path
    converted_stimuli = StimulusSet(stimulus_set.copy(
        deep=True))  # without copy, it will link to the previous stim set
    converted_stimuli.image_paths = converted_image_paths
    converted_stimuli.identifier = converted_stimuli_id
    converted_stimuli['degrees'] = target_visual_degrees
    # keep a handle on the pre-conversion paths for downstream consumers
    converted_stimuli.original_paths = copy.deepcopy(stimulus_set.image_paths)
    return converted_stimuli
Beispiel #8
0
 def __init__(self):
     """Set up the ImageNet 2012 top-1 benchmark: load the stimulus listing
     from imagenet2012.csv, use Accuracy as the similarity metric, and register
     identifier/ceiling/bibtex metadata with the benchmark base class.
     """
     stimulus_set = pd.read_csv(os.path.join(os.path.dirname(__file__), 'imagenet2012.csv'))
     stimulus_set = StimulusSet(stimulus_set)
     # map each image_id to its on-disk filepath (both columns come from the csv)
     stimulus_set.image_paths = {row.image_id: row.filepath for row in stimulus_set.itertuples()}
     self._stimulus_set = stimulus_set
     self._similarity_metric = Accuracy()
     # ceiling of 1 (perfect accuracy) with undefined error
     ceiling = Score([1, np.nan], coords={'aggregation': ['center', 'error']}, dims=['aggregation'])
     super(Imagenet2012, self).__init__(identifier='fei-fei.Deng2009-top1', version=1,
                                        ceiling_func=lambda: ceiling,
                                        parent='ImageNet',
                                        bibtex="""@INPROCEEDINGS{5206848,  
                                             author={J. {Deng} and W. {Dong} and R. {Socher} and L. {Li} and  {Kai Li} and  {Li Fei-Fei}},  
                                             booktitle={2009 IEEE Conference on Computer Vision and Pattern Recognition},   
                                             title={ImageNet: A large-scale hierarchical image database},   
                                             year={2009},  
                                             volume={},  
                                             number={},  
                                             pages={248-255},
                                             url = {https://ieeexplore.ieee.org/document/5206848}
                                         }""")
 def test_creates_synset(self, model_ctr):
     """Run the imagenet labeling task on two images and check the resulting
     behavioral assembly carries one WordNet synset id per image."""
     np.random.seed(0)
     activations_model = model_ctr()
     brain_model = ModelCommitment(
         identifier=activations_model.identifier,
         activations_model=activations_model,
         layers=None,
         behavioral_readout_layer='dummy')  # not needed
     stimuli = StimulusSet({
         'image_id': ['1', '2'],
         'filename': ['rgb1', 'rgb2']
     })
     directory = os.path.dirname(__file__)
     stimuli.image_paths = {
         '1': os.path.join(directory, 'rgb1.jpg'),
         '2': os.path.join(directory, 'rgb2.jpg')
     }
     stimuli.identifier = 'test_logits_behavior.creates_synset'
     brain_model.start_task(BrainModel.Task.label, 'imagenet')
     behavior = brain_model.look_at(stimuli)
     assert isinstance(behavior, BehavioralAssembly)
     assert set(behavior['image_id'].values) == {'1', '2'}
     assert len(behavior['synset']) == 2
     # WordNet noun synset ids are of the form 'n01234567'
     assert behavior['synset'].values[0].startswith('n')