# Imports needed by this helper (assumed: older brainio releases expose these classes from
# `brainio_base`, newer ones from the `brainio` package; the test snippets further below
# additionally rely on brain-model classes whose imports the source omits).
import os

import numpy as np
from brainio_base.assemblies import NeuroidAssembly
from brainio_base.stimuli import StimulusSet


def get_assembly():
    image_names = []
    for i in range(1, 21):
        image_names.append(f'images/{i}.png')
    assembly = NeuroidAssembly(
        (np.arange(40 * 5) + np.random.standard_normal(40 * 5)).reshape(
            (5, 40, 1)),
        coords={
            'image_id': ('presentation', image_names * 2),
            'object_name': ('presentation', ['a'] * 40),
            'repetition': ('presentation', ([1] * 20 + [2] * 20)),
            'neuroid_id': ('neuroid', np.arange(5)),
            'region': ('neuroid', ['IT'] * 5),
            'time_bin_start': ('time_bin', [70]),
            'time_bin_end': ('time_bin', [170])
        },
        dims=['neuroid', 'presentation', 'time_bin'])
    labels = ['a'] * 10 + ['b'] * 10
    stimulus_set = StimulusSet([{
        'image_id': image_names[i],
        'object_name': 'a',
        'image_label': labels[i]
    } for i in range(20)])
    stimulus_set.image_paths = {
        image_name: os.path.join(os.path.dirname(__file__), image_name)
        for image_name in image_names
    }
    stimulus_set.identifier = 'test'
    assembly.attrs['stimulus_set'] = stimulus_set
    assembly.attrs['stimulus_set_name'] = stimulus_set.identifier
    assembly = assembly.squeeze("time_bin")
    return assembly.transpose('presentation', 'neuroid')
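
# A minimal usage sketch: the synthetic assembly built above covers 20 images x 2 repetitions
# = 40 presentations of 5 'IT' neuroids (the checks below only restate what get_assembly constructs).
assembly = get_assembly()
assert assembly.dims == ('presentation', 'neuroid')
assert assembly.shape == (40, 5)
assert set(assembly['region'].values) == {'IT'}
assert len(set(assembly['image_id'].values)) == 20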
    def test_creates_probabilities(self):
        activations_model = pytorch_custom()
        brain_model = ModelCommitment(identifier=activations_model.identifier,
                                      activations_model=activations_model,
                                      layers=None,
                                      behavioral_readout_layer='relu2')
        fitting_stimuli = StimulusSet({
            'image_id': ['rgb1', 'rgb2'],
            'image_label': ['label1', 'label2']
        })
        fitting_stimuli.image_paths = {
            'rgb1': os.path.join(os.path.dirname(__file__), 'rgb1.jpg'),
            'rgb2': os.path.join(os.path.dirname(__file__), 'rgb2.jpg')
        }
        fitting_stimuli.identifier = 'test_probabilities_mapping.creates_probabilities'
        fitting_stimuli = place_on_screen(
            fitting_stimuli,
            target_visual_degrees=brain_model.visual_degrees(),
            source_visual_degrees=8)
        brain_model.start_task(BrainModel.Task.probabilities, fitting_stimuli)
        probabilities = brain_model.look_at(fitting_stimuli)
        np.testing.assert_array_equal(probabilities.dims,
                                      ['presentation', 'choice'])
        np.testing.assert_array_equal(probabilities.shape, [2, 2])
        np.testing.assert_array_almost_equal(
            probabilities.sel(image_id='rgb1', choice='label1').values,
            probabilities.sel(image_id='rgb2', choice='label2').values)
        assert probabilities.sel(image_id='rgb1', choice='label1') + \
               probabilities.sel(image_id='rgb1', choice='label2') == approx(1)
Example #3
def _place_on_screen(stimuli_identifier: str,
                     stimulus_set: StimulusSet,
                     target_visual_degrees: int,
                     source_visual_degrees: int = None):
    converted_stimuli_id = f"{stimuli_identifier}--target{target_visual_degrees}--source{source_visual_degrees}"
    source_visual_degrees = _determine_visual_degrees(source_visual_degrees,
                                                      stimulus_set)

    target_dir = root_path / converted_stimuli_id
    target_dir.mkdir(parents=True, exist_ok=False)
    image_converter = ImageConverter(target_dir=target_dir)

    converted_image_paths = {}
    for image_id, image_degrees in tqdm(zip(stimulus_set['image_id'],
                                            source_visual_degrees),
                                        total=len(stimulus_set),
                                        desc='convert image degrees'):
        converted_image_path = image_converter.convert_image(
            image_path=stimulus_set.get_image(image_id),
            source_degrees=image_degrees,
            target_degrees=target_visual_degrees)
        converted_image_paths[image_id] = converted_image_path
    # deep-copy so the converted set does not alias the original stimulus set
    converted_stimuli = StimulusSet(stimulus_set.copy(deep=True))
    converted_stimuli.image_paths = converted_image_paths
    converted_stimuli.identifier = converted_stimuli_id
    converted_stimuli['degrees'] = target_visual_degrees
    converted_stimuli.original_paths = copy.deepcopy(stimulus_set.image_paths)
    return converted_stimuli
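
# A minimal sketch of the identifier scheme used by _place_on_screen (pure string formatting,
# no image conversion; the concrete values below are illustrative assumptions):
stimuli_identifier, target_degrees, source_degrees = 'demo_stimuli', 8, 4
converted_stimuli_id = f"{stimuli_identifier}--target{target_degrees}--source{source_degrees}"
assert converted_stimuli_id == 'demo_stimuli--target8--source4'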
def load_stim_info(stim_name, data_dir):
    stim = pd.read_csv(os.path.join(data_dir, 'stimulus_set'), dtype={'image_id': str})
    image_paths = {image_id: os.path.join(data_dir, image_file_name)
                   for image_id, image_file_name
                   in zip(stim['image_id'].values, stim['image_file_name'].values)}
    stim_set = StimulusSet(stim[stim.columns[:-1]])
    stim_set.image_paths = image_paths
    stim_set.identifier = stim_name

    return stim_set
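
# A minimal sketch of the input load_stim_info expects: a CSV named 'stimulus_set' inside
# data_dir with 'image_id' and 'image_file_name' columns (the ids and file names below are
# illustrative assumptions; no image files need to exist for this check).
import tempfile

import pandas as pd

with tempfile.TemporaryDirectory() as data_dir:
    pd.DataFrame({
        'image_id': ['0001', '0002'],
        'image_file_name': ['0001.png', '0002.png'],
    }).to_csv(os.path.join(data_dir, 'stimulus_set'), index=False)
    stim_set = load_stim_info('demo_stimuli', data_dir)
    assert list(stim_set['image_id']) == ['0001', '0002']
    assert stim_set.image_paths['0001'].endswith('0001.png')
    assert stim_set.identifier == 'demo_stimuli'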
Example #5
    def test_commit(self, model_ctr, layers, region):
        activations_model = model_ctr()
        layer_model = LayerMappedModel(identifier=activations_model.identifier,
                                       activations_model=activations_model,
                                       region_layer_map={region: layers})

        layer_model.start_recording(region)
        stimulus_set = StimulusSet([{'image_id': 'test'}])
        stimulus_set.image_paths = {
            'test': os.path.join(os.path.dirname(__file__), 'rgb1.jpg')
        }
        stimulus_set.identifier = self.__class__.__name__
        predictions = layer_model.look_at(stimulus_set)
        assert set(predictions['region'].values) == {region}
        expected_layers = {layers} if isinstance(layers, str) else set(layers)
        assert set(predictions['layer'].values) == expected_layers
def _dummy_stimulus_set():
    stimulus_set = StimulusSet([
        {'image_id': 'a'},
        {'image_id': 'b'},
        {'image_id': 'c'},
    ])
    stimulus_set.image_paths = {
        'a': 'a.png',
        'b': 'b.png',
        'c': 'c.png',
    }
    stimulus_set.identifier = 'dummy'
    return stimulus_set
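
# A minimal sketch of what the dummy set provides: stable ids, paths, and an identifier,
# without requiring real image files on disk.
dummy_stimuli = _dummy_stimulus_set()
assert list(dummy_stimuli['image_id']) == ['a', 'b', 'c']
assert dummy_stimuli.image_paths['b'] == 'b.png'
assert dummy_stimuli.identifier == 'dummy'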
    def test_creates_synset(self, model_ctr):
        np.random.seed(0)
        activations_model = model_ctr()
        brain_model = ModelCommitment(
            identifier=activations_model.identifier,
            activations_model=activations_model,
            layers=None,
            behavioral_readout_layer='dummy')  # readout layer not needed for the label task
        stimuli = StimulusSet({
            'image_id': ['1', '2'],
            'filename': ['rgb1', 'rgb2']
        })
        stimuli.image_paths = {
            '1': os.path.join(os.path.dirname(__file__), 'rgb1.jpg'),
            '2': os.path.join(os.path.dirname(__file__), 'rgb2.jpg')
        }
        stimuli.identifier = 'test_logits_behavior.creates_synset'
        brain_model.start_task(BrainModel.Task.label, 'imagenet')
        behavior = brain_model.look_at(stimuli)
        assert isinstance(behavior, BehavioralAssembly)
        assert set(behavior['image_id'].values) == {'1', '2'}
        assert len(behavior['synset']) == 2
        assert behavior['synset'].values[0].startswith('n')