Example 1
def load(model_name=model_name, layer=layer, region=region, pca_components=pca_components):
    activations_model = base_model_pool[model_name]
    if pca_components:
        # reduce each layer's activations to the requested number of principal components
        LayerPCA.hook(activations_model, n_components=pca_components)
        activations_model.identifier += "-pca_1000"
    model = LayerMappedModel(f"{model_name}-{layer}", activations_model=activations_model, visual_degrees=8)
    model.commit(region, layer)  # map the cortical region onto the chosen layer
    model = TemporalIgnore(model)  # disregard requested time bins: the wrapped model has no temporal component
    return model
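The committed model returned by load follows the brain-score BrainModel interface, so a caller would typically start recording from the mapped region and then present stimuli, roughly as in the sketch below (illustrative only; the 'IT' region, the time bin and stimulus_set are assumptions, mirroring Examples 2 and 5):

    model = load()
    model.start_recording('IT', time_bins=[(70, 170)])  # region and time bin chosen for illustration
    predictions = model.look_at(stimulus_set)  # stimulus_set: a brainio StimulusSet, built e.g. as in Example 2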
Example 2
    def test_commit(self, model_ctr, layers, region):
        activations_model = model_ctr()
        layer_model = LayerMappedModel(identifier=activations_model.identifier, activations_model=activations_model,
                                       region_layer_map={region: layers})

        layer_model.start_recording(region)
        stimulus_set = StimulusSet([{'image_id': 'test'}])
        stimulus_set.image_paths = {'test': os.path.join(os.path.dirname(__file__), 'rgb1.jpg')}
        stimulus_set.identifier = self.__class__.__name__
        predictions = layer_model.look_at(stimulus_set)
        assert set(predictions['region'].values) == {region}
        # the layer coordinate must contain exactly the committed layer(s)
        assert set(predictions['layer'].values) == ({layers} if isinstance(layers, str) else set(layers))
Example 3
    def test_newmodel_pytorch(self):
        import numpy as np  # used below to compute the linear layer's input size
        import torch
        from torch import nn
        from model_tools.activations.pytorch import load_preprocess_images
        # functools, PytorchWrapper, score_model and approx are imported elsewhere in the original test module

        class MyModel(nn.Module):
            def __init__(self):
                super(MyModel, self).__init__()
                self.conv1 = torch.nn.Conv2d(in_channels=3,
                                             out_channels=2,
                                             kernel_size=3)
                self.relu1 = torch.nn.ReLU()
                # conv output: ((224 - 3 + 2 * 0) / 1 + 1)^2 spatial positions x 2 channels
                linear_input_size = np.power((224 - 3 + 2 * 0) / 1 + 1, 2) * 2
                self.linear = torch.nn.Linear(int(linear_input_size), 1000)
                self.relu2 = torch.nn.ReLU()  # can't get named ReLU output otherwise

                # init weights for reproducibility
                self.conv1.weight.data.fill_(0.01)
                self.conv1.bias.data.fill_(0.01)
                self.linear.weight.data.fill_(0.01)
                self.linear.bias.data.fill_(0.01)

            def forward(self, x):
                x = self.conv1(x)
                x = self.relu1(x)
                x = x.view(x.size(0), -1)
                x = self.linear(x)
                x = self.relu2(x)
                return x

        preprocessing = functools.partial(load_preprocess_images,
                                          image_size=224)
        model_id = 'new_pytorch'
        activations_model = PytorchWrapper(model=MyModel(),
                                           preprocessing=preprocessing,
                                           identifier=model_id)
        layer = 'relu2'
        candidate = LayerMappedModel(f"{model_id}-{layer}",
                                     activations_model=activations_model,
                                     visual_degrees=8)
        candidate.commit('IT', layer)
        candidate = TemporalIgnore(candidate)

        ceiled_score = score_model(
            model_identifier=model_id,
            model=candidate,
            benchmark_identifier='dicarlo.MajajHong2015.IT-pls')
        score = ceiled_score.raw
        assert score.sel(aggregation='center') == approx(.0820823, abs=.01)
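The assertion above checks the raw, unceiled score; the ceiled aggregate returned by score_model can be read with the same xarray access pattern (a short sketch using only the objects defined above):

    ceiled_center = ceiled_score.sel(aggregation='center')   # ceiled aggregate score
    raw_center = ceiled_score.raw.sel(aggregation='center')  # unceiled score, as asserted above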
Example 4
    def build_layer_model(self, identifier, model, benchmark, layer):
        layer_model = LayerMappedModel(
            identifier=identifier,
            activations_model=model,
            region_layer_map={benchmark.region: layer})  # the benchmark declares which cortical region maps onto this layer
        layer_model = TemporalIgnore(layer_model)  # disregard time bins: no temporal component
        return layer_model
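A call site for build_layer_model could look like the sketch below (hypothetical; the names model_identifier, activations_model, benchmark and layer are assumed, mirroring Example 5):

    layer_model = self.build_layer_model(
        identifier=f"{model_identifier}-{layer}",
        model=activations_model, benchmark=benchmark, layer=layer)
    layer_model.start_recording(benchmark.region, time_bins=benchmark.timebins)
    predictions = layer_model.look_at(benchmark._assembly.stimulus_set)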
Example 5
    def _call(
            self,
            model_identifier,
            benchmark_identifier,  # storage fields
            model,
            benchmark,
            layers,
            prerun=False):
        if prerun:
            # pre-run activations together to avoid running every layer separately
            model(layers=layers, stimuli=benchmark._assembly.stimulus_set)

        for layer in tqdm(layers, desc="layers"):
            layer_model = LayerMappedModel(
                identifier=f"{model_identifier}-{layer}",
                activations_model=model,
                region_layer_map={benchmark.region: layer})
            layer_model = TemporalIgnore(layer_model)
            layer_model.start_recording(benchmark.region,
                                        time_bins=benchmark.timebins)
            layer_model.look_at(benchmark._assembly.stimulus_set)
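Example 5 stops after look_at; in a layer-selection workflow each mapped layer would then be scored against the benchmark and the best layer picked, roughly as below (a sketch that assumes brain-score benchmarks are callable on a BrainModel candidate and return a Score with an aggregation dimension):

    scores = {}
    for layer in layers:
        layer_model = TemporalIgnore(LayerMappedModel(
            identifier=f"{model_identifier}-{layer}",
            activations_model=model,
            region_layer_map={benchmark.region: layer}))
        scores[layer] = benchmark(layer_model)  # assumed callable-benchmark interface
    best_layer = max(scores, key=lambda l: scores[l].sel(aggregation='center'))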