Example #1
 def __call__(self, candidate: BrainModel):
     fitting_stimuli = place_on_screen(
         self._fitting_stimuli,
         target_visual_degrees=candidate.visual_degrees(),
         source_visual_degrees=self._visual_degrees)
     candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli)
     stimulus_set = place_on_screen(
         self._assembly.stimulus_set,
         target_visual_degrees=candidate.visual_degrees(),
         source_visual_degrees=self._visual_degrees)
     probabilities = candidate.look_at(stimulus_set)
     score = self._metric(probabilities, self._assembly)
     score = self.ceil_score(score, self.ceiling)
     return score
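
Examples #1, #2, and #5 all finish by normalizing the raw metric score against a ceiling. As a rough illustration of that step (a minimal sketch assuming a scalar ceiling and a score carried as an xarray DataArray with center/error aggregations; the real ceil_score helper may differ in detail), dividing by the ceiling maps a candidate that matches the ceiling to a score near 1:

import xarray as xr

def ceil_score_sketch(raw_score: xr.DataArray, ceiling: float) -> xr.DataArray:
    # Illustrative only: express the raw score relative to the ceiling so that
    # a candidate matching the ceiling scores ~1. The actual helper may differ.
    return raw_score / ceiling

raw = xr.DataArray([0.42, 0.03],
                   coords={'aggregation': ['center', 'error']},
                   dims=['aggregation'])
print(ceil_score_sketch(raw, ceiling=0.6))  # center = 0.7, error = 0.05
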
Example #2
 def __call__(self, candidate: BrainModel):
     candidate.start_recording('IT', time_bins=self._time_bins)
     stimulus_set = place_on_screen(
         self._assembly.stimulus_set,
         target_visual_degrees=candidate.visual_degrees(),
         source_visual_degrees=self._visual_degrees)
     # Temporal recordings from large candidates take up a lot of memory and compute time.
     # In order to quickly reject recordings that are static over time,
     # we will show one image and check whether the recordings vary over time at all or not.
     # If they don't, we can quickly score the candidate with a failure state,
     # since it will not be able to predict temporal differences with the OST metric.
     check_stimulus_set = stimulus_set[:1]
     check_stimulus_set.identifier = None  # unset identifier to avoid storing (interferes with actual stimulus_set)
     check_recordings = candidate.look_at(
         check_stimulus_set, number_of_trials=self._number_of_trials)
     if not temporally_varying(check_recordings):
         score = Score([np.nan, np.nan],
                       coords={'aggregation': ['center', 'error']},
                       dims=['aggregation'])
     else:
         recordings = candidate.look_at(
             stimulus_set, number_of_trials=self._number_of_trials)
         score = self._similarity_metric(recordings, self._assembly)
     score = ceil_score(score, self.ceiling)
     return score
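
The temporally_varying check above is what lets the benchmark bail out early on static candidates. A minimal sketch of what such a check could look like (the function body here is an assumption, not the actual Brain-Score implementation): recordings count as temporally varying if any neuroid's response changes across time bins.

import numpy as np
import xarray as xr

def temporally_varying_sketch(recordings: xr.DataArray, atol: float = 1e-6) -> bool:
    # Hypothetical check: any variation along the time_bin dimension counts.
    return bool((recordings.std(dim='time_bin') > atol).any())

# toy recordings: 1 presentation x 3 neuroids x 4 time bins, constant over time
static = xr.DataArray(np.ones((1, 3, 4)),
                      dims=['presentation', 'neuroid', 'time_bin'])
print(temporally_varying_sketch(static))  # False -> benchmark would return the NaN score
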
Example #3
 def __call__(self, candidate: BrainModel):
     candidate.start_recording(self.region, time_bins=self.timebins)
     stimulus_set = place_on_screen(
         self._assembly.stimulus_set,
         target_visual_degrees=candidate.visual_degrees(),
         source_visual_degrees=self._visual_degrees)
     source_assembly = candidate.look_at(stimulus_set)
     if 'time_bin' in source_assembly.dims:
         source_assembly = source_assembly.squeeze(
             'time_bin')  # static case for these benchmarks
     raw_score = self._similarity_metric(source_assembly, self._assembly)
     return explained_variance(raw_score, self.ceiling)
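
Example #3 converts the raw similarity into explained variance relative to the ceiling. One common convention, shown here only as a sketch (the actual explained_variance helper may differ), is to normalize the raw correlation by the ceiling correlation and square the result so it reads as a fraction of explainable variance:

def explained_variance_sketch(raw_correlation: float, ceiling_correlation: float) -> float:
    # Illustrative convention only: square the ceiling-normalized correlation.
    return (raw_correlation / ceiling_correlation) ** 2

print(explained_variance_sketch(0.45, 0.6))  # 0.5625
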
Example #4
def record_from_model(model: BrainModel, stimulus_identifier,
                      number_of_trials):
    stimulus_set = get_stimulus_set(stimulus_identifier)
    stimulus_set = place_on_screen(
        stimulus_set, target_visual_degrees=model.visual_degrees())
    activations = model.look_at(stimulus_set, number_of_trials)
    if 'time_bin' in activations.dims:
        activations = activations.squeeze(
            'time_bin')  # static case for these benchmarks
    if not activations.values.flags['WRITEABLE']:
        activations.values.setflags(write=1)
    return activations
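
The WRITEABLE check at the end of record_from_model guards against activations that come back as read-only arrays (e.g. when they are backed by a cache or memory-mapped store). The numpy behavior it relies on can be reproduced in isolation:

import numpy as np

activations = np.arange(4.0)
activations.setflags(write=False)        # simulate a read-only result
print(activations.flags['WRITEABLE'])    # False
activations.setflags(write=1)            # same call as in record_from_model
activations[0] = 42.0                    # in-place edits are now allowed
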
Example #5
 def __call__(self, candidate: BrainModel):
     time_bins = [(time_bin_start, time_bin_start + 10)
                  for time_bin_start in range(70, 250, 10)]
     candidate.start_recording('IT', time_bins=time_bins)
     stimulus_set = place_on_screen(
         self._assembly.stimulus_set,
         target_visual_degrees=candidate.visual_degrees(),
         source_visual_degrees=self._visual_degrees)
     recordings = candidate.look_at(stimulus_set,
                                    number_of_trials=self._number_of_trials)
     score = self._similarity_metric(recordings, self._assembly)
     score = ceil_score(score, self.ceiling)
     return score
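
For reference, the time_bins comprehension in Example #5 produces eighteen 10 ms bins spanning 70-250 ms after stimulus onset:

time_bins = [(start, start + 10) for start in range(70, 250, 10)]
print(len(time_bins))                # 18
print(time_bins[0], time_bins[-1])   # (70, 80) (240, 250)
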