예제 #1
0
def load_voxels(bold_shift_seconds=4):
    """Load voxel recordings aligned to the naturalistic-stories stimulus set.

    :param bold_shift_seconds: BOLD lag in seconds, forwarded to `load_voxel_data`
    :return: a DataAssembly with its aligned stimulus_set attached in `attrs`
    """
    raw = load_voxel_data(bold_shift_seconds=bold_shift_seconds)
    assembly = DataAssembly(raw)
    stimulus_set = NaturalisticStories()()
    stimulus_set, assembly = _align_stimuli_recordings(stimulus_set, assembly)
    # every recorded sentence must exist in the (aligned) stimulus set
    recorded_sentences = set(assembly['stimulus_sentence'].values)
    assert recorded_sentences.issubset(set(stimulus_set['sentence']))
    assembly.attrs['stimulus_set'] = stimulus_set
    assembly.attrs['stimulus_set_name'] = stimulus_set.name
    return assembly
예제 #2
0
    def extrapolate_neuroid(self, ceilings):
        """Extrapolate a neuroid's ceiling over the number of subjects.

        Bootstraps mean scores per subject-subset size, fits `self.fit` per
        bootstrap to obtain (v0, tau0) parameters, then aggregates the median
        asymptotic value and its confidence interval into a Score.
        """
        # figure out how many extrapolation x points we have. E.g. for Pereira, not all combinations are possible
        subject_subsamples = list(sorted(set(ceilings['num_subjects'].values)))
        rng = RandomState(0)  # fixed seed for reproducible bootstraps
        bootstrap_params = []
        for bootstrap in range(self.num_bootstraps):
            bootstrapped_scores = []
            for num_subjects in subject_subsamples:
                num_scores = ceilings.sel(num_subjects=num_subjects)
                # the sub_subjects dimension creates nans, get rid of those
                num_scores = num_scores.dropna(f'sub_{self.subject_column}')
                assert set(num_scores.dims) == {f'sub_{self.subject_column}', 'split'} or \
                       set(num_scores.dims) == {f'sub_{self.subject_column}'}
                # choose from subject subsets and the splits therein, with replacement for variance
                choices = num_scores.values.flatten()
                bootstrapped_score = rng.choice(choices,
                                                size=len(choices),
                                                replace=True)
                bootstrapped_scores.append(np.mean(bootstrapped_score))

            try:
                params = self.fit(subject_subsamples, bootstrapped_scores)
            except RuntimeError:  # optimal parameters not found
                params = [np.nan, np.nan]
            # one (v0, tau0) parameter pair per bootstrap
            params = DataAssembly([params],
                                  coords={
                                      'bootstrap': [bootstrap],
                                      'param': ['v0', 'tau0']
                                  },
                                  dims=['bootstrap', 'param'])
            bootstrap_params.append(params)
        bootstrap_params = merge_data_arrays(bootstrap_params)
        # find endpoint and error
        asymptote_threshold = .0005
        interpolation_xs = np.arange(1000)
        # evaluate the fitted curve v(x) for every bootstrap with valid params
        ys = np.array([
            v(interpolation_xs, *params) for params in bootstrap_params.values
            if not np.isnan(params).any()
        ])
        median_ys = np.median(ys, axis=0)
        diffs = np.diff(median_ys)
        end_x = np.where(diffs < asymptote_threshold)[0].min(
        )  # first x where increase smaller than threshold
        # put together
        center = np.median(np.array(bootstrap_params)[:, 0])  # median v0 across bootstraps
        error = ci_error(ys[:, end_x], center=center)
        score = Score(
            [center] + list(error),
            coords={'aggregation': ['center', 'error_low', 'error_high']},
            dims=['aggregation'])
        score.attrs['raw'] = ceilings
        score.attrs['bootstrapped_params'] = bootstrap_params
        score.attrs['endpoint_x'] = DataAssembly(end_x)
        return score
예제 #3
0
def load_rdm_sentences(story='Boar',
                       roi_filter='from90to100',
                       bold_shift_seconds=4):
    """Load per-sentence RDMs for a story by mapping timepoint RDMs onto sentences.

    :param story: name of the naturalistic story to load
    :param roi_filter: ROI selection identifier forwarded to `load_rdm_timepoints`
    :param bold_shift_seconds: BOLD lag used to pick the timepoint-mapping column
    :return: a DataAssembly with timepoint dims re-interpreted as 'presentation'
    """
    timepoint_rdms = load_rdm_timepoints(story, roi_filter)
    meta_data = load_sentences_meta(story)
    del meta_data['fullSentence']
    meta_data.dropna(inplace=True)
    mapping_column = 'shiftBOLD_{}sec'.format(bold_shift_seconds)
    timepoints = meta_data[mapping_column].values.astype(int)
    # filter and annotate
    assert all(timepoint in timepoint_rdms['timepoint_left'].values
               for timepoint in timepoints)
    timepoint_rdms = timepoint_rdms.sel(timepoint_left=timepoints,
                                        timepoint_right=timepoints)
    # re-interpret timepoints as stimuli
    coords = {}
    for coord_name, coord_value in timepoint_rdms.coords.items():
        dims = timepoint_rdms.coords[coord_name].dims
        # rename any timepoint* dimension to 'presentation'
        dims = [
            dim if not dim.startswith('timepoint') else 'presentation'
            for dim in dims
        ]
        coords[coord_name] = dims, coord_value.values
    # attach the sentence text as an additional presentation-level coordinate
    coords = {
        **coords,
        **{
            'stimulus_sentence': ('presentation', meta_data['reducedSentence'].values)
        }
    }
    dims = [
        dim if not dim.startswith('timepoint') else 'presentation'
        for dim in timepoint_rdms.dims
    ]
    data = DataAssembly(timepoint_rdms, coords=coords, dims=dims)
    return data
예제 #4
0
def expand(assembly, target_dims):
    """Expand `assembly` so that every dimension in `target_dims` is present,
    broadcasting existing values along newly-added (formerly scalar) dimensions.

    Coordinates named `<dim>_source`/`<dim>_target` are matched to `<dim>`.
    """
    def strip(coord):
        # drop a trailing '_source'/'_target' suffix to recover the base dim name
        stripped_coord = coord
        if stripped_coord.endswith('_source'):
            stripped_coord = stripped_coord[:-len('_source')]
        if stripped_coord.endswith('_target'):
            stripped_coord = stripped_coord[:-len('_target')]
        return stripped_coord

    def reformat_coord_values(coord, dims, values):
        # promote a scalar coord that names a target dim into a 1-element dimension
        stripped_coord = strip(coord)

        if stripped_coord in target_dims and len(values.shape) == 0:
            values = np.array([values])
            dims = [coord]
        return dims, values

    coords = {
        coord: reformat_coord_values(coord, values.dims, values.values)
        for coord, values in assembly.coords.items()
    }
    # shapes of all dimensions of the expanded assembly, in coord-insertion order
    dim_shapes = OrderedDict((coord, values[1].shape)
                             for coord, values in coords.items()
                             if strip(coord) in target_dims)
    shape = [_shape for shape in dim_shapes.values() for _shape in shape]
    # prepare values for broadcasting by adding new dimensions
    # NOTE(review): new axes are always inserted at position 1; this assumes the
    # resulting shape lines up with `shape` for broadcasting — confirm for the
    # case of multiple new dims or new dims that precede existing ones
    values = assembly.values
    for _ in range(sum([dim not in assembly.dims for dim in dim_shapes])):
        values = values[:, np.newaxis]
    values = np.broadcast_to(values, shape)
    return DataAssembly(values, coords=coords, dims=list(dim_shapes.keys()))
예제 #5
0
 def test_alignment(self):
     """RSA on two identical and two anti-correlated presentation pairs."""
     presentation_coords = {
         'image_id': ('presentation', list(range(4))),
         'image_meta': ('presentation', list(range(4))),
     }
     assembly = NeuroidAssembly(
         [[1, 2], [1, 2], [4, 3], [4, 3]],
         coords={
             **presentation_coords,
             'neuroid_id': ('neuroid', list(range(2))),
             'neuroid_meta': ('neuroid', list(range(2))),
         },
         dims=['presentation', 'neuroid'])
     matrix = RSA()(assembly)
     # self-similarity must be ~1 along the diagonal
     assert np.all(np.diag(matrix) == approx(1., abs=.001))
     upper = matrix.values[np.triu_indices(matrix.shape[0], k=1)]
     lower = matrix.values[np.tril_indices(matrix.shape[0], k=-1)]
     assert all(upper == lower), "upper and lower triangular need to be equal"
     expected = DataAssembly(
         [[1., 1., -1., -1.], [1., 1., -1., -1.], [-1., -1., 1., 1.],
          [-1., -1., 1., 1.]],
         coords=presentation_coords,
         dims=['presentation', 'presentation'])
     np.testing.assert_array_almost_equal(
         matrix.values,
         expected.values)  # does not take ordering into account
예제 #6
0
def test_random_time():
    """A random-feature, non-temporal model should score nan on the OST benchmark
    while the benchmark's own ceiling stays at the expected value."""
    benchmark = DicarloKar2019OST()
    rnd = RandomState(0)  # deterministic random features
    stimuli = benchmark._assembly.stimulus_set
    # random (presentation, neuroid, time_bin) features over 5 fake neuroids
    source = DataAssembly(rnd.rand(len(stimuli), 5, 5), coords={
        'image_id': ('presentation', stimuli['image_id']),
        'image_label': ('presentation', stimuli['image_label']),
        'truth': ('presentation', stimuli['truth']),
        'neuroid_id': ('neuroid', list(range(5))),
        'layer': ('neuroid', ['test'] * 5),
        'time_bin_start': ('time_bin', [70, 90, 110, 130, 150]),
        'time_bin_end': ('time_bin', [90, 110, 130, 150, 170]),
    }, dims=['presentation', 'neuroid', 'time_bin'])
    # NOTE(review): the name says "test_notime" although this function is
    # test_random_time — looks like a copy-paste; confirm whether this
    # identifier is used as a cache key before changing it
    source.name = __name__ + ".test_notime"
    score = benchmark(PrecomputedFeatures(source, visual_degrees=8))
    assert np.isnan(score.sel(aggregation='center'))  # not a temporal model
    assert np.isnan(score.raw.sel(aggregation='center'))  # not a temporal model
    assert score.attrs['ceiling'].sel(aggregation='center') == approx(.79)
예제 #7
0
 def __call__(self, train_source, train_target, test_source,
              test_target):
     """Record the four assemblies of one split and return a dummy score.

     Source/target image_ids must match within the train and test pairs.
     """
     for source, target in ((train_source, train_target),
                            (test_source, test_target)):
         assert sorted(source['image_id'].values) == sorted(
             target['image_id'].values)
     self.train_source_assemblies.append(train_source)
     self.train_target_assemblies.append(train_target)
     self.test_source_assemblies.append(test_source)
     self.test_target_assemblies.append(test_target)
     return DataAssembly(0)
예제 #8
0
def firing_rates_affine(model_identifier, model: BrainModel, region):
    """Compute an affine transformation [slope, offset] mapping model
    activations to firing rates, calibrated from blank-screen and grating
    responses against region-specific empirical medians.

    :param model_identifier: identifier used by the receptive-field filter
    :param model: the BrainModel to record from
    :param region: key into the RESP_THRESH / MEDIAN_* constant tables
    :return: DataAssembly holding [slope, offset]
    """
    blank_activations = record_from_model(model, BLANK_STIM_NAME,
                                          ORIENTATION_NUMBER_OF_TRIALS)
    orientation_activations = record_from_model(model, ORIENTATION_STIM_NAME,
                                                ORIENTATION_NUMBER_OF_TRIALS)

    # rectify: negative activations are treated as no response
    blank_activations = blank_activations.values
    blank_activations[blank_activations < 0] = 0

    _assert_grating_activations(orientation_activations)

    stim_pos = get_stimulus_position(orientation_activations)

    # keep only neuroids whose receptive field covers the stimulus position
    in_rf = filter_receptive_fields(model_identifier=model_identifier,
                                    model=model,
                                    region=region,
                                    pos=stim_pos)
    n_neuroids = len(in_rf)

    spatial_frequency = sorted(
        set(orientation_activations.spatial_frequency.values))
    orientation = sorted(set(orientation_activations.orientation.values))
    phase = sorted(set(orientation_activations.phase.values))
    nStim = orientation_activations.values.shape[1]
    n_cycles = nStim // (len(phase) * len(orientation) *
                         len(spatial_frequency))

    # rectify the grating activations as well
    orientation_activations = orientation_activations.values
    orientation_activations[orientation_activations < 0] = 0

    blank_activations = blank_activations[in_rf]
    orientation_activations = orientation_activations[in_rf]
    # reshape to (neuroid, cycle, spatial_frequency, orientation, phase);
    # assumes the stimuli are ordered that way — TODO confirm with stimulus set
    orientation_activations = orientation_activations.reshape(
        (n_neuroids, n_cycles, len(spatial_frequency), len(orientation),
         len(phase)))
    # phase-average, then each neuroid's maximum response over all gratings
    orientation_activations = orientation_activations.mean(axis=4).reshape(
        (n_neuroids, -1)).max(axis=1)

    # responsive: baseline-subtracted response exceeding a region-specific
    # fraction of the strongest baseline-subtracted response
    responsive_neurons = (orientation_activations - blank_activations[:, 0]) >  \
                         (RESP_THRESH[region] / SINGLE_MAX_RESP[region]) * \
                         np.max(orientation_activations - blank_activations[:, 0])

    median_baseline = np.median(blank_activations[responsive_neurons])
    median_activations = np.median(orientation_activations[responsive_neurons])

    # solve slope/offset so the model medians map onto the empirical medians
    slope = (MEDIAN_MAX_RESP[region] - MEDIAN_SPONTANEOUS[region]) / \
            (median_activations - median_baseline)
    offset = MEDIAN_SPONTANEOUS[region] - slope * median_baseline

    affine_transformation = np.array([slope, offset])
    affine_transformation = DataAssembly(affine_transformation)

    return affine_transformation
def schiller1976_properties(model_identifier, responses, baseline):
    """Compute Schiller et al. 1976 spatial-frequency properties
    (selectivity flag and bandwidth) per neuroid from grating responses.

    :param responses: grating responses, shape (neuroid, stimulus)
    :param baseline: blank-screen response per neuroid
    :return: DataAssembly with dims (neuroid, neuronal_property)
    """
    _assert_grating_activations(responses)
    radius = np.array(sorted(set(responses.radius.values)))
    spatial_frequency = np.array(
        sorted(set(responses.spatial_frequency.values)))
    orientation = np.array(sorted(set(responses.orientation.values)))
    phase = np.array(sorted(set(responses.phase.values)))

    responses = responses.values
    baseline = baseline.values
    assert responses.shape[0] == baseline.shape[0]
    n_neuroids = responses.shape[0]

    # (neuroid, radius, spatial_frequency, orientation, phase), then phase-average
    responses = responses.reshape(
        (n_neuroids, len(radius), len(spatial_frequency), len(orientation),
         len(phase)))
    responses = responses.mean(axis=4)

    max_response = responses.reshape((n_neuroids, -1)).max(axis=1,
                                                           keepdims=True)

    spatial_frequency_bandwidth = np.zeros((n_neuroids, 1))
    spatial_frequency_selective = np.ones((n_neuroids, 1))

    for neur in range(n_neuroids):
        # preferred stimulus = argmax over (radius, spatial_frequency, orientation)
        pref_radius, pref_spatial_frequency, pref_orientation = \
            np.unravel_index(np.argmax(responses[neur, :, :, :]),
                             (len(radius), len(spatial_frequency), len(orientation)))

        # tuning curve across spatial frequencies at preferred radius/orientation
        spatial_frequency_curve = responses[neur, pref_radius, :,
                                            pref_orientation]

        spatial_frequency_bandwidth[neur] = \
            calc_spatial_frequency_tuning(spatial_frequency_curve, spatial_frequency, thrsh=0.707, filt_type='smooth',
                                          mode='ratio')[0]

    # nan bandwidth marks neuroids that are not spatial-frequency selective
    spatial_frequency_selective[np.isnan(spatial_frequency_bandwidth)] = 0

    properties_data = np.concatenate(
        (spatial_frequency_selective, spatial_frequency_bandwidth), axis=1)

    # keep only neuroids responding above baseline by a threshold
    good_neuroids = max_response > baseline + RESPONSE_THRESHOLD
    properties_data = properties_data[np.argwhere(good_neuroids)[:, 0], :]

    properties_data = DataAssembly(
        properties_data,
        coords={
            'neuroid_id': ('neuroid', range(properties_data.shape[0])),
            'region': ('neuroid', ['V1'] * properties_data.shape[0]),
            'neuronal_property': PROPERTY_NAMES
        },
        dims=['neuroid', 'neuronal_property'])
    return properties_data
def devalois1982a_properties(model_identifier, responses, baseline):
    """Compute the De Valois et al. 1982(a) preferred-orientation property
    per neuroid from grating responses.

    :param responses: grating responses, shape (neuroid, stimulus)
    :param baseline: blank-screen response per neuroid
    :return: DataAssembly with dims (neuroid, neuronal_property)
    """
    _assert_grating_activations(responses)
    spatial_frequency = np.array(
        sorted(set(responses.spatial_frequency.values)))
    orientation = np.array(sorted(set(responses.orientation.values)))
    phase = np.array(sorted(set(responses.phase.values)))
    nStim = responses.values.shape[1]
    n_cycles = nStim // (len(phase) * len(orientation) *
                         len(spatial_frequency))

    responses = responses.values
    baseline = baseline.values
    assert responses.shape[0] == baseline.shape[0]
    n_neuroids = responses.shape[0]

    # (neuroid, cycle, spatial_frequency, orientation, phase), then phase-average
    responses = responses.reshape(
        (n_neuroids, n_cycles, len(spatial_frequency), len(orientation),
         len(phase)))
    responses = responses.mean(axis=4)

    preferred_orientation = np.zeros((n_neuroids, 1))
    max_response = responses.reshape((n_neuroids, -1)).max(axis=1,
                                                           keepdims=True)

    for neur in range(n_neuroids):
        pref_cycle, pref_spatial_frequency, pref_orientation = np.unravel_index(
            np.argmax(responses[neur]),
            (n_cycles, len(spatial_frequency), len(orientation)))

        # orientation tuning curve at the preferred cycle/spatial frequency
        orientation_curve = responses[neur, pref_cycle,
                                      pref_spatial_frequency, :]

        # index [1] is presumably the curve's preferred orientation — confirm
        # against calc_bandwidth's return contract
        preferred_orientation[neur] = \
            calc_bandwidth(orientation_curve, orientation, filt_type='smooth', thrsh=0.5, mode='full')[1]

    # wrap orientations at/above the bin limit down by 180 degrees
    preferred_orientation[preferred_orientation >= ORIENTATION_BIN_LIM] = \
        preferred_orientation[preferred_orientation >= ORIENTATION_BIN_LIM] - 180
    properties_data = preferred_orientation

    # keep only neuroids responding above baseline by a threshold
    good_neuroids = max_response > baseline + RESPONSE_THRESHOLD
    properties_data = properties_data[np.argwhere(good_neuroids)[:, 0], :]

    properties_data = DataAssembly(
        properties_data,
        coords={
            'neuroid_id': ('neuroid', range(properties_data.shape[0])),
            'region': ('neuroid', ['V1'] * properties_data.shape[0]),
            'neuronal_property': PROPERTY_NAMES
        },
        dims=['neuroid', 'neuronal_property'])
    return properties_data
예제 #11
0
파일: rdm.py 프로젝트: stothe2/brain-score
 def __call__(self, assembly):
     """Compute a correlation (RDM-style) matrix over the non-neuroid dimension.

     Returns a square DataAssembly whose both dimensions are the assembly's
     non-neuroid dimension; the neuroid dimension is correlated away.
     """
     assert len(assembly.dims) == 2
     # np.corrcoef correlates rows; transpose first when neuroid is not last
     correlations = np.corrcoef(assembly) if assembly.dims[
         -1] == self._neuroid_dim else np.corrcoef(assembly.T).T
     # drop the coordinate named like the neuroid dimension
     coords = {
         coord: coord_value
         for coord, coord_value in assembly.coords.items()
         if coord != self._neuroid_dim
     }
     # replace the neuroid dim with the other dim so the result is square
     dims = [
         dim if dim != self._neuroid_dim else
         assembly.dims[(i - 1) % len(assembly.dims)]
         for i, dim in enumerate(assembly.dims)
     ]
     return DataAssembly(correlations, coords=coords, dims=dims)
예제 #12
0
    def build_response_matrix_from_responses(self, responses):
        """Build a (presentation, choice) probability matrix from raw responses.

        Each cell is the fraction of trials on which `choice` was picked for
        `image_id`, normalized by how often that choice appeared as one of the
        two alternatives (sample or distractor); cells where the choice equals
        the image's true object are nan.
        """
        # count how often each (image, choice) pair was actually picked
        num_choices = [(image_id, choice) for image_id, choice in zip(
            responses['image_id'].values, responses.values)]
        num_choices = Counter(num_choices)
        # count how often each (image, object) appeared as sample or distractor
        num_objects = [[
            (image_id, sample_obj), (image_id, dist_obj)
        ] for image_id, sample_obj, dist_obj in zip(
            responses['image_id'].values, responses['sample_obj'].values,
            responses['dist_obj'].values)]
        num_objects = Counter(itertools.chain(*num_objects))

        choices = np.unique(responses)
        # deduplicate image_ids, remembering the first occurrence index of each
        image_ids, indices = np.unique(responses['image_id'],
                                       return_index=True)
        truths = responses['truth'].values[indices]
        image_dim = responses['image_id'].dims
        coords = {
            **{
                coord: (dims, value)
                for coord, dims, value in walk_coords(responses)
            },
            **{
                'choice': ('choice', choices)
            }
        }
        coords = {
            coord: (dims, value if dims != image_dim else value[indices]
                    )  # align image_dim coords with indices
            for coord, (dims, value) in coords.items()
        }
        response_matrix = np.zeros((len(image_ids), len(choices)))
        for (image_index,
             image_id), (choice_index,
                         choice) in itertools.product(enumerate(image_ids),
                                                      enumerate(choices)):
            if truths[image_index] == choice:  # object == choice, ignore
                p = np.nan
            else:
                # divide by number of times where object was one of the two choices (target or distractor)
                p = (num_choices[(image_id, choice)] / num_objects[(image_id, choice)]) \
                    if num_objects[(image_id, choice)] > 0 else np.nan
            response_matrix[image_index, choice_index] = p
        response_matrix = DataAssembly(response_matrix,
                                       coords=coords,
                                       dims=responses.dims + ('choice', ))
        return response_matrix
예제 #13
0
def devalois1982b_properties(model_identifier, responses, baseline):
    """Compute the De Valois et al. 1982(b) peak spatial frequency property
    per neuroid from DC and first-harmonic (AC) grating responses.

    :param responses: grating responses, shape (neuroid, stimulus)
    :param baseline: blank-screen response per neuroid
    :return: DataAssembly with dims (neuroid, neuronal_property)
    """
    _assert_grating_activations(responses)
    radius = np.array(sorted(set(responses.radius.values)))
    spatial_frequency = np.array(sorted(set(responses.spatial_frequency.values)))
    orientation = np.array(sorted(set(responses.orientation.values)))
    phase = np.array(sorted(set(responses.phase.values)))

    responses = responses.values
    baseline = baseline.values
    assert responses.shape[0] == baseline.shape[0]
    n_neuroids = responses.shape[0]

    # (neuroid, radius, spatial_frequency, orientation, phase)
    responses = responses.reshape((n_neuroids, len(radius), len(spatial_frequency), len(orientation), len(phase)))
    # DC: phase-average, baseline-subtracted; AC: first harmonic over phase
    # (fft along the last, i.e. phase, axis)
    responses_dc = responses.mean(axis=4) - baseline.reshape((-1, 1, 1, 1))
    responses_ac = np.absolute(np.fft.fft(responses)) / len(phase)
    responses_ac = responses_ac[:, :, :, :, 1] * 2
    # stack DC and AC as a final, 2-element component axis
    responses = np.zeros((n_neuroids, len(radius), len(spatial_frequency), len(orientation), 2))
    responses[:, :, :, :, 0] = responses_dc
    responses[:, :, :, :, 1] = responses_ac
    del responses_ac, responses_dc

    max_response = responses.reshape((n_neuroids, -1)).max(axis=1, keepdims=True)

    peak_spatial_frequency = np.zeros((n_neuroids, 1))

    for neur in range(n_neuroids):
        # preferred stimulus over (radius, spatial frequency, orientation, DC/AC)
        pref_radius, pref_spatial_frequency, pref_orientation, pref_component = \
            np.unravel_index(np.argmax(responses[neur, :, :, :, :]),
                             (len(radius), len(spatial_frequency), len(orientation), 2))

        spatial_frequency_curve = responses[neur, pref_radius, :, pref_orientation, pref_component]

        # index [1] is presumably the tuning curve's peak — confirm against
        # calc_spatial_frequency_tuning's return contract
        peak_spatial_frequency[neur] = \
            calc_spatial_frequency_tuning(spatial_frequency_curve, spatial_frequency, thrsh=0.707, filt_type='smooth',
                                          mode='ratio')[1]

    properties_data = peak_spatial_frequency

    # keep only neuroids whose peak response exceeds the threshold
    good_neuroids = max_response > RESPONSE_THRESHOLD
    properties_data = properties_data[np.argwhere(good_neuroids)[:, 0], :]

    properties_data = DataAssembly(properties_data, coords={'neuroid_id': ('neuroid', range(properties_data.shape[0])),
                                                            'region': ('neuroid', ['V1'] * properties_data.shape[0]),
                                                            'neuronal_property': PROPERTY_NAMES},
                                   dims=['neuroid', 'neuronal_property'])
    return properties_data
예제 #14
0
 def _test_no_division_apply_manually(self, num_values):
     """Drive the CartesianProduct pipe protocol by hand for an assembly
     with no division coordinates: exactly one iteration is expected."""
     values = np.random.rand(num_values)
     assembly = NeuroidAssembly(
         values,
         coords={'neuroid': list(range(num_values))},
         dims=['neuroid'])
     transformation = CartesianProduct()
     generator = transformation.pipe(assembly)
     for divided_assembly in generator:  # should run only once
         np.testing.assert_array_equal(assembly.values, divided_assembly[0])
         result = DataAssembly([0], coords={'split': [0]}, dims=['split'])
         done = generator.send(result)
         assert done
         break
     similarity = next(generator)
     np.testing.assert_array_equal(similarity.shape, [1])
     np.testing.assert_array_equal(similarity.dims, ['split'])
     assert similarity[0] == 0
예제 #15
0
 def test(self):
     """Comparing a dataset against itself yields a ceiling of ~1 per neuroid."""
     values = np.tile(np.arange(10)[:, np.newaxis], [5, 10])
     neuroid_coords = {
         'neuroid_id': ('neuroid', np.arange(10)),
         'neuroid_meta': ('neuroid', np.arange(10)),
     }
     data = NeuroidAssembly(
         values,
         coords={
             'image_id': ('presentation', np.tile(list(alphabet)[:10], 5)),
             'image_meta': ('presentation', np.tile(list(alphabet)[:10], 5)),
             'repetition': ('presentation', np.tile(np.arange(5), 10)),
             **neuroid_coords,
         },
         dims=['presentation', 'neuroid'])
     ceiler = SplitHalfConsistency()
     ceiling = ceiler(data, data)
     expected = DataAssembly([approx(1)] * 10,
                             coords=neuroid_coords,
                             dims=['neuroid'])
     assert all(ceiling == expected)
예제 #16
0
def ringach2002_properties(model_identifier, responses, baseline):
    """Compute Ringach et al. 2002 orientation-tuning properties (circular
    variance, bandwidth, modulation ratio, ratios thereof, etc.) per neuroid
    from grating responses.

    :param responses: grating responses, shape (neuroid, stimulus)
    :param baseline: blank-screen response per neuroid
    :return: DataAssembly with dims (neuroid, neuronal_property)
    """
    _assert_grating_activations(responses)
    spatial_frequency = np.array(
        sorted(set(responses.spatial_frequency.values)))
    orientation = np.array(sorted(set(responses.orientation.values)))
    phase = np.array(sorted(set(responses.phase.values)))
    nStim = responses.values.shape[1]
    n_cycles = nStim // (len(phase) * len(orientation) *
                         len(spatial_frequency))

    responses = responses.values
    baseline = baseline.values
    assert responses.shape[0] == baseline.shape[0]
    n_neuroids = responses.shape[0]

    # (neuroid, cycle, spatial_frequency, orientation, phase)
    responses = responses.reshape(
        (n_neuroids, n_cycles, len(spatial_frequency), len(orientation),
         len(phase)))
    # DC: phase-average; AC: first harmonic over phase (fft along last axis)
    responses_dc = responses.mean(axis=4)
    responses_ac = np.absolute(np.fft.fft(responses)) / len(phase)
    responses_ac = responses_ac[:, :, :, :, 1] * 2
    del responses

    max_dc = np.zeros((n_neuroids, 1))
    max_ac = np.zeros((n_neuroids, 1))
    min_dc = np.zeros((n_neuroids, 1))
    circular_variance = np.zeros((n_neuroids, 1))
    bandwidth = np.zeros((n_neuroids, 1))
    orthogonal_preferred_ratio = np.zeros((n_neuroids, 1))
    orientation_selective = np.ones((n_neuroids, 1))

    for neur in range(n_neuroids):
        # preferred stimulus is chosen by the DC response
        pref_cycle, pref_spatial_frequency, pref_orientation = np.unravel_index(
            np.argmax(responses_dc[neur]),
            (n_cycles, len(spatial_frequency), len(orientation)))

        max_dc[neur] = responses_dc[neur, pref_cycle, pref_spatial_frequency,
                                    pref_orientation]
        max_ac[neur] = responses_ac[neur, pref_cycle, pref_spatial_frequency,
                                    pref_orientation]

        # orientation tuning curve at the preferred cycle/spatial frequency
        orientation_curve = responses_dc[neur, pref_cycle,
                                         pref_spatial_frequency, :]
        min_dc[neur] = orientation_curve.min()

        circular_variance[neur] = calc_circular_variance(
            orientation_curve, orientation)
        bandwidth[neur] = \
            calc_bandwidth(orientation_curve, orientation, filt_type='hanning', thrsh=0.707, mode='half')[0]
        orthogonal_preferred_ratio[neur] = calc_orthogonal_preferred_ratio(
            orientation_curve, orientation)

    # nan bandwidth marks neuroids that are not orientation selective
    orientation_selective[np.isnan(bandwidth)] = 0
    modulation_ratio = max_ac / max_dc
    circular_variance_bandwidth_ratio = circular_variance / bandwidth
    orthogonal_preferred_ratio_circular_variance_difference = orthogonal_preferred_ratio - circular_variance
    orthogonal_preferred_ratio_bandwidth_ratio = orthogonal_preferred_ratio / bandwidth

    properties_data = np.concatenate(
        (baseline, max_dc, min_dc, max_ac, modulation_ratio, circular_variance,
         bandwidth, orthogonal_preferred_ratio, orientation_selective,
         circular_variance_bandwidth_ratio,
         orthogonal_preferred_ratio_circular_variance_difference,
         orthogonal_preferred_ratio_bandwidth_ratio),
        axis=1)

    # keep only neuroids whose peak DC response exceeds baseline by a threshold
    good_neuroids = max_dc > baseline + RESPONSE_THRESHOLD
    properties_data = properties_data[np.argwhere(good_neuroids)[:, 0], :]

    properties_data = DataAssembly(
        properties_data,
        coords={
            'neuroid_id': ('neuroid', range(properties_data.shape[0])),
            'region': ('neuroid', ['V1'] * properties_data.shape[0]),
            'neuronal_property': PROPERTY_NAMES
        },
        dims=['neuroid', 'neuronal_property'])
    return properties_data
예제 #17
0
 def __call__(self, assembly):
     """Record the assembly for later inspection and return a dummy score."""
     self.assemblies.append(assembly)
     return DataAssembly([0])
예제 #18
0
 def __call__(self, train, test):
     """Record train/test assemblies for later inspection; return a dummy score."""
     self.train_assemblies.append(train)
     self.test_assemblies.append(test)
     return DataAssembly(0)
예제 #19
0
 def test_sel(self):
     """Selecting on a coordinate must propagate to the raw attribute."""
     raw = DataAssembly([0, 2, 1, 3], coords={'a': [1, 1, 2, 2]}, dims=['a'])
     score = Score([1, 2], coords={'a': [1, 2]}, dims=['a'])
     score.attrs['raw'] = raw
     sel_score = score.sel(a=1)
     np.testing.assert_array_equal(sel_score.raw['a'], [1, 1])
예제 #20
0
 def test_mean_no_apply_raw(self):
     """With _apply_raw=True, the mean is applied to the raw values as well."""
     raw = DataAssembly([0, 2, 1, 3], coords={'a': [1, 1, 2, 2]}, dims=['a'])
     score = Score([1, 2], coords={'a': [1, 2]}, dims=['a'])
     score.attrs['raw'] = raw
     mean_score = score.mean('a', _apply_raw=True)
     assert mean_score.raw == 1.5
예제 #21
0
 def test_mean(self):
     """By default, mean() leaves the raw attribute untouched."""
     raw = DataAssembly([0, 2, 1, 3], coords={'a': [1, 1, 2, 2]}, dims=['a'])
     score = Score([1, 2], coords={'a': [1, 2]}, dims=['a'])
     score.attrs['raw'] = raw
     mean_score = score.mean('a')
     np.testing.assert_array_equal(mean_score.raw['a'], [1, 1, 2, 2])
예제 #22
0
 def test_squeeze(self):
     """squeeze() is applied to the raw attribute as well."""
     raw = DataAssembly([[0, 2, 1, 3]], coords={'s': [0], 'a': [1, 1, 2, 2]}, dims=['s', 'a'])
     score = Score([[1, 2]], coords={'s': [0], 'a': [1, 2]}, dims=['s', 'a'])
     score.attrs['raw'] = raw
     squeezed = score.squeeze('s')
     np.testing.assert_array_equal(squeezed.raw.dims, ['a'])
예제 #23
0
 def __call__(self, train_source, train_target, test_source,
              test_target):
     """Score a single split by comparing only the test assemblies."""
     # compare assemblies for a single split. we ignore the 10% train ("leave-one-out") and only use test.
     score = self._metric(test_source, test_target)
     return DataAssembly(score)
def cavanaugh2002_properties(model_identifier, responses, baseline):
    """Compute Cavanaugh et al. 2002 size-tuning properties (surround
    suppression index, grating summation field, surround diameter, and
    derived quantities) per neuroid from grating responses at multiple radii.

    :param responses: grating responses, shape (neuroid, stimulus)
    :param baseline: blank-screen response per neuroid
    :return: DataAssembly with dims (neuroid, neuronal_property)
    """
    _assert_grating_activations(responses)
    radius = np.array(sorted(set(responses.radius.values)))
    spatial_frequency = np.array(
        sorted(set(responses.spatial_frequency.values)))
    orientation = np.array(sorted(set(responses.orientation.values)))
    phase = np.array(sorted(set(responses.phase.values)))

    responses = responses.values
    baseline = baseline.values
    assert responses.shape[0] == baseline.shape[0]
    n_neuroids = responses.shape[0]

    # (neuroid, radius, spatial_frequency, orientation, phase)
    responses = responses.reshape(
        (n_neuroids, len(radius), len(spatial_frequency), len(orientation),
         len(phase)))
    # DC: phase-average, baseline-subtracted; AC: first harmonic over phase
    # (fft along the last, i.e. phase, axis)
    responses_dc = responses.mean(axis=4) - baseline.reshape((-1, 1, 1, 1))
    responses_ac = np.absolute(np.fft.fft(responses)) / len(phase)
    responses_ac = responses_ac[:, :, :, :, 1] * 2
    # stack DC and AC as a final, 2-element component axis
    responses = np.zeros(
        (n_neuroids, len(radius), len(spatial_frequency), len(orientation), 2))
    responses[:, :, :, :, 0] = responses_dc
    responses[:, :, :, :, 1] = responses_ac
    del responses_ac, responses_dc

    max_response = responses.reshape((n_neuroids, -1)).max(axis=1,
                                                           keepdims=True)

    surround_suppression_index = np.zeros((n_neuroids, 1))
    strongly_suppressed = np.zeros((n_neuroids, 1))
    grating_summation_field = np.zeros((n_neuroids, 1))
    surround_diameter = np.zeros((n_neuroids, 1))
    surround_grating_summation_field_ratio = np.zeros((n_neuroids, 1))

    for neur in range(n_neuroids):
        # preferred stimulus over (radius, spatial frequency, orientation, DC/AC)
        pref_radius, pref_spatial_frequency, pref_orientation, pref_component = \
            np.unravel_index(np.argmax(responses[neur, :, :, :, :]),
                             (len(radius), len(spatial_frequency), len(orientation), 2))

        # size tuning curve across radii at the preferred stimulus parameters
        size_curve = responses[neur, :, pref_spatial_frequency,
                               pref_orientation, pref_component]

        grating_summation_field[neur], surround_diameter[neur], surround_grating_summation_field_ratio[neur], \
        surround_suppression_index[neur] = calc_size_tuning(size_curve, radius)

    # binarize: suppression index >= 0.1 counts as strongly suppressed
    strongly_suppressed[surround_suppression_index >= 0.1] = 1

    properties_data = np.concatenate(
        (surround_suppression_index, strongly_suppressed,
         grating_summation_field, surround_diameter,
         surround_grating_summation_field_ratio),
        axis=1)

    # keep only neuroids whose peak response exceeds the threshold
    good_neuroids = max_response > RESPONSE_THRESHOLD
    properties_data = properties_data[np.argwhere(good_neuroids)[:, 0], :]

    properties_data = DataAssembly(
        properties_data,
        coords={
            'neuroid_id': ('neuroid', range(properties_data.shape[0])),
            'region': ('neuroid', ['V1'] * properties_data.shape[0]),
            'neuronal_property': PROPERTY_NAMES
        },
        dims=['neuroid', 'neuronal_property'])
    return properties_data
예제 #25
0
def freemanziemba2013_properties(model_identifier, responses, baseline):
    """Compute Freeman & Ziemba (2013)-style texture response properties per neuroid.

    :param model_identifier: identifier of the model (unused in this body; kept
        for interface consistency with sibling property functions).
    :param responses: assembly of activations to texture/noise stimuli carrying
        ``type``, ``family`` and ``sample`` coordinates (validated by
        ``_assert_texture_activations``).
    :param baseline: assembly of baseline activations, one value per neuroid.
    :return: DataAssembly of shape (neuroid, neuronal_property), restricted to
        neuroids whose maximum baseline-subtracted response exceeds
        ``RESPONSE_THRESHOLD``.
    """
    _assert_texture_activations(responses)
    # Sort so that the reshape below groups values as (type, family, sample).
    responses = responses.sortby(['type', 'family', 'sample'])
    # Renamed from `type` to avoid shadowing the builtin. The indexing below
    # assumes exactly two stimulus types, with index 0 = noise and
    # index 1 = texture after sorting (consistent with how max_noise /
    # max_texture and *_selectivity are computed below).
    stimulus_type = np.array(sorted(set(responses.type.values)))
    family = np.array(sorted(set(responses.family.values)))
    sample = np.array(sorted(set(responses.sample.values)))

    responses = responses.values
    baseline = baseline.values
    assert responses.shape[0] == baseline.shape[0]
    n_neuroids = responses.shape[0]

    responses = responses.reshape(n_neuroids, len(stimulus_type), len(family),
                                  len(sample))
    # Variance-stabilizing transform of the (scaled) responses, computed
    # before baseline subtraction: sqrt(x) + sqrt(x + 1).
    responses_spikes = responses / 10
    responses_spikes = np.sqrt(responses_spikes) + np.sqrt(responses_spikes +
                                                           1)
    # In-place baseline subtraction (broadcast over type/family/sample).
    responses -= baseline.reshape((-1, 1, 1, 1))

    # Per-neuroid maxima within texture stimuli ([:, 1, :]) and noise
    # stimuli ([:, 0, :]); max_response gates neuroid inclusion below.
    max_texture = np.max((responses.reshape((n_neuroids, 2, -1)))[:, 1, :],
                         axis=1,
                         keepdims=True)
    max_noise = np.max((responses.reshape((n_neuroids, 2, -1)))[:, 0, :],
                       axis=1,
                       keepdims=True)
    max_response = np.max(responses.reshape((n_neuroids, -1)),
                          axis=1,
                          keepdims=True)

    # Average over samples -> (neuroid, type, family).
    responses_family = responses.mean(axis=3)

    texture_modulation_index = np.zeros((n_neuroids, 1))
    texture_selectivity = np.zeros((n_neuroids, 1))
    noise_selectivity = np.zeros((n_neuroids, 1))
    texture_sparseness = np.zeros((n_neuroids, 1))
    noise_sparseness = np.zeros((n_neuroids, 1))
    variance_ratio = np.zeros((n_neuroids, 1))
    sample_variance = np.zeros((n_neuroids, 1))
    family_variance = np.zeros((n_neuroids, 1))

    for neur in range(n_neuroids):
        texture_modulation_index[neur] = calc_texture_modulation(
            responses_family[neur])[0]
        # Index 1 = texture responses, index 0 = noise responses.
        texture_selectivity[neur] = calc_sparseness(responses_family[neur, 1])
        noise_selectivity[neur] = calc_sparseness(responses_family[neur, 0])
        texture_sparseness[neur] = calc_sparseness(responses[neur, 1])
        noise_sparseness[neur] = calc_sparseness(responses[neur, 0])
        # Variance decomposition is computed on the spike-transformed
        # texture responses only.
        variance_ratio[neur], sample_variance[neur], family_variance[neur] = \
            calc_variance_ratio(responses_spikes[neur, 1])

    absolute_texture_modulation_index = np.abs(texture_modulation_index)

    # Column order must match PROPERTY_NAMES.
    properties_data = np.concatenate(
        (texture_modulation_index, absolute_texture_modulation_index,
         texture_selectivity, noise_selectivity, texture_sparseness,
         noise_sparseness, variance_ratio, sample_variance, family_variance,
         max_texture, max_noise),
        axis=1)

    # Keep only sufficiently responsive neuroids.
    good_neuroids = max_response > RESPONSE_THRESHOLD
    properties_data = properties_data[np.argwhere(good_neuroids)[:, 0], :]

    properties_data = DataAssembly(
        properties_data,
        coords={
            'neuroid_id': ('neuroid', range(properties_data.shape[0])),
            'region': ('neuroid', ['V1'] * properties_data.shape[0]),
            'neuronal_property': PROPERTY_NAMES
        },
        dims=['neuroid', 'neuronal_property'])
    return properties_data
예제 #26
0
def map_receptive_field_locations(model_identifier, model: BrainModel, region):
    """Estimate each neuroid's receptive-field (RF) center from an RF-mapping
    stimulus sweep over a (position_y, position_x) grid.

    :param model_identifier: identifier of the model (unused in this body; kept
        for interface consistency with sibling mapping functions).
    :param model: BrainModel to record activations from.
    :param region: region argument (unused in this body; kept for interface
        compatibility).
    :return: tuple ``(rf_pos, rf_map)``: ``rf_pos`` is a (neuroid, axis['y','x'])
        DataAssembly of interpolated RF centers (NaN for neuroids with no
        positive response); ``rf_map`` is a (neuroid, position_y, position_x)
        DataAssembly of blank-subtracted, rectified peak responses.
    """
    blank_activations = record_from_model(model, BLANK_STIM_NAME,
                                          RF_NUMBER_OF_TRIALS)
    blank_activations = blank_activations.values
    blank_activations[blank_activations < 0] = 0  # rectify

    rf_activations = record_from_model(model, RF_STIM_NAME,
                                       RF_NUMBER_OF_TRIALS)

    _assert_grating_activations(rf_activations)

    position_y = np.array(sorted(set(rf_activations.position_y.values)))
    position_x = np.array(sorted(set(rf_activations.position_x.values)))
    n_neuroids = rf_activations.values.shape[0]
    neuroid_ids = rf_activations.neuroid.values
    rf_activations = rf_activations.values
    rf_activations[rf_activations < 0] = 0  # rectify

    # Group the remaining stimulus conditions per grid position, then subtract
    # the per-neuroid blank response (broadcast over all other axes).
    rf_activations = rf_activations.reshape(n_neuroids, len(position_y),
                                            len(position_x), -1)
    rf_activations = rf_activations - blank_activations.reshape(
        [n_neuroids] + [1] * (rf_activations.ndim - 1))

    # Peak response (over stimulus conditions) at each grid position.
    rf_map = rf_activations.max(axis=3)

    rf_map[rf_map < 0] = 0  # rectify again after blank subtraction

    max_resp = np.max(rf_map.reshape(n_neuroids, -1), axis=1)

    # NaN marks neuroids for which no RF center could be estimated.
    rf_pos = np.full((n_neuroids, 2), np.nan)

    for n in range(n_neuroids):
        # Grid positions responding above RF_THRSH of this neuroid's peak.
        exc_pos = rf_map[n] > max_resp[n] * RF_THRSH

        if max_resp[n] > 0:
            # Response-weighted centroid of the supra-threshold positions.
            # Broadcasting the weight column over the (row, col) index pairs
            # replaces the original redundant np.repeat/np.expand_dims
            # construction; the math is identical.
            weights = rf_map[n, exc_pos][:, np.newaxis]
            rf_coord = (np.argwhere(exc_pos) * weights).sum(axis=0) \
                / weights.sum()
            # Interpolate fractional grid indices back to stimulus coordinates.
            rf_pos[n, 0] = np.interp(rf_coord[0], np.arange(len(position_y)),
                                     position_y)
            rf_pos[n, 1] = np.interp(rf_coord[1], np.arange(len(position_x)),
                                     position_x)

    rf_pos = DataAssembly(rf_pos,
                          coords={
                              'neuroid': neuroid_ids,
                              'axis': ['y', 'x']
                          },
                          dims=['neuroid', 'axis'])
    rf_map = DataAssembly(rf_map,
                          coords={
                              'neuroid': neuroid_ids,
                              'position_y': position_y,
                              'position_x': position_x
                          },
                          dims=['neuroid', 'position_y', 'position_x'])

    return rf_pos, rf_map