def ToliasCadena2017PLS():
    """Build the ``tolias.Cadena2017-pls`` neural benchmark.

    Loads the Cadena et al. 2017 assembly twice -- once without averaging over
    repetitions (for the ceiling) and once averaged (for scoring) -- and pairs
    a PLS-regression / Pearson-correlation metric with an internal-consistency
    ceiling.

    :return: a :class:`NeuralBenchmark` ready for scoring.
    """
    loader = AssemblyLoader()
    assembly_repetition = loader(average_repetition=False)
    assembly = loader(average_repetition=True)
    assembly.stimulus_set.name = assembly.stimulus_set_name
    similarity_metric = CrossRegressedCorrelation(
        regression=pls_regression(), correlation=pearsonr_correlation(),
        crossvalidation_kwargs={'stratification_coord': None})
    identifier = 'tolias.Cadena2017-pls'  # plain string: there were no placeholders in the former f-string

    ceiler = InternalConsistency(split_coord='repetition_id')

    def ceiling():
        # This assembly has many stimuli that are only shown to a subset of the neurons.
        # When the loader runs with `average_repetition=True`, it automatically drops nan cells,
        # but for the `assembly_repetition`, it keeps all the rows.
        # If we now use the usual ceiling approach, the two halves will end up with NaN values
        # due to stimuli not being shown to neurons, which doesn't let us correlate.
        # Instead, we here drop all NaN cells and their corresponding stimuli,
        # which keeps only 43% of the original presentation rows, but lets us correlate again.
        assembly_nonan, stimuli = loader.dropna(assembly_repetition, assembly_repetition.attrs['stimulus_set'])
        return ceiler(assembly_nonan)

    return NeuralBenchmark(identifier=identifier, version=1,
                           assembly=assembly, similarity_metric=similarity_metric,
                           visual_degrees=VISUAL_DEGREES,
                           ceiling_func=ceiling)
def __init__(self):
    """Set up the mock benchmark around the packaged assembly.

    Asserts that the loaded assembly covers exactly one region and carries a
    ``repetition`` coordinate, averages over repetitions for the scoring
    assembly, and wires up a PLS/Pearson cross-regressed metric with an
    internal-consistency ceiling computed on the un-averaged assembly.
    """
    # NOTE: a previously assigned `ceiling = Score([1, np.nan], ...)` local was
    # never used and has been removed; the ceiling comes from `ceiler` below.
    assembly_repetition = get_assembly()
    assert len(np.unique(assembly_repetition['region'])) == 1
    assert hasattr(assembly_repetition, 'repetition')
    self.region = 'IT'
    self.assembly = average_repetition(assembly_repetition)
    self._assembly = self.assembly
    self.timebins = timebins_from_assembly(self.assembly)
    self._similarity_metric = CrossRegressedCorrelation(
        regression=pls_regression(), correlation=pearsonr_correlation(),
        crossvalidation_kwargs=dict(
            # only stratify when the assembly actually carries the default stratification coordinate
            stratification_coord=Split.Defaults.stratification_coord
            if hasattr(self.assembly, Split.Defaults.stratification_coord) else None))
    identifier = f'{assembly_repetition.name}-layer_selection'
    ceiler = InternalConsistency()
    super(_MockBenchmark, self).__init__(identifier=identifier,
                                         ceiling_func=lambda: ceiler(assembly_repetition),
                                         version='1.0')
def DicarloSanghavi2020ITPLS():
    """Benchmark on the Sanghavi 2020 IT recordings, scored with PLS regression."""
    metric = CrossRegressedCorrelation(
        regression=pls_regression(),
        correlation=pearsonr_correlation(),
        crossvalidation_kwargs=dict(stratification_coord='object_name'),
    )
    return _DicarloSanghavi2020Region('IT',
                                      identifier_metric_suffix='pls',
                                      similarity_metric=metric,
                                      ceiler=InternalConsistency())
def MovshonFreemanZiemba2013V2PLS():
    """Benchmark on the Freeman & Ziemba 2013 V2 recordings, scored with PLS regression."""
    metric = CrossRegressedCorrelation(
        regression=pls_regression(),
        correlation=pearsonr_correlation(),
        crossvalidation_kwargs=dict(stratification_coord='texture_type'),
    )
    return _MovshonFreemanZiemba2013Region('V2',
                                           identifier_metric_suffix='pls',
                                           similarity_metric=metric,
                                           ceiler=InternalConsistency())
def __init__(self, bold_shift=None):
    """Load the Fedorenko 2016 assembly and configure the word-level metric.

    :param bold_shift: unused here; presumably kept for signature compatibility
        with sibling benchmarks that shift the BOLD signal -- TODO confirm.
    """
    assembly = load_Fedorenko2016()
    self._target_assembly = assembly
    # regress and correlate per word (stimulus_id), cross-validated with
    # sentence-level stratification and an 80% train split
    self._regression = pls_regression(xarray_kwargs=dict(stimulus_coord='stimulus_id'))
    self._correlation = pearsonr_correlation(xarray_kwargs=dict(correlation_coord='stimulus_id'))
    self._metric = CrossRegressedCorrelation(
        regression=self._regression, correlation=self._correlation,
        crossvalidation_kwargs=dict(split_coord='stimulus_id',
                                    stratification_coord='sentence_id',
                                    train_size=.8))
def test_small(self):
    """Regressing a small assembly onto itself should score ~1."""
    n_stimuli, n_neuroids = 30, 25
    values = (np.arange(n_stimuli * n_neuroids)
              + np.random.standard_normal(n_stimuli * n_neuroids)).reshape((n_stimuli, n_neuroids))
    assembly = NeuroidAssembly(
        values,
        coords={'image_id': ('presentation', np.arange(n_stimuli)),
                'object_name': ('presentation', ['a', 'b', 'c'] * 10),
                'neuroid_id': ('neuroid', np.arange(n_neuroids)),
                'region': ('neuroid', ['some_region'] * n_neuroids)},
        dims=['presentation', 'neuroid'])
    metric = CrossRegressedCorrelation(regression=pls_regression(),
                                       correlation=pearsonr_correlation())
    score = metric(source=assembly, target=assembly)
    assert score.sel(aggregation='center') == approx(1, abs=.00001)
def __init__(self, bold_shift=None):
    """Load the Fedorenko 2016 assembly and configure the word-level metric.

    TODO: the packaging file may later average the assembly; update this
    benchmark to include averaging once that lands.
    """
    self._target_assembly = load_Fedorenko2016()
    # regress and correlate per word, keyed on stimulus_id
    self._regression = pls_regression(xarray_kwargs=dict(stimulus_coord='stimulus_id'))
    self._correlation = pearsonr_correlation(xarray_kwargs=dict(correlation_coord='stimulus_id'))
    # NOTE(review): stratification_coord equals split_coord here -- confirm this
    # is intended (a sibling constructor stratifies on 'sentence_id' instead).
    self._metric = CrossRegressedCorrelation(
        regression=self._regression,
        correlation=self._correlation,
        crossvalidation_kwargs=dict(split_coord='stimulus_id',
                                    stratification_coord='stimulus_id'))
def _standard_benchmark(identifier, load_assembly, visual_degrees, number_of_trials, stratification_coord, bibtex):
    """Assemble a standard PLS neural benchmark from an assembly loader.

    :param identifier: benchmark name; '-pls' is appended
    :param load_assembly: callable taking ``average_repetitions`` and returning an assembly
    :param stratification_coord: coordinate used to stratify cross-validation splits
    """
    # defer the (potentially expensive) assembly loads until first use
    repetitions = LazyLoad(lambda: load_assembly(average_repetitions=False))
    averaged = LazyLoad(lambda: load_assembly(average_repetitions=True))
    metric = CrossRegressedCorrelation(
        regression=pls_regression(),
        correlation=pearsonr_correlation(),
        crossvalidation_kwargs=dict(stratification_coord=stratification_coord))
    consistency = InternalConsistency()
    return NeuralBenchmark(identifier=f"{identifier}-pls", version=1,
                           assembly=averaged, similarity_metric=metric,
                           visual_degrees=visual_degrees, number_of_trials=number_of_trials,
                           ceiling_func=lambda: consistency(repetitions),
                           parent=None, bibtex=bibtex)
def _standard_benchmark(identifier, load_assembly, stratification_coord):
    """Assemble a standard PLS neural benchmark from an assembly loader.

    :param identifier: benchmark name; '-pls' is appended
    :param load_assembly: callable taking ``average_repetitions`` and returning an assembly
    :param stratification_coord: coordinate used to stratify cross-validation splits
    """
    # defer the (potentially expensive) assembly loads until first use
    repetitions = LazyLoad(lambda: load_assembly(average_repetitions=False))
    averaged = LazyLoad(lambda: load_assembly(average_repetitions=True))
    metric = CrossRegressedCorrelation(
        regression=pls_regression(),
        correlation=pearsonr_correlation(),
        crossvalidation_kwargs=dict(stratification_coord=stratification_coord))
    consistency = InternalConsistency()
    return NeuralBenchmark(
        identifier=f"{identifier}-pls", version=1,
        assembly=averaged, similarity_metric=metric,
        ceiling_func=lambda: consistency(repetitions),
        parent=None,
        paper_link='http://www.jneurosci.org/content/35/39/13402.short')
def test_small(self):
    """Temporal regression of an assembly onto itself preserves its coordinates."""
    n_stimuli, n_neuroids, n_bins = 30, 25, 5
    total = n_stimuli * n_neuroids * n_bins
    values = (np.arange(total) + np.random.standard_normal(total)).reshape((n_stimuli, n_neuroids, n_bins))
    assembly = NeuroidAssembly(
        values,
        coords={
            'image_id': ('presentation', np.arange(n_stimuli)),
            'object_name': ('presentation', ['a', 'b', 'c'] * 10),
            'neuroid_id': ('neuroid', np.arange(n_neuroids)),
            'region': ('neuroid', ['some_region'] * n_neuroids),
            'time_bin_start': ('time_bin', list(range(n_bins))),
            'time_bin_end': ('time_bin', list(range(1, n_bins + 1))),
        },
        dims=['presentation', 'neuroid', 'time_bin'])
    regression = TemporalRegressionAcrossTime(pls_regression())
    regression.fit(source=assembly, target=assembly)
    prediction = regression.predict(source=assembly)
    # the prediction must align with the source on every coordinate we care about
    for coord in ('image_id', 'neuroid_id', 'time_bin'):
        assert all(prediction[coord] == assembly[coord])