Esempio n. 1
0
def test_regression_gabor():
    """Regression test: a fresh Gabor fit must match the stored reference fit.

    Loads a receptive field from the test HDF5 file, runs ``GaborFit.fit``
    from scratch (``load_prev_run=False``), and compares the result against
    the previously saved fit (``gnd``) from the same file.
    """
    rf = hdf5_load('tests/data/test_data.hdf5', 'ReceptiveField', arrs=['neu'])['neu']
    gabor = GaborFit.from_hdf5('tests/data/test_data.hdf5', load_prev_run=False)
    gabor.fit(rf)

    gnd = GaborFit.from_hdf5('tests/data/test_data.hdf5').rf_fit
    # Compare with the mean *absolute* deviation: the original signed mean lets
    # positive and negative errors cancel, so the assertion could pass even
    # when the fit is badly wrong.
    assert np.mean(np.abs(gabor.rf_fit - gnd)) < 0.05
Esempio n. 2
0
#
#  | $X$ | $Y$ |
#  |------|------|
#  | V1-1 | V1-2 |
#  | V1-1 | V2-1 |
#  | V2-1 | V2-2 |
#
#  Then, for each group, we split the spiking data by stimulus into train and test sets.

# %% tags=["parameters"]
# Papermill-style parameter cell.  Both artifacts happen to live in the same
# HDF5 file here, but the two paths are kept separate so a run can mix sources.
path_loader = "data/superstim.hdf5"
path_gabor = "data/superstim.hdf5"

# %%
# Build the repeated-stimulus CCA helper from the spike data and the Gabor
# fits; the walrus assignments keep `loader`/`gabor` bound for later cells.
cr = CCARepeatedStim(
    loader := SpikeLoader.from_hdf5(path_loader), gabor := GaborFit.from_hdf5(path_gabor),
)
# Training-set sizes (number of stimuli) swept in the analysis below.
n_train = [500, 1000, 2000, 5000, 10000]

# %%
# Pairwise scatter of per-neuron parameters from `cr.df`, colored by region.
# NOTE(review): "x"/"y" presumably anatomical position and σ the Gabor size —
# confirm against CCARepeatedStim.df's construction.
sns.pairplot(
    cr.df,
    hue="region",
    vars=["x", "y", "σ", "azimuth", "altitude"],
    corner=True,
    plot_kws=dict(s=4, linewidth=0, alpha=0.3),
)

# %% [markdown]
# There is a sharp increase in sampled neurons at the V1-V2 boundary. This is because V1 neurons outnumber V2 neurons by 60%. Furthermore, the azimuthal preferences of V2 neurons extend into more of the lateral visual field, reducing the number of potential matches with V1.
#
Esempio n. 3
0
from src.gabor_analysis.gabor_fit import GaborFit
from src.spikeloader import SpikeLoader
""" Smooth retinotopic field and make contour plot. """


def gen_grid(var, σ):
    zi = griddata((df['x'], df['y']),
                  df[var], (xi[None, :], yi[:, None]),
                  method='nearest')
    return gaussian_filter(zi, σ, mode='mirror', truncate=3.)


if __name__ == '__main__':
    # Load spike data and previously fitted Gabor parameters from disk.
    loader = SpikeLoader.from_hdf5('data/raw.hdf5')
    g = GaborFit.from_hdf5('data/gabor.hdf5')
    # One row per neuron: fitted Gabor parameters (columns named after
    # GaborFit.KEY) joined with the anatomical positions from the loader.
    df = pd.DataFrame(data=g.params_fit, columns=list(GaborFit.KEY.keys()))
    df = df.join(loader.pos)

    # Regular 2000-point grids spanning the anatomical extent of the neurons.
    xi = np.linspace(df['x'].min(), df['x'].max(), 2000)
    yi = np.linspace(df['y'].min(), df['y'].max(), 2000)

    # Smoothed retinotopic maps with per-map kernel widths.
    # NOTE(review): 'pos_x'/'pos_y' presumably encode the fitted retinotopic
    # (azimuth/altitude) preference — confirm against GaborFit.KEY.
    # NOTE(review): the trailing comma makes the right-hand side a two-element
    # tuple expression; harmless here but easy to misread.
    z_x, z_y = gen_grid('pos_x', 150), gen_grid('pos_y', 100),

    # Contour plot of the smoothed 'pos_x' map over the anatomical plane.
    fig, ax = plt.subplots(dpi=300)
    con = ax.contour(xi,
                     yi,
                     z_x,
                     cmap='magma',
                     levels=np.arange(-10.5, -2, 1.),
                     alpha=1.)
Esempio n. 4
0
from scipy.stats.stats import zscore
from src.canonical_analysis.subspace_comm import CCARepeatedStim
from src.gabor_analysis.gabor_fit import GaborFit
from src.power_law.subtract_spont import SubtractSpontAnalyzer
from src.spikeloader import SpikeLoader

# Altair refuses to embed large data frames by default; lift that cap, and
# apply seaborn's default plot styling.
alt.data_transformers.disable_max_rows()
sns.set()

# %% tags=["parameters"]
# Papermill-style parameter cell: paths to the recording's HDF5 artifacts.
path_loader = "data/superstim_TX57.hdf5"
path_gabor = "data/superstim_TX57.hdf5"

# %%
loader = SpikeLoader.from_hdf5(path_loader)
gabor = GaborFit.from_hdf5(path_gabor)

# z-score each neuron's trace (axis=0 → across time/stimuli), then project out
# the spontaneous-activity subspace (rank 128) estimated from the spontaneous
# frames before transforming the full spike matrix.
idx_spont = loader.idx_spont
spks = zscore(loader.spks, axis=0)
spks_nospont = SubtractSpontAnalyzer(128).fit(spks,
                                              loader.idx_spont).transform(spks)

# %% [markdown]
"""
# Canonical Correlation Analysis - Stimuli

We use CCA to capture the largest modes of the spiking data that are common to both neuron groups.

Let $X$ and $Y$ be $(n \times p_i)$ matrices where $n$ is the number of stimuli and $p_i$ is the number of neurons in each group. Random group assignment is based on a checkerboard pattern to avoid signal contamination between adjacent neurons.

Neuron assignment to each group never changes in the following analysis.
Esempio n. 5
0
                        df_train.corrwith(S_neu_filtered)).assign(
                            regions=name,
                            group=group,
                            test_stim_in_train=case,
                            n=n)
                    out.append(corr)

        return pd.concat(out).rename(columns={0: "corr"})


if __name__ == "__main__":
    from src.gabor_analysis.gabor_fit import GaborFit
    from src.spikeloader import SpikeLoader

    # Build the repeated-stimulus CCA helper from the processed spike data
    # and the saved Gabor fits.
    cr = CCARepeatedStim(SpikeLoader.from_hdf5("data/processed.hdf5"),
                         GaborFit.from_hdf5("data/gabor.hdf5"))
    # Candidate training-set sizes; only the first three are used below.
    n_train = [500, 1000, 2000, 5000, 10000, 20000]
    df_un = cr.calc_cr(ns_train=n_train[:3])
    test = cr.corr_between_test(df_un)
    # %% [markdown]
    # V1 and V2 are separated by a line where the azimuth preference reverses with increased receptive field size (σ).
    #
    # The retinotopy between both regions are matched by a method similar to importance sampling.

    # %%

    # %% [markdown]
    # There is a sharp increase in sampled neurons at the V1-V2 boundary. This is because V1 neurons outnumber V2 neurons by 60%. Furthermore, the azimuthal preferences of V2 neurons extend into more of the lateral visual field, reducing the number of potential matches with V1.

    # %% [markdown]
    # We randomly split each region into two for CCA and verify that the split is uniform.