def test_regression_gabor():
    rf = hdf5_load('tests/data/test_data.hdf5', 'ReceptiveField', arrs=['neu'])['neu']
    gabor = GaborFit.from_hdf5('tests/data/test_data.hdf5', load_prev_run=False)
    gabor.fit(rf)

    # Compare against the fitted RFs saved from a previous, known-good run.
    gnd = GaborFit.from_hdf5('tests/data/test_data.hdf5').rf_fit
    assert np.mean(np.abs(gabor.rf_fit - gnd)) < 0.05
def vary(what, rang):
    # Baseline Gabor parameters; only the parameter named by `what` is swept over `rang`.
    p = {
        "σ": 150,
        "θ": 0.0,
        "λ": 300,
        "γ": 1,
        "φ": -0.0,
        "pos_x": 0.0,
        "pos_y": 0.0,
    }
    params = np.zeros((len(rang), len(p)))
    for i, v in enumerate(p.values()):
        params[:, i] = v
    params[:, GaborFit.KEY[what]] = rang
    return GaborFit._make_gabor((640, 640), params)
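# A hypothetical usage sketch (assumes matplotlib is available and that
# `_make_gabor` returns one image per row of `params`, i.e. shape (len(rang), H, W)):
import matplotlib.pyplot as plt

sigmas = np.linspace(50, 300, 6)
patches = vary("σ", sigmas)

fig, axs = plt.subplots(1, len(sigmas), figsize=(12, 2))
for ax, σ, patch in zip(axs, sigmas, patches):
    ax.imshow(patch, cmap="bwr")
    ax.set_title(f"σ = {σ:.0f}")
    ax.axis("off")
fig.tight_layout()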
#
# | $X$  | $Y$  |
# |------|------|
# | V1-1 | V1-2 |
# | V1-1 | V2-1 |
# | V2-1 | V2-2 |
#
# Then, for each group, we split the spiking data by stimulus into train and test sets
# (a rough sketch of this split follows the pairplot below).

# %% tags=["parameters"]
path_loader = "data/superstim.hdf5"
path_gabor = "data/superstim.hdf5"

# %%
cr = CCARepeatedStim(
    loader := SpikeLoader.from_hdf5(path_loader),
    gabor := GaborFit.from_hdf5(path_gabor),
)
n_train = [500, 1000, 2000, 5000, 10000]

# %%
sns.pairplot(
    cr.df,
    hue="region",
    vars=["x", "y", "σ", "azimuth", "altitude"],
    corner=True,
    plot_kws=dict(s=4, linewidth=0, alpha=0.3),
)

# %% [markdown]
# There is a sharp increase in the number of sampled neurons at the V1-V2 boundary because
# V1 neurons outnumber V2 neurons by 60%. Furthermore, the azimuthal preferences of V2
# neurons extend to include more of the lateral visual field, reducing the number of
# potential matches with V1.
#
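# %% [markdown]
# A rough sketch of the stimulus-wise split mentioned above (hypothetical: `CCARepeatedStim`
# performs the actual split internally; the assumption here is that rows of `loader.spks`
# index stimuli and columns index neurons).

# %%
import numpy as np
from sklearn.model_selection import train_test_split

idx_stim = np.arange(loader.spks.shape[0])
idx_train, idx_test = train_test_split(idx_stim, train_size=n_train[0], random_state=0)
spks_train, spks_test = loader.spks[idx_train], loader.spks[idx_test]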
"""Smooth the retinotopic field and make a contour plot."""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.interpolate import griddata
from scipy.ndimage import gaussian_filter

from src.gabor_analysis.gabor_fit import GaborFit
from src.spikeloader import SpikeLoader


def gen_grid(var, σ):
    # Interpolate the fitted parameter `var` onto the regular (xi, yi) grid,
    # then smooth it with a Gaussian filter of width σ.
    zi = griddata((df['x'], df['y']), df[var], (xi[None, :], yi[:, None]), method='nearest')
    return gaussian_filter(zi, σ, mode='mirror', truncate=3.)


if __name__ == '__main__':
    loader = SpikeLoader.from_hdf5('data/raw.hdf5')
    g = GaborFit.from_hdf5('data/gabor.hdf5')

    df = pd.DataFrame(data=g.params_fit, columns=list(GaborFit.KEY.keys()))
    df = df.join(loader.pos)

    xi = np.linspace(df['x'].min(), df['x'].max(), 2000)
    yi = np.linspace(df['y'].min(), df['y'].max(), 2000)
    z_x, z_y = gen_grid('pos_x', 150), gen_grid('pos_y', 100)

    fig, ax = plt.subplots(dpi=300)
    con = ax.contour(xi, yi, z_x, cmap='magma', levels=np.arange(-10.5, -2, 1.), alpha=1.)
import altair as alt
import seaborn as sns
from scipy.stats import zscore

from src.canonical_analysis.subspace_comm import CCARepeatedStim
from src.gabor_analysis.gabor_fit import GaborFit
from src.power_law.subtract_spont import SubtractSpontAnalyzer
from src.spikeloader import SpikeLoader

alt.data_transformers.disable_max_rows()
sns.set()

# %% tags=["parameters"]
path_loader = "data/superstim_TX57.hdf5"
path_gabor = "data/superstim_TX57.hdf5"

# %%
loader = SpikeLoader.from_hdf5(path_loader)
gabor = GaborFit.from_hdf5(path_gabor)

idx_spont = loader.idx_spont
spks = zscore(loader.spks, axis=0)
spks_nospont = SubtractSpontAnalyzer(128).fit(spks, loader.idx_spont).transform(spks)

# %% [markdown]
"""
# Canonical Correlation Analysis - Stimuli

We use CCA to capture the largest modes of the spiking data that are common to both neuron groups.

Let $X$ and $Y$ be $(n \times p_i)$ matrices where $n$ is the number of stimuli and $p_i$ is the number of neurons in each group.

Random group assignment is based on a checkerboard pattern to avoid signal contamination between adjacent neurons. Neuron assignment to each group never changes in the following analysis.
"""
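# %% [markdown]
"""
As a rough sketch of the idea (not the actual `CCARepeatedStim` implementation; the
checkerboard grid size and the use of scikit-learn's `CCA` are illustrative assumptions),
the checkerboard assignment and CCA step could look like the following.
"""

# %%
from sklearn.cross_decomposition import CCA

# Checkerboard assignment: neurons whose (x, y) grid cell has even parity form group X,
# odd parity form group Y. The 50 µm grid size is an illustrative guess.
grid = 50
parity = ((loader.pos["x"] // grid + loader.pos["y"] // grid) % 2).to_numpy()
X = spks_nospont[:, parity == 0]  # (n, p_1)
Y = spks_nospont[:, parity == 1]  # (n, p_2)

cca = CCA(n_components=25, max_iter=1000)
U, V = cca.fit_transform(X, Y)  # canonical variates, one column per mode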
    im.set_clim(u := -np.max(np.abs(test[i])), -u)
    ax.set_title(f'Neuron 11666. Step {i}.')
    return [im]


# Call the animator. blit=True means only re-draw the parts that have changed.
anim = FuncAnimation(fig, animate, frames=30, blit=True)
anim.save('test.mp4', fps=fps, extra_args=['-vcodec', 'libx264'])

#%%
f = SpikeLoader.from_hdf5()
rf = ReceptiveField(f.img_dim)
rf.fit_neuron(f.imgs_stim, f.S)

#%%
from src.receptive_field.rf import gen_rf_rank

rf = gen_rf_rank(rf.rf_, n_pc=30)

#%%
# Fit a single Gabor to the rank-reduced RF of neuron 11666, then regenerate it from the fit.
choose = rf[11666, ...]
x = GaborFit(
    n_iter=500, n_pc=0, optimizer={'name': 'adam', 'step_size': 2e-2}
).fit(choose[np.newaxis, ...])
y = np.array(x.params_fit).squeeze()
z = GaborFit._make_gabor((16, 9), y)

#%%
ani(z)
            df_train.corrwith(S_neu_filtered)).assign(
                regions=name, group=group, test_stim_in_train=case, n=n)
            out.append(corr)
        return pd.concat(out).rename(columns={0: "corr"})


if __name__ == "__main__":
    from src.gabor_analysis.gabor_fit import GaborFit
    from src.spikeloader import SpikeLoader

    cr = CCARepeatedStim(
        SpikeLoader.from_hdf5("data/processed.hdf5"),
        GaborFit.from_hdf5("data/gabor.hdf5"),
    )
    n_train = [500, 1000, 2000, 5000, 10000, 20000]
    df_un = cr.calc_cr(ns_train=n_train[:3])
    test = cr.corr_between_test(df_un)

# %% [markdown]
# V1 and V2 are separated by a line along which the azimuth preference reverses, accompanied
# by an increase in receptive field size (σ).
#
# The retinotopy of the two regions is matched by a method similar to importance sampling.

# %%

# %% [markdown]
# There is a sharp increase in the number of sampled neurons at the V1-V2 boundary because
# V1 neurons outnumber V2 neurons by 60%. Furthermore, the azimuthal preferences of V2
# neurons extend to include more of the lateral visual field, reducing the number of
# potential matches with V1.

# %% [markdown]
# We randomly split each region into two for CCA and verify that the split is uniform
# (a rough sketch of such a check follows).
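# %% [markdown]
# A rough sketch of such a uniformity check (illustrative only: the column names and the use
# of a two-sample Kolmogorov-Smirnov test are assumptions, not the repo's actual verification):

# %%
import numpy as np
from scipy.stats import ks_2samp

for region, dfr in cr.df.groupby("region"):
    # Random half/half split of the neurons within this region.
    half = np.random.default_rng(0).integers(0, 2, size=len(dfr))
    for col in ["x", "y", "azimuth", "altitude"]:
        stat, p = ks_2samp(dfr.loc[half == 0, col], dfr.loc[half == 1, col])
        print(f"{region} {col}: KS statistic = {stat:.3f}, p = {p:.2f}")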
    out = np.zeros((5, 2), dtype=np.float32)
    out[GaborFit.KEY["σ"]] = (0.04, 2.0)
    out[GaborFit.KEY["λ"]] = (0.6, 0.85)
    out[GaborFit.KEY["γ"]] = (0.8, 0.5)
    return tuple(map(tuple, out))  # Make immutable.


g = GaborFit(
    n_pc=0,
    optimizer={"name": "adam", "step_size": 2e-2},
    params_init={
        "σ": 2,
        "θ": 0.0,
        "λ": 1.0,
        "γ": 1.5,
        "φ": 0.0,
        "pos_x": 0.0,
        "pos_y": 0.0,
    },
    penalties=penalties(),
).fit(rf_pcaed)
g.plot()
g.save_append(path_loader, overwrite_group=True)

# %% [markdown]
"""
### Diagnostic