Example #1
# component of the noise axis that lies along the decoding axis
noise_on_dec = (np.dot(noise_axis, dec_axis[:, np.newaxis])) * dec_axis
# keep only the part of the noise axis orthogonal to the decoding axis
orth_ax = noise_axis - noise_on_dec
orth_ax = (orth_ax / np.linalg.norm(orth_ax)).squeeze()

# rotation matrix whose columns are [decoding axis, orthogonalized noise axis]
rot_mat = np.concatenate((orth_ax[:, np.newaxis], dec_axis[:, np.newaxis]),
                         axis=-1)[:, ::-1]

f, ax = plt.subplots(1, 2)

t_active = np.matmul(d1['TARGET'].squeeze(), rot_mat)
t_passive = np.matmul(d2['TARGET'].squeeze(), rot_mat)
r_active = np.matmul(d1['REFERENCE'].squeeze(), rot_mat)
r_passive = np.matmul(d2['REFERENCE'].squeeze(), rot_mat)
miss = np.matmul(dm['TARGET'].squeeze(), rot_mat)

t_active_el = cplt.compute_ellipse(t_active[:, 0], t_active[:, 1])
t_passive_el = cplt.compute_ellipse(t_passive[:, 0], t_passive[:, 1])
r_active_el = cplt.compute_ellipse(r_active[:, 0], r_active[:, 1])
r_passive_el = cplt.compute_ellipse(r_passive[:, 0], r_passive[:, 1])
miss_el = cplt.compute_ellipse(miss[:, 0], miss[:, 1])

ax[0].scatter(t_passive[:, 0],
              t_passive[:, 1],
              color='red',
              alpha=0.3,
              s=15,
              edgecolor='none')
ax[0].scatter(r_passive[:, 0],
              r_passive[:, 1],
              color='blue',
              alpha=0.3,
              s=15,
              edgecolor='none')
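Every example on this page calls `cplt.compute_ellipse(x, y)` and plots the result as `ax.plot(el[0], el[1])`, so the function evidently returns a 2 x N array tracing an ellipse around a 2-D point cloud. If `charlieTools` is not available, a minimal stand-in can be sketched from that usage; the one-standard-deviation scaling and point count below are assumptions, not the library's actual defaults.

import numpy as np

def compute_ellipse(x, y, sd=1, n_points=100):
    """Stand-in sketch: trace the sd-standard-deviation ellipse of the
    2-D samples (x, y), centered on their mean. Returns a (2, n_points) array."""
    data = np.vstack([x, y])
    mu = data.mean(axis=1, keepdims=True)
    evals, evecs = np.linalg.eigh(np.cov(data))        # 2x2 sample covariance
    t = np.linspace(0, 2 * np.pi, n_points)
    circle = np.vstack([np.cos(t), np.sin(t)])
    # stretch the unit circle by sd * sqrt(eigenvalue) along each eigenvector
    return evecs @ (sd * np.sqrt(evals)[:, np.newaxis] * circle) + mu

Used exactly as in the snippets: el = compute_ellipse(t_active[:, 0], t_active[:, 1]); ax[0].plot(el[0], el[1]).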
Example #2
ax[0].set_ylabel('Explained variance')
ax[0].set_xlabel('PC')

# reduce dimensionality and plot in TDR dimensions
Y = dr.get_one_hot_matrix(2, Ntrials)
Xflat = nat_preproc.flatten_X(X[:, :, :, np.newaxis])
tdr = dr.TDR()
tdr.fit(Xflat.T, Y.T)
Xtdr = Xflat.T.dot(tdr.weights.T).T
Xtdr = nat_preproc.fold_X(Xtdr, nreps=Ntrials, nstim=2, nbins=1).squeeze()

# run dprime analysis on the reduced data, and on the raw data
dp_tdr, wopt_tdr, evals_tdr, evecs_tdr, dU_tdr = decoding.compute_dprime(
    Xtdr[:, :, 0], Xtdr[:, :, 1])
dp, wopt, evals, evecs, dU = decoding.compute_dprime(X[:, :, 0], X[:, :, 1])
el1 = cplt.compute_ellipse(Xtdr[0, :, 0], Xtdr[1, :, 0])
el2 = cplt.compute_ellipse(Xtdr[0, :, 1], Xtdr[1, :, 1])

ax[1].scatter(Xtdr[0, :, 0],
              Xtdr[1, :, 0],
              s=20,
              color='r',
              edgecolor='white',
              alpha=0.5)
ax[1].scatter(Xtdr[0, :, 1],
              Xtdr[1, :, 1],
              s=20,
              color='b',
              edgecolor='white',
              alpha=0.5)
ax[1].plot(el1[0], el1[1], color='r', lw=2)
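The `decoding.compute_dprime` call above returns five things: the discriminability d'^2, the optimal linear decoding axis, the noise eigenvalues and eigenvectors, and the mean-difference vector dU. A minimal sketch of the textbook Gaussian computation with that return signature (an assumed stand-in for illustration, not the package's implementation):

import numpy as np

def compute_dprime_sketch(A, B):
    # A, B: (neurons, trials) single-trial responses to two stimuli
    dU = A.mean(axis=1) - B.mean(axis=1)          # signal axis, Delta mu
    sigma = 0.5 * (np.cov(A) + np.cov(B))         # pooled noise covariance
    wopt = np.linalg.solve(sigma, dU)             # optimal axis: Sigma^-1 dU
    dp2 = dU @ wopt                               # d'^2 = dU^T Sigma^-1 dU
    evals, evecs = np.linalg.eigh(sigma)          # noise eigenspectrum
    return dp2, wopt, evals, evecs, dU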
Example #3
rawax = plt.subplot2grid((1, 3), (0, 0))    # raw-data panel (cut off from the top of this snippet)
noiseax = plt.subplot2grid((1, 3), (0, 1))
tdrax = plt.subplot2grid((1, 3), (0, 2))

# Define 3 stimulus clouds, A, B, C, all with identical covariance matrices
k = 100
cov = np.array([[1, 0.4], [0.4, 1]])
u1 = [2.5, -2.5]
u2 = [2.5, 2.5]
u3 = [-2.5, 2.5]

X1 = np.random.multivariate_normal(u1, cov, k)
X2 = np.random.multivariate_normal(u2, cov, k)
X3 = np.random.multivariate_normal(u3, cov, k)

# plot raw data
el1 = cplt.compute_ellipse(X1[:, 0], X1[:, 1])
el2 = cplt.compute_ellipse(X2[:, 0], X2[:, 1])
el3 = cplt.compute_ellipse(X3[:, 0], X3[:, 1])

rawax.scatter(X1[:, 0], X1[:, 1], s=15, alpha=0.3, color='c', edgecolor='none')
rawax.scatter(X2[:, 0], X2[:, 1], s=15, alpha=0.3, color='m', edgecolor='none')
rawax.scatter(X3[:, 0], X3[:, 1], s=15, alpha=0.3, color='y', edgecolor='none')

rawax.plot(el1[0], el1[1], lw=2, color='c', label='A')
rawax.plot(el2[0], el2[1], lw=2, color='m', label='B')
rawax.plot(el3[0], el3[1], lw=2, color='y', label='C')

rawax.set_xlim((-6, 6))
rawax.set_ylim((-6, 6))

rawax.legend(frameon=False)
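The `noiseax` and `tdrax` panels set up at the top of this snippet are filled in code that was cut off. For illustration only, a shared noise axis for clouds like these is commonly estimated as the leading PC of the mean-centered (stimulus-subtracted) responses; a sketch under that assumption:

# estimate a shared noise axis: leading eigenvector of the pooled residual covariance
residuals = np.concatenate([X1 - X1.mean(axis=0),
                            X2 - X2.mean(axis=0),
                            X3 - X3.mean(axis=0)], axis=0)
evals, evecs = np.linalg.eigh(np.cov(residuals.T))
noise_axis = evecs[:, np.argmax(evals)]

# overlay the noise axis on the raw-data panel
rawax.plot([-4 * noise_axis[0], 4 * noise_axis[0]],
           [-4 * noise_axis[1], 4 * noise_axis[1]],
           linestyle='--', color='k', lw=1, label='noise axis')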
Example #4
                       rotation=45)
g.axes.set_title(r"$\Delta d'^2$" + "\nExplained variance")
ax[1, 0].axvline(0, linestyle='--', color='grey')
ax[1, 1].axhline(0, linestyle='--', color='grey')
ax[1, 0].set_title(r"$\Delta d'^2$" + "\nRegression coefficients")

# plot simulated data for large / small pupil to illustrate changes in dU and lambda

# small pupil
np.random.seed(123)
u1 = [-1, .1]
u2 = [1, -.2]
cov = np.array([[1, 0.5], [0.5, 1]])
A = np.random.multivariate_normal(u1, cov, (200, ))
B = np.random.multivariate_normal(u2, cov, (200, ))
Ael = cplt.compute_ellipse(A[:, 0], A[:, 1])
Bel = cplt.compute_ellipse(B[:, 0], B[:, 1])

ax[0, 0].scatter(B[:, 0].mean(),
                 B[:, 1].mean(),
                 edgecolor='k',
                 s=50,
                 color='tab:orange')
ax[0, 0].scatter(A[:, 0].mean(),
                 A[:, 1].mean(),
                 edgecolor='k',
                 s=50,
                 color='tab:blue')
ax[0, 0].plot(Ael[0], Ael[1], color='tab:blue', lw=2)
ax[0, 0].plot(Bel[0], Bel[1], color='tab:orange', lw=2)
ax[0, 0].set_title("Small Pupil", color=color.SMALL)
Example #5
        if idx==0:
            color = 'tab:blue'
            lw = 2
            zorder = 10
            idx += 1
        elif idx==1:
            color = 'tab:orange'
            lw = 2
            zorder = 10
    else:
        color = 'lightgrey'
        lw = 0.7
        zorder = -1
    r = proj[i, :, :]
    bp = pup_mask[:, i]
    el = cplt.compute_ellipse(r[bp, 0], r[bp, 1])
    ax[0, 0].plot(el[0], el[1], lw=lw, color=color, zorder=zorder)
    el = cplt.compute_ellipse(r[~bp, 0], r[~bp, 1])
    ax[0, 1].plot(el[0], el[1], lw=lw, color=color, zorder=zorder)

ax[0, 0].axhline(0, linestyle='--', color='k', zorder=-1); ax[0, 0].axvline(0, linestyle='--', color='k', zorder=-1)
ax[0, 1].axhline(0, linestyle='--', color='k', zorder=-1); ax[0, 1].axvline(0, linestyle='--', color='k', zorder=-1)

ax[0, 0].set_xlabel(r"Stim $PC_1$")
ax[0, 0].set_ylabel(r"Stim $PC_2$")
ax[0, 0].set_title("Large pupil")
ax[0, 1].set_xlabel(r"Stim $PC_1$")
ax[0, 1].set_ylabel(r"Stim $PC_2$")
ax[0, 1].set_title("Small pupil")

# share axes
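The boolean `pup_mask` indexing big- vs. small-pupil trials above is built outside this snippet. A common construction in pupil-indexed analyses is a median split of per-trial pupil size; a sketch using a hypothetical `pupil_per_trial` array (not a variable from the original code):

# pupil_per_trial: hypothetical (trials,) array of mean pupil size for one stimulus
big_pupil = pupil_per_trial >= np.median(pupil_per_trial)   # True = big-pupil trial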
Example #6
                           figsize=(12, 6),
                           sharex=True,
                           sharey=True)
    for j, f in enumerate(files):
        pca = PCA()
        _R = np.zeros((len(targets), R.shape[-1]))
        for i, t in enumerate(targets):
            _R[i, :] = R[f_masks[f][:, 0] & tar_masks[t][:, 0], :].mean(axis=0)
        pca.fit(_R)

        # now, project each target single trials onto the first two PCs
        for i, t in enumerate(targets):
            tar_resp = R[f_masks[f][:, 0] & tar_masks[t][:, 0], :]
            proj = np.matmul(tar_resp, pca.components_[:2, :].T)
            ax[j].plot(proj[:, 0], proj[:, 1], '.', label=t)
            el = cplt.compute_ellipse(proj[:, 0], proj[:, 1])
            ax[j].plot(el[0, :],
                       el[1, :],
                       color=ax[j].get_lines()[-1].get_color())
        ax[j].legend(frameon=False, fontsize=8)
        ax[j].set_xlabel('PC1', fontsize=8)
        ax[j].set_ylabel('PC2', fontsize=8)
        ax[j].set_title(f)
        #ax[j].set_aspect(cplt.get_square_asp(ax[j]))
        ax[j].set_aspect('equal')
    fig.tight_layout()
    fig.canvas.set_window_title(site)

    # for each file, compute 1st PC independently, then project all single trials, over time, onto the
    # single axis
    fig, ax = plt.subplots(len(files), 1)
Example #7
            ax.axvline(st, color=cols(j), lw=1)
            ax.axvline(en, color=cols(j), lw=1)
            yl = ax.get_ylim()[-1]
            yll = ax.get_ylim()[0]
            ax.plot([st, en], [yl, yl], color=cols(j), lw=1)
            ax.plot([st, en], [yll, yll], color=cols(j), lw=1)
ax.set_xlabel("Time (ms)")
ax.set_xticks(np.linspace(0, spec['data'].shape[-1], 4))
ax.set_xticklabels([0, 250, 500, 750])

# plot pc ellipse plots
ms = 2
lw = 2
for i in range(proj.shape[0]):
    if i in ev_bins:
        el = cplt.compute_ellipse(proj[i, :, 0], proj[i, :, 1])
        el_all.plot(el[0], el[1], color='lightgrey', lw=1, zorder=-1)
        bp = pup_mask[:, i]
        el = cplt.compute_ellipse(proj[i, bp, 0], proj[i, bp, 1])
        el_bp.plot(el[0], el[1], color='lightgrey', lw=1, zorder=-1)
        el = cplt.compute_ellipse(proj[i, ~bp, 0], proj[i, ~bp, 1])
        el_sp.plot(el[0], el[1], color='lightgrey', lw=1, zorder=-1)

# ellipses for the first stimulus: all trials, then big-pupil and small-pupil subsets
el1 = cplt.compute_ellipse(pr1[:, 0], pr1[:, 1])
el_all.plot(el1[0], el1[1], color='tab:blue', lw=lw)
el_all.scatter(pr1[:, 0], pr1[:, 1], color='tab:blue', s=ms)
bp = pup_mask[:, stims[0]]
el1 = cplt.compute_ellipse(pr1[bp, 0], pr1[bp, 1])
el_bp.plot(el1[0], el1[1], color='tab:blue', lw=lw)
el1 = cplt.compute_ellipse(pr1[~bp, 0], pr1[~bp, 1])
el_sp.plot(el1[0], el1[1], color='tab:blue', lw=lw)
Example #8
import numpy as np
import charlieTools.plotting as cplt
import matplotlib.pyplot as plt

bp_cov = np.array([[1, 0.01], [0.01, 1]])  # big pupil: nearly uncorrelated noise
sp_cov = np.array([[1, 2], [2, 1]])        # small pupil: strongly correlated (not positive semi-definite, so NumPy will warn)

bp = np.random.multivariate_normal([0, 0], bp_cov, (1000, ))
sp = np.random.multivariate_normal([0, 0], sp_cov, (1000, ))

f, ax = plt.subplots(2, 2)

ax[0, 0].plot(bp[:, 0], bp[:, 1], '.', color='firebrick', alpha=0.2)
ax[0, 0].plot(sp[:, 0], sp[:, 1], '.', color='navy', alpha=0.2)
bp_el = cplt.compute_ellipse(bp[:, 0], bp[:, 1])
sp_el = cplt.compute_ellipse(sp[:, 0], sp[:, 1])
ax[0, 0].plot(bp_el[0, :], bp_el[1, :], color='firebrick', lw=2)
ax[0, 0].plot(sp_el[0, :], sp_el[1, :], color='navy', lw=2)

ax[0, 0].set_aspect(cplt.get_square_asp(ax[0, 0]))

# diff of cov matrices
cov = np.cov(bp.T) - np.cov(sp.T)
ax[0, 1].imshow(cov, cmap='seismic')
ax[0, 1].set_aspect(cplt.get_square_asp(ax[0, 1]))
ax[0, 1].set_title('difference of cov matrices', fontsize=8)

# find the top eigenvector of covariance matrix
ev, eg = np.linalg.eig(cov)
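np.linalg.eig returns the spectrum unsorted, and for a difference of covariance matrices some eigenvalues are negative, so the "top" axis is typically taken by absolute eigenvalue. A possible continuation (illustrative, not the original file's code):

# sort by absolute eigenvalue; eg columns are the eigenvectors
order = np.argsort(np.abs(ev))[::-1]
ev, eg = ev[order], eg[:, order]
top_axis = eg[:, 0]

# overlay the axis of largest covariance change on the scatter panel
ax[0, 0].plot([-3 * top_axis[0], 3 * top_axis[0]],
              [-3 * top_axis[1], 3 * top_axis[1]],
              color='k', lw=2, zorder=3)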
Example #9
    if zscore:
        dp = {k: (v.transpose(0, -1, 1) - m).transpose(0, -1, 1)  for (k, v) in dp.items()}
        dp = {k: (v.transpose(0, -1, 1) / sd).transpose(0, -1, 1)  for (k, v) in dp.items()}

    # project active / passive responses onto PCA plane
    fig, ax = plt.subplots(2, 2, figsize=(10, 10), sharex=True, sharey=True)
    colors = plt.cm.get_cmap('viridis', len(ref_stims))
    colors_tar = plt.cm.get_cmap('rainbow', len(sounds))
    col = 0
    col_tar = 0
    for e in all_stims:
        if e in ref_stims:
            try:
                passive = dp[e].mean(axis=-1).dot(pc_axes.T)
                ax[0, 0].plot(passive[:, 0], passive[:, 1], alpha=0.3, marker='.', lw=0, color=colors(col))
                el = compute_ellipse(passive[:, 0], passive[:, 1])
                ax[0, 0].plot(el[0], el[1], lw=2, color=ax[0, 0].get_lines()[-1].get_color())
            except:
                pass

            try:
                active = da[e].mean(axis=-1).dot(pc_axes.T)
                ax[0, 1].plot(active[:, 0], active[:, 1], alpha=0.3, marker='.', lw=0, color=colors(col))
                el = compute_ellipse(active[:, 0], active[:, 1])
                ax[0, 1].plot(el[0], el[1], lw=2, color=ax[0, 1].get_lines()[-1].get_color())
            except: 
                pass
            col += 1
        else:
            try:
                passive = dp[e].mean(axis=-1).dot(pc_axes.T)
Example #10
X1 = X_raw + x1_noise
# dataset two
X2 = X_raw + x2_noise

# fit dDR to all data
xall = np.concatenate((X1, X2), axis=1)
tdr = TDR(tdr2_init=noise_axis)
tdr.fit(xall[:, :, 0].T, xall[:, :, 1].T)

# project each dataset / stimulus onto the two dDR axes (trials x 2)
x11 = X1[:, :, 0].T.dot(tdr.weights.T)
x12 = X1[:, :, 1].T.dot(tdr.weights.T)
x21 = X2[:, :, 0].T.dot(tdr.weights.T)
x22 = X2[:, :, 1].T.dot(tdr.weights.T)

ax[0].scatter(x11[:, 0], x11[:, 1], color='tab:blue', s=10, edgecolor='white')
el = compute_ellipse(x11[:, 0], x11[:, 1])
ax[0].plot(el[0], el[1], color='tab:blue', lw=2)
ax[0].scatter(x12[:, 0],
              x12[:, 1],
              color='tab:orange',
              s=10,
              edgecolor='white')
el = compute_ellipse(x12[:, 0], x12[:, 1])
ax[0].plot(el[0], el[1], color='tab:orange', lw=2)
ax[0].set_xlabel(r"$dDR_1 (\Delta \mu)$")
ax[0].set_ylabel(r"$dDR_2$")
tot_var = round(np.var(X1, axis=(1, 2)).sum(), 2)
tdr_var = round(np.var(np.stack([x11, x12]), axis=(0, 1)).sum(), 2)
ax[0].set_title(f"Low\nfull dataset var: {tot_var}, dDR var: {tdr_var}")

ax[1].scatter(x21[:, 0], x21[:, 1], color='tab:blue', s=10, edgecolor='white')
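TDR(tdr2_init=noise_axis) seeds the second dDR dimension with a precomputed noise axis; the projection axes then follow the construction spelled out in Example #1: the first axis is the normalized mean difference (Delta mu) between the two stimuli, and the second is the noise axis orthogonalized against it. A sketch of that construction (assumed from that example, not taken from the TDR source):

import numpy as np

def ddr_weights(xA, xB, noise_axis):
    # xA, xB: (neurons, trials) responses to the two stimuli
    dU = xA.mean(axis=1) - xB.mean(axis=1)
    dU = dU / np.linalg.norm(dU)                           # dDR axis 1: signal (Delta mu)
    noise_orth = noise_axis - (noise_axis @ dU) * dU       # remove the component along dU
    noise_orth = noise_orth / np.linalg.norm(noise_orth)   # dDR axis 2: orthogonalized noise
    return np.stack([dU, noise_orth])                      # (2, neurons), like tdr.weights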
Example #11
                                      norm=True,
                                      equal_tbin=True)

# plot data. d1 is active, d2 is big pupil

# Don't plot REF or TARGET ID alone
plot_epochs = [e for e in d1.keys() if (e != 'TARGET') & (e != 'REFERENCE')]

# get ref / tar decoding axis for active / passive
di.get_null_axis(d1['REFERENCE'].squeeze(), d1['TARGET'].squeeze())

f, ax = plt.subplots(1, 2)

for ep in plot_epochs:

    active_el = cplt.compute_ellipse(d1[ep][:, 0], d1[ep][:, 1])
    passive_el = cplt.compute_ellipse(d2[ep][:, 0], d2[ep][:, 1])

    if 'TORC' in ep:
        ax[0].plot(active_el[0, :], active_el[1, :], color='navy')
        ax[1].plot(passive_el[0, :], passive_el[1, :], color='navy')

    else:
        ax[0].plot(active_el[0, :], active_el[1, :], color='firebrick')
        ax[1].plot(passive_el[0, :], passive_el[1, :], color='firebrick')

ax[0].axhline(0, linestyle='--', color='k')
ax[0].axvline(0, linestyle='--', color='k')

ax[1].axhline(0, linestyle='--', color='k')
ax[1].axvline(0, linestyle='--', color='k')
Example #12
            Xsp[:, spi, s] = X[:, t, s]
            spi += 1


pca = PCA()
pca.fit(X.mean(axis=1).T)

pax = pca.components_[0:2, :].T
#pax = np.concatenate((pca.components_[[0], :], rv), axis=0).T
proj = X.T.dot(pax)
bproj = Xbp.T.dot(pax)
sproj = Xsp.T.dot(pax)

f, ax = plt.subplots(1, 3, figsize=(18, 6), sharex=True, sharey=True)
for i in range(0, proj.shape[0]):
    el = cplt.compute_ellipse(proj[i, :, 0], proj[i, :, 1])
    ax[0].plot(el[0], el[1], color='grey', lw=0.5)
    el = cplt.compute_ellipse(bproj[i, :, 0], bproj[i, :, 1])
    ax[1].plot(el[0], el[1], color='grey', lw=0.5)
    el = cplt.compute_ellipse(sproj[i, :, 0], sproj[i, :, 1])
    ax[2].plot(el[0], el[1], color='grey', lw=0.5)

ax[0].axis('square')
for a in ax.flatten():
    a.axvline(0, linestyle='--', color='k', zorder=-1, lw=1)
    a.axhline(0, linestyle='--', color='k', zorder=-1, lw=1)
ax[0].set_title("All data")
ax[1].set_title("Big pupil")
ax[2].set_title("Small pupil")

f.tight_layout()
Example #13
        f, ax = plt.subplots(2, len(targets), figsize=(int(len(targets) * 4), 8), sharey=True, sharex=True)
        f2, ax2 = plt.subplots(2, len(targets), figsize=(int(len(targets) * 4), 8), sharey=True, sharex=True)
        for i, (t, snr) in enumerate(zip(targets, snrs)):
            # ACTIVE DATA
            Respc = rec['resp'].extract_epoch(catch[0], mask=ra['mask'])[:, :, start:end].mean(axis=-1)
            respc = Respc.dot(noise_axes)
            Respt = rec['resp'].extract_epoch(t, mask=ra['mask'])[:, :, start:end].mean(axis=-1)
            respt = Respt.dot(noise_axes)

            crj_mask = rec['resp'].epoch_to_signal('CORRECT_REJECT_TRIAL').extract_epoch(catch[0], mask=ra['mask'])[:, :, start:end].mean(axis=-1).squeeze().astype(bool)
            ich_mask = rec['resp'].epoch_to_signal('INCORRECT_HIT_TRIAL').extract_epoch(catch[0], mask=ra['mask'])[:, :, start:end].mean(axis=-1).squeeze().astype(bool)
            hit_mask = rec['resp'].epoch_to_signal('HIT_TRIAL').extract_epoch(t, mask=ra['mask'])[:, :, start:end].mean(axis=-1).squeeze().astype(bool)
            miss_mask = rec['resp'].epoch_to_signal('MISS_TRIAL').extract_epoch(t, mask=ra['mask'])[:, :, start:end].mean(axis=-1).squeeze().astype(bool)
            if crj_mask.sum()>2:
                ax[0, i].scatter(respc[crj_mask, 0], respc[crj_mask, 1], color=cmap['CORRECT_REJECT_TRIAL'])
                el = compute_ellipse(respc[crj_mask, 0], respc[crj_mask, 1])
                ax[0, i].plot(el[0], el[1], color=cmap['CORRECT_REJECT_TRIAL'], lw=2, label='C.R.')
            if ich_mask.sum()>2:
                ax[0, i].scatter(respc[ich_mask, 0], respc[ich_mask, 1], color=cmap['INCORRECT_HIT_TRIAL'])
                el = compute_ellipse(respc[ich_mask, 0], respc[ich_mask, 1])
                ax[0, i].plot(el[0], el[1], color=cmap['INCORRECT_HIT_TRIAL'], lw=2, label='F.A.')
            if hit_mask.sum()>2:
                ax[0, i].scatter(respt[hit_mask, 0], respt[hit_mask, 1], color=cmap['HIT_TRIAL'])
                el = compute_ellipse(respt[hit_mask, 0], respt[hit_mask, 1])
                ax[0, i].plot(el[0], el[1], color=cmap['HIT_TRIAL'], lw=2, label='HIT')
            if miss_mask.sum()>2:
                ax[0, i].scatter(respt[miss_mask, 0], respt[miss_mask, 1], color=cmap['MISS_TRIAL'])
                el = compute_ellipse(respt[miss_mask, 0], respt[miss_mask, 1])
                ax[0, i].plot(el[0], el[1], color=cmap['MISS_TRIAL'], lw=2, label='MISS')

            catchel = compute_ellipse(respc[:, 0], respc[:, 1])
Example #14
# ======================== this works very poorly for many stimuli ===========================
# tricky thing is, it maximizes covariance with a reduced representation of Y too, which makes
# it a little tricky to interpret, but I guess looking at the y_weights tells you which stimuli
# drive the most variance in X
pls = PLSRegression(n_components=2, max_iter=100, tol=1e-7, scale=False)
pls.fit(X_scale.T, Y.T)

# reshape projections to ID stimuli, then plot the data in reduced space
# (x_scores_ is (n_samples, n_components), so transpose before reshaping)
X_new = pls.x_scores_.T.reshape(2, nreps, nstim)
Y_new = pls.y_scores_.T.reshape(2, nreps, nstim)

f, ax = plt.subplots(1, 2, figsize=(10, 5))
for s in range(nstim):

    ax[0].plot(X_new[0, :, s], X_new[1, :, s], '.')
    el = compute_ellipse(X_new[0, :, s], X_new[1, :, s])
    ax[0].plot(el[0], el[1], color=ax[0].get_lines()[-1].get_color())

    ax[1].scatter(Y_new[0, :, s], Y_new[1, :, s], s=10)

f.suptitle('PLS on single trial data')
ax[0].set_xlabel('PLS 1')
ax[0].set_ylabel('PLS 2')
ax[0].set_title('X data')
ax[1].set_title('Y data')

# ============== perform dimensionality reduction with trial averaged PCA ====================
X_center = X_trial_average - X_trial_average.mean(axis=-1, keepdims=True)
pca = PCA(n_components=2)
pca.fit(X_center.T)
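The snippet ends right after fitting PCA to the trial-averaged, mean-centered responses. Following the pattern of the other examples on this page, the natural next step is to project responses onto those two PCs; a minimal sketch (assuming X_center is neurons x stimuli, as implied by pca.fit(X_center.T)):

# one point per stimulus in trial-averaged PC space
proj_avg = pca.transform(X_center.T)      # (nstim, 2)

f2, ax2 = plt.subplots(1, 1, figsize=(4, 4))
ax2.scatter(proj_avg[:, 0], proj_avg[:, 1], s=25)
ax2.set_xlabel('PC1')
ax2.set_ylabel('PC2')
ax2.set_title('Trial-averaged PCA')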