Example #1
0
    f"Mean MI: {MI.mean()}, sem: {MI.std() / np.sqrt(MI.shape[0])} \n n total units: {MI.shape[0]}"
)

# histogram of per-unit modulation index with a dashed reference line at 0
miax.hist(MI, bins=mi_bins, edgecolor='white', color='grey')
miax.axvline(0, linestyle='--', color='k')
miax.set_xlabel('Modulation Index')
miax.set_ylabel(r"$n$ units")

# get bootstrapped p-value
# BUG FIX: was np.random.normal(123), which draws (and discards) one sample
# from N(123, 1) instead of seeding the RNG. Seeding matches the intent and
# the np.random.seed(123) pattern used by the other stats blocks in this file.
np.random.seed(123)
# group MI values by recording site for hierarchical bootstrapping
d = {
    s: MI[np.argwhere(df['site'].values == s).squeeze()]
    for s in df['site'].unique()
}
bootsample = get_bootstrapped_sample(d, even_sample=False, nboot=1000)
p = get_direct_prob(bootsample, np.zeros(len(bootsample)))[0]
# annotate p-value at the left edge, just below the top of the axis
miax.text(mi_bins.min(), miax.get_ylim()[-1] - 2, r"p=%s" % round(p, 4))

# plot noise correlation
rsc_path = '/auto/users/hellerc/results/nat_pupil_ms/noise_correlations/'
rsc_df = ld.load_noise_correlation('rsc_ev',
                                   xforms_model='NULL',
                                   path=rsc_path)
# keep only pairs with both big-pupil (bp) and small-pupil (sp) estimates
mask = ~(rsc_df['bp'].isna() | rsc_df['sp'].isna())
rsc_df = rsc_df[mask]
# per-site small-minus-big pupil noise correlation differences
d = {}
for site in rsc_df.site.unique():
    site_rows = rsc_df.loc[rsc_df.site == site]
    d[site] = site_rows['sp'].values - site_rows['bp'].values
bootsample = get_bootstrapped_sample(d, even_sample=False, nboot=1000)
Example #2
0
        vals = np.random.normal(trueDiff, sd, nPairs[s])
        d[s] = vals
        siteID.extend(s * np.ones(nPairs[s]).tolist())
        all_vals.extend(vals)

    # one mean difference per simulated site
    grouped_vals = [d[site].mean() for site in range(nSites)]

    # generate bootstrapped samples, with and without even per-site sampling
    bs_even = stats.get_bootstrapped_sample(d, nboot=1000, even_sample=True)
    bs_weighted = stats.get_bootstrapped_sample(d, nboot=1000,
                                                even_sample=False)

    # get pvalue for each method (and for standard Wilcoxon over the population, ignoring sites, one sided)
    # get pvalue for each method (and for standard Wilcoxon over the population, ignoring sites, one sided)
    p_even = round(
        1 - stats.get_direct_prob(np.zeros(len(bs_even)), bs_even)[0], 5)
    # BUG FIX: the zero reference length must track bs_weighted, not bs_even.
    # Both happen to be nboot=1000 today, so results are unchanged, but the
    # original would break if the two bootstrap sizes ever diverge.
    p_weighted = round(
        1 - stats.get_direct_prob(np.zeros(len(bs_weighted)), bs_weighted)[0],
        5)
    p_wilcox = round(
        ss.wilcoxon(all_vals, np.zeros(len(all_vals)),
                    alternative='greater').pvalue, 5)
    # Wilcoxon on the site means (one value per site)
    p_wilcoxg = round(
        ss.wilcoxon(grouped_vals,
                    np.zeros(len(grouped_vals)),
                    alternative='greater').pvalue, 5)

    # plot results

    if plot:
        # NOTE(review): this assignment is dead — `bins` is immediately
        # re-assigned on the next line from the bootstrap samples.
        bins = np.linspace(-0.005, 0.02, 100)
        bins = np.linspace(min(np.append(bs_even, bs_weighted)),
Example #3
0
               vmin=-1,
               vmax=1)

plt.show()

# stats tests
# noise variance change
np.random.seed(123)  # fixed seed so the bootstrap is reproducible
nboots = 1000
# per-site arrays of pupil-related noise-variance change ('lambda_diff')
ds = {}
for site in df_cut.site.unique():
    ds[site] = df_cut[(df_cut.site == site)]['lambda_diff'].values
ds_boot = stats.get_bootstrapped_sample(ds, nboot=nboots)

# one-sided bootstrap probability against a zero reference
p = 1 - stats.get_direct_prob(ds_boot, np.zeros(nboots))[0]

print("big pupil variance vs. small pupil variance: \n"
      f"p = {p}\n"
      f"mean = {np.mean(ds_boot)}\n"
      f"sem  = {np.std(ds_boot) / np.sqrt(nboots)}\n")

# same bootstrap test for the pupil-related magnitude change ('mag_diff')
nboots = 1000
ds = {
    s: df_cut[(df_cut.site == s)]['mag_diff'].values
    for s in df_cut.site.unique()
}
ds_boot = stats.get_bootstrapped_sample(ds, nboot=nboots)

# NOTE(review): argument order is flipped relative to the lambda_diff test
# above (there: get_direct_prob(ds_boot, zeros); here: zeros first). That
# changes which tail is tested — confirm the intended direction.
p = 1 - stats.get_direct_prob(np.zeros(nboots), ds_boot)[0]
Example #4
0
            data=r2a.melt(),
            palette=palette,
            width=0.3,
            showfliers=False,
            linewidth=1,
            ax=reg1)
# zero reference line and axis cosmetics for the R^2 panel
reg1.axhline(0, linestyle='--', color='k')
reg1.set_xlabel("Regressor")
reg1.set_ylabel(r"$R^2$ (unique)")
n_regressors = r2a.shape[1]
reg1.set_xticks(range(n_regressors))
reg1.set_xticklabels([''] * n_regressors)
# add pvalue for each regressor
ym = reg1.get_ylim()[-1]
print('r-squared (overall)')
# zero reference, one entry per bootstrap row (loop-invariant, built once)
null_ref = np.zeros(r2a.shape[0])
for i, r in enumerate(r2a.keys()):
    p = get_direct_prob(r2a[r], null_ref)[0]
    print(f"{r}, {p}, {r2a[r].mean()}")
    reg1.text(i - 0.3, ym, f"p={p:.4f}", fontsize=6)

# keep the full model plus the unique-regressor columns (names starting 'u')
keep_cols = [col for col in r2_delta if col == 'full' or col.startswith('u')]
r2d = r2_delta[keep_cols]
sns.boxplot(x='variable',
            y='value',
            data=r2d.melt(),
            ax=reg2,
            palette=palette,
            width=0.3,
            linewidth=1,
            showfliers=False)
reg2.axhline(0, color='k', linestyle='--')
reg2.set_xlabel("Regressor")
reg2.set_ylabel(r"$R^2$ (unique)")
Example #5
0
# label the delta-d'^2 panel: one tick per model dimensionality
ax[1].set_ylabel(r"$\Delta d'^2$")
ax[1].set_xticks([0, 1, 2])
ax[1].set_xticklabels(['TDR', 'TDR+1', 'TDR+2'])
ax[1].set_xlabel('Number of dimensions')

f.tight_layout()

# write the finished figure to disk
f.savefig(fig_fn)

# compute stats
np.random.seed(123)  # reproducible bootstrap
# per-site difference in test-set d'^2: TDR+1 minus TDR
d = {}
for site in df_dp.site.unique():
    d[site] = (dfn1_dp.loc[dfn1_dp.site == site, 'dp_opt_test'].values
               - df_dp.loc[df_dp.site == site, 'dp_opt_test'].values)
bootstat = stats.get_bootstrapped_sample(d, nboot=5000)
p = 1 - stats.get_direct_prob(np.zeros(len(bootstat)), bootstat)[0]

print("========== OVERALL DPRIME ================")
print("TDR+1 vs. TDR: pval: {0}".format(p))

# percent improvement of TDR+1 over TDR, averaged across sites
tdr1_site_mean = dfn1_dp.groupby(by='site').mean()['dp_opt_test']
tdr_site_mean = df_dp.groupby(by='site').mean()['dp_opt_test']
print("Mean percent improvement: {0}".format(
    (100 * (tdr1_site_mean - tdr_site_mean) / tdr_site_mean).mean()))

# same comparison for the second added dimension: TDR+2 minus TDR+1
d = {}
for site in df_dp.site.unique():
    d[site] = (dfn2_dp.loc[dfn2_dp.site == site, 'dp_opt_test'].values
               - dfn1_dp.loc[dfn1_dp.site == site, 'dp_opt_test'].values)
bootstat = stats.get_bootstrapped_sample(d, nboot=5000)
p = 1 - stats.get_direct_prob(np.zeros(len(bootstat)), bootstat)[0]

print("TDR+2 vs. TDR+1: pval: {0}".format(p))
Example #6
0
            y='value',
            data=r2p.melt(),
            color='lightgrey',
            width=0.3,
            showfliers=False,
            linewidth=2,
            ax=ax[0])
# zero reference line and axis cosmetics for the unique-R^2 panel
ax[0].axhline(0, linestyle='--', color='k')
ax[0].set_xlabel("Regressor")
ax[0].set_ylabel(r"$R^2$ (unique)")
ax[0].set_xticks(range(r2p.shape[1]))
ax[0].set_xticklabels(['Full Model'] + xlab, rotation=45)
# add pvalue for each regressor
ym = ax[0].get_ylim()[-1]
for i, r in enumerate(r2p.keys()):
    # NOTE(review): the loop iterates r2p's columns but indexes into r2 —
    # presumably r2p is a column subset of r2 so the keys line up; confirm,
    # otherwise this should read r2p[r] (cf. the r2a loop elsewhere, which
    # uses the same frame for both).
    p = get_direct_prob(r2[r], np.zeros(nboot))[0]
    ax[0].text(i - 0.3, ym, f"p={p:.4f}", fontsize=6)

# plot coefficients
sns.boxplot(x='variable',
            y='value',
            data=coef.melt(),
            ax=ax[1],
            color='lightgrey',
            width=0.3,
            linewidth=2,
            showfliers=False)
ax[1].axhline(0, color='k', linestyle='--')
ax[1].set_xlabel("Regressor")
ax[1].set_ylabel("Regression coefficient (normalized)")
ax[1].set_xticks(range(coef.shape[1]))
Example #7
0
# small-pupil d'^2 for the same (idx, sidx) selection as bp above
sp = df_all['sp_dp'].values[idx][sidx]
s = 5  # scatter marker size
# color each point by local density of the 2-D (bp, sp) cloud
xy = np.vstack([bp, sp])
density = gaussian_kde(xy)
z = density(xy)
scax.scatter(sp, bp, c=z, s=s, cmap='inferno')
scax.plot([0, 100], [0, 100], 'k--')  # unity line
scax.set_ylabel("Large pupil")
scax.set_xlabel("Small pupil")
scax.set_title(r"Stimulus discriminability ($d'^2$)")
scax.axis('square')

# get statistics for all data
# raw big-minus-small pupil change in d'^2 (left unnormalized here)
df_all['delta'] = df_all['bp_dp'] - df_all['sp_dp']
d = {}
for site in df_all.site.unique():
    d[site] = df_all[df_all.site == site]['delta'].values
bs = get_bootstrapped_sample(d, even_sample=False, nboot=1000)
p = get_direct_prob(bs, np.zeros(bs.shape[0]))[0]

print(f"mean large pupil d': {df_all['bp_dp'].mean()}, {df_all['bp_dp'].sem()}")
print(f"mean small pupil d': {df_all['sp_dp'].mean()}, {df_all['sp_dp'].sem()}")
print(f"pval (bootstrapped): {p}")
# number of stimulus pairs contributed by each site
pair_counts = [len(v) for v in d.values()]
print(f"Mean n stimulus pairs per session: {np.mean(pair_counts)}, {np.std(pair_counts) / np.sqrt(len(d.keys()))}")


# per-site fraction of stimulus pairs whose d'^2 decreases with large pupil
frac = []
for s in d.keys():
    frac.append(np.sum(d[s] < 0) / len(d[s]))
# BUG FIX: the value labeled "sem" was computed as std/n (np.std(frac)/len(frac));
# the standard error of the mean is std/sqrt(n). Also fixed the "stimlulus"
# typo in the printed message.
print(f"Fraction of stimulus pairs with decreases per site: {np.mean(frac)}, sem: {np.std(frac)/np.sqrt(len(frac))}")


#f.tight_layout()