Example #1
def decode_single_attribute(all_words, enc_words, orig_words, noise_words, 
                            resample=100, c=1000):
    accuracy_dims = []
    for i in range(all_words.shape[1]):
        feat_vals = np.unique(all_words[:, i])
        corr_matrix = np.zeros((len(feat_vals), resample))
        for j, v in enumerate(feat_vals):
            c1_mask = orig_words[:, i] == v
            c1_words = enc_words[all_words[:, i] == v]
            c1 = noise_words[c1_mask].T
            c1 = np.reshape(c1, c1.shape + (1,))

            c2_words = enc_words[np.logical_not(all_words[:, i] == v)]
            c2 = noise_words[np.logical_not(c1_mask)].T
            c2 = np.reshape(c2, c2.shape + (1,))

            # find ideal bound, then decide which noise words are on 
            # either side
            x = np.concatenate((c1_words, c2_words), axis=0)
            y = np.concatenate((np.ones(c1_words.shape[0]), 
                                -np.ones(c2_words.shape[0])))
            clf = svm.SVC(kernel='linear', C=c)
            clf.fit(x, y)
            c1_class = clf.predict(c1[:, :, 0].T) == 1
            c2_class = clf.predict(c2[:, :, 0].T) == -1
            corr = np.concatenate((c1_class, c2_class), axis=0)
            corr_matrix[j] = u.bootstrap_list(corr, np.mean, resample)
        accuracy_dims.append(corr_matrix)
    return accuracy_dims
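
Every example on this page relies on u.bootstrap_list, called either positionally, as u.bootstrap_list(data, func, n), or with the keyword n=..., and occasionally with an out_shape keyword (Examples #9 and #10). The helper below is a minimal sketch of what such a function might look like, assuming it resamples the first axis of data with replacement n times and applies func to each resample; the actual implementation in the u module may differ.

import numpy as np

def bootstrap_list(data, func, n=1000, out_shape=None):
    # Hypothetical stand-in for u.bootstrap_list: resample the first axis
    # of data with replacement n times and apply func to each resample.
    # If out_shape is given, func is assumed to return an array of that
    # shape for each resample.
    data = np.asarray(data)
    if out_shape is None:
        out = np.zeros(n)
    else:
        out = np.zeros((n,) + tuple(out_shape))
    for i in range(n):
        inds = np.random.choice(len(data), size=len(data), replace=True)
        out[i] = func(data[inds])
    return out

Under this signature, the call u.bootstrap_list(trls, np.nanmean, n=n_boots) in Example #5 would return an (n_boots,) array of resampled means, matching the assignment into perf[:, i].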
Example #2
def plot_spatial_bias(ds,
                      conds,
                      labels=None,
                      colors=None,
                      ax=None,
                      filt_func=None,
                      left_field='left_first',
                      right_field='right_first',
                      boots=1000):
    if ax is None:
        f = plt.figure()
        ax = f.add_subplot(1, 1, 1)
    if labels is None:
        labels = ('', ) * len(ds)
    if colors is None:
        colors = (None, ) * len(ds)
    l_func = lambda x: np.sum(x[left_field])
    r_func = lambda x: np.sum(x[right_field])
    ratio_func = _make_ratio_function(l_func, r_func)
    for i, d in enumerate(ds):
        trs = u.get_only_conds(d, conds)
        if filt_func is not None:
            trs = filt_func(trs)
        v = u.bootstrap_list(trs, ratio_func, n=boots)
        v = np.array(v).reshape((-1, 1))
        gpl.plot_trace_werr(np.array([0]) + i,
                            v,
                            error_func=gpl.conf95_interval,
                            ax=ax,
                            label=labels[i],
                            fill=False,
                            color=colors[i])
    return ax
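
Examples #2, #7, and #13 build their bootstrapped statistic with _make_ratio_function(f1, f2), which is not shown on this page. Judging from _err_ratio_func in Example #11, which computes corr_errs / (corr_errs + incorr_errs), the helper presumably closes over the two count functions and returns their normalized ratio. A plausible sketch follows; the inner function name is invented here for illustration.

def _make_ratio_function(f1, f2):
    # Assumed behavior, inferred from _err_ratio_func in Example #11:
    # build a function that returns the fraction of counts attributed
    # to f1, i.e. f1(x) / (f1(x) + f2(x)).
    def _ratio(x):
        a = f1(x)
        b = f2(x)
        return a / (a + b)
    return _ratio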
Example #3
def plot_load_mse(data,
                  ax=None,
                  plot_fit=True,
                  max_load=np.inf,
                  boots=None,
                  sep_subj=False,
                  data_color=(.7, .7, .7),
                  **plot_args):
    if ax is None:
        f, ax = plt.subplots(1, 1)
    ls, errs = data
    for i, err in enumerate(errs):
        if sep_subj:
            use_ax = ax[i]
        else:
            use_ax = ax
        l = ls[i]
        mask = l <= max_load
        l = l[mask]
        subj = i + 1
        mse = np.array(err)[mask]**2
        if boots is not None:
            mse = np.array(
                list(
                    u.bootstrap_list(mse_i.flatten(), np.nanmean, boots)
                    for mse_i in mse))
        gpl.plot_trace_werr(l,
                            mse,
                            ax=use_ax,
                            label='S{}'.format(subj),
                            jagged=True,
                            color=data_color,
                            **plot_args)
    return ax
Example #4
def estimate_ae_rate(self,
                     n_stim,
                     noise_mag=0,
                     set_dists=None,
                     n_est=10e4,
                     dist_func=mse,
                     excl_close=None,
                     add_std=0,
                     boots=None):
    out = self._generate_input_output_pairs(n_est,
                                            n_stim,
                                            noise_mag=noise_mag,
                                            set_dists=set_dists)
    x, y, inp = out
    y_hat = self.model(x)
    ys_all = self._make_alternate_outputs(inp)
    if excl_close is not None:
        _, _, r = self._section_input(inp)
        ms = self._get_min_pair_distances(r)
        close_mask = ms > excl_close
        ys_all = ys_all[:, close_mask]
        y_hat = y_hat[close_mask]
        n_est = np.sum(close_mask)
    ds = dist_func(np.expand_dims(y_hat, 0), ys_all)
    mes = self._select_map(ds, add_std=add_std)
    if boots is not None:
        f = lambda x: np.sum(x > 0)
        ae_rate = u.bootstrap_list(mes, f, n=boots) / n_est
    else:
        ae_rate = np.sum(mes > 0) / n_est
    return ae_rate
Example #5
def get_model_performance(snrs, n_feats, n_values, n_neurs, metric=cc.hamming,
                          n=5000, n_boots=1000, noise_var=1,
                          model=cc.LinearCode, **model_args):
    perf = np.zeros((n_boots, len(snrs)))
    pwrs = noise_var*snrs**2
    for i, pwr in enumerate(pwrs):
        lc = model(n_feats, n_values, n_neurs, power=pwr, noise_cov=noise_var,
                   **model_args)
        trls = lc.compute_trl_metric(metric=metric, n=n)
        perf[:, i] = u.bootstrap_list(trls, np.nanmean, n=n_boots)
    return perf
Example #6
def rigotti_repl(c, o, n_i, snrs, times_samp=10, excl=False, 
                 neurs=1000, nuis=False, nuis_prob=.5, bs_samps=1000):
    sigs = np.array(snrs)**2
    nv = 1
    c_dims = np.zeros(len(snrs))
    ic_dims = np.zeros_like(c_dims)

    for i, s in enumerate(sigs):
        out = simulate_transform_code_out(c, o, n_i, nv, s, 
                                          neurs=neurs,
                                          times_samp=times_samp, 
                                          excl=excl)
        owords, dec_words, ns, corr, bt, words, trs, sel = out
        owords_nb = get_original_from_binary(bt, words, owords)
        if nuis:
            nonlin_sel = np.array([len(s) > 1 for s in sel]).reshape((1, -1))
            nuis_occur = np.random.rand(owords.shape[0]) > nuis_prob
            nuis_occur = nuis_occur.reshape((-1, 1))
            sub_mask = nuis_occur*nonlin_sel*trs(owords)
            ns = ns - sub_mask
            corr = np.logical_not(np.logical_and(np.logical_not(corr), 
                                                 nuis_occur[:, 0]))
        c_dims[i], ic_dims[i] = estimate_code_dimensionality(corr, owords, 
                                                             ns, bt)
        corr_mask = corr.astype(bool)
        incorr_mask = np.logical_not(corr_mask)
        acd_corr = decode_single_attribute(words, owords_nb[corr_mask],
                                           ns[corr_mask])
        acd_incorr = decode_single_attribute(words, owords_nb[incorr_mask],
                                             ns[incorr_mask])
        if i == 0:
            feat_corrs = np.zeros((words.shape[1], len(sigs), bs_samps))
            feat_incorrs = np.zeros_like(feat_corrs)
        for j, d in enumerate(acd_corr):
            feat_corrs[j, i] = u.bootstrap_list(d.flatten(), np.mean, bs_samps)
            feat_incorrs[j, i] = u.bootstrap_list(acd_incorr[j].flatten(),
                                                  np.mean, bs_samps)
    return c_dims, ic_dims, feat_corrs, feat_incorrs
Example #7
def plot_sdmst_bias(ds,
                    condlist,
                    cond_labels=None,
                    d_labels=None,
                    d_colors=None,
                    ax=None,
                    filt_func=None,
                    boots=1000,
                    err_field='TrialError',
                    corr=0,
                    incorr=6,
                    offset_div=6,
                    rotate_labels=True):
    if cond_labels is None:
        cond_labels = ('', ) * len(condlist)
    if d_labels is None:
        d_labels = ('', ) * len(ds)
    if d_colors is None:
        d_colors = ('', ) * len(ds)
    if ax is None:
        f = plt.figure()
        ax = f.add_subplot(1, 1, 1)
    corr_func = lambda x: np.sum(x[err_field] == corr)
    incorr_func = lambda x: np.sum(x[err_field] == incorr)
    ratio_func = _make_ratio_function(corr_func, incorr_func)
    for i, d in enumerate(ds):
        for j, c in enumerate(condlist):
            trs = u.get_only_conds(d, (c, ))
            c_r = u.bootstrap_list(trs, ratio_func, n=boots)
            c_r = np.array(c_r).reshape((-1, 1))
            offset = (i - len(ds) / 2) / 10
            if j == 0:
                use_label = d_labels[i]
            else:
                use_label = ''
            gpl.plot_trace_werr(np.array([j]) + offset,
                                c_r,
                                error_func=gpl.conf95_interval,
                                ax=ax,
                                label=use_label,
                                fill=False,
                                color=d_colors[i])
    ax.set_xticks(range(len(condlist)))
    ax.set_xlabel('condition')
    ax.set_ylabel('P(correct)')
    if rotate_labels:
        ax.set_xticklabels(cond_labels, rotation=90)
    else:
        ax.set_xticklabels(cond_labels)
    return ax
Example #8
def plot_dist_dependence(data,
                         ax=None,
                         eps=1e-4,
                         plot_fit=True,
                         boots=None,
                         sep_subj=False,
                         n_bins=5,
                         need_trials=20,
                         data_color=None,
                         digit_percentile=95,
                         **plot_args):
    if ax is None:
        f, ax = plt.subplots(1, 1)
    ls, load_errs = data
    for j, load in enumerate(load_errs):
        if sep_subj:
            use_ax = ax[j]
        else:
            use_ax = ax
        for i, (errs, dists) in enumerate(load):
            if ls[j][i] > 1:
                l = ls[j][i]
                dists = np.min(np.abs(dists[:, 1:l]), axis=1)
                max_bin = np.percentile(dists, digit_percentile)
                bins = np.linspace(0, max_bin + eps, n_bins + 1)
                bin_inds = np.digitize(dists, bins)
                binned_errs = []
                bin_cents = []
                for bi, binbeg in enumerate(bins[:-1]):
                    # np.digitize is 1-indexed: values in
                    # [bins[bi], bins[bi + 1]) are assigned index bi + 1
                    mask = bin_inds == bi + 1
                    if np.sum(mask) > need_trials:
                        be = errs[mask].flatten()**2
                        binned_errs.append(be)
                        bin_cents.append((binbeg + bins[bi + 1]) / 2)
                bin_cents = np.array(bin_cents)
                binned_errs = np.array(binned_errs, dtype=object)
                if boots is not None:
                    binned_errs = np.array(
                        list(
                            u.bootstrap_list(be_i, np.mean, boots)
                            for be_i in binned_errs))
                gpl.plot_trace_werr(bin_cents,
                                    binned_errs,
                                    ax=use_ax,
                                    jagged=True,
                                    color=data_color,
                                    **plot_args)
    return ax
Example #9
def get_dist_diff_prop(corr_dist, err_dist, n_boots=1000):
    outs = []
    for i, cd in enumerate(corr_dist):
        ed = err_dist[i]
        out_i = np.zeros((len(ed), n_boots, ed.shape[-1]))
        cd_diff = np.diff(cd, axis=2)[:, :, 0]
        assert cd.shape[1] == 1
        ed_diff = np.squeeze(np.diff(ed, axis=2))
        diff_diff = ed_diff - cd_diff
        func = lambda x: np.mean(x > 0, axis=0)
        for j, dd in enumerate(diff_diff):
            out_i[j] = u.bootstrap_list(dd,
                                        func,
                                        n=n_boots,
                                        out_shape=(ed.shape[-1], ))
        outs.append(out_i)
    return outs
Example #10
def _plot_di(di, t_ind, xs, axs, n_boots=1000, buff=.01, avg_models=False):
    x_pts = di[:, 0, 0, t_ind]
    y_pts = di[:, 0, 1, t_ind]
    diffs = di[:, :, 1] - di[:, :, 0]
    if avg_models:
        diffs = np.mean(diffs, axis=0)
    else:
        diffs = diffs[:, 0]
    diffs_b = u.bootstrap_list(diffs,
                               u.mean_axis0,
                               n=n_boots,
                               out_shape=(diffs.shape[1], ))
    gpl.plot_trace_werr(xs, diffs_b, ax=axs[1], conf95=True)
    axs[0].plot(x_pts, y_pts, 'o')
    bound_low = min(np.min(x_pts), np.min(y_pts)) - buff
    bound_high = max(np.max(x_pts), np.max(y_pts)) + buff
    axs[0].plot([bound_low, bound_high], [bound_low, bound_high])
    return axs
Example #11
def get_fsc_bias(d,
                 err_field='TrialError',
                 oa_field='target_onset_time_diff',
                 user_field='UserVars',
                 corr=0,
                 incorr=6,
                 boots=1000,
                 conds=None,
                 condfield='ConditionNumber',
                 loc_field='first_target_location',
                 target_num='target_num'):
    def _err_ratio_func(es):
        corr_errs = np.sum(es == corr)
        incorr_errs = np.sum(es == incorr)
        er = corr_errs / (corr_errs + incorr_errs)
        return er

    if conds is not None:
        cs = d[condfield][0, 0]
        cond_mask = np.array([x in conds for x in cs])
    else:
        cond_mask = np.ones(d[condfield][0, 0].shape[0], dtype=bool)
    tnums = d[user_field][0, 0][target_num] == 2
    cond_mask = np.logical_and(tnums[0, :], cond_mask)
    errs = d[err_field][0, 0][cond_mask, 0]
    oas = d[user_field][0, 0][oa_field][0, cond_mask]
    loc = d[user_field][0, 0][loc_field][0, cond_mask]
    x_oas = np.unique(oas)
    x_oas = np.array([x[0, 0] for x in x_oas])
    err_rate = np.zeros((len(x_oas), boots))
    for i, oa in enumerate(x_oas):
        err_types = errs[oas == oa]
        locs = loc[oas == oa]
        locs_arr = np.array([l for l in locs])
        er = u.bootstrap_list(err_types, _err_ratio_func, n=boots)
        if oa < 0:
            er = 1 - er
        err_rate[i] = er
    return x_oas, err_rate
Example #12
def estimate_real_perc_correct(c, o, n_i, noisevar, v, n_samps=1000,
                               excl=False, cc_rf=None, subdim=False,
                               distortion_func=hamming_distortion, 
                               eps=1, bs=True, input_noise=0,
                               pwr_func=empirical_variance_power,
                               noise_func=gaussian_noise,
                               local_input_noise=True):
    if cc_rf is None:
        ts, bt, trs, _ = generate_types((n_i,)*c, order=o, excl=excl)
    else:
        _, ts, trs = generate_cc_types((n_i,)*c, cc_rf, order=o, excl=excl)
        bt = ts
    out = simulate_transform_code_full(bt, trs, noisevar, v, 
                                       neurs=trs(bt).shape[1], 
                                       n_samps=n_samps, subdim=subdim,
                                       distortion_func=distortion_func,
                                       eps=eps, pwr_func=pwr_func,
                                       noise_func=noise_func,
                                       num_types=ts, input_noise=input_noise,
                                       local_input_noise=local_input_noise)
    _, _, _, corr = out
    if bs:
        corr = u.bootstrap_list(corr, np.mean, n=n_samps)
    return corr
Example #13
def plot_plt_bias(ds,
                  labs=None,
                  cols=None,
                  filt_errs=True,
                  err_field='TrialError',
                  corr=0,
                  sep_field='angular_separation',
                  axs=None,
                  cond_nf=22,
                  cond_fn=19,
                  cond_nn=21,
                  cond_ff=20,
                  postthr='fixation_off',
                  sacc_vthr=.1,
                  readdpost=False,
                  lc=(-9, 0),
                  rc=(9, 0),
                  wid=3,
                  hei=3,
                  centoffset=(0, 0),
                  use_bhv_img_params=True,
                  boots=1000,
                  sep_filt=None,
                  figsize=(12, 4)):
    if labs is None:
        labs = ('', ) * len(ds)
    if cols is None:
        cols = ('', ) * len(ds)
    if axs is None:
        f = plt.figure(figsize=figsize)
        ax_fs_nl = f.add_subplot(1, 3, 1)
        ax_fs_nr = f.add_subplot(1, 3, 2)
        ax_fs_bias = f.add_subplot(1, 3, 3)
    else:
        ax_fs_nl, ax_fs_nr, ax_fs_bias = axs
    ax_fs_nl.set_title('left novelty bias')
    ax_fs_nr.set_title('right novelty bias')
    ax_fs_bias.set_title('full bias')
    ax_fs_nl.set_ylabel('P(look left | novel vs familiar) -\n'
                        'P(look left | homogeneous)')
    ax_fs_bias.set_ylabel('P(look novel)')
    ax_fs_nl.set_xlabel('session')
    ax_fs_nr.set_xlabel('session')
    ax_fs_bias.set_xlabel('session')

    conds = (cond_nn, cond_fn, cond_nf, cond_ff)
    for i, d in enumerate(ds):
        seps = np.unique(d[sep_field])
        if sep_filt is not None:
            seps = sep_filt(seps)
        for j, s in enumerate(seps):
            d_sep = d[d[sep_field] == s]

            x = es.get_fixtimes(d_sep,
                                conds,
                                postthr=postthr,
                                thr=sacc_vthr,
                                readdpost=readdpost,
                                lc=lc,
                                rc=rc,
                                wid=wid,
                                hei=hei,
                                centoffset=centoffset,
                                use_bhv_img_params=use_bhv_img_params)
            ls, ts, begs, ends = x
            fls = es.get_first_sacc_latency_nocompute(begs,
                                                      ts,
                                                      onim=False,
                                                      first_n=1,
                                                      sidesplit=True)
            sacc_arr1 = _make_fls_arr(fls, cond_nf)
            sacc_arr_nn = _make_fls_arr(fls, cond_nn)
            sacc_arr_ff = _make_fls_arr(fls, cond_ff)
            sacc_arr_null = np.concatenate((sacc_arr_nn, sacc_arr_ff))
            # look novel when on left
            f1 = lambda x: np.sum(x == 0)
            f2 = lambda x: np.sum(x == 1)
            rf1 = _make_ratio_function(f1, f2)
            nov_left = u.bootstrap_list(sacc_arr1, rf1, n=boots)
            nov_left = nov_left.reshape((-1, 1))
            sub1 = np.mean(u.bootstrap_list(sacc_arr_null, rf1, n=boots))
            gpl.plot_trace_werr(np.array([0]) + i,
                                nov_left - sub1,
                                error_func=gpl.conf95_interval,
                                ax=ax_fs_nl,
                                label=labs[i],
                                fill=False,
                                color=cols[i])

            sacc_arr2 = _make_fls_arr(fls, cond_fn, l=1, r=0)
            # look novel when on right
            nov_right = u.bootstrap_list(sacc_arr2, rf1, n=boots)
            nov_right = nov_right.reshape((-1, 1))
            rf2 = _make_ratio_function(f2, f1)
            sub2 = np.mean(u.bootstrap_list(sacc_arr_null, rf2, n=boots))
            gpl.plot_trace_werr(np.array([0]) + i,
                                nov_right - sub2,
                                error_func=gpl.conf95_interval,
                                ax=ax_fs_nr,
                                fill=False,
                                color=cols[i])

            full_sacc_arr = np.concatenate((sacc_arr1, sacc_arr2))
            nov_full = u.bootstrap_list(full_sacc_arr, rf1, n=boots)
            nov_full = nov_full.reshape((-1, 1))
            gpl.plot_trace_werr(np.array([0]) + i,
                                nov_full,
                                error_func=gpl.conf95_interval,
                                ax=ax_fs_bias,
                                fill=False,
                                color=cols[i])
Example #14
def simulate_trans_code_full_wrapper(*args, boot_times=1000):
    _, _, _, corr = simulate_transform_code_full(*args)
    corr = u.bootstrap_list(corr, np.mean, n=boot_times)
    return corr