# Example #1 (score: 0)
def linear_2d(fs, npc=0, lam=5):
    """Cosine/sine (2D linear) decoding of stimulus angle.

    For each dataset, fits a ridge regression from neural responses to
    the (cos, sin) of the training stimulus angles, decodes test-trial
    angles with arctan2, and records the median absolute error.

    Parameters
    ----------
    fs : list of str
        Paths to dataset files loadable with np.load(...).item().
    npc : int
        Behavior PCs removed in utils.compile_resp.
    lam : float
        Ridge regularization strength passed to fast_ridge.

    Returns
    -------
    errors : (len(fs),) float32 array
        Median absolute decoding error per dataset, in degrees.
    """
    nangle = 2 * np.pi  # stimuli span the full circle
    errors = np.zeros((len(fs), ), np.float32)
    for i, f in enumerate(fs):
        print('dataset %d' % i)
        dat = np.load(f, allow_pickle=True).item()
        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)

        X = sresp[:, itrain]

        # cosine decoding: targets are (cos, sin) of the training angles
        theta_pref = np.array([0.0])
        theta0 = 2 * np.pi / nangle * istim[itrain,
                                            np.newaxis] - theta_pref[np.newaxis, :]
        y = np.concatenate((np.cos(theta0[:, :1]), np.sin(theta0[:, :1])),
                           axis=-1)

        A = fast_ridge(X, y, lam=lam)
        ypred = sresp[:, itest].T @ A

        # decoded angle, then error wrapped into (-nangle/2, nangle/2]
        apred = np.arctan2(ypred[:, 1], ypred[:, 0])
        error = istim[itest] - apred
        error = np.remainder(error, nangle)
        error[error > nangle / 2] = error[error > nangle / 2] - nangle
        errors[i] = np.median(np.abs(error)) * 180 / np.pi
        print(errors[i])

    return errors
def run_decoder(fs, linear=True, npc=32):
    """Decode stimulus angle for each dataset.

    Uses the linear (von Mises) decoder when `linear` is True, else the
    independent-neuron decoder (which also returns per-neuron SNRs and
    preferred angles).

    Returns
    -------
    E : (len(fs),) median absolute error per dataset, in degrees
    errors, stims, snrs, theta_prefs : per-dataset lists of per-trial
        errors, test stimuli, SNRs, and preferred angles.
    """
    E = np.zeros((len(fs), ))
    errors, stims, snrs, theta_prefs = [], [], [], []
    for idx, fname in enumerate(fs):
        dat = np.load(fname, allow_pickle=True).item()
        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)

        snr, tpref = [], []
        if linear:
            _, err, _, _ = vonmises_decoder(sresp, istim, itrain, itest)
        else:
            _, err, _, _, snr, tpref = independent_decoder(
                sresp, istim, itrain, itest)

        # accumulate per-dataset results
        errors.append(err)
        stims.append(istim[itest])
        snrs.append(snr)
        theta_prefs.append(tpref)
        E[idx] = np.median(np.abs(err)) * 180 / np.pi
        print(os.path.basename(fname), E[idx])

    return E, errors, stims, snrs, theta_prefs
# Example #3 (score: 0)
def population_tuning(fs, angle_pref, saveroot):
    """Average tuning curves of neurons grouped by preferred angle.

    Parameters
    ----------
    fs : list of str
        Paths to dataset files (np.load(...).item()).
    angle_pref : sequence of arrays
        Per-dataset preferred angle of each neuron (radians), indexed
        as angle_pref[t] for dataset t.
    saveroot : unused in this function (kept for interface compatibility).

    Returns
    -------
    avg_tuning : (16, 64, len(fs)) float32
        Binned average tuning curve per preferred-angle group.
    tbins : centers of the stimulus-angle bins (radians).
    """

    # 16 preferred-angle group centers, evenly spaced on the circle
    theta_pref = np.linspace(0, 2 * np.pi, 17)[:-1]
    nth = theta_pref.size
    bins = np.linspace(0, 2 * np.pi, 65)  # 64 stimulus-angle bins
    avg_tuning = np.zeros((nth, bins.size - 1, len(fs)), np.float32)
    tdiff = np.abs(np.diff(theta_pref).mean())  # spacing between group centers

    thetas = []  # NOTE(review): never filled or returned — appears vestigial

    for t, f in enumerate(fs):
        print(f)
        dat = np.load(f, allow_pickle=True).item()
        sresp, istim, itrain, itest = utils.compile_resp(dat)

        # compute averaged tuning curves
        avg_test_curves = np.zeros((nth, itest.size))
        stest = sresp[:, itest]
        for k, tf in enumerate(theta_pref):
            # circular distance of every neuron's preference to center tf
            dists = np.abs(tf - angle_pref[t])
            dists[dists > np.pi] = 2 * np.pi - dists[dists > np.pi]
            # mean test response of neurons preferring angles near tf
            avg_test_curves[k] = stest[dists < tdiff / 2].mean(axis=0)
            avg_tuning[k, :, t], _, _ = utils.binned(istim[itest],
                                                     avg_test_curves[k], bins)

    tbins = bins[:-1] + (bins[1] - bins[0]) / 2  # bin centers

    return avg_tuning, tbins
# Example #4 (score: 0)
def pc_decoding(fs, nPC, npc=0):
    """Linearly decode stimulus angle from the top PCs of the data.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    nPC : list of int
        Numbers of principal components to decode from; the PCA is fit
        once with nPC[-1] components and truncated for smaller values.
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    errors : (len(fs), len(nPC)) median absolute errors (degrees)
    apreds, atrues : (len(nPC), n_test) predictions / true stimuli in
        degrees, kept for the FIRST dataset only.
    """

    errors = np.zeros((len(fs), len(nPC)))
    for t, f in enumerate(fs):
        print(os.path.basename(f))
        dat = np.load(f, allow_pickle=True).item()

        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)

        # PCA on the response matrix; scale components by their singular
        # values so the columns of u retain the projections' magnitude
        pca = PCA(n_components=nPC[-1]).fit(sresp)
        u = pca.components_.T
        sv = pca.singular_values_
        u *= sv

        for k, pc in enumerate(nPC):
            # decode from only the first `pc` scaled components
            apred, error, _, _ = vonmises_decoder(u[:, :pc].T, istim, itrain,
                                                  itest)
            errors[t, k] = np.median(np.abs(error)) * 180 / np.pi
            if t == 0:
                # keep example predictions from the first dataset
                if k == 0:
                    apreds = np.zeros((len(nPC), len(itest)))
                    atrues = np.zeros((len(nPC), len(itest)))
                apreds[k] = apred * 180 / np.pi
                atrues[k] = istim[itest] * 180 / np.pi
    return errors, apreds, atrues
def run_discrimination(fs, decoder='linear', npc=32):
    """Fine-orientation discrimination with one of several decoders.

    decoder='linear' uses the derivative (linear) decoder across all
    angles; 'deep_net' and 'random_forest' train nonlinear
    discriminators on trials near a 45-degree reference.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    decoder : {'linear', 'deep_net', 'random_forest'}
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    P : (len(fs), len(drange)) choice probability per degree offset
    d75 : per-dataset 75%-correct discrimination threshold (degrees)
    drange : stimulus offsets (degrees)
    """
    drange = np.arange(-29, 30)
    P = np.zeros((len(fs), len(drange)))
    d75 = np.zeros((len(fs), ))
    ithres = np.pi / 4  # reference angle for the nonlinear decoders
    for t, f in enumerate(fs):
        print(os.path.basename(f))
        dat = np.load(f, allow_pickle=True).item()
        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)

        if decoder == 'linear':
            D, dy, A = derivative_decoder(istim, sresp, itrain, itest)
            for j, deg in enumerate(drange):
                # test trials whose offset falls in a 1-degree bin
                ix = np.logical_and(D > np.pi / 180 * (deg - .5),
                                    D < np.pi / 180 * (deg + .5))
                P[t, j] = np.nanmean(dy[ix] > 0)
        else:
            if decoder == 'deep_net':
                ## the deep net trains on 256 scaled PCs, not raw neurons
                pca = PCA(n_components=256).fit(sresp)
                x = pca.components_.T
                sv = pca.singular_values_
                x *= sv
            else:
                x = sresp.T.copy()
                del sresp  # free the large response matrix
            # trials within 30 degrees of the reference; alternating
            # trials become train / test
            imin = ithres - np.pi / 6
            imax = ithres + np.pi / 6
            gstims = np.logical_and(istim >= imin, istim <= imax).nonzero()[0]
            xtrain = x[gstims[::2], :]
            xtest = x[gstims[1::2], :]
            ytrain = (istim[gstims[::2]] - ithres) > 0
            ytest = (istim[gstims[1::2]] - ithres) > 0
            atest = istim[gstims[1::2]] - ithres  # signed offsets of test trials

            # average over 5 training repeats
            Pk = np.zeros((len(drange), 5))
            for k in range(5):
                # NOTE(review): any decoder string other than
                # 'random_forest'/'deep_net' leaves `ychoice` undefined
                # here and raises NameError below
                if decoder == 'random_forest':
                    ychoice = rf_discriminator(xtrain, ytrain, xtest, ytest)
                elif decoder == 'deep_net':
                    ychoice = nn_discriminator(xtrain, ytrain, xtest, ytest)

                P0 = np.zeros(drange.shape)
                for j, deg in enumerate(drange):
                    ix = np.logical_and(atest > np.pi / 180 * (deg - .5),
                                        atest < np.pi / 180 * (deg + .5))
                    P0[j] = np.mean(ychoice[ix] > 0)
                # symmetrize: average P(+deg) with 1 - P(-deg)
                P0 = (P0 + 1 - P0[::-1]) / 2
                d750 = utils.discrimination_threshold(P0, drange)[0]
                print('discrimination threshold %2.2f' % d750)
                Pk[:, k] = P0
            P[t] = Pk.mean(axis=-1)
        d75[t] = utils.discrimination_threshold(P[t], drange)[0]
        print('--- discrimination threshold %2.2f' % d75[t])
    return P, d75, drange
def train_weak_learners(fs):
    """Compare three weak single-trial discriminators on each dataset.

    For 32 reference angles, trains 'best_neuron', 'one_shot', and
    'random_projection' weak learners (weak_learning) to report the
    sign of the stimulus offset, then bins choice probability by offset.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.

    Returns
    -------
    P : (len(drange), 3) array
        P(choice > 0) per degree bin for each of the three learners.
    drange : offsets (degrees) at which P is evaluated (gap around 0).
    xlist : output of the LAST 'best_neuron' fit (selected neurons).
    """
    thmax = np.pi / 6  # use trials within 30 degrees of the reference
    all_thetas = np.linspace(0, 2 * np.pi, 33)[:-1]  # 32 reference angles

    # pooled signed offsets and the three learners' outputs per test trial
    D = np.zeros((0, ))
    dy = np.zeros((0, 3))

    for t, f in enumerate(fs):
        print(os.path.basename(f))
        dat = np.load(f, allow_pickle=True).item()
        sresp, istim, itrain, itest = utils.compile_resp(dat)

        for j, theta_pref in enumerate(all_thetas):
            # signed offset from the reference angle, wrapped to (-pi, pi]
            ds = istim - theta_pref
            ds = ds % (2 * np.pi)
            ds[ds > np.pi] = ds[ds > np.pi] - 2 * np.pi
            # keep trials near (but not exactly at) the reference
            ix = np.logical_and(np.abs(ds) < thmax, np.abs(ds) > thmax / 6)

            itrain0 = itrain[ix[itrain]]
            itest0 = itest[ix[itest]]

            ypred1, xlist = weak_learning(sresp,
                                          ds,
                                          itrain0,
                                          itest0,
                                          dcdtype='best_neuron')
            ypred2, _ = weak_learning(sresp,
                                      ds,
                                      itrain0,
                                      itest0,
                                      dcdtype='one_shot')
            ypred3, _ = weak_learning(sresp,
                                      ds,
                                      itrain0,
                                      itest0,
                                      dcdtype='random_projection')

            D = np.concatenate((D, ds[itest0]), axis=0)
            dy = np.concatenate((dy, np.vstack((ypred1, ypred2, ypred3)).T),
                                axis=0)

    # choice probability vs offset, skipping -5..+4 degrees around zero
    drange = np.concatenate((np.arange(-29, -4), np.arange(5, 30)))
    P = np.zeros((len(drange), 3))
    dd = .5
    for j, deg in enumerate(drange):
        ix = np.logical_and(D > np.pi / 180 * (deg - dd),
                            D < np.pi / 180 * (deg + dd))
        P[j, :] = np.mean(dy[ix, :] > 0, axis=0)

    return P, drange, xlist
# Example #7 (score: 0)
def dense_discrimination(fs, npc=0):
    ''' discriminate between +/- 2 degrees trials and as a function of # of neurons and stims

    A linear readout of the signed offset around a 45-degree reference
    is fit on random subsets of neurons (npop) and training trials
    (nstim); choice probability is evaluated in 0.2-degree bins.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    npop : (len(nskip), len(fs)) neuron-subset sizes
    nstim : (len(nskipstim), len(fs)) trial-subset sizes
    P : (len(nskipstim), len(nskip), len(drange2), len(fs)) choice prob.
    drange2 : offsets (degrees) at which P is evaluated
    '''
    # geometric ladders of subsampling factors (1x .. 1024x)
    nskipstim = 2**np.linspace(0, 10, 21)
    nstim = np.zeros((len(nskipstim), len(fs)), 'int')
    nskip = 2**np.linspace(0, 10, 21)
    npop = np.zeros((len(nskip), len(fs)), 'int')

    nth = 1
    lam = 1
    theta_pref = np.array([np.pi / 4])  # reference angle
    dd = 1 / 10  # half-width of the offset bins (degrees)
    drange2 = np.arange(-2, 2.01, dd * 2)
    P = np.zeros((len(nskipstim), len(nskip), len(drange2), len(fs)),
                 np.float32)

    for t, f in enumerate(fs):
        print(os.path.basename(f))
        dat = np.load(f, allow_pickle=True).item()

        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)
        # regression target: signed offset of each training stimulus
        theta0 = istim[itrain, np.newaxis] - theta_pref
        y = theta0
        NN = sresp.shape[0]
        ntot = y.shape[0]
        X = sresp[:, itrain]
        Xtest = sresp[:, itest]

        nstim[:, t] = (itrain.size / nskipstim).astype('int')
        y = zscore(y, axis=0)  # z-score the regression target across trials
        # fixed seeds make neuron/trial subsets reproducible
        np.random.seed(seed=101)
        rperm2 = np.random.permutation(itrain.size)
        np.random.seed(seed=101)
        npop[:, t] = (NN / nskip).astype('int')
        rperm = np.random.permutation(NN)
        for m in range(len(nskipstim)):
            iSS = rperm2[:nstim[m, t]]
            for k in range(len(nskip)):
                iNN = rperm[:npop[k, t]]
                A = fast_ridge(X[np.ix_(iNN, iSS)], y[iSS], lam=1)
                ypred = (A.T @ Xtest[iNN]).flatten()
                D = np.zeros((0, ))
                dy = np.zeros((0, ))
                # wrapped signed offsets of the test stimuli
                ds = (istim[itest] - theta_pref[0]) % (2 * np.pi)
                ds[ds > np.pi] = ds[ds > np.pi] - 2 * np.pi
                D = np.concatenate((D, ds), axis=0)
                dy = np.concatenate((dy, ypred), axis=0)
                for j, deg in enumerate(drange2):
                    ix = np.logical_and(D > np.pi / 180 * (deg - dd),
                                        D < np.pi / 180 * (deg + dd))
                    P[m, k, j, t] = np.mean(dy[ix] > 0)

    return npop, nstim, P, drange2
# Example #8 (score: 0)
def nbasis_linear(fs, npc=0):
    """ how the decoding varies as a function of the number of basis functions

    For each dataset, fits ridge regression from responses to either
    von Mises basis targets (nth > 2 bumps of width nt/nth) or plain
    cosine/sine targets (nth == 2), decodes test angles, and records
    the median absolute error.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    npc : int
        NOTE(review): accepted but not forwarded to utils.compile_resp.

    Returns
    -------
    errors : (len(ntt), len(nbasis), len(fs)) median abs errors (deg)
    nbasis : list of basis-set sizes tested
    """
    nbasis = [2, 5, 8, 10, 15, 20, 30, 48, 100]
    ntt = [2.5, 5, 7.5, 10]  # width scale factors for the von Mises basis
    errors = np.zeros((len(ntt), len(nbasis), len(fs)), np.float32)
    for i, f in enumerate(fs):
        print('dataset %d' % i)
        dat = np.load(f, allow_pickle=True).item()
        sresp, istim, itrain, itest = utils.compile_resp(dat)

        lam = 1  # ridge regularization
        nangle = 2 * np.pi
        X = sresp[:, itrain]
        XtX = X @ X.T  # NOTE(review): computed but never used below
        for j, nt in enumerate(ntt):
            for k, nth in enumerate(nbasis):
                if nth > 2:
                    sigma = nt / nth  # von Mises tuning width

                    # von mises basis: one bump per preferred angle
                    theta_pref = np.linspace(0, 2 * np.pi, nth + 1)[:-1]
                    theta0 = 2 * np.pi / nangle * istim[
                        itrain, np.newaxis] - theta_pref[np.newaxis, :]
                    y = np.exp((np.cos(theta0) - 1) / sigma)
                    y = zscore(y, axis=1)
                else:
                    # cosine decoding: 2D (cos, sin) target
                    theta_pref = np.array([0.0])
                    theta0 = 2 * np.pi / nangle * istim[
                        itrain, np.newaxis] - theta_pref[np.newaxis, :]
                    y = np.concatenate(
                        (np.cos(theta0[:, :1]), np.sin(theta0[:, :1])),
                        axis=-1)

                ntot = y.shape[0]

                A = fast_ridge(X, y, lam=lam)
                ypred = sresp[:, itest].T @ A

                # circular interpolation of ypred
                if nth > 2:
                    # upsample basis responses, decode as the argmax angle
                    Kup = utils.upsampling_mat(y.shape[1])
                    yup = ypred @ Kup.T
                    apred = np.argmax(yup, axis=1) / yup.shape[1] * nangle
                else:
                    apred = np.arctan2(ypred[:, 1], ypred[:, 0])
                # wrap error into (-nangle/2, nangle/2]
                error = istim[itest] - apred
                error = np.remainder(error, nangle)
                error[error > nangle / 2] = error[error > nangle / 2] - nangle
                errors[j, k, i] = np.median(np.abs(error)) * 180 / np.pi
                print(errors[j, k, i])

    return errors, nbasis
def train_perceptrons(fs, task_type='hard'):
    """Train perceptrons on a sign-discrimination task with four rules.

    task_type='hard' uses a single 45-degree reference over ALL trials;
    any other value uses 32 reference angles and restricts trials to a
    ring around each reference (within 30 degrees, excluding a small
    gap near zero offset).

    The four learning rules compared are 'regression', 'basic', 'full',
    and 'Hebb' (see perceptron_learning).

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    task_type : str
        'hard' or anything else for the multi-reference task.

    Returns
    -------
    nstim : (len(fs), n_refs, 28) trial counts
        (trailing dim 28 presumably learning stages — confirm in
        perceptron_learning)
    perf : (4, len(fs), n_refs, 28) performance per rule.
    """
    thmax = np.pi / 6
    if task_type == 'hard':
        theta_pref = np.pi / 4
        all_thetas = [theta_pref]
    else:
        all_thetas = np.linspace(0, 2 * np.pi, 33)[:-1]
    nstim = np.zeros((len(fs), len(all_thetas), 28))
    perf = np.zeros((4, len(fs), len(all_thetas), 28))

    for t, f in enumerate(fs):
        print(os.path.basename(f))
        dat = np.load(f, allow_pickle=True).item()
        sresp, istim, itrain, itest = utils.compile_resp(dat)

        for j, theta_pref in enumerate(all_thetas):
            # signed offset from the reference angle
            dy = istim - theta_pref
            itrain0 = itrain
            itest0 = itest
            if task_type != 'hard':
                # wrap to (-pi, pi] and keep trials near the reference
                dy = dy % (2 * np.pi)
                dy[dy > np.pi] = dy[dy > np.pi] - 2 * np.pi
                ix = np.logical_and(np.abs(dy) < thmax, np.abs(dy) > thmax / 6)

                itrain0 = itrain[ix[itrain]]
                itest0 = itest[ix[itest]]

            # only the 'regression' rule also returns trial counts
            nstim[t, j], perf[0, t,
                              j] = perceptron_learning(sresp,
                                                       dy,
                                                       itrain0,
                                                       itest0,
                                                       Ltype='regression',
                                                       lam=1)
            _, perf[1, t, j] = perceptron_learning(sresp,
                                                   dy,
                                                   itrain0,
                                                   itest0,
                                                   Ltype='basic')
            _, perf[2, t, j] = perceptron_learning(sresp,
                                                   dy,
                                                   itrain0,
                                                   itest0,
                                                   Ltype='full')
            _, perf[3, t, j] = perceptron_learning(sresp,
                                                   dy,
                                                   itrain0,
                                                   itest0,
                                                   Ltype='Hebb')

    return nstim, perf
# Example #10 (score: 0)
def log2d(fs, npc=0):
    """Two-axis decoder trained by maximizing log_prob with momentum.

    Learns a (2, NN) weight matrix `a` whose two projections of the
    responses form a 2D vector; the decoded stimulus is that vector's
    angle. Training runs gradient ascent on log_prob with a momentum
    term and a learning-rate warm-up.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    merror : per-dataset median absolute error (degrees)
    errors_ex, stims_ex : per-trial errors and test stimuli of dataset 0
    """
    merror = np.zeros((len(fs), ), np.float32)
    for t, f in enumerate(fs):
        dat = np.load(f, allow_pickle=True).item()
        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)

        x = sresp[:, itrain]
        y = istim[itrain]
        NN, NT = np.shape(sresp)

        # discretized angles and their (cos, sin), consumed by log_prob
        th_range = np.arange(0, 2 * np.pi, 2 * np.pi / 360)
        rcos = np.cos(th_range)
        rsin = np.sin(th_range)

        a = np.random.randn(2, NN) / 1e7  # initialize with very small values

        eps0 = 0.05  # learning rate
        niter = 801
        lam = .0  # regularization parameter, makes fitting unstable

        logL = np.zeros(niter, )
        pa = np.zeros(a.shape)  # momentum buffer for the gradient

        for it in range(niter):
            logL[it], da = log_prob(a, x, y, rcos, rsin)
            # momentum update with weight decay lam
            pa = .95 * pa + .05 * (da - lam * a)
            # ramp the learning rate up over the first 20 iterations;
            # afterwards eps keeps its last value (eps0)
            if it < 20:
                eps = eps0 / (20 - it)
            a += eps * pa

        # project test responses onto the two learned axes
        dx = a[0].T @ sresp[:, itest]
        dy = a[1].T @ sresp[:, itest]

        # decoded angle mapped into [0, 2*pi)
        apred = np.angle(dx + 1j * dy)
        apred[apred < 0] = apred[apred < 0] + 2 * np.pi

        # wrap error into (-pi, pi] and summarize in degrees
        nangle = 2 * np.pi
        error = istim[itest] - apred
        error = np.remainder(error, nangle)
        error[error > nangle / 2] = error[error > nangle / 2] - nangle
        merror[t] = np.median(np.abs(error)) * 180 / np.pi
        print(t, merror[t])
        if t == 0:
            # keep example errors from the first dataset
            errors_ex = error
            stims_ex = istim[itest]

    return merror, errors_ex, stims_ex
def signal_variance(fs, npc=0):
    """Per-neuron signal variance via split-half trial correlation.

    Trials are sorted by stimulus angle and consecutive pairs treated
    as pseudo-repeats; the mean product of the two z-scored halves
    estimates each neuron's stimulus-related (signal) variance.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    sigvar : concatenated per-neuron signal variances over all datasets
    A : (nstim//2, NN, 2) z-scored split-half array of the LAST dataset
    """
    sigvar = np.zeros((0, ), np.float32)
    for f in fs:
        dat = np.load(f, allow_pickle=True).item()
        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)
        NN, nstim = sresp.shape

        # sort trials by stimulus so adjacent trials share similar angles
        isort = np.argsort(istim)
        sresp = sresp[:, isort]
        # pair consecutive trials into two halves -> (npairs, NN, 2)
        A = np.transpose(
            np.reshape(sresp[:, :(nstim // 2) * 2], (NN, nstim // 2, 2)),
            (1, 0, 2))

        # z-score each half; the epsilon belongs in the DENOMINATOR to
        # guard zero-variance neurons (the original added 1e-3 AFTER
        # the division, which only shifted the z-scores and protected
        # nothing)
        A = (A - A.mean(axis=0)) / (A.std(axis=0) + 1e-3)
        sv0 = (A[:, :, 0] * A[:, :, 1]).mean(axis=0)
        print(sv0.mean())
        sigvar = np.append(sigvar, sv0, axis=0)
    return sigvar, A
# Example #12 (score: 0)
def run_independent_and_gain(fs, npc=0):
    """Independent decoder with and without gain fitting, plus error
    correlations between two non-overlapping neuron strips.

    For each dataset, decodes with fitgain in {0, 1}; also splits the
    population into two sets of depth strips (no Z overlap) and
    correlates their decoding errors (Pearson and Spearman).

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    E : (2, len(fs)) median abs error (degrees), rows = fitgain 0/1
    ccE : (2, 2, len(fs)) error correlations [fitgain, (pearson, spearman)]
    """
    E = np.zeros((2, len(fs)))
    ccE = np.zeros((2, 2, len(fs)))
    nsplit = np.zeros((len(fs), ), 'int')  # NOTE(review): never used
    nstrips = 8

    for t, f in enumerate(fs):
        dat = np.load(f, allow_pickle=True).item()

        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)
        # per-cell position ('med' field of dat['stat']) — presumably
        # the vertical coordinate; confirm against the dataset format
        ypos = np.array(
            [dat['stat'][j]['med'][0] for j in range(len(dat['stat']))])

        # split neurons for decoder into strips (no Z overlap between two sets)
        NN = sresp.shape[0]
        np.random.seed(seed=101)  # reproducible permutation
        iNN = np.random.permutation(NN)

        for fitgain in [0, 1]:
            error = independent_decoder(sresp[iNN, :],
                                        istim,
                                        itrain,
                                        itest,
                                        fitgain=fitgain)[1]
            E[fitgain, t] = np.median(np.abs(error)) * 180 / np.pi
            print('%s error=%2.2f' % (os.path.basename(f), E[fitgain, t]))
            n1, n2 = utils.stripe_split(ypos[iNN], nstrips)
            err1 = independent_decoder(sresp[iNN[n1]],
                                       istim,
                                       itrain,
                                       itest,
                                       fitgain=fitgain)[1]
            err2 = independent_decoder(sresp[iNN[n2]],
                                       istim,
                                       itrain,
                                       itest,
                                       fitgain=fitgain)[1]

            ccE[fitgain, 0, t] = np.corrcoef(err1, err2)[0, 1]
            ccE[fitgain, 1, t] = spearmanr(err1, err2)[0]
            print(ccE[fitgain, 1, t])

    return E, ccE
# Example #13 (score: 0)
def dense_asymptotics(fs, lam=1, npc=0):
    """ linear decoding of densely presented stims as a fcn of neurons and trials

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    lam : float
        Ridge regularization passed to dense_decoder.
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    Eneur : (len(nskip), len(fs)) mean squared error (deg^2) vs #neurons
    Estim : (len(nskipstim), len(fs)) mean squared error vs #train trials
    npop : neuron-subset sizes; nstim : trial-subset sizes
    """
    # geometric ladders of subsampling factors (1x .. 1024x)
    nskip = 2**np.linspace(0, 10, 21)
    nskipstim = 2**np.linspace(0, 10, 21)
    Eneur = np.zeros((len(nskip), len(fs)))
    Estim = np.zeros((len(nskipstim), len(fs)))

    npop = np.zeros((len(nskip), len(fs)), 'int')
    nstim = np.zeros((len(nskipstim), len(fs)), 'int')

    for t, f in enumerate(fs):
        print('dataset %d' % t)
        dat = np.load(f, allow_pickle=True).item()
        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)
        istim -= istim.mean()  # center the stimulus values

        # error vs population size (random neuron subsets, fixed seed)
        NN = sresp.shape[0]
        npop[:, t] = (NN / nskip).astype('int')
        np.random.seed(seed=101)
        rperm = np.random.permutation(NN)
        for k in range(len(nskip)):
            iNN = rperm[:npop[k, t]]
            error = dense_decoder(sresp[iNN], istim, itrain, itest, lam=lam)[1]
            Eneur[k, t] = np.mean((error * 180 / np.pi)**2)

        # error vs number of training trials (full population)
        nstim[:, t] = (itrain.size / nskipstim).astype('int')
        np.random.seed(seed=101)
        rperm = np.random.permutation(itrain.size)
        for k in range(len(nskipstim)):
            iSS = rperm[:nstim[k, t]]
            error = dense_decoder(sresp, istim, itrain[iSS], itest, lam=lam)[1]
            Estim[k, t] = np.mean((error * 180 / np.pi)**2)

    return Eneur, Estim, npop, nstim
# Example #14 (score: 0)
def layer_discrimination(fs, all_depths, npc=0):
    """Discrimination performance for upper vs lower cortical depths.

    Runs the derivative decoder separately on neurons within 100 units
    of the shallowest depth (upper) and within 100 units of the deepest
    (lower), then bins choice probability by stimulus offset.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    all_depths : list of arrays
        Per-dataset depth of each neuron.
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    P0 : (len(fs), len(drange), 2) choice probability (upper, lower)
    d75 : (len(fs), 2) 75%-correct thresholds (degrees)
    drange : offsets (degrees)
    """
    drange = np.arange(-29, 30, 1)
    P0 = np.zeros((len(fs), len(drange), 2), np.float32)
    d75 = np.zeros((len(fs), 2), np.float32)
    nangle = 2 * np.pi  # NOTE(review): unused in this function
    for t, f in enumerate(fs):
        print(os.path.basename(f))
        dat = np.load(f, allow_pickle=True).item()
        depths = all_depths[t]

        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)
        nstim = sresp.shape[1]  # NOTE(review): unused in this function

        upper = depths < depths.min() + 100
        lower = depths > depths.max() - 100

        # NOTE(review): D from the first call is overwritten by the
        # second; the bins below use the lower-layer D for BOTH dy0 and
        # dy1. Harmless only if D depends solely on istim/itest (the
        # same for both calls) — confirm in derivative_decoder.
        D, dy0, A = derivative_decoder(istim,
                                       sresp[upper],
                                       itrain,
                                       itest,
                                       lam=1)
        D, dy1, A = derivative_decoder(istim,
                                       sresp[lower],
                                       itrain,
                                       itest,
                                       lam=1)
        for j, deg in enumerate(drange):
            # test trials whose offset falls in a 1-degree bin
            ix = np.logical_and(D > np.pi / 180 * (deg - .5),
                                D < np.pi / 180 * (deg + .5))
            P0[t, j, 0] = np.mean(dy0[ix] > 0)
            P0[t, j, 1] = np.mean(dy1[ix] > 0)

        d75[t, 0] = utils.discrimination_threshold(P0[t, :, 0], drange)[0]
        d75[t, 1] = utils.discrimination_threshold(P0[t, :, 1], drange)[0]
        print('--- discrimination threshold L2/3 %2.2f, L4 %2.2f' %
              (d75[t, 0], d75[t, 1]))

    return P0, d75, drange
# Example #15 (score: 0)
def runspeed_discrimination(fs, all_running, npc=0):
    """Discrimination performance split by running speed at test time.

    Builds two test sets (passive: speed < 2; running: speed > 10),
    trains the derivative decoder on all remaining trials, and measures
    choice probability vs stimulus offset for each test set.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    all_running : list of arrays
        Per-dataset running speed per trial.
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    P0 : (len(fs), len(drange), 2) choice probs (passive, running)
    d75 : (len(fs), 2) 75%-correct thresholds (degrees)
    drange : offsets (degrees)
    """
    ntesthalf = 1000  # max test trials per condition
    drange = np.arange(-29, 30, 1)
    P0 = np.zeros((len(fs), len(drange), 2), np.float32)
    d75 = np.zeros((len(fs), 2), np.float32)
    for t, f in enumerate(fs):
        print(os.path.basename(f))
        dat = np.load(f, allow_pickle=True).item()

        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)

        # randomly pick up to ntesthalf passive and ntesthalf running trials
        rperm = np.random.permutation(istim.size)
        runsp = all_running[t]
        itest1 = (runsp[rperm] < 2).nonzero()[0]
        itest1 = rperm[itest1[:ntesthalf]]
        itest2 = (runsp[rperm] > 10).nonzero()[0]
        itest2 = rperm[itest2[:ntesthalf]]
        itest = np.concatenate((itest1, itest2), axis=0)
        # train on everything outside both test sets
        # (fix: the dtype string must be 'bool' -- 'Bool' is not a valid
        #  NumPy dtype name and raises TypeError on modern NumPy)
        itrain = np.ones(istim.size, 'bool')
        itrain[itest] = False

        D, dy, A = derivative_decoder(istim, sresp, itrain, itest1, lam=1)
        for j, deg in enumerate(drange):
            ix = np.logical_and(D > np.pi / 180 * (deg - .5),
                                D < np.pi / 180 * (deg + .5))
            P0[t, j, 0] = np.mean(dy[ix] > 0)

        D, dy, A = derivative_decoder(istim, sresp, itrain, itest2, lam=1)
        for j, deg in enumerate(drange):
            ix = np.logical_and(D > np.pi / 180 * (deg - .5),
                                D < np.pi / 180 * (deg + .5))
            P0[t, j, 1] = np.mean(dy[ix] > 0)
        d75[t, 0] = utils.discrimination_threshold(P0[t, :, 0], drange)[0]
        d75[t, 1] = utils.discrimination_threshold(P0[t, :, 1], drange)[0]
        print('--- discrimination threshold passive %2.2f, running %2.2f' %
              (d75[t, 0], d75[t, 1]))

    return P0, d75, drange
# Example #16 (score: 0)
def run_decoder(fs, linear=True, nangles=None, npc=0):
    """Decode stimulus angle per dataset, with a per-dataset angular range.

    NOTE(review): another `run_decoder` with the same name appears
    earlier in this pasted collection; in a single module the later
    definition would shadow the earlier one.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    linear : bool
        True -> vonmises_decoder; False -> independent_decoder.
    nangles : array or None
        Angular range per dataset; defaults to 2*pi for every dataset.
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    E : per-dataset median absolute error (degrees)
    errors, stims, snrs, theta_prefs : per-dataset lists (SNR/preferred
        angles are filled only by the independent decoder).
    """
    if nangles is None:
        nangles = 2 * np.pi * np.ones((len(fs), ))
    E = np.zeros((len(fs), ))
    errors = []
    stims = []
    snrs = []
    theta_prefs = []
    for t, f in enumerate(fs):
        dat = np.load(f, allow_pickle=True).item()

        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)
        SNR = []
        theta_pref = []
        if linear:
            d = vonmises_decoder(sresp,
                                 istim,
                                 itrain,
                                 itest,
                                 nangle=nangles[t])
            apred, error = d[0], d[1]
        else:
            d = independent_decoder(sresp,
                                    istim,
                                    itrain,
                                    itest,
                                    nangle=nangles[t])
            apred, error, SNR, theta_pref = d[0], d[1], d[4], d[5]

        # save error and stimulus
        errors.append(error)
        stims.append(istim[itest])
        snrs.append(SNR)
        theta_prefs.append(theta_pref)
        E[t] = np.median(np.abs(error)) * 180 / np.pi
        print(os.path.basename(f), E[t])

    return E, errors, stims, snrs, theta_prefs
# Example #17 (score: 0)
def chron_discrimination(fs, all_depths, npc=0):
    """Discrimination with the original train/test split vs a
    chronological split (first 75% train, last 25% test).

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    all_depths : unused in this function (kept for interface
        compatibility with the sibling *_discrimination functions).
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    P0 : (len(fs), len(drange), 2) choice prob. (original, chronological)
    d75 : (len(fs), 2) 75%-correct thresholds (degrees)
    drange : offsets (degrees)
    """
    drange = np.arange(-29, 30, 1)
    P0 = np.zeros((len(fs), len(drange), 2), np.float32)
    d75 = np.zeros((len(fs), 2), np.float32)
    for t, f in enumerate(fs):
        print(os.path.basename(f))
        dat = np.load(f, allow_pickle=True).item()

        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)
        nstim = sresp.shape[1]

        D0, dy0, A = derivative_decoder(istim, sresp, itrain, itest, lam=1)

        # chronological split: use first 75% vs last 25% of trials
        # (fix: the np.bool alias was removed in NumPy 1.24 -- use the
        #  builtin bool as the dtype)
        itrain = np.zeros((nstim, ), bool)
        itest = np.zeros((nstim, ), bool)
        itrain[:int(nstim * .75)] = True
        itest[int(nstim * .75):] = True
        D1, dy1, A = derivative_decoder(istim, sresp, itrain, itest, lam=1)

        for j, deg in enumerate(drange):
            # test trials whose offset falls in a 1-degree bin
            ix = np.logical_and(D0 > np.pi / 180 * (deg - .5),
                                D0 < np.pi / 180 * (deg + .5))
            P0[t, j, 0] = np.mean(dy0[ix] > 0)

            ix = np.logical_and(D1 > np.pi / 180 * (deg - .5),
                                D1 < np.pi / 180 * (deg + .5))
            P0[t, j, 1] = np.mean(dy1[ix] > 0)
        d75[t, 0] = utils.discrimination_threshold(P0[t, :, 0], drange)[0]
        d75[t, 1] = utils.discrimination_threshold(P0[t, :, 1], drange)[0]
        print(
            '--- discrimination threshold original %2.2f, chronological %2.2f'
            % (d75[t, 0], d75[t, 1]))

    return P0, d75, drange
# Example #18 (score: 0)
def asymptotics(fs, linear=True, npc=0):
    """Decoding error as a function of neuron count and trial count.

    For geometric ladders of subsampling factors, measures (a) the
    error of the chosen decoder on random neuron subsets, (b) the
    combined error and error correlations of two depth-stripe splits,
    and (c) the error on random training-trial subsets.

    Parameters
    ----------
    fs : list of str
        Dataset file paths.
    linear : bool
        True -> vonmises_decoder; False -> independent_decoder.
    npc : int
        Behavior PCs removed in utils.compile_resp.

    Returns
    -------
    E : (len(nskip), 2, len(fs)) [subset error, split combined error] (deg)
    ccE : (len(nskip), 2, len(fs)) [pearson, spearman] split correlations
    nsplit : (len(nskip), len(fs)) size of each half split
    npop, nstim : neuron- and trial-subset sizes
    E2 : (len(nskipstim), len(fs)) error vs training-trial count (deg)
    """
    # geometric ladders of subsampling factors (1x .. 1024x)
    nskip = 2**np.linspace(0, 10, 21)
    nskipstim = 2**np.linspace(0, 10, 21)
    E = np.zeros((len(nskip), 2, len(fs)))
    E2 = np.zeros((len(nskipstim), len(fs)))

    ccE = np.zeros((len(nskip), 2, len(fs)))
    nsplit = np.zeros((len(nskip), len(fs)), 'int')
    npop = np.zeros((len(nskip), len(fs)), 'int')
    nstim = np.zeros((len(nskipstim), len(fs)), 'int')

    for t, f in enumerate(fs):
        print('asymp for: ', os.path.basename(f))
        dat = np.load(f, allow_pickle=True).item()

        sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)
        # per-cell position ('med' field of dat['stat']) — presumably
        # the vertical coordinate; confirm against the dataset format
        ypos = np.array(
            [dat['stat'][j]['med'][0] for j in range(len(dat['stat']))])

        # split neurons for decoder into strips (no Z overlap between two sets)
        nstrips = 8

        NN = sresp.shape[0]
        npop[:, t] = (NN / nskip).astype('int')

        np.random.seed(seed=101)  # reproducible subsets
        rperm = np.random.permutation(NN)
        for k in range(len(nskip)):
            iNN = rperm[:npop[k, t]]
            if linear:
                error = vonmises_decoder(sresp[iNN], istim, itrain, itest)[1]
            else:
                error = independent_decoder(sresp[iNN, :], istim, itrain,
                                            itest)[1]

            E[k, 0, t] = np.median(np.abs(error)) * 180 / np.pi

            # decode separately from the two depth-stripe halves
            n1, n2 = utils.stripe_split(ypos[iNN], nstrips)
            if linear:
                err1 = vonmises_decoder(sresp[iNN[n1]], istim, itrain,
                                        itest)[1]
                err2 = vonmises_decoder(sresp[iNN[n2]], istim, itrain,
                                        itest)[1]
            else:
                err1 = independent_decoder(sresp[iNN[n1]], istim, itrain,
                                           itest)[1]
                err2 = independent_decoder(sresp[iNN[n2]], istim, itrain,
                                           itest)[1]

            # combined error of the two splits: sqrt of the median
            # cross-product, converted to degrees
            E[k, 1, t] = np.abs(np.median(err1 * err2))**.5 * 180 / np.pi

            ccE[k, 0, t] = np.corrcoef(err1, err2)[0, 1]
            ccE[k, 1, t] = spearmanr(err1, err2)[0]
            nsplit[k, t] = len(n1)

        # error vs number of training trials (full population)
        nstim[:, t] = (itrain.size / nskipstim).astype('int')
        np.random.seed(seed=101)
        rperm = np.random.permutation(itrain.size)
        for k in range(len(nskipstim)):
            iSS = rperm[:nstim[k, t]]
            if linear:
                error = vonmises_decoder(sresp, istim, itrain[iSS], itest)[1]
            else:
                error = independent_decoder(sresp, istim, itrain[iSS],
                                            itest)[1]
            E2[k, t] = np.median(np.abs(error)) * 180 / np.pi

    return E, ccE, nsplit, npop, nstim, E2