Example #1
    # z = this_exp.train_data[0].detach().numpy()
    # z = linreg.predict(this_exp.train_data[0])@W1.T
    n_compute = np.min([5000, z.shape[0]])

    idx = np.random.choice(z.shape[0], n_compute, replace=False)
    # idx_tst = idx[::4] # save 1/4 for test set
    # idx_trn = np.setdiff1d(idx, idx_tst)

    cond = this_exp.train_conditions[idx]
    # cond = util.decimal(this_exp.train_data[1][idx,...])
    num_cond = len(np.unique(cond))

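    # One side of the XOR of the two labelled dichotomies: the conditions whose
    # membership in args['dichotomies'][0] and args['dichotomies'][1] agrees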
    xor = np.where(~(np.isin(range(8), args['dichotomies'][0])
                     ^ np.isin(range(8), args['dichotomies'][1])))[0]
    # Loop over dichotomies
    D = assistants.Dichotomies(num_cond, args['dichotomies'] + [xor], extra=50)
    clf = assistants.LinearDecoder(N, 1, assistants.MeanClassifier)
    gclf = assistants.LinearDecoder(N, 1, svm.LinearSVC)
    dclf = assistants.LinearDecoder(N, D.ntot, svm.LinearSVC)
    # clf = LinearDecoder(this_exp.dim_input, 1, MeanClassifier)
    # gclf = LinearDecoder(this_exp.dim_input, 1, svm.LinearSVC)
    # dclf = LinearDecoder(this_exp.dim_input, D.ntot, svm.LinearSVC)

    # K = int(num_cond/2) - 1 # use all but one pairing
    K = int(num_cond / 4)  # use half the pairings

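    # One entry per dichotomy: parallelism score (PS), cross-condition
    # generalisation performance (CCGP), and per-sample outputs of the
    # dichotomy decoders (d)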
    PS = np.zeros(D.ntot)
    CCGP = np.zeros(D.ntot)
    d = np.zeros((n_compute, D.ntot))
    pos_conds = []
    for i, pos in enumerate(D):
Example #2
#%% Shattering dimension
# idx = []
# shat = np.zeros(mets[netid]['shattering'].shape)
# baba = np.arange(35)[None,None,:]*np.ones((30,1000,1))
real_shat = np.array([m for m in mets[n_id]['shattering'] if len(m)>0])
shat_args = [a for m,a in zip(mets[n_id]['shattering'], all_args[n_id]) if len(m)>0]
shat = np.zeros(real_shat.shape)

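# Re-order each run's shattering results so that the trained dichotomies and
# their XOR occupy the first columns, followed by all remaining dichotomies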
for i,arg in enumerate(shat_args):
    xor = np.where(~(np.isin(range(8), arg['dichotomies'][0])^np.isin(range(8), arg['dichotomies'][1])))[0]
    
    ba = []
    for d in arg['dichotomies']+[xor]:
        ba.append(np.where([(list(p) == list(d)) or (list(np.setdiff1d(range(8),p))==list(d))\
                      for p in assistants.Dichotomies(8,arg['dichotomies'],extra=50)])[0][0])
    idx = np.concatenate([ba, np.setdiff1d(range(35),ba)])
    shat[i,:,:] = real_shat[i,:,idx].T
    # shat[i,:,:] = baba[i,:,idx].T
# idx = np.array(idx)

mean = np.nanmean(shat,0)
err = np.nanstd(shat,0)

plt.plot(epochs,mean[:,:2].mean(1))
plt.plot(epochs,mean[:,3:4].mean(1))
plt.plot(epochs,mean[:,4:].mean(1))

plt.fill_between(epochs,mean[:,:2].mean(1)-mean[:,:2].std(1),mean[:,:2].mean(1)+mean[:,:2].std(1),
                 alpha=0.5)
plt.fill_between(epochs,mean[:,3:4].mean(1)-mean[:,3:4].std(1),mean[:,3:4].mean(1)+mean[:,3:4].std(1),
                 alpha=0.5)
Example #3
     z1 = nonlinearity(torch.matmul(W1,inputs[idx_tst,:].T) + b1)
     z = nonlinearity(torch.matmul(W2,z1) + b2)
 else:
     z = nonlinearity(torch.matmul(W1,inputs[idx_tst,:].T) + b1)
 pred = torch.matmul(W,z) + b
 
 if ppp == 0:
     perf = np.sum((pred.T-targets[idx_tst,:]).detach().numpy()**2,1).mean(0)
 else:
     perf = ((pred.T>0) == targets[idx_tst,:]).detach().numpy().mean(0)
 test_perf.append(perf)
 
 # this is just the way I compute the abstraction metrics, sorry
 clf = assistants.LinearDecoder(N, 1, assistants.MeanClassifier)
 gclf = assistants.LinearDecoder(N, 1, svm.LinearSVC)
 D = assistants.Dichotomies(len(np.unique(inp_condition)),
                             input_task.positives, extra=0)
 
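 # Parallelism score and CCGP for each dichotomy of the input task, computed on the test indices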
 ps = []
 ccgp = []
 for _ in D:
     ps.append(D.parallelism(z.T.detach().numpy(), inp_condition[idx_tst], clf))
     ccgp.append(D.CCGP(z.T.detach().numpy(), inp_condition[idx_tst], gclf, max_iter=1000))
 inp_PS.append(ps)
 inp_CCGP.append(ccgp)
 
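 # Repeat with a fixed, hand-picked set of four dichotomies over the 8 conditions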
 D = assistants.Dichotomies(len(np.unique(inp_condition)),
                             [(0,2,3,4),(0,4,6,7),(0,1,3,7),(0,1,2,6)], extra=0)
 ps = []
 ccgp = []
 for _ in D:
     ps.append(D.parallelism(z.T.detach().numpy(), inp_condition[idx_tst], clf))
Example #4
 
 idx = np.random.choice(trn, 5000, replace=False)
 zee, _, z = net(torch.tensor(inputs[idx,:]).float())
 # why = successors[idx,...].detach().numpy()
 
 # centroids = np.stack([z[this_exp.train_conditions[idx]==i,:].detach().mean(0) \
 #                       for i in np.unique(this_exp.train_conditions[idx])])
 # dist_to_class = np.sum((zee[:,:,None].detach().numpy() - centroids.T)**2,1)
 # nearest = dist_to_class.argmin(1)
 # labs = this_exp.task(torch.tensor(nearest)).detach().numpy()
 # perf = np.mean(util.decimal(labs) == util.decimal(why))
 # train_perf.append(perf)
 # train_perf.append(la.norm(centroids.T[:,:,None]-centroids.T[:,None,:],2,0))
 
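 # Parallelism score of the task dichotomies in the current hidden representation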
 clf = assistants.LinearDecoder(N, 1, assistants.MeanClassifier)
 D = assistants.Dichotomies(len(np.unique(this_exp.train_conditions)),
                             this_exp.task.positives, extra=0)
 PS = [D.parallelism(z.detach().numpy(), this_exp.train_conditions[idx], clf) for _ in D]
 train_PS.append(PS)
 
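 # Gradient update via the model's grad_step helper; the commented-out block
 # below sketches the equivalent manual loop over the data loader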
 loss = net.grad_step(dl, optimizer)
 
 # running_loss = 0
 
 # for i, btch in enumerate(dl):
 #     optimizer.zero_grad()
     
 #     inps, outs = btch
 #     # pred = net(inps[...,:-4],inps[...,-4:])
 #     pred = net(inps)
     
 #     # loss = nn.MSELoss()(pred, outs)
Example #5
    else:
        z = nonlinearity(torch.matmul(W1, inputs[idx_tst, :].T) + b1)
    pred = torch.matmul(W, z) + b

    if ppp == 0:
        perf = np.sum((pred.T - targets[idx_tst, :]).detach().numpy()**2,
                      1).mean(0)
    else:
        perf = ((pred.T > 0) == targets[idx_tst, :]).detach().numpy().mean(0)
    test_perf.append(perf)

    # this is just the way I compute the abstraction metrics, sorry
    clf = assistants.LinearDecoder(N, 1, assistants.MeanClassifier)
    gclf = assistants.LinearDecoder(N, 1, svm.LinearSVC)
    D = assistants.Dichotomies(len(np.unique(inp_condition)),
                               input_task.positives + task.positives,
                               extra=5)

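    # Parallelism score and CCGP on the held-out test indices, one entry per dichotomy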
    ps = []
    ccgp = []
    for _ in D:
        ps.append(
            D.parallelism(z.T.detach().numpy(), inp_condition[:ndat][idx_tst],
                          clf))
        ccgp.append(
            D.CCGP(z.T.detach().numpy(),
                   inp_condition[:ndat][idx_tst],
                   gclf,
                   max_iter=1000))
    PS.append(ps)
    CCGP.append(ccgp)
Example #6
        idx = np.random.choice(z.shape[0], n_compute, replace=False)
        # idx_tst = idx[::4] # save 1/4 for test set
        # idx_trn = np.setdiff1d(idx, idx_tst)

        cond = stim_cond[idx]
        # cond = util.decimal(this_exp.train_data[1][idx,...])

        # xor = np.where(~(np.isin(range(num_cond), args['dichotomies'][0])^np.isin(range(num_cond), args['dichotomies'][1])))[0]
        ## Loop over dichotomies
        # D = assistants.Dichotomies(num_cond, args['dichotomies']+[xor], extra=50)

        # choose dichotomies to have a particular order
        Q = input_task.num_var
        D_fake = assistants.Dichotomies(num_cond,
                                        this_task.positives,
                                        extra=7000)
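        # Rank candidate dichotomies by how much information they carry about the task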
        mi = np.array([this_task.information(p) for p in D_fake])
        midx = np.append(range(Q), np.flip(np.argsort(mi[Q:])) + Q)
        # these_dics = args['dichotomies'] + [D_fake.combs[i] for i in midx]
        D = assistants.Dichotomies(num_cond, [D_fake.combs[i] for i in midx],
                                   extra=0)

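        # clf: mean classifier (parallelism); gclf: SVM (CCGP); dclf: one SVM
        # per dichotomy (shattering / decodability)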
        clf = assistants.LinearDecoder(N, 1, assistants.MeanClassifier)
        gclf = assistants.LinearDecoder(N, 1, svm.LinearSVC)
        dclf = assistants.LinearDecoder(N, D.ntot, svm.LinearSVC)
        # clf = LinearDecoder(this_exp.dim_input, 1, MeanClassifier)
        # gclf = LinearDecoder(this_exp.dim_input, 1, svm.LinearSVC)
        # dclf = LinearDecoder(this_exp.dim_input, D.ntot, svm.LinearSVC)

        # K = int(num_cond/2) - 1 # use all but one pairing
Example #7
    running_loss = 0

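    # Evaluate the two-layer ReLU network on 5000 randomly chosen test samples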
    idx = np.random.choice(this_exp.test_data[0].shape[0], 5000, replace=False)
    z1 = nn.ReLU()(torch.matmul(W1, this_exp.test_data[0][idx, :].T) + b1)
    z = nn.ReLU()(torch.matmul(W2, z1) + b2)
    pred = torch.matmul(W, z) + b

    perf = ((pred.T >
             0) == this_exp.test_data[1][idx, :]).detach().numpy().mean(0)
    test_perf.append(perf)

    # this is just the way I compute the abstraction metrics, sorry
    clf = assistants.LinearDecoder(N, 1, assistants.MeanClassifier)
    gclf = assistants.LinearDecoder(N, 1, svm.LinearSVC)
    D = assistants.Dichotomies(len(np.unique(this_exp.test_conditions)),
                               this_exp.task.positives,
                               extra=0)

    ps = []
    ccgp = []
    for _ in D:
        ps.append(
            D.parallelism(z.T.detach().numpy(), this_exp.test_conditions[idx],
                          clf))
        ccgp.append(
            D.CCGP(z.T.detach().numpy(), this_exp.test_conditions[idx], gclf))
    PS.append(ps)
    CCGP.append(ccgp)

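    # SVD of the mean-centred activity; the singular value spectrum summarises
    # the dimensionality of the representation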
    _, S, _ = la.svd(z.detach() - z.mean(1).detach()[:, None],
                     full_matrices=False)
Example #8
    pred = torch.matmul(W, z) + b

    if ppp == 0:
        perf = np.sum((pred.T - targets[idx_tst, :]).detach().numpy()**2,
                      1).mean(0)
    else:
        perf = ((pred.T > 0) == targets[idx_tst, :]).detach().numpy().mean(0)
    test_perf.append(perf)

    # this is just the way I compute the abstraction metrics, sorry
    clf = assistants.LinearDecoder(N, 1, assistants.MeanClassifier)
    gclf = assistants.LinearDecoder(N, 1, svm.LinearSVC)
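    # Dichotomies of the output task only (the commented-out call below also
    # included the input task's dichotomies)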
    # D = assistants.Dichotomies(len(np.unique(inp_condition)),
    # input_task.positives+task.positives, extra=0)
    D = assistants.Dichotomies(len(np.unique(inp_condition)),
                               task.positives,
                               extra=0)

    ps = []
    ccgp = []
    for _ in D:
        ps.append(
            D.parallelism(z.T.detach().numpy(), inp_condition[:ndat][idx_tst],
                          clf))
        ccgp.append(
            D.CCGP(z.T.detach().numpy(),
                   inp_condition[:ndat][idx_tst],
                   gclf,
                   max_iter=1000))
    PS.append(ps)
    CCGP.append(ccgp)
Example #9
File: run_fit.py Project: Kelarion/repler
                          for i in np.unique(this_exp.train_conditions[idx])])
    # dist_to_class = np.sum((zee[:,:,None].detach().numpy() - centroids.T)**2,1)
    # nearest = dist_to_class.argmin(1)
    # labs = this_exp.task(torch.tensor(nearest)).detach().numpy()
    # perf = np.mean(util.decimal(labs) == util.decimal(why))
    # train_perf.append(perf)
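    # Pairwise Euclidean distances between the class centroids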
    metrics['distances'].append(la.norm(centroids.T[:,:,None]-centroids.T[:,None,:],2,0))
    
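    # Participation ratio of the representation: (sum s_i^2)^2 / sum s_i^4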
    U, S, _ = la.svd(z-z.mean(0)[None,:],full_matrices=False)
    metrics['PR'].append(((S**2).sum()**2)/(S**4).sum())
    
    metrics['sparsity'].append(np.mean(z>0))
    
    K = int(num_class/4) # use half the pairings

    D = assistants.Dichotomies(len(np.unique(this_exp.train_conditions)),
                               this_exp.task.positives, extra=sample_dichotomies)
    
    clf = assistants.LinearDecoder(N, 1, assistants.MeanClassifier)
    gclf = assistants.LinearDecoder(N, 1, svm.LinearSVC)
    dclf = assistants.LinearDecoder(N, D.ntot, svm.LinearSVC)
    
    cond = this_exp.train_conditions[idx]
    ps = []
    ccgp = []
    d = np.zeros((z.shape[0], D.ntot))
    for i, _ in enumerate(D):
        # parallelism
        ps.append(D.parallelism(z, cond, clf))
        
        # CCGP
        ccgp.append(D.CCGP(z, cond, gclf, K))
Example #10
        z1 = W1 @ inps
        z2 = W2 @ inps
        z3 = W3 @ inps
        z4 = W4 @ inps

        z = np.concatenate([z1, z2, z3, z4], axis=0)

        if nonlin == 'relu':
            z *= (z >= 0)
        elif nonlin == 'tanh':
            z = np.tanh(z)
        elif nonlin == 'binary':
            z = (z >= 0).astype(int)

        D = assistants.Dichotomies(4)

        clf = assistants.LinearDecoder(dim_grp * 4, 1,
                                       assistants.MeanClassifier)
        gclf = assistants.LinearDecoder(dim_grp * 4, 1, svm.LinearSVC)
        dclf = assistants.LinearDecoder(dim_grp * 4, D.ntot, svm.LinearSVC)
        # clf = LinearDecoder(this_exp.dim_input, 1, MeanClassifier)
        # gclf = LinearDecoder(this_exp.dim_input, 1, svm.LinearSVC)
        # dclf = LinearDecoder(this_exp.dim_input, D.ntot, svm.LinearSVC)

        # K = int(num_cond/2) - 1 # use all but one pairing
        # K = int(num_cond/4) # use half the pairings

        PS = np.zeros(D.ntot)
        CCGP = []  #np.zeros((D.ntot, 100))
        out_corr = []
Example #11
 pr_.append(((S**2).sum()**2)/(S**4).sum())
 
 # # U = [email protected]()@V.T
 # U = (z[:ndat,:]+eps1)@V.T
 # U = [email protected]
 # pcs.append(U)
 
 eps1 = np.random.randn(ndat, dim)*noise
 eps2 = np.random.randn(ndat, dim)*noise
 
 clf = assistants.LinearDecoder(dim, 1, assistants.MeanClassifier)
 gclf = assistants.LinearDecoder(dim, 1, svm.LinearSVC)
 # rclf = svm.LinearSVC()
 rclf = linear_model.LogisticRegression()
 
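 # Enumerate dichotomies with the task's own dichotomies first, then the rest in
 # order of decreasing information about the task labels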
 D_fake = assistants.Dichotomies(len(np.unique(this_exp.train_conditions)), this_exp.task.positives, extra=50)
 Q = len(this_exp.task.positives)
 mi = np.array([this_exp.task.information(p) for p in D_fake])
 midx = np.append(range(Q),np.flip(np.argsort(mi[Q:]))+Q)
 # these_dics = args['dichotomies'] + [D_fake.combs[i] for i in midx]
 D = assistants.Dichotomies(len(np.unique(cond)), [D_fake.combs[i] for i in midx], extra=0)
 
 PS = []
 DCorr_res = []
 DCorr_proj = []
 DCorr_marg = []
 PDCorr = []
 PDim = []
 CCGP = []
 out_corr = []
 apprx_cost = []
Example #12
def xor2(a, b, c):
    return a ^ b


def xor3(a, b, c):
    return a ^ b ^ c


#%%

# dics = util.RandomDichotomies(8,1)

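# Vertices of the 3-bit cube: row i holds the binary digits of i (least-significant bit first)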
X = np.mod(np.arange(8)[:, None] // (2**np.arange(3)[None, :]), 2)

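# Plot every dichotomy of the 8 conditions as a two-colouring of the cube's vertices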
fig = plt.figure()
for i, d in enumerate(assistants.Dichotomies(8)):
    # colorby = inp_condition
    # colorby = util.decimal(outputs).numpy()
    colorby = np.isin(np.arange(8), d)

    # whichone = int('57%d'%(i+1))
    ax = fig.add_subplot(5, 7, i + 1, projection='3d')

    ax.scatter(X[:, 0], X[:, 1], X[:, 2], s=500, c=colorby)
    # for i in np.unique(these_conds):
    #     c = [int(i), int(np.mod(i+1,U.shape[0]))]
    #     ax.plot(U[c,0],U[c,1],U[c,2],'k')
    ax.set_title(d)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_zticks([])