Example #1
        # Parse the initialisation index from the file name (e.g. 'init3').
        rg = re.findall(r"init(\d+)", f)
        init = int(rg[0]) if rg else None
        # Skip files with no index, or with an index above 10.
        if init is None or init > 10:
            continue

        # Load the stored training metrics and hyperparameters.
        with open(SAVE_DIR + FOLDERS + met_files[j], 'rb') as fh:
            metrics = pickle.load(fh)
        with open(SAVE_DIR + FOLDERS + arg_files[j], 'rb') as fh:
            args = pickle.load(fh)

        # Rebuild the architecture, then load the saved weights into it.
        net = students.MultiGLM(
            students.Feedforward([100, N, N], ['ReLU', 'ReLU']),
            students.Feedforward([N, N_out], [None]), students.GausId(N_out))

        net.load(SAVE_DIR + FOLDERS + f)

        # if metrics['test_perf'][-1,...].min() > maxmin:
        #     maxmin = metrics['test_perf'][-1,...].min()
        #     best_net = model
        #     this_arg = args

        for key, val in metrics.items():
            if len(val) == 1000:
                continue
            if key not in all_metrics:
                # First time this metric appears: allocate a NaN-filled
                # array with one slot per model.
                shp = (num,) + np.squeeze(np.array(val)).shape
                all_metrics[key] = np.full(shp, np.nan)
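The excerpt stops right after the allocation; presumably the loop then writes this run's trace into its per-model slot. A minimal sketch of that step, assuming `j` indexes the current model as in the file-loading lines above:

            # Hypothetical continuation (not in the excerpt): store this
            # run's values in the per-model slot.
            all_metrics[key][j] = np.squeeze(np.array(val))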
Example #2
                               dim=100,
                               var_means=1)

# A random direction in input space along which to encode an extra
# continuous variable.
coding_dir = np.random.randn(100)

# Continuous component, scaled up, added to the inputs along coding_dir.
cont_comp = np.random.randn(this_exp.train_data[0].shape[0], 1) * 10
input_states = (this_exp.train_data[0].data +
                cont_comp * coding_dir[None, :]).float()

# Targets: the original outputs with the continuous component appended.
output_states = torch.cat(
    (this_exp.train_data[1].data, torch.tensor(cont_comp).float()), 1)

#%% Train network
N = 100

net = students.Feedforward([input_states.shape[1], N, output_states.shape[1]],
                           ['ReLU', None])

optimizer = optim.Adam(net.parameters(), lr=1e-4)
dset = torch.utils.data.TensorDataset(input_states, output_states)
dl = torch.utils.data.DataLoader(dset, batch_size=64, shuffle=True)

train_loss = []
# train_perf = []
train_PS = []
test_loss = []
min_dist = []
for epoch in range(2000):

    # loss = net.grad_step(dl, optimizer)

    running_loss = 0
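The epoch body is cut off after `running_loss = 0`. A minimal sketch of a standard PyTorch inner batch loop that would fit here, assuming `students.Feedforward` is an ordinary `nn.Module` and a mean-squared-error objective (the actual loss used is not shown in the excerpt):

    # Hypothetical continuation of the epoch body (not in the excerpt):
    for batch_inp, batch_out in dl:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(net(batch_inp), batch_out)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    train_loss.append(running_loss / len(dl))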
Example #3
# train set would be like an autoencoder training, so maybe that's fine
# Draw n samples from each condition, then use the rank ordering of
# succ_conds to put the condition-sorted samples back in trial order.
samps = np.concatenate([np.random.choice(np.where(abstract_conds == c)[0], n)
                        for c, n in zip(cond_set, succ_counts)])

unscramble = np.argsort(np.argsort(succ_conds))
successor_idx = samps[unscramble]
targets = output_states[successor_idx, :]

# targets = output_state


#%%
N = 100

# net = students.Feedforward([inputs.shape[1],100,2],['ReLU',None])
net = students.MultiGLM(students.Feedforward([inputs.shape[1], N], ['ReLU']),
                        students.Feedforward([N, targets.shape[1]], [None]),
                        students.GausId(targets.shape[1]))
# net = students.MultiGLM(students.Feedforward([inputs.shape[1],N], ['ReLU']),
#                         students.Feedforward([N,targets.shape[1]], [None]),
#                         students.Bernoulli(targets.shape[1]))
# net = students.MultiGLM(students.Feedforward([inputs.shape[1],N], ['ReLU']),
#                         students.Feedforward([N, p], [None]),
#                         students.Categorical(p))

# 50% of the data for training; a test set half that size from the rest.
n_trn = int(0.5 * targets.shape[0])
trn = np.random.choice(targets.shape[0], n_trn, replace=False)
tst = np.random.choice(np.setdiff1d(range(targets.shape[0]), trn),
                       int(0.5 * n_trn), replace=False)

optimizer = optim.Adam(net.parameters(), lr=1e-4)
dset = torch.utils.data.TensorDataset(torch.tensor(inputs[trn,:]).float(),
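The excerpt breaks off inside the `TensorDataset` call; judging from the parallel examples below, the second argument is presumably the matching targets tensor. A hedged completion, with the usual loader:

# Hypothetical completion of the truncated call above (the second
# argument is inferred from the parallel examples, not from the source):
dset = torch.utils.data.TensorDataset(torch.tensor(inputs[trn, :]).float(),
                                      torch.tensor(targets[trn, :]).float())
dl = torch.utils.data.DataLoader(dset, batch_size=64, shuffle=True)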
Example #4
# for i in np.unique(these_conds):
#     c = [int(i), int(np.mod(i+1,U.shape[0]))]
#     ax.plot(U[c,0],U[c,1],U[c,2],'k')
util.set_axes_equal(ax)

# plt.title('PCA dimension: %.2f'%((np.sum(S**2)**2)/np.sum(S**4)))

#%%
N = 100

# net = students.Feedforward([inputs.shape[1],100,2],['ReLU',None])
# net = students.MultiGLM(students.Feedforward([inputs.shape[1], N], ['ReLU']),
#                         students.Feedforward([N, targets.shape[1]], [None]),
#                         students.GausId(targets.shape[1]))
net = students.MultiGLM(
    students.Feedforward([dim_inp * num_var, N], ['ReLU']),
    students.Feedforward([N, output_task.dim_output], [None]),
    students.Bernoulli(output_task.dim_output))
# net = students.MultiGLM(students.Feedforward([inputs.shape[1],N], ['ReLU']),
#                         students.Feedforward([N, p], [None]),
#                         students.Categorical(p))

n_trn = int(0.8 * num_data)
trn = np.random.choice(num_data, n_trn, replace=False)
tst = np.setdiff1d(range(num_data), trn)

# Note: only the encoder's parameters are optimised here.
optimizer = optim.Adam(net.enc.parameters(), lr=1e-4)
dset = torch.utils.data.TensorDataset(inputs[trn, :].float(),
                                      outputs[trn, :].float())
dl = torch.utils.data.DataLoader(dset, batch_size=64, shuffle=True)
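The excerpt ends after building the loader. The commented-out line in Example #2 suggests students models expose a `grad_step(dl, optimizer)` helper; a sketch of a training loop under that assumption (the epoch count and the return value are guesses):

# Hypothetical training loop, assuming grad_step runs one pass over
# the loader and returns the epoch loss:
train_loss = []
for epoch in range(1000):
    loss = net.grad_step(dl, optimizer)
    train_loss.append(loss)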
Example #5
     for l in range(Data.num_data)])

# Random input/output patterns indexed by the left and right word.
inp_task = tasks.RandomPatterns(Data.num_data, dim_inp)
out_task = tasks.RandomPatterns(Data.num_data, dim_inp)

inputs = inp_task(left_word)
outputs = out_task(right_word)

plt.figure()
plt.imshow(cooc, 'binary')

#%%
N_hid = 100
n_epoch = 10000

net = students.Feedforward([dim_inp, N_hid, dim_inp],
                           nonlinearity=['Tanh', None])

optimizer = optim.Adam(net.parameters(), lr=1e-3)

dl = pt_util.batch_data(inputs, outputs, batch_size=200, shuffle=True)

train_loss = []
kernel_align = []
for epoch in tqdm(range(n_epoch)):
    # Hidden-layer representation of the 8 noiseless input patterns.
    z = net.network[:2](inp_task(np.arange(8), noise=0)).detach().numpy()
    Kz = z @ z.T

    # Normalised alignment between the hidden-layer kernel and the
    # co-occurrence matrix: <Kz, cooc> / (||Kz|| ||cooc||).
    kernel_align.append(
        np.sum(Kz * cooc) / np.sqrt(np.sum(cooc * cooc) * np.sum(Kz * Kz)))

    running_loss = 0
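As in Example #2, the epoch body is truncated after `running_loss = 0`. A sketch of the likely remainder, assuming `pt_util.batch_data` yields (input, output) batches like a DataLoader and a mean-squared-error objective (both assumptions):

    # Hypothetical remainder of the epoch (not in the excerpt):
    for x, y in dl:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(net(x), y)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    train_loss.append(running_loss)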
Example #6
# for i in np.unique(these_conds):
#     c = [int(i), int(np.mod(i+1,U.shape[0]))]
#     ax.plot(U[c,0],U[c,1],U[c,2],'k')
util.set_axes_equal(ax)

# plt.title('PCA dimension: %.2f'%((np.sum(S**2)**2)/np.sum(S**4)))

#%%
N = 200

# net = students.Feedforward([inputs.shape[1],100,2],['ReLU',None])
# net = students.MultiGLM(students.Feedforward([inputs.shape[1], N], ['ReLU']),
#                         students.Feedforward([N, targets.shape[1]], [None]),
#                         students.GausId(targets.shape[1]))
net = students.MultiGLM(
    students.Feedforward([dim_inp * 4, N, N], ['ReLU', 'ReLU']),
    students.Feedforward([N, 2], [None]), students.Bernoulli(2))
# net = students.MultiGLM(students.Feedforward([inputs.shape[1],N], ['ReLU']),
#                         students.Feedforward([N, p], [None]),
#                         students.Categorical(p))

n_trn = int(0.8 * num_data)
trn = np.random.choice(num_data, n_trn, replace=False)
tst = np.setdiff1d(range(num_data), trn)

optimizer = optim.Adam(net.enc.parameters(), lr=1e-4)
dset = torch.utils.data.TensorDataset(inputs[trn, :].float(),
                                      outputs[trn, :].float())
dl = torch.utils.data.DataLoader(dset, batch_size=128, shuffle=True)

n_compute = np.min([len(tst), 1000])
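The excerpt ends after capping the number of evaluation points. Presumably the next step scores the model on a random subsample of the held-out set; a sketch under that assumption (the format of MultiGLM's forward output is itself an assumption here):

# Hypothetical continuation (not in the excerpt): evaluate on up to
# n_compute held-out points.
idx = np.random.choice(tst, n_compute, replace=False)
with torch.no_grad():
    pred = net(inputs[idx, :].float())
# e.g. thresholded accuracy against the binary targets, if pred is a
# tensor of Bernoulli probabilities (assumed):
# acc = ((pred > 0.5) == outputs[idx, :].bool()).float().mean()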