Example #1
0
def train(GAN, G, D, epochs=100, n_samples=20000, batch_size=64, verbose=False, v_freq=1):
    """Adversarially train generator G and discriminator D.

    GAN is the stacked G->D model used to update G while D is frozen.

    Parameters
    ----------
    GAN, G, D : Keras models (stacked GAN, generator, discriminator).
    epochs : number of training epochs.
    n_samples : samples drawn per epoch (batches per epoch = n_samples // batch_size).
    batch_size : minibatch size.
    verbose, v_freq : print/plot diagnostics every `v_freq` epochs when verbose.

    Returns
    -------
    (D_loss, G_loss) : lists of per-epoch mean discriminator / generator losses.
    """
    D_loss = []
    G_loss = []
    for epoch in range(epochs):
        d_loss = []
        g_loss = []
        # Give the discriminator a head start at each epoch.
        pretrain(G, D, n_samples, batch_size, 20)
        # Integer division: n_samples / batch_size is a float in Python 3,
        # and range() would raise TypeError on it.
        for batch in range(n_samples // batch_size):
            # --- Discriminator step: mixed real+generated batch, D trainable.
            X, y = sample_data_and_gen(G, batch_size)
            set_trainability(D, True)
            d_loss.append(D.train_on_batch(X[output_columns], y))

            # --- Generator step: freeze D, update G through the stacked GAN.
            set_trainability(D, False)
            X = generate_input(batch_size)
            y = -1*np.ones(batch_size) #Claim these are true tracks, see if discriminator believes
            g_loss.append(GAN.train_on_batch(X, y))

        G_loss.append(np.mean(g_loss))
        D_loss.append(np.mean(d_loss))
        if verbose and (epoch + 1) % v_freq == 0:
            print("Epoch #{}: Generative Loss: {}, Discriminative Loss: {}".format(epoch + 1, G_loss[-1], D_loss[-1]))
            plot_losses(G_loss, D_loss)
            X, y = sample_data_and_gen(G, 2000)
            binning = np.linspace(-0.5, 0.5, 100)
            for distr in output_columns:
                # y == 1 marks generated samples, y == -1 marks real ones
                # (labels as produced by sample_data_and_gen above).
                plot_distribution(X[y==1][distr], binning, epoch=epoch+1, title="Generated_"+distr)
                plot_distribution(X[y==-1][distr], binning, epoch=epoch+1, title="Real_"+distr)

        # Halve both learning rates every 200 epochs. The originals were
        # Python 2 print statements, a SyntaxError under Python 3.
        if (epoch + 1) % 200 == 0:
            print("Old lr: " + str(K.eval(D.optimizer.lr)))
            K.set_value(D.optimizer.lr, 0.5*K.eval(D.optimizer.lr))
            K.set_value(G.optimizer.lr, 0.5*K.eval(G.optimizer.lr))
            print("New lr: " + str(K.eval(D.optimizer.lr)))

    return D_loss, G_loss
### TRAINING LFADS MODEL

# Initialize parameters for LFADS and reset optimization loop counters.
# NOTE(review): `random` here is presumably jax.random and `onp` plain
# numpy — confirm against this file's import block.
key = random.PRNGKey(onp.random.randint(0, utils.MAX_SEED_INT))
init_params = lfads.lfads_params(key, lfads_hps)

# Get the trained parameters and check out the losses.
trained_params, losses_dict = \
    optimize_lfads(init_params, lfads_hps, lfads_opt_hps, train_data, eval_data)

# Plot some information about the training.
# 'tlosses' / 'elosses' are the train / eval loss traces recorded by the
# optimizer; the first 100 sampled points are skipped as warm-up.
if do_plot:
    plotting.plot_losses(losses_dict['tlosses'],
                         losses_dict['elosses'],
                         sampled_every=10,
                         start_idx=100,
                         stop_idx=num_batches)
    plt.savefig(os.path.join(figure_dir, 'losses.png'))

# Here, have to implement after LFADS is working again.
#nexamples_to_save = 10
#plotting.plot_lfads()

# Create a savename for the trained parameters and save them.
# The timestamp uniquifier prevents clobbering earlier checkpoints.
rnn_type = 'lfads'
task_type = 'integrator'
fname_uniquifier = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
network_fname = ('trained_params_' + rnn_type + '_' + task_type + '_' + \
                 fname_uniquifier + '.npz')
network_path = os.path.join(output_dir, network_fname)
Example #3
0
def main(*args, **kwargs):
    """Train and benchmark a simple linear MLP on pre-generated sklearn data.

    Loads train/valid/test tensors from ~/teas-data/sklearn/, trains a
    LinearMLP for a fixed number of epochs, plots loss curves and one
    sample prediction, and prints the final test MSE.

    Raises
    ------
    ValueError : if the expected data directory does not exist.
    """
    torch.manual_seed(123)

    # load data, set model parameters
    # ---------------------------------
    home = Path.home()
    path_for_data = home/"teas-data/sklearn/"
    # Single existence check (the original re-tested the path right after
    # raising, which was redundant).
    if not os.path.exists(path_for_data):
        raise ValueError("No data. By default, this script uses synthetic data that you can generate by running skl_synthetic.py. Otherwise please modify this script")
    X_train, X_valid, X_test, Y_train, Y_valid, Y_test = map(FloatTensor, load_skl_data(path_for_data))

    batch_size = 128
    train_ds = TensorDataset(X_train, Y_train)
    valid_ds = TensorDataset(X_valid, Y_valid)
    test_ds = TensorDataset(X_test, Y_test)
    train_dl = DataLoader(train_ds, batch_size)
    valid_dl = DataLoader(valid_ds, batch_size)
    test_dl = DataLoader(test_ds, batch_size)

    # these give us some shape values for later
    X, Y = next(iter(train_ds))
    input_dim = X.shape[0]
    hidden_dim = 512
    output_dim = Y.shape[0]

    # first, train and benchmark a simple Linear MLP
    # (use hidden_dim instead of repeating the literal 512)
    lmlp_model = LinearMLP([input_dim, hidden_dim, output_dim])

    # train the linear MLP
    epochs = 10
    lr = 1e-2
    opt = optim.Adam(lmlp_model.parameters(), lr)
    mse = nn.MSELoss()
    train_loss, valid_loss = [], []

    print("Training a linear MLP")
    for e in tqdm(range(epochs)):
        this_train_loss = np.mean([lmlp_model.update_batch(X, Y, opt, mse) for X, Y in train_dl])
        this_valid_loss = np.mean([lmlp_model.update_batch(X, Y, opt, mse, train=False) for X, Y in valid_dl])
        train_loss.append(this_train_loss)
        valid_loss.append(this_valid_loss)

    plot_losses(epochs, train_loss, valid_loss)

    # visualise predicted vs. actual
    # pull out a random row: valid_ds contains X_valid.shape[0] samples, so
    # sample the index from axis 0 (the original used shape[1], the feature
    # count, which can index out of range).
    idx = np.random.randint(0, X_valid.shape[0])
    X, Y = valid_ds[idx]
    Y_hat = lmlp_model(X)
    # Y_hat vs. Y
    plot_predicted_vs_actual(Y, Y_hat, idx)

    # test losses (reuse the MSELoss instance created above — the original
    # constructed a second identical one)
    test_pred = []
    pred_error = []
    for X, Y in test_ds:
        Y_hat = lmlp_model(X)
        test_pred.append(Y_hat.detach().numpy())
        pred_error.append(mse(Y_hat, Y).detach().numpy())
    print("Final test MSE loss on prediction task (linear MLP): {}".format(np.mean(pred_error)))
# Persist the trained parameters (onp is plain numpy; .npz archive).
print("Saving parameters: ", network_path)
onp.savez(network_path, trained_params)

if do_plot:
    # Plot examples and statistics about the data.
    plotting.plot_data_pca(data_dict)
    plt.savefig(os.path.join(figure_dir, 'data_pca.png'))
    plotting.plot_data_example(data_dict['inputs'], data_dict['hiddens'],
                               data_dict['outputs'], data_dict['targets'])
    plt.savefig(os.path.join(figure_dir, 'data_example.png'))
    plotting.plot_data_stats(data_dict, data_bxtxn, data_dt)
    plt.savefig(os.path.join(figure_dir, 'data_stats.png'))

    # Plot some information about the training.
    # 'tlosses' / 'elosses' are the train / eval loss traces.
    plotting.plot_losses(opt_details_dict['tlosses'],
                         opt_details_dict['elosses'],
                         sampled_every=print_every)
    plt.savefig(os.path.join(figure_dir, 'losses.png'))

    # Plot a bunch of examples of eval trials run through LFADS.
    nexamples_to_save = 10
    for eidx in range(nexamples_to_save):
        bidx = onp.random.randint(eval_data.shape[0])
        psa_example = eval_data[bidx, :, :].astype(np.float32)
        # Make an entire batch of a single, example, and then
        # randomize the VAE with batchsize number of keys.
        examples = onp.repeat(np.expand_dims(psa_example, axis=0),
                              batch_size,
                              axis=0)
        skeys = random.split(key, batch_size)
        lfads_dict = lfads.batch_lfads_jit(trained_params, lfads_hps, skeys,
Example #5
0
# Train a linear autoencoder; its learned weights seed the FEA defined below.
opt = optim.Adam(lae_model.parameters(), lr)
mse = nn.MSELoss()
train_loss, valid_loss = [], []

print("Training a linear autoencoder; the weights will be used in the FEA")

# Per-epoch mean batch loss on train, then on validation. NOTE(review):
# train=False presumably skips the optimizer step inside update_batch —
# confirm against the model class.
for e in tqdm(range(epochs)):
    this_train_loss = np.mean(
        [lae_model.update_batch(X, opt, mse) for X, _ in train_dl])
    this_valid_loss = np.mean([
        lae_model.update_batch(X, opt, mse, train=False) for X, _ in valid_dl
    ])
    train_loss.append(this_train_loss)
    valid_loss.append(this_valid_loss)

plot_losses(epochs, train_loss, valid_loss)

# visualise predicted vs. actual
# generate predictions on a random row
# NOTE(review): shape[1] is the feature count, not the number of rows in
# valid_ds — this likely should be shape[0]; confirm before relying on it.
idx = np.random.randint(0, X_valid.shape[1])
X, _ = valid_ds[idx]
X_tilde = lae_model(X)
plot_predicted_vs_actual(X,
                         X_tilde,
                         idx,
                         title="Predicted X vs. observed X (linear AE)")


# now use these weights in a FEA
class joint_loss(nn.Module):
    """
Example #6
0
    # Move the loaders and model onto the default device (presumably GPU
    # when available — depends on gf.get_default_device; confirm).
    device = gf.get_default_device()
    train_loader = gf.DeviceDataLoader(train_loader, device)
    test_loader = gf.DeviceDataLoader(test_loader, device)
    mnist_model = gf.move_to_device(mnist_model, device)

    # Train MLP model
    history = te.train_model(model=mnist_model,
                             epochs=n_epochs,
                             lr=0.01,
                             train_loader=train_loader,
                             val_loader=test_loader,
                             opt_func=torch.optim.SGD)

    # Visualize loss and accuracy history
    pl.plot_accuracy(history)
    pl.plot_losses(history)

    # Evaluate final model
    scores = te.evaluate(model=mnist_model, val_loader=test_loader)
    print('Test scores: ', scores)

    # Predict on a few inputs
    test_dataset = MNIST(root='data/',
                         train=False,
                         transform=transforms.ToTensor())
    # NOTE(review): `dataset` is indexed although `test_dataset` was just
    # created above — possibly a typo for test_dataset[0]; confirm.
    x, label = dataset[0]
    # Add a batch dimension before prediction.
    x = x.unsqueeze(0)
    pred = te.predict(x=x, model=mnist_model)
    print('True label: {}, Predicted: {}'.format(label, pred))

    x, label = dataset[111]