Example #1
0
def main():
    """Train (or load) a beta-VAE on EEG data plus a CAN on pupil data,
    then run the VAE encoder/decoder on the held-out EEG test set.

    NOTE(review): this block appears truncated — `fn` is computed on the
    last visible line but never used; presumably a save step follows.
    Confirm against the full file.
    """
    # Set parameters
    vae_epoch = 2        # VAE training epochs
    can_epoch = 1000     # CAN training epochs
    batch_size = 64
    latent_dim = 10      # latent dimensionality shared by VAE and CAN
    beta_eeg = 5.0       # KL-term weight for the EEG beta-VAE
    train = True         # False -> load previously saved encoders/decoders

    # Read data sets
    data_root = "/home/zainkhan/bci-representation-learning"
    eeg_train, eeg_test, pupil_train, pupil_test, sub_cond = utils.read_single_trial_datasets(
        data_root)

    if train:
        # Train VAE
        vae = VAE(beta=beta_eeg, latent_dim=latent_dim)
        vae.compile(optimizer=keras.optimizers.Adam())
        vae.fit(eeg_train, epochs=vae_epoch, batch_size=batch_size)

        # Save VAE
        #vae.encoder.save("vae_encoder")
        #vae.decoder.save("vae_decoder")

        # Train CAN, conditioned on the trained VAE and the EEG data
        can = CAN(
            vae=vae,
            can_data=pupil_train,
            vae_data=eeg_train,
            latent_dim=latent_dim,
            epochs=can_epoch,
            batch_size=batch_size,
        )
        # run_eagerly=True: CAN's custom train step presumably requires
        # eager execution — TODO confirm against the CAN implementation
        can.compile(optimizer=keras.optimizers.Adam(), run_eagerly=True)
        # shuffle=False looks intended to keep pupil samples aligned with
        # the EEG samples used by the VAE inside the CAN — verify
        can.fit(pupil_train,
                epochs=can_epoch,
                batch_size=batch_size,
                shuffle=False)

        # Save CAN
        can.encoder.save("can_encoder")
        can.decoder.save("can_decoder")
    else:
        # Load all encoders/decoders saved by a previous training run
        vae = VAE(beta=beta_eeg, latent_dim=latent_dim)
        vae.encoder = keras.models.load_model("vae_encoder")
        vae.decoder = keras.models.load_model("vae_decoder")

        can = CAN(vae=vae, vae_data=eeg_train, latent_dim=latent_dim)
        can.encoder = keras.models.load_model("can_encoder")
        can.decoder = keras.models.load_model("can_decoder")

    # VAE predictions: encode then decode the held-out EEG test set
    encoded_data = vae.encoder.predict(eeg_test)
    decoded_data = vae.decoder.predict(encoded_data)
    fn = utils.get_filename("predictions/", "test-eeg")
def generate(model_path, device, nrows=2, ncols=5, latent_dim=100):
    """Sample images from a trained VAE checkpoint and save a grid plot.

    Loads the state dict from ``model_path``, draws ``nrows * ncols``
    latent vectors from the standard normal prior, decodes them,
    binarizes the outputs at 0.5, and writes the grid to
    ``Generated_Samples.png``.

    Args:
        model_path: Path to a state-dict checkpoint compatible with ``VAE``.
        device: Torch device to run inference on.
        nrows: Grid rows (default 2, matching the original hard-coded grid).
        ncols: Grid columns (default 5).
        latent_dim: Latent-vector size expected by ``model.fc_decode``
            (default 100 — taken from the original code; confirm against
            the VAE definition).
    """
    print("Loading model....\n")
    model = VAE().to(device)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()  # inference mode: freeze dropout/batch-norm behavior
    with torch.no_grad():
        # Sample from the prior and reshape to (N, C, 1, 1) so the
        # convolutional decoder can upsample from a 1x1 spatial map.
        z = torch.randn(nrows * ncols, latent_dim).to(device)
        decoding = model.fc_decode(z)
        decoding = decoding.reshape(decoding.shape[0], decoding.shape[1], 1, 1)
        gen_output = model.decoder(decoding)
        gen_output = gen_output.squeeze().detach().cpu().numpy()
        # Binarize at 0.5 (decoder output presumably sigmoid in [0, 1]).
        gen_output[gen_output >= 0.5] = 1
        gen_output[gen_output < 0.5] = 0
    # Plot the samples in an nrows x ncols grid.
    # NOTE: axs[i, j] indexing assumes nrows > 1 and ncols > 1.
    fig, axs = plt.subplots(nrows, ncols)
    fig.suptitle('Generated images')

    for i in range(nrows):
        for j in range(ncols):
            axs[i, j].imshow(gen_output[j + i * ncols])
    plt.savefig("Generated_Samples.png")
    # Close the figure so repeated calls don't accumulate open figures.
    plt.close(fig)
Example #3
0
            # Track per-step losses for the progress display below.
            rec_log.append(reconstruction_loss.item())
            kl_log.append(kl_loss.item())

            utils.show_process(epoch_i, step_i + 1, steps_per_epoch, rec_log,
                               kl_log)

        # Save the original batch once (first epoch only) for visual
        # comparison against later reconstructions.
        if epoch_i == 1:
            torchvision.utils.save_image(result.reshape(-1, 1, 28, 28),
                                         os.path.join(vae_image_dir,
                                                      'orig.png'),
                                         nrow=10)
        # Reconstruct the last batch; the two ignored values are
        # presumably the latent mean/log-variance — confirm against `net`.
        reconstructed, _, _ = net(result)
        utils.save_image(reconstructed.reshape(-1, 1, 28, 28), 10, epoch_i,
                         step_i + 1, vae_image_dir)
        # Decode 100 random 2-D latent samples each epoch to monitor
        # generation quality over training.
        image = net.decoder(torch.randn((100, 2)).to(system))
        torchvision.utils.save_image(image.reshape(-1, 1, 28, 28),
                                     os.path.join(
                                         vae_image_dir,
                                         'image_{}.png'.format(epoch_i)),
                                     nrow=10)

        # Checkpoint model + optimizer state after every epoch.
        utils.save_model(net, optim, rec_log, checkpoint_dir,
                         'autoencoder.ckpt')

    # After training: decode a grid of latent points (Box-Muller mapped)
    # to visualize the learned 2-D latent manifold.
    steps = 50
    z = utils.box_muller(steps).to(system)
    result = net.decoder(z)
    torchvision.utils.save_image(result.reshape(-1, 1, 28, 28),
                                 os.path.join(vae_image_dir, 'manifold.png'),
                                 nrow=steps)
Example #4
0
# Whether latent_SVD should also plot the singular-value spectrum.
plot_eigenvalues = True
# Project the random latent vectors through the SVD of the encoded data —
# presumably to keep samples within the data's latent subspace; confirm
# against latent_SVD's definition.
sample_vecs = latent_SVD(latent_vecs, rand_vecs, plot_eigenvalues)


### Generate New Tracks
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create new seprsco tracks using our model and the random samples
# Seprsco files can later be converted to valid NES music format

# Parameters for track generation (specifically filtering)
# p_min: probability threshold used by filter_tracks below.
p_min = .5

print('Generating New Tracks from Latent Samples')

# Decode samples using VAE
decoded_tracks = model.decoder(sample_vecs)

# Plot first decoded track
print("Example Model Generated Track")
plot_track(decoded_tracks[0])

# Filter Track (threshold the decoder's soft output at p_min)
decoded_tracks = filter_tracks(decoded_tracks, p_min)

# Plot first filtered track
print("Example Filtered Track")
plot_track(decoded_tracks[0])

# Convert tracks to seprsco format
print('Converting Model Output to Seprsco')
seprsco_tracks = generate_seprsco(decoded_tracks, int2labels_map)