コード例 #1
0
    def __init__(self,
                 vae_path,
                 mdn_rnn_path,
                 output_size,
                 device,
                 random=False):
        """Assemble a world-model agent from a VAE, an MDN-RNN and a Controller.

        Args:
            vae_path: path to a pickled VAE, loaded via torch.load when
                ``random`` is False.
            mdn_rnn_path: path to a pickled MDN_RNN when ``random`` is False.
            output_size: dimensionality of the controller's action output.
            device: torch device all sub-models are moved to.
            random: when True, build freshly initialised (untrained) VAE and
                MDN_RNN instead of loading them from disk.
        """
        super().__init__()

        self.device = device

        if random:
            # Untrained sub-models with hard-coded architecture sizes
            # (latent 32; MDN_RNN sized 35/32/512/256/5 — presumably matching
            # the saved checkpoints; confirm against the model definitions).
            self.vae = VAE(32).to(device)
            self.vae.set_device(self.device)
            self.mdn_rnn = MDN_RNN(35, 32, 512, 256, 5).to(device)
        else:
            self.vae = torch.load(vae_path, map_location=self.device)
            self.vae.set_device(self.device)
            self.mdn_rnn = torch.load(mdn_rnn_path, map_location=self.device)

        # Recurrent state: a pair of zero tensors shaped
        # (1, 1, hidden_size) — presumably the MDN-RNN's (h, c) pair.
        self.state = (torch.zeros(
            (1, 1, self.mdn_rnn.get_hidden_size())).to(self.device),
                      torch.zeros(
                          (1, 1,
                           self.mdn_rnn.get_hidden_size())).to(self.device))
        #self.action = torch.zeros((1,output_size)).to(self.device)
        self.action = None  # last action; None until the first step

        # Controller consumes the concatenated [hidden, latent] vector.
        self.controller_input = self.mdn_rnn.get_hidden_size(
        ) + self.vae.get_latent_size()
        self.output_size = output_size

        self.controller = Controller(self.controller_input,
                                     self.output_size).to(device)
コード例 #2
0
def main(train_loader, valid_loader, test_loader, n_epochs, device, lr=3e-4):
    """Train a VAE, save it, plot the learning curves and evaluate on test.

    Args:
        train_loader / valid_loader / test_loader: data iterators.
        n_epochs: number of training epochs.
        device: torch device for the model.
        lr: Adam learning rate.
    """
    model = VAE().to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)

    train_losses, valid_losses = [], []

    # Training VAE
    for epoch in range(n_epochs):
        train_losses.append(train(model, optimizer, epoch, train_loader, device))
        valid_losses.append(test(model, epoch, valid_loader, device, split="Valid"))

    print("Saving the model")
    torch.save(model.state_dict(), "best_model.pth")

    # Learning-curve figure (train first, then valid, matching legend order).
    for losses, label in ((train_losses, "train loss"),
                          (valid_losses, "valid loss")):
        plt.plot(losses, label=label)
    plt.title("Learning curves")
    plt.xlabel("Epochs")
    plt.ylabel("Negative ELBO")
    plt.legend()
    plt.savefig("Learning_curves.png")

    print("Evaluation on test set----------")
    test_loss = test(model, epoch, test_loader, device, split="Test")
コード例 #3
0
def train(args):
    """Fit a VAE on the training set using hyper-parameters from ``args``."""
    loader = DataLoader(TrainDataset(),
                        batch_size=args.batch_size,
                        shuffle=True)
    model = VAE().to(DEVICE)
    model.fit(loader, n_epochs=args.num_epochs, lr=args.lr)
コード例 #4
0
def run_episode(env, vae:VAE, mdn_rnn:MDN_RNN, ctrl:Controller):
    """Roll out one episode with the world-model agent.

    Collects MDN-RNN training samples along the way, records the episode
    reward as the current controller's evaluation, and — once the last
    controller of the population has been evaluated — triggers one round of
    VAE / MDN-RNN / controller (CMA) optimisation.

    Returns the episode's cumulative reward.
    """
    total_reward = 0.0
    observation = preprocess_obs(env.reset())
    ctrl_idx = ctrl.get_current_controller_idx()
    hidden = mdn_rnn.initial_state()
    done = False

    while not done:
        latent = vae.get_z_vec(observation)
        action = ctrl.get_action(latent, hidden)
        hidden = mdn_rnn.get_hidden_vec(action, latent)

        observation, reward, done, info = env.step(action)
        observation = preprocess_obs(observation)
        total_reward += reward
        mdn_rnn.insert_training_sample(latent, action, reward, done)

    ctrl.insert_evluation(total_reward)
    print('episode has been finished, total rewards: %f, controller: %d, generation: %d' %(total_reward, ctrl_idx, ctrl.get_generation_number()))

    # Optimise everything once per generation (after the last controller).
    if ctrl_idx == (constants.controller_pop_size - 1):
        print('start training VAE')
        vae.optimize()
        print('start training MDN_RNN')
        mdn_rnn.optimize()
        print('start training controller CMA')
        ctrl.optimize()
        print('VAE loss: %f, MDN_RNN loss: %s, generation_total_rewards: %f' %(vae.get_episod_loss(), mdn_rnn.get_episod_loss(), ctrl.get_current_total_eval()))
        print('################################################################################')

    return total_reward
コード例 #5
0
ファイル: train.py プロジェクト: constantino-garcia/vaele
def optimize_sde_standard_grad(y_input,
                               y_target,
                               gm: VAE,
                               optimizer,
                               initial_state=None,
                               effective_nb_timesteps=None,
                               kl_weight=tf.convert_to_tensor(
                                   1.0, dtype=tf_floatx()),
                               clip_value=100.):
    """One optimisation step on the SDE model's variational variables only.

    Args:
        y_input, y_target: input and target sequences for the model.
        gm: the variational model (VAE wrapping an SDE model).
        optimizer: TF optimizer used to apply the gradients.
        initial_state: optional recurrent state carried between calls.
        effective_nb_timesteps: optional value forwarded to the loss.
        kl_weight: scalar weight of the KL term in the loss.
        clip_value: unused in this function — presumably kept for signature
            parity with sibling optimize_* variants; confirm.

    Returns:
        (summed loss, loss broken into components, final recurrent state).
    """
    vvars = gm.sde_model.variational_variables
    # Encode/decode happens outside the tape with training=False.
    (samples, entropies,
     _), encoded_dist, decoded_dist, final_state = gm._encode_and_decode(
         y_input, training=False, initial_state=initial_state)
    with tf.GradientTape(persistent=False,
                         watch_accessed_variables=False) as tape:
        # Watch only the variational variables — all other variables are
        # excluded from this gradient step.
        tape.watch(vvars)
        breaked_loss = gm._breaked_loss(y_input, y_target, samples, entropies,
                                        encoded_dist, decoded_dist,
                                        initial_state, effective_nb_timesteps,
                                        kl_weight)
        loss = tf.reduce_sum(breaked_loss)
        # loss = gm.loss(y_input, y_target, training=True)
    vgrads = tape.gradient(loss, vvars)
    optimizer.apply_gradients(zip(vgrads, vvars))
    return loss, breaked_loss, final_state
コード例 #6
0
ファイル: main.py プロジェクト: LiamMa/DL_AS3
def main():
    """Build train/valid/test iterators, a VAE, and launch training."""
    # load dataset and data_iter
    datasets = {split: get_dataset(split) for split in ('train', 'valid', 'test')}

    def make_iter(dataset, do_shuffle):
        # shared DataLoader settings; only shuffling differs per split
        return DataLoader(dataset,
                          batch_size=BATCH_SIZE,
                          shuffle=do_shuffle,
                          num_workers=4)

    train_iter = make_iter(datasets['train'], True)
    valid_iter = make_iter(datasets['valid'], False)
    test_iter = make_iter(datasets['test'], False)

    # load model, loss, optimizer
    model = VAE(n_latent=N_LATENT)
    model.to(dev)
    optimizer = torch.optim.Adam(model.parameters(), lr=LR)

    # train
    train(model, train_iter, [valid_iter, test_iter], optimizer, NUM_EPOCHS,
          args.test_interval)
コード例 #7
0
def main_visualisation():
    """Project test images into the VAE latent space and scatter-plot the
    first two latent-mean dimensions, one colour per digit label (0-9)."""
    train_loader, valid_loader, test_loader = get_dataloaders()
    inputs, _ = next(iter(train_loader))
    _, _, width, height = inputs.shape

    vae = VAE(input_dim=width * height, h_dim=H_DIM, z_dim=Z_DIM)
    checkpoint = torch.load(CHECKPOINT_PATH + '.pth')
    vae.load_state_dict(checkpoint['model_state_dict'])

    # latent means grouped by digit label
    dict_mu = {label: [] for label in range(10)}

    t = tqdm(total=len(test_loader))
    with torch.no_grad():
        for batch_x, batch_y in test_loader:
            t.update(1)
            batch_x = batch_x.view((batch_x.shape[0], -1))
            mu, _ = vae.encode(batch_x)

            # BUG FIX: iterate over the actual batch size — the final batch
            # can be smaller than BATCH_SIZE, which previously raised
            # IndexError on batch_y[i].
            for i in range(batch_x.shape[0]):
                label = batch_y[i].item()
                dict_mu[label].append(mu[i])
    t.close()

    l_points = []
    l_labels = []
    for label, mu in dict_mu.items():
        mu = np.array(torch.stack(mu))
        l_points.append(plt.scatter(mu[:, 0], mu[:, 1]))
        l_labels.append(label)

    plt.legend(l_points, l_labels)
    plt.show()
コード例 #8
0
    def __init__(self, filename=None, model=None):
        """Create a TF session, build the MNIST VAE graph and restore the
        checkpoint found under ``filename``.

        Args:
            filename: checkpoint directory handed to the model loader.
            model: not supported; if given, construction is aborted early and
                the instance is left uninitialised.
        """
        if model is not None:
            # error: passing a prebuilt model is unsupported — bail out.
            return

        # allow_soft_placement lets TF place ops on CPU when no GPU kernel
        # is available.
        self.session = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        self.last_batch_size = 64
        self.model = VAE(self.session,
                         epoch=20,
                         batch_size=self.last_batch_size,
                         z_dim=20,
                         dataset_name="mnist",
                         checkpoint_dir=filename,
                         result_dir="results",
                         log_dir="logs")

        # build graph
        self.model.build_model()
        """ Loss Function """
        # encoding
        # mu, sigma = self.model.encoder(self.inputs, is_training=False, reuse=True)

        # sampling by re-parameterization technique
        # self.z_fn = mu + sigma * tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)

        # launch the graph in a session
        self.model.saver = tf.train.Saver()
        could_load, checkpoint_counter = self.model.load(filename)
        print(" [*] Loading finished!")
コード例 #9
0
def train(args):
    """Jointly train two VAEs — one on human faces, one on cartoons —
    each step conditioned on the other model, reporting per-epoch averages."""
    full_data = get_data_loader(args)
    VAE_human = VAE(512).to(device)
    VAE_cartoon = VAE(512).to(device)
    optimiser_human = optim.Adam(VAE_human.parameters(), lr=0.0002)
    optimiser_cartoon = optim.Adam(VAE_cartoon.parameters(), lr=0.0002)
    VAE_human.train()
    VAE_cartoon.train()
    print("Start Training....")
    for epoch in trange(args.num_epochs):
        human_loss_sum = 0.0
        cartoon_loss_sum = 0.0
        n_samples = 0
        for data in full_data:
            # x is cartoon, y is human
            human = data[0].to(device)
            cartoon = data[1].to(device)
            n_samples += human.shape[0]
            human_loss_sum += train_VAE_1_step(VAE_human, VAE_cartoon,
                                               optimiser_human, human)
            cartoon_loss_sum += train_VAE_1_step(VAE_cartoon, VAE_human,
                                                 optimiser_cartoon, cartoon)
        print("Avg VAE Cartoon Loss: {}".format(cartoon_loss_sum / n_samples))
        print("Avg VAE Human Loss: {}".format(human_loss_sum / n_samples))
コード例 #10
0
ファイル: VAETest.py プロジェクト: cyranawm/atiamML
    def test_bernoulliVAE_Learning(self):
        """Smoke-test that a Bernoulli VAE trains on a single MNIST sample,
        printing the loss every 10 steps and animating the reconstruction."""
        mb_size = 1
        # test on mnist dataset
        X, _ = mnist.train.next_batch(mb_size)

        # define vae structure
        X = Variable(torch.from_numpy(X))
        X_dim = mnist.train.images.shape[1]
        Z_dim = 1
        IOh_dims_Enc = [X_dim, 50, Z_dim]
        IOh_dims_Dec = [Z_dim, 50, X_dim]
        NL_types_Enc = ['relu6']
        NL_types_Dec = ['relu6', 'sigmoid']
        vae = VAE(X_dim,
                  Z_dim,
                  IOh_dims_Enc,
                  IOh_dims_Dec,
                  NL_types_Enc,
                  NL_types_Dec,
                  mb_size,
                  bernoulli=True,
                  gaussian=False)

        # BUG FIX: parameters must be CALLED — the original passed the bound
        # method `vae.parameters` instead of the parameter iterable it
        # returns, which makes Adam raise a TypeError at construction.
        optimizer = optim.Adam(vae.parameters(), lr=1e-3)

        fig = plt.figure()
        ims = []

        for i in range(100):
            optimizer.zero_grad()
            if vae.decoder.gaussian:
                vae(X)
                out = vae.X_mu
            elif vae.decoder.bernoulli:
                vae(X)
                out = vae.X_sample
            else:
                # was a bare `raise` (no active exception), which itself
                # produced a RuntimeError — make the failure explicit
                raise RuntimeError("decoder is neither gaussian nor bernoulli")
            loss, _, _ = vae.loss(X)
            if i == 0:
                initialLoss = loss.data[0]
            if (i % 10 == 0):
                print("Loss -> " + str(loss.data[0]))
            loss.backward()
            optimizer.step()

            # update plot
            gen = out.data.numpy()
            gen_2D = numpy.reshape(gen[0], (28, 28))
            im = plt.imshow(gen_2D, animated=True)
            ims.append([im])

        ani = animation.ArtistAnimation(fig,
                                        ims,
                                        interval=50,
                                        blit=True,
                                        repeat_delay=1000)

        plt.show()
コード例 #11
0
def main():
    """Load the pretrained VAE weights and run anomaly detection on both
    the normal and the anomalous test sets."""
    test_anomaly = load_dataset('anomaly')
    test_normal = load_dataset('normal')

    vae = VAE()
    vae, loss = vae.vae_net()
    vae.load_weights("weight/vae_model.h5")
    anomaly_detector(vae, test_normal, test_anomaly)
コード例 #12
0
 def build_network(self):
     """Build the VAE graph: input placeholder, losses, Adam train op and
     sampling / likelihood-estimation endpoints."""
     # Fixed-size NHWC image placeholder, dimensions taken from the config.
     self.X = tf.placeholder(tf.float32, [self.conf.batch_size, self.conf.height, self.conf.width, self.conf.channel])
     model = VAE(self.X, self.conf)
     # self.tsample = model.get_tsample()
     self.kl_loss, self.ce_loss, loss = model.get_loss()
     # optimize_loss wires the Adam step and the global step in one call.
     self.train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_or_create_global_step(), 
         learning_rate=self.conf.learning_rate, optimizer='Adam', update_ops=[])
     self.gsample = model.get_gsample()
     self.log_marginal_likelihood_estimate = model.log_marginal_likelihood_estimate()
コード例 #13
0
def main():
    """Train (or load) an EEG VAE and a pupil-driven CAN on single-trial
    data, then write VAE predictions for the EEG test set."""
    # Set parameters
    vae_epoch = 2
    can_epoch = 1000
    batch_size = 64
    latent_dim = 10
    beta_eeg = 5.0
    # Toggle: True trains from scratch, False loads saved encoders/decoders.
    train = True

    # Read data sets
    data_root = "/home/zainkhan/bci-representation-learning"
    eeg_train, eeg_test, pupil_train, pupil_test, sub_cond = utils.read_single_trial_datasets(
        data_root)

    if train:
        # Train VAE
        vae = VAE(beta=beta_eeg, latent_dim=latent_dim)
        vae.compile(optimizer=keras.optimizers.Adam())
        vae.fit(eeg_train, epochs=vae_epoch, batch_size=batch_size)

        # Save VAE
        #vae.encoder.save("vae_encoder")
        #vae.decoder.save("vae_decoder")

        # Train CAN (conditioned on the trained VAE and its EEG data)
        can = CAN(
            vae=vae,
            can_data=pupil_train,
            vae_data=eeg_train,
            latent_dim=latent_dim,
            epochs=can_epoch,
            batch_size=batch_size,
        )
        # run_eagerly=True: execute the custom train step eagerly
        can.compile(optimizer=keras.optimizers.Adam(), run_eagerly=True)
        can.fit(pupil_train,
                epochs=can_epoch,
                batch_size=batch_size,
                shuffle=False)

        # Save CAN
        can.encoder.save("can_encoder")
        can.decoder.save("can_decoder")
    else:
        # Load all encoders/decoders
        # NOTE(review): the VAE save calls above are commented out, so this
        # branch only works if "vae_encoder"/"vae_decoder" already exist on
        # disk from an earlier run — confirm.
        vae = VAE(beta=beta_eeg, latent_dim=latent_dim)
        vae.encoder = keras.models.load_model("vae_encoder")
        vae.decoder = keras.models.load_model("vae_decoder")

        can = CAN(vae=vae, vae_data=eeg_train, latent_dim=latent_dim)
        can.encoder = keras.models.load_model("can_encoder")
        can.decoder = keras.models.load_model("can_decoder")

    # VAE predictions
    encoded_data = vae.encoder.predict(eeg_test)
    decoded_data = vae.decoder.predict(encoded_data)
    fn = utils.get_filename("predictions/", "test-eeg")
コード例 #14
0
 def __init__(self, CKPT_path=None):
     """Create a VAE and its Adam optimiser; optionally restore both from a
     checkpoint file.

     Args:
         CKPT_path: optional path to a torch checkpoint holding
             'state_dict' (model) and 'optimizer' entries.
     """
     self._vae = VAE()
     self.optimizer = Adam(self._vae.parameters(),
                           lr=0.001,
                           weight_decay=0.001)
     if CKPT_path is None:
         return
     # Restore model and optimiser state from the checkpoint.
     checkpoint = torch.load(CKPT_path)
     self._vae.load_state_dict(checkpoint['state_dict'])
     self.optimizer.load_state_dict(checkpoint['optimizer'])
     print('load vae and optimizer form file')
コード例 #15
0
    def __init__(self, data_dir, latent_size=32):
        """Set up VAE training state: dataset, loader, model and optimiser.

        Args:
            data_dir: directory holding the episode data for VAE_DataSet.
            latent_size: dimensionality of the VAE latent space.
        """
        self.data_dir = data_dir
        self.latent_size = latent_size

        self.dataset = VAE_DataSet(data_dir)
        self.dl = DataLoader(self.dataset, batch_size=1,
                             shuffle=True)  # sample whole eps

        # Model is placed on the GPU — requires CUDA to be available.
        self.model = VAE(latent_size).cuda()
        self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3)
コード例 #16
0
def FLVAE(epochs, latent_dim, X_train, X_test, train_dataset):
    """Federated-learning style VAE training over SPLIT_SIZE client shards,
    averaging the client weights after every epoch, then reporting the loss
    on the full training dataset."""
    model = VAE(latent_dim)
    for i in range(0, epochs):
        models = []
        elbo = 0
        for j in range(0, SPLIT_SIZE):
            print('Running data on node ', j + 1)
            # NOTE(review): this aliases the shared model rather than copying
            # it, so every "client" trains the SAME object and the weight
            # averaging below averages identical weights (a no-op) — confirm
            # whether a clone/copy was intended here.
            current_model = model

            for train_x in X_train[j]:
                model.train_step(current_model, train_x, model.optimizer)
            loss = tf.keras.metrics.Mean()
            for test_x in X_test[j]:
                loss(model.compute_loss(current_model, test_x))
            elbo = -loss.result()
            display.clear_output(wait=False)
            print('Epoch: {}, Test set ELBO: {}, clien-num: {}'.format(i, elbo, j))
            # generate_and_save_images(current_model, i, j, test_sample)

            models.append(current_model)

        # Federated averaging: element-wise mean of each weight tensor
        # across the collected client models.
        weights = [model.get_weights() for model in models]
        new_weights = list()
        for weights_list_tuple in zip(*weights):
            new_weights.append(np.array([np.array(w).mean(axis=0) for w in zip(*weights_list_tuple)]))

        model.set_weights(new_weights)
        # write code for accuracy

    # Final evaluation on the whole training dataset.
    loss = tf.keras.metrics.Mean()
    for train_x in train_dataset:
        loss(model.compute_loss(model, train_x))
    print(loss.result())
コード例 #17
0
def train(input_dim, train_loader, valid_loader, device, writers):
    """Construct a VAE and run the shared training loop for up to 10 epochs."""
    model = VAE(input_dim=input_dim, h_dim=H_DIM, z_dim=Z_DIM)
    adam = optim.Adam(model.parameters())

    # Return values of train_loop are not needed here.
    train_loop(model, adam, train_loader, valid_loader, device, writers,
               max_epochs=10)
コード例 #18
0
 def __init__(self, nhidden, adj, user_to_idx, movie_to_idx, dropout, num_layers, gave=True, vae_mode=False):
     """Wrap either a GraphVAE (default) or a plain VAE plus its losses,
     optimiser and LR schedule.

     Note: the selector flag is (mis)spelled ``gave`` in the public
     signature and is kept as-is for backward compatibility. The body
     previously tested the undefined name ``gvae`` and therefore raised
     NameError on every call — fixed to use the actual parameter.
     """
     super(Network, self).__init__()
     if gave:
         self.gvae = GraphVAE(
             nhidden, adj, user_to_idx, movie_to_idx, dropout, num_layers, vae_mode=vae_mode)
     else:
         self.gvae = VAE(nhidden, adj, user_to_idx, movie_to_idx, vae_mode=vae_mode)
     self.r_loss = torch.nn.NLLLoss()
     self.p_loss = torch.nn.NLLLoss()
     self.opt = torch.optim.Adam(self.parameters(), lr=1e-3)
     # Exponential learning-rate decay with factor 0.9 per step.
     self.lr = torch.optim.lr_scheduler.ExponentialLR(self.opt, 0.9)
     self.vae_mode = vae_mode
コード例 #19
0
def make_model():
    """Build the topic-model VAE from global CLI args and move it to device.

    Populates the module-level ``arch`` dict and binds the model to the
    module-level ``model`` name (both are globals by design).
    """
    global model, arch
    arch = {
        "log": args.log,
        "n_feature": n_feature,
        "n_topic": args.n_topic,
        # symmetric prior vector: 1/K repeated for each of the K topics
        "alpha": torch.tensor([1 / args.n_topic] * args.n_topic),
        "L": args.L,
        "device": device,
        "lr": 1e-3,
    }
    # .float() and .to(device) both return the module, so one chained
    # expression replaces the original's redundant second .to(device).
    model = VAE(arch).to(device).float()
コード例 #20
0
def test(input_dim, test_loader, device, writer):
    """Restore the checkpointed VAE, report its saved metrics, and write a
    reconstruction grid for one test batch."""
    vae = VAE(input_dim=input_dim, h_dim=H_DIM, z_dim=Z_DIM)
    state = torch.load(CHECKPOINT_PATH + '.pth')
    vae.load_state_dict(state['model_state_dict'])

    # Report the metrics stored alongside the weights.
    print(state['last_test_loss'])
    print(state['epoch'])

    batch, _ = next(iter(test_loader))
    create_grid(vae, batch.to(device), writer=writer, epoch=0)
コード例 #21
0
def main():
    """Train the VAE with a tflearn Trainer on the (trainX, testX) globals."""
    vae = VAE(input_dim, latent_dim)

    x_placeholder = tflearn.input_data(shape=(None, input_dim), name='input_x')
    adam = tflearn.optimizers.Adam().get_tensor()

    vae.return_trainer(x_placeholder, adam, batch_size).fit(
        feed_dicts={x_placeholder: trainX},
        val_feed_dicts={x_placeholder: testX},
        n_epoch=n_epoch,
        shuffle_all=True,
        run_id='VAE')
コード例 #22
0
class VAEp:
    """Inference wrapper around a TF MNIST VAE: restores a checkpoint and
    exposes encode / sample helpers operating on channel-first arrays."""

    def __init__(self, filename=None, model=None):
        """Create a session, build the VAE graph and restore ``filename``.

        Args:
            filename: checkpoint directory handed to the model loader.
            model: not supported; if given, construction aborts early and
                the instance is left uninitialised.
        """
        if model is not None:
            # error: passing a prebuilt model is unsupported — bail out.
            return

        # allow_soft_placement lets TF place ops on CPU when no GPU kernel
        # is available.
        self.session = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        self.last_batch_size = 64
        self.model = VAE(self.session,
                         epoch=20,
                         batch_size=self.last_batch_size,
                         z_dim=20,
                         dataset_name="mnist",
                         checkpoint_dir=filename,
                         result_dir="results",
                         log_dir="logs")

        # build graph
        self.model.build_model()
        """ Loss Function """
        # encoding
        # mu, sigma = self.model.encoder(self.inputs, is_training=False, reuse=True)

        # sampling by re-parameterization technique
        # self.z_fn = mu + sigma * tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)

        # launch the graph in a session
        self.model.saver = tf.train.Saver()
        could_load, checkpoint_counter = self.model.load(filename)
        print(" [*] Loading finished!")

        # self.invert_models = def_invert_models(self.net, layer='conv4', alpha=0.002)

    def encode_images(self, images, cond=None):
        """Encode channel-first images to latent means.

        ``cond`` is accepted but unused by this implementation.
        """
        # The model expects channel-last input, so move axis 1 to the end.
        channel_last = np.rollaxis(images, 1, 4)
        z = self.session.run(self.model.mu,
                             feed_dict={self.model.inputs: channel_last})
        return z

    def get_zdim(self):
        """Return the latent dimensionality of the wrapped model."""
        return self.model.z_dim

    def sample_at(self, z):
        """Decode latent codes ``z`` and return channel-first samples."""
        samples = self.session.run(self.model.fake_images,
                                   feed_dict={self.model.z: z})
        channel_first = np.rollaxis(samples, 3, 1)
        return channel_first
コード例 #23
0
    def _prepare_model(self):
        """Instantiate the network and its Adam optimiser on the target GPU.

        Reads the input dimensionality saved alongside the training data and
        selects the learning-rate schedule from the configured type.
        """
        # Input dimensionality was persisted next to the training data.
        dim = np.load(os.path.join(self.DATA_ROOT, "train", "dim.npy"))
        if self.NET == 'VAE':
            self.model = VAE(input_dim=dim,
                             latent_dim=self.LATENT_DIM).to(self.GPU_ID)
        # NOTE(review): if self.NET is not 'VAE', self.model is never set and
        # the optimiser construction below fails — confirm whether other NET
        # values are expected to reach this method.

        if self.LEARNING_RATE_TYPE == '45':
            self.LEARNING_RATE = self.LR_LIST_45
        elif self.LEARNING_RATE_TYPE == '3':
            self.LEARNING_RATE = self.LR_LIST_3

        # Schedule's first entry seeds the optimiser's learning rate.
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.LEARNING_RATE[0],
                                    betas=(0.9, 0.999),
                                    eps=1e-08,
                                    weight_decay=0)
コード例 #24
0
    def __init__(self, *args, **kwargs):
        """Echo-state network whose reservoir outputs are compressed by one
        VAE encoder per reservoir.

        Keyword Args:
            dims_reduce (list): required; latent dimensionality for each
                reservoir's encoder.
            train_epochs (int): epochs used to train each VAE (default 2).
        All remaining args/kwargs are forwarded to the parent constructor.
        """
        # BUG FIX: the original guard (`... or kwargs['dims_reduce'] is list`)
        # compared a value to the type object `list` (always False) and raised
        # a bare KeyError when the key was missing; validate explicitly.
        # (Also fixes the "UNCLUDE" typo in the message.)
        assert 'dims_reduce' in kwargs and isinstance(
            kwargs['dims_reduce'], list), "MUST INCLUDE DIMS AS LIST."
        self.dims_reduce = kwargs.pop('dims_reduce')

        # pop(key, default) replaces the previous if/else + del dance;
        # should make this specific to only VAEs but being quick for now
        self.train_epochs = kwargs.pop('train_epochs', 2)

        super(EESN_ENCODED, self).__init__(*args, **kwargs)

        self.data_mean = None
        # normalisation data for reservoir outputs
        self.reservoir_means = [np.zeros(N_i) for N_i in self.reservoir_sizes]
        self.reservoir_stds = [np.zeros(N_i) for N_i in self.reservoir_sizes]
        # normalisation data for encoder outputs
        self.encoder_means = [np.zeros(N_i) for N_i in self.dims_reduce]
        self.encoder_stds = [np.zeros(N_i) for N_i in self.dims_reduce]

        # one VAE encoder per reservoir
        self.encoders = [
            VAE(input_size=self.reservoir_sizes[j],
                latent_variable_size=self.dims_reduce[j],
                epochs=self.train_epochs,
                batch_size=64) for j in range(self.num_reservoirs)
        ]

        # signals of the encoders
        self.encoder_signals = [[] for _ in range(self.num_reservoirs)]
コード例 #25
0
    def __init__(self, *args, **kwargs):
        """Deep hierarchical echo-state network whose inter-reservoir signals
        are compressed by PCA or VAE encoders.

        Keyword Args:
            dims_reduce (list): required; latent dimensionality between
                consecutive reservoirs.
            train_epochs (int): epochs per VAE encoder (default 2).
            train_batches (int): batch size per VAE encoder (default 64).
            encoder_type (str): 'PCA' (default) or 'VAE'.
            encode_norm (bool): normalise AFTER the encoding, similar to
                batch norm without trained std/mean (default False).
        Remaining args/kwargs are forwarded to the parent constructor.
        """
        # BUG FIX: the original guard (`... or kwargs['dims_reduce'] is list`)
        # compared a value to the type object `list` (always False) and raised
        # a bare KeyError when the key was missing; validate explicitly.
        # (Also fixes the "UNCLUDE" typo in the message.)
        assert 'dims_reduce' in kwargs and isinstance(
            kwargs['dims_reduce'], list), "MUST INCLUDE DIMS AS LIST."
        self.dims_reduce = kwargs.pop('dims_reduce')

        # pop(key, default) replaces the previous if/else + del ladders;
        # should make these specific to only VAEs but being quick for now
        self.train_epochs = kwargs.pop('train_epochs', 2)
        self.train_batches = kwargs.pop('train_batches', 64)
        self.encoder_type = kwargs.pop('encoder_type', 'PCA')
        self.encode_norm = kwargs.pop('encode_norm', False)

        super(DHESN, self).__init__(*args, **kwargs)

        self.data_mean = None
        # normalisation data for reservoir outputs
        self.reservoir_means = [np.zeros(N_i) for N_i in self.reservoir_sizes]
        self.reservoir_stds = [np.zeros(N_i) for N_i in self.reservoir_sizes]
        # normalisation data for encoder outputs
        self.encoder_means = [np.zeros(N_i) for N_i in self.dims_reduce]
        self.encoder_stds = [np.zeros(N_i) for N_i in self.dims_reduce]

        self.encoders = []

        # Encoder j sits between reservoir j-1 and reservoir j, hence the
        # ranges starting at 1 and the j-1 indexing.
        if self.encoder_type == 'PCA':
            for j in range(1, self.num_reservoirs):
                self.encoders.append(PCA(n_components=self.dims_reduce[j - 1]))
        elif self.encoder_type == 'VAE':
            for j in range(1, self.num_reservoirs):
                self.encoders.append(
                    VAE(input_size=self.reservoir_sizes[j - 1],
                        latent_variable_size=self.dims_reduce[j - 1],
                        epochs=self.train_epochs,
                        batch_size=self.train_batches))
        else:
            raise NotImplementedError('non-PCA/VAE encodings not done yet')

        # signals of the encoders
        self.encoder_signals = [[] for _ in range(self.num_reservoirs - 1)]
コード例 #26
0
def plot_synthetic_samples(fig, axes, vae: VAE, y_input, y_target,
                           simulation_steps):
    """Plot decoded vs. synthetically simulated trajectories for up to
    ``nb_examples`` inputs, with an optional phase-space row per example.

    ``axes`` may be a single Axes, a 1-D ndarray (decoded plots only) or a
    2-row ndarray (row 0: decoded traces, row 1: phase space).
    """
    # print("------------------> Starting synthetic samples")
    # start = time.time() # TODO
    if isinstance(axes, np.ndarray):
        # Axes is a matrix (or vector) where the number of rows can be either 1 or 2 (do not or do phase_space)
        # and the number of columns is the number of examples to plot
        if axes.ndim == 2:
            assert axes.shape[0] == 1 or axes.shape[
                0] == 2, "Non valid number of rows"
            do_phase_space = axes.shape[0] == 2
            nb_examples = axes.shape[1]
        else:
            # If axes is a vector, interpret it as a matrix (1, -1)
            do_phase_space = False
            nb_examples = len(axes)
            axes = np.reshape(axes, (1, -1))
    else:
        do_phase_space = False
        nb_examples = 1
        # homogenize access to the axes
        axes = np.array([[axes]])
    # never plot more examples than the batch provides
    nb_examples = min(nb_examples, y_input.shape[0])

    (samples, _, _), _, _, _ = vae._encode_and_decode(y_input, False, None)
    (mean, scale), synthetic_samples = vae.synthetize(y_input, y_target,
                                                      simulation_steps)
    # inducing-point locations of the drift sparse GP (scattered below)
    ips = vae.sde_model.drift_svgp.inducing_variable.variables[0]
    # shared axis limits covering both encoded and synthetic samples
    ax_lim = [
        min(np.min(samples[0, :nb_examples, ...]),
            np.min(synthetic_samples[:nb_examples, ...])),
        max(np.max(samples[0, :nb_examples, ...]),
            np.max(synthetic_samples[:nb_examples, ...]))
    ]
    for example_index in range(nb_examples):
        dec_ax = axes[0, example_index]
        phase_space_ax = axes[1, example_index] if do_phase_space else None
        _plot_synthetic_sample(dec_ax, phase_space_ax,
                               y_target[example_index, :,
                                        0], mean[example_index, :, 0],
                               scale[example_index, :,
                                     0], samples[0, example_index, ...],
                               synthetic_samples[example_index,
                                                 ...], vae, ax_lim)
        # 2-D latent space: overlay the inducing points on the phase portrait
        if samples.shape[-1] == 2 and phase_space_ax:
            phase_space_ax.scatter(ips[:, 0], ips[:, 1], c='C3')
コード例 #27
0
def get_model(path):
    """Load a trained VAE checkpoint from ``path`` and return it in eval mode."""
    checkpoint = torch.load(path)
    train_args = checkpoint['args']
    # model = {'dae': DAE, 'vae': VAE, 'aae': AAE}[train_args.model](
    # vocab, train_args).to(device)
    vae = VAE(vocab, train_args, device)
    vae.load_state_dict(checkpoint['model'])
    vae.flatten()
    vae.eval()
    return vae
コード例 #28
0
def train(x_train, learning_rate, batch_size, num_epochs):
    """Build, compile and fit the spectrogram VAE; return the fitted model."""
    vae = VAE(input_shape=(256, 64, 1),
              conv_filters=(512, 256, 128, 64, 32),
              conv_kernel=(3, 3, 3, 3, 3),
              conv_strides=(2, 2, 2, 2, (2, 1)),
              latent_dim=128)
    vae.summary()
    vae.compile(learning_rate)
    vae.train(x_train, batch_size, num_epochs)
    return vae
コード例 #29
0
ファイル: test.py プロジェクト: lanzhang128/dissertation
def load_model(emb_dim, rnn_dim, z_dim, vocab_size, lr, model_path):
    """Rebuild the VAE and restore the latest checkpoint from ``model_path``."""
    model = VAE(emb_dim=emb_dim, rnn_dim=rnn_dim, z_dim=z_dim,
                vocab_size=vocab_size)
    adam = tf.keras.optimizers.Adam(learning_rate=lr)
    checkpoint = tf.train.Checkpoint(optimizer=adam, model=model)
    latest = tf.train.latest_checkpoint(model_path)
    # expect_partial: optimiser slots may be absent at inference time
    checkpoint.restore(latest).expect_partial()
    return model
コード例 #30
0
def get_models():
    """Instantiate the auxiliary VAE plus the classifier selected by args."""
    netVAE = VAE.VanillaVAE(args.VAE_x_dim, args.VAE_h_dim, args.VAE_z_dim, dtype=dtype)
    # Validate the classifier choice up front, then build it.
    if args.model_type not in ('Net', 'LeNet'):
        raise SystemExit("Unknown model type")
    if args.model_type == 'Net':
        net = model.Net(args.dropout)
    else:
        net = model.LeNet(args.batch_normalization)
    return net, netVAE
コード例 #31
0
ファイル: run.py プロジェクト: y0ast/Variational-Autoencoder
    f = open('freyfaces.pkl', 'rb')
    x = pickle.load(f, encoding='latin1')
    f.close()
    x_train = x[:1500]
    x_valid = x[1500:]
else:
    print("Loading MNIST data")
    # Retrieved from: http://deeplearning.net/data/mnist/mnist.pkl.gz
    f = gzip.open('mnist.pkl.gz', 'rb')
    (x_train, t_train), (x_valid, t_valid), (x_test, t_test) = pickle.load(f, encoding='latin1')
    f.close()

path = "./"

print("instantiating model")
model = VAE(continuous, hu_encoder, hu_decoder, n_latent, x_train)


batch_order = np.arange(int(model.N / model.batch_size))
epoch = 0
LB_list = []

if os.path.isfile(path + "params.pkl"):
    print("Restarting from earlier saved parameters!")
    model.load_parameters(path)
    LB_list = np.load(path + "LB_list.npy")
    epoch = len(LB_list)

if __name__ == "__main__":
    print("iterating")
    while epoch < n_epochs:
コード例 #32
0
ファイル: run.py プロジェクト: 2php/Variational-Autoencoder
    f = open('freyfaces.pkl', 'rb')
    x = cPickle.load(f)
    f.close()
    x_train = x[:1500]
    x_valid = x[1500:]
else:
    print "Loading MNIST data"
    # Retrieved from: http://deeplearning.net/data/mnist/mnist.pkl.gz
    f = gzip.open('mnist.pkl.gz', 'rb')
    (x_train, t_train), (x_valid, t_valid), (x_test, t_test) = cPickle.load(f)
    f.close()

path = "./"

print "instantiating model"
model = VAE(continuous, hu_encoder, hu_decoder, n_latent, x_train)


batch_order = np.arange(int(model.N / model.batch_size))
epoch = 0
LB_list = []

if os.path.isfile(path + "params.pkl"):
    print "Restarting from earlier saved parameters!"
    model.load_parameters(path)
    LB_list = np.load(path + "LB_list.npy")
    epoch = len(LB_list)

if __name__ == "__main__":
    print "iterating"
    while epoch < n_epochs:
コード例 #33
0
ファイル: VAE_character.py プロジェクト: neonnnnn/ml
def train_vae_character():
    """Train a Gaussian VAE on 64x64 RGB character crops.

    Python 2 / custom framework code (note ``xrange`` and BatchIterator).
    Each epoch saves an "analogy" grid decoded from a fixed z interpolation
    and a reconstruction grid of the held-out validation images.
    """
    # set params
    batch_size = 100
    sqrtbs = int(batch_size ** 0.5)  # images are saved on a sqrt x sqrt grid
    epoch = 200
    rng = np.random.RandomState(1)

    z_dim = 128
    # load data: each file in the directory is one raw encoded image blob
    image_dir = os.path.expanduser('~') + '/dataset/3_sv_actors/cropping_rgb/'
    fs = os.listdir(image_dir)
    dataset = []
    for fn in fs:
        f = open(image_dir+'/'+fn, 'rb')
        img_bin = f.read()
        dataset.append(img_bin)
        f.close()
    n_data = len(dataset)
    print(n_data)
    # hold out the last batch_size images for reconstruction previews
    train_data = dataset[:-batch_size]
    valid_data = dataset[-batch_size:]
    X_train = np.zeros((batch_size, 3, 64, 64), dtype=np.float32)
    X_valid = np.zeros((batch_size, 3, 64, 64), dtype=np.float32)

    # scale pixel values into [-1, 1]
    for l in xrange(batch_size):
        img = loadimg(valid_data[l])
        X_valid[l] = 2*img/255. - 1

    # fixed latent grid interpolating between z_plot[0] and its negation,
    # decoded every epoch as the analogy image
    z_plot = np.random.standard_normal((batch_size, z_dim)).astype(np.float32)
    z_plot[-1] = -z_plot[0]
    for i in range(batch_size):
        z_plot[i] = ((batch_size-i)*z_plot[0] + i*z_plot[-1])/batch_size
    # train
    valid_batches = utils.BatchIterator(X_valid, batch_size)
    z_batches = utils.BatchIterator(z_plot, batch_size)

    # make encoder, decoder, discriminator, and cvaegan
    print('making encoder ...')
    encoder_x = x_encoder(rng)
    encoder_x.predict(valid_batches)
    encoder_mean = Dense(z_dim)
    encoder_var = Dense(z_dim)
    encoder = Gaussian(encoder_mean, encoder_var, encoder_x,)

    print('making decoder ...')
    decoder_mean = DeconvCUDNN(3, 4, 4, n_out=(3, 64, 64), n_in=(64, 32, 32),
                               subsample=(2, 2), border_mode=(1, 1))
    decoder_logvar = DeconvCUDNN(3, 4, 4, n_out=(3, 64, 64), n_in=(64, 32, 32),
                                 subsample=(2, 2), border_mode=(1, 1))
    decoder_z = z_decoder(rng, z_dim)
    decoder = Gaussian(decoder_mean, decoder_logvar, decoder_z)

    vae = VAE(rng, encoder=encoder, decoder=decoder)
    opt = Adam(lr=1e-4)
    vae.compile(opt, train_loss=None)
    print('making function ...')

    train_function = vae.function(variable(X_train), mode='train')
    # NOTE(review): this pre-training save reuses `i` left over from the
    # z_plot loop above, so the filename's epoch index equals batch_size —
    # looks unintended; confirm.
    utils.color_saveimg(X_valid, (sqrtbs, sqrtbs),
                        ('imgs/VAE/VAE_character_reconstract_epoch'
                         + str((i + 1)) + '.jpg'))
    for i in xrange(epoch):
        print('epoch:{0}'.format(i + 1))
        index = np.random.permutation(len(train_data))
        for j, idx in enumerate(index):
            # fill the batch buffer one image at a time; train whenever a
            # full batch has been assembled
            img = loadimg(train_data[idx])
            X_train[(j+1) % batch_size] = 2.*img/255. - 1.0
            if (j+1) % batch_size == 0:
                run_on_batch([X_train], train_function)

        if (i + 1) % 1 == 0:
            # decode the fixed z grid and clamp to the displayable range
            analogy = (decoder.predict(z_batches) + 1.) / 2.
            analogy[analogy > 1] = 1.
            analogy[analogy < 0] = 0.
            utils.color_saveimg(analogy, (sqrtbs, sqrtbs),
                                ('imgs/VAE/VAE_character_analogy_epoch'
                                 + str((i + 1)) + '.jpg'))

            reconstract = (vae.predict(valid_batches) + 1.) / 2.
            reconstract[reconstract > 1] = 1.
            reconstract[reconstract < 0] = 0.
            utils.color_saveimg(reconstract, (sqrtbs, sqrtbs),
                                ('imgs/VAE/VAE_character_reconstract_epoch'
                                 + str((i + 1)) + '.jpg'))
コード例 #34
0
# Finish the recognition (encoder) stack: last hidden layer -> latent mean.
rec_layer_sizes += [(n_hidden_recog[-1], n_z)]

for i, (n_incoming, n_outgoing) in enumerate(rec_layer_sizes):
    layers['recog_%i' % i] = F.Linear(n_incoming, n_outgoing)

# Separate head producing the latent log-sigma.
layers['log_sigma'] = F.Linear(n_hidden_recog[-1], n_z)

# Generating model.
gen_layer_sizes = [(n_z, n_hidden_gen[0])]
gen_layer_sizes += zip(n_hidden_gen[:-1], n_hidden_gen[1:])
gen_layer_sizes += [(n_hidden_gen[-1], train_x.shape[1])]

for i, (n_incoming, n_outgoing) in enumerate(gen_layer_sizes):
    layers['gen_%i' % i] = F.Linear(n_incoming, n_outgoing)

# Assemble the VAE from the named linear layers.
# NOTE: Python 2 code — the `zip` above returns a list here; under
# Python 3 it would be a lazy iterator and `+=` would still work, but
# xrange elsewhere in this file would not.
model = VAE(**layers)

if args.gpu >= 0:
    cuda.init(args.gpu)
    model.to_gpu()


# use Adam
optimizer = optimizers.Adam()
optimizer.setup(model.collect_parameters())

# per-epoch loss accumulator, filled during the training loop below
total_losses = np.zeros(n_epochs, dtype=np.float32)

for epoch in xrange(1, n_epochs + 1):
    print('epoch', epoch)