Example #1
def trainer(learning_rate=1e-5, batch_size=64, num_epoch=75, n_z=1000):
    model = VariantionalAutoencoder(learning_rate=learning_rate, batch_size=batch_size, n_z=n_z)

    for iter in range(1, 5000000):
        # Obtain a batch
        x_batch, y_batch = fetch_x_y(train_data, batch_threshold)
        x_batch /= 14.0  # scale scene values into [0, 1]
        # Execute the forward and backward passes and return the computed losses
        loss, recon_loss, latent_loss = model.run_single_step(x_batch)

        if iter % 10 == 0:
            print('[Iter {}] Loss: {}, Recon: {}, Latent: {}'.format(iter, loss, recon_loss, latent_loss))
            
        if iter % 5000 == 0:
            # Reconstruct the current batch and export input/output pairs as .ply files
            x_reconstructed = model.reconstructor(x_batch)
            x_reconstructed = (x_reconstructed * 14.0).astype(int)  # undo the [0, 1] scaling
            x_reconstructed = np.reshape(x_reconstructed, (-1, scene_shape[0], scene_shape[1]))
            x_batch = np.reshape(x_batch, (-1, scene_shape[0], scene_shape[1]))
            x_batch = (x_batch * 14.0).astype(int)
            for i in range(x_reconstructed.shape[0]):
                scene = x_reconstructed[i]
                empty = np.zeros((84, 10))  # blank gap between reconstruction and input
                scene = np.concatenate((scene, empty), axis=1)
                scene = np.concatenate((scene, x_batch[i]), axis=1)
                utils.npy_to_ply(directory + "/_" + str(i) + "_generated", scene)

    print('Done!')
    return model
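
The trainer above (and Example #4 below) relies on a module-level fetch_x_y helper defined elsewhere in the repository. A minimal sketch of what such a batch sampler might look like; the signature and behaviour here are assumptions, not taken from the source:

import numpy as np

# Hypothetical stand-in for the repository's fetch_x_y helper.
def fetch_x_y(data, batch_threshold, batch_size=64):
    # draw a random batch; batch_threshold is assumed to bound the sampled indices
    idx = np.random.randint(0, batch_threshold, size=batch_size)
    batch = np.asarray(data)[idx].astype(np.float32)
    x = batch.reshape(batch_size, -1)  # flattened 84x84 scenes
    y = x.copy()                       # autoencoder targets mirror the input
    return x, y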
Example #2
def generate(sess, scenes, halfed_scene_shape):
    # clear out .ply files exported by a previous run
    for item in glob.glob(directory + "/*.ply"):
        os.remove(item)

    scenes = np.reshape(scenes, (batch_size, scene_shape[0], scene_shape[1]))

    for i in range(batch_size):
        utils.npy_to_ply(directory + "/full" + str(i), scenes[i])

    # zero out the right half of every scene before completion
    scenes[:, :, halfed_scene_shape:] = 0.

    # sweep over cells, re-running the classifier and writing its argmax
    # prediction back into the scene at the current cell
    for i in range(scene_shape[0]):
        for j in range(halfed_scene_shape):
            scenes = np.reshape(scenes, (-1, scene_shape[0] * scene_shape[1]))
            score = sess.run(ConvNet_class.score,
                             feed_dict={
                                 x: scenes,
                                 keepProb: 1.0,
                                 phase: False
                             })
            score = np.reshape(
                score,
                (batch_size, scene_shape[0], scene_shape[1], 1, classes_count))
            score = np.argmax(score, 4)  # most likely class per cell
            score = np.reshape(score,
                               (batch_size, scene_shape[0], scene_shape[1], 1))
            scenes = np.reshape(
                scenes, (batch_size, scene_shape[0], scene_shape[1], 1))
            scenes[:, i, j, 0] = score[:, i, j, 0]

    # export the completed scenes
    for i in range(batch_size):
        utils.npy_to_ply(directory + "/halfed" + str(i), scenes[i])

    return scenes
Example #3
def generate(sess, scenes, halfed_scene_shape):
    # for i in xrange(occlude_start_row,self.height):
    # for j in xrange(self.width):
    # for k in xrange(self.channel):
    # next_sample = self.predict(samples) / (self.pixel_depth - 1.) # argmax or random draw here
    # samples[:, i, j, k] = next_sample[:, i, j, k]

    scenes = np.reshape(scenes, (batch_size, scene_shape[0], scene_shape[1]))
    scenes[:, :, halfed_scene_shape:] = 0  # zero out the right half before completion
    scenes = scenes / (classes_count - 1.)  # normalize class labels into [0, 1]

    # sweep over cells, re-running the classifier and writing its normalized
    # argmax prediction back into the scene at the current cell
    for i in range(scene_shape[0]):
        for j in range(halfed_scene_shape):
            scenes = np.reshape(scenes, (-1, scene_shape[0] * scene_shape[1]))
            score = sess.run(ConvNet_class.score,
                             feed_dict={
                                 x: scenes,
                                 keepProb: 1.0,
                                 phase: False
                             })
            score = np.reshape(
                score,
                (batch_size, scene_shape[0], scene_shape[1], 1, classes_count))
            score = np.argmax(score, 4)  # most likely class per cell
            score = np.reshape(score,
                               (batch_size, scene_shape[0], scene_shape[1], 1))
            # argmax yields an integer array, so avoid in-place float division
            score = score / (classes_count - 1.)
            scenes = np.reshape(
                scenes, (batch_size, scene_shape[0], scene_shape[1], 1))
            scenes[:, i, j, 0] = score[:, i, j, 0]

    # clear out .ply files exported by a previous run
    for item in glob.glob(directory + "/*.ply"):
        os.remove(item)

    for i in range(batch_size):
        utils.npy_to_ply(directory + "/halfed" + str(i), scenes[i])

    return scenes
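
Both generate variants assume a trained ConvNet_class graph along with the module-level placeholders x, keepProb, and phase. A hypothetical invocation; test_scenes, the Saver, and the checkpoint path are assumed names, not identifiers from the source:

import tensorflow as tf

saver = tf.train.Saver()  # assumes the ConvNet_class graph has already been built
with tf.Session() as sess:
    saver.restore(sess, "checkpoints/convnet.ckpt")  # hypothetical checkpoint path
    completed = generate(sess, test_scenes, halfed_scene_shape=scene_shape[1] // 2)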
Example #4
def trainer(learning_rate=1e-5, batch_size=128, num_epoch=75, n_z=1000):
    model = VariantionalAutoencoder(learning_rate=learning_rate,
                                    batch_size=batch_size,
                                    n_z=n_z)

    x_mean = np.load("x_mean_2d.npy")
    x_std = np.load("x_std_2d.npy")

    for iter in range(0, 5000000):
        # Obtain a batch
        x_batch, y_batch = fetch_x_y(train_data, batch_threshold)

        # keep an unnormalized copy of the batch for visualization later
        y_batch = np.zeros((batch_size, 84 * 84))
        y_batch[:, :] = x_batch
        y_batch = np.reshape(y_batch, (-1, scene_shape[0], scene_shape[1]))

        # normalize X = (X - mean) / std, then rescale each sample into [0, 1]
        x_batch = (x_batch - x_mean) / x_std
        x_batch_min = np.min(x_batch, axis=1)
        for i in range(batch_size):
            x_batch[i, :] -= x_batch_min[i]

        x_batch_max = np.max(x_batch, axis=1)
        for i in range(batch_size):
            x_batch[i, :] /= x_batch_max[i]

        loss, recon_loss, latent_loss = model.run_single_step(x_batch)

        if iter % 10 == 0:
            print('[Iter {}] Loss: {}, Recon: {}, Latent: {}'.format(
                iter, loss, recon_loss, latent_loss))

        if iter % 5000 == 0:  # completion: mask the left half with noise and reconstruct
            x_batch = np.reshape(x_batch, (-1, scene_shape[0], scene_shape[1]))
            x_batch[:, :, :42] = np.random.rand(batch_size, scene_shape[0], 42)
            x_batch = np.reshape(x_batch,
                                 (-1, scene_shape[0] * scene_shape[1]))

            x_reconstructed = model.reconstructor(x_batch)
            x_reconstructed = np.reshape(x_reconstructed,
                                         (-1, scene_shape[0], scene_shape[1]))

            #----------------------------------------
            # undo the per-sample [0, 1] rescaling ...
            for i in range(batch_size):
                x_reconstructed[i, :] *= x_batch_max[i]

            for i in range(batch_size):
                x_reconstructed[i, :] += x_batch_min[i]

            x_reconstructed = np.reshape(x_reconstructed,
                                         (-1, scene_shape[0] * scene_shape[1]))
            # ... then invert the standardization: X = X * std + mean
            x_reconstructed = x_reconstructed * x_std + x_mean
            x_reconstructed = np.reshape(x_reconstructed,
                                         (-1, scene_shape[0], scene_shape[1]))
            #----------------------------------------

            # export the first 20 reconstructions next to their unnormalized targets
            for i in range(20):
                scene = x_reconstructed[i]
                empty = np.zeros((84, 10))
                scene = np.concatenate((scene, empty), axis=1)
                scene = np.concatenate((scene, y_batch[i]), axis=1)
                utils.npy_to_ply(directory + "/_" + str(i) + "_generated",
                                 scene)

    print('Done!')
    return model
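
Example #4 loads per-pixel statistics from x_mean_2d.npy and x_std_2d.npy. A hedged sketch of how such files could be produced from the flattened training scenes; the file names match the example, everything else is an assumption:

import numpy as np

# train_scenes: float array of shape (num_scenes, 84 * 84), assumed to exist
x_mean = np.mean(train_scenes, axis=0)
x_std = np.std(train_scenes, axis=0) + 1e-8  # avoid division by zero later
np.save("x_mean_2d.npy", x_mean)
np.save("x_std_2d.npy", x_std)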
Example #5
posterior = make_encoder(data, code_size=2)
code = posterior.sample()

# Define the loss.
likelihood = make_decoder(code, [84, 84]).log_prob(data)
divergence = tfd.kl_divergence(posterior, prior)
elbo = tf.reduce_mean(likelihood - divergence)
optimize = tf.train.AdamOptimizer(0.001).minimize(-elbo)

samples = make_decoder(prior.sample(10), [84, 84]).mean()  # decode 10 draws from the prior

# mnist = input_data.read_data_sets('MNIST_data/')
#fig, ax = plt.subplots(nrows=20, ncols=11, figsize=(10, 20))
with tf.train.MonitoredSession() as sess:
    for iter in range(5000000):
        if iter % 10000 == 0:
            # periodically evaluate the ELBO and export decoded samples as .ply files
            x_batch, y_batch = fetch_x_y(train_data, batch_threshold)
            x_batch /= 14.0  # scale scene values into [0, 1]
            feed = {data: x_batch}
            test_elbo, test_codes, test_samples = sess.run([elbo, code, samples], feed)
            test_samples = (test_samples * 14).astype(int)  # undo the scaling
            for i in range(test_samples.shape[0]):
                utils.npy_to_ply(str(i) + "_generated", test_samples[i])
            print('Step', iter, 'elbo', test_elbo)
        
        x_batch, y_batch = fetch_x_y(train_data, batch_threshold)
        x_batch /= 14.0  # scale scene values into [0, 1]
        feed = {data: x_batch}
        sess.run(optimize, feed)
        
#plt.savefig('vae-mnist.png', dpi=300, transparent=True, bbox_inches='tight')
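
Example #5 uses make_encoder, make_decoder, data, and prior without showing their definitions. A minimal sketch of how these pieces are commonly wired up with TensorFlow Probability distributions; the layer sizes, placeholder shape, and the make_prior helper are assumptions, not taken from the source:

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

def make_encoder(data, code_size):
    # map a scene to a diagonal Gaussian posterior over the latent code
    x = tf.layers.flatten(data)
    x = tf.layers.dense(x, 200, tf.nn.relu)
    x = tf.layers.dense(x, 200, tf.nn.relu)
    loc = tf.layers.dense(x, code_size)
    scale = tf.layers.dense(x, code_size, tf.nn.softplus)
    return tfd.MultivariateNormalDiag(loc, scale)

def make_prior(code_size):
    # standard normal prior over the latent code
    return tfd.MultivariateNormalDiag(tf.zeros(code_size), tf.ones(code_size))

def make_decoder(code, data_shape):
    # map a latent code to per-cell Bernoulli parameters over the scene
    x = tf.layers.dense(code, 200, tf.nn.relu)
    x = tf.layers.dense(x, 200, tf.nn.relu)
    logit = tf.layers.dense(x, int(np.prod(data_shape)))
    logit = tf.reshape(logit, [-1] + data_shape)
    return tfd.Independent(tfd.Bernoulli(logit), 2)

data = tf.placeholder(tf.float32, [None, 84, 84])  # assumed input shape
prior = make_prior(code_size=2)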