Example #1
def run_aevb(train_images):
    """Train an auto-encoding variational Bayes (AEVB) model on train_images."""
    start_time = time.time()

    # Optimization hyperparameters.
    batch_size = 100
    num_training_iters = 2 * 640
    rs = npr.RandomState(0)

    parser = WeightsParser()
    parser.add_shape('encoding weights', (N_weights_enc, ))
    parser.add_shape('decoding weights', (N_weights_dec, ))
    initial_combined_weights = rs.randn(len(parser)) * param_scale

    batch_idxs = make_batches(train_images.shape[0], batch_size)

    def batch_value_and_grad(weights, iter):
        # Cycle through the minibatches; returns the variational lower bound
        # on the current batch, which grad() differentiates below.
        iter = iter % len(batch_idxs)
        cur_data = train_images[batch_idxs[iter]]
        return lower_bound(weights, encoder, decoder_log_like, N_weights_enc,
                           cur_data, samples_per_image, latent_dimensions, rs)

    lb_grad = grad(batch_value_and_grad)

    def callback(params, i, gradient):
        ml = batch_value_and_grad(params, i)
        print("log marginal likelihood:", ml)

        # Generate a grid of decoder samples and save it to samples.png.
        num_samples = 100
        images_per_row = 10
        zs = rs.randn(num_samples, latent_dimensions)
        samples = decoder(parser.get(params, 'decoding weights'), zs)
        fig = plt.figure(1)
        fig.clf()
        ax = fig.add_subplot(111)
        plot_images(samples, ax, ims_per_row=images_per_row)
        plt.savefig('samples.png')

    final_params = adam(lb_grad,
                        initial_combined_weights,
                        num_training_iters,
                        callback=callback)

    # Validation loss:
    print('--- test loss:', lower_bound(final_params, encoder,
                                        decoder_log_like, N_weights_enc,
                                        test_images[0:100, :],
                                        samples_per_image, latent_dimensions,
                                        rs))

    parameters = final_params, N_weights_enc, samples_per_image, latent_dimensions, rs
    save_string = 'parameters50.pkl'
    print('SAVING AS:', save_string)
    print('LATENT DIMS:', latent_dimensions)
    with open(save_string, 'wb') as f:
        pickle.dump(parameters, f, 1)

    finish_time = time.time()
    print("total runtime", finish_time - start_time)
Example #2
    def callback(params, i, gradient):
        ml = big_batch_value_and_grad(params, i)
        print("log marginal likelihood:", ml)

        print "----- iter ", i
        if i % 1000 == 0 and not np.isnan(
                lower_bound(params, encoder, decoder_log_like, N_weights_enc,
                            test_images[0:100, :], samples_per_image,
                            latent_dimensions, rs)):
            print 'SAVING ==== '
            save_string = 'parameters10l300hfor' + str(i) + '.pkl'

            parameters = params, N_weights_enc, samples_per_image, latent_dimensions, rs
            print 'SAVING AS: ', save_string
            print 'LATENTS DIMS', latent_dimensions
            with open(save_string, 'w') as f:
                pickle.dump(parameters, f, 1)
            #Validation loss:
            print '--- test loss:', lower_bound(params, encoder,
                                                decoder_log_like,
                                                N_weights_enc,
                                                test_images[0:100, :],
                                                samples_per_image,
                                                latent_dimensions, rs)

        # Generate a grid of decoder samples and save it to samples.png.
        num_samples = 100
        images_per_row = 10
        zs = rs.randn(num_samples, latent_dimensions)
        samples = decoder(parser.get(params, 'decoding weights'), zs)
        fig = plt.figure(1)
        fig.clf()
        ax = fig.add_subplot(111)
        plot_images(samples, ax, ims_per_row=images_per_row)
        plt.savefig('samples.png')
        if i % 100 == 0:
            # Split the flat parameter vector into encoder and decoder weights.
            enc_w = params[0:N_weights_enc]
            dec_w = params[N_weights_enc:]
            plot_latent_centers(encoder, decoder, enc_w, dec_w)
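The checkpoint written above bundles the weights with the hyperparameters needed to rebuild the model. A matching loader could look like this sketch (the file name is hypothetical; note the binary mode, matching the pickle write):

# Hypothetical loader for the checkpoint tuple saved by the callback.
import pickle

with open('parameters10l300hfor1000.pkl', 'rb') as f:
    params, N_weights_enc, samples_per_image, latent_dimensions, rs = pickle.load(f)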
Example #3
def lower_bound_batch(weights, encode, decode_log_like, N_weights_enc,
                      train_images, samples_per_image, latent_dimensions, rs):
    lower_bound_sum = 0.0
    # Optionally silence printing during the loop:
    # sys.stdout = open(os.devnull, "w")

    # The batch size is train_images.shape[0]; average the per-image bounds.
    for i in range(train_images.shape[0]):
        cur_image = train_images[i, :]
        lower_bound_sum = lower_bound_sum + lower_bound(
            weights, encode, decode_log_like, N_weights_enc, cur_image,
            samples_per_image, latent_dimensions, rs)
    lower_bound_est = lower_bound_sum / train_images.shape[0]
    # Restore printing afterwards:
    # sys.stdout = sys.__stdout__

    return lower_bound_est
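`lower_bound` itself is not shown in these examples. For orientation, the standard single-sample AEVB estimator for a diagonal-Gaussian encoder can be sketched as below; this is an illustration of the usual reparameterized ELBO, not the source's implementation, and `decode_log_like` is assumed to return log p(image | z).

import autograd.numpy as np

# Sketch: one-sample ELBO estimate under an assumed Gaussian encoder output.
def elbo_single_sample(enc_mean, enc_log_std, decode_log_like, dec_weights, image, rs):
    # Reparameterization: z = mu + sigma * eps with eps ~ N(0, I).
    eps = rs.randn(*enc_mean.shape)
    z = enc_mean + np.exp(enc_log_std) * eps
    # Analytic KL(q(z|x) || N(0, I)) for a diagonal Gaussian posterior.
    kl = 0.5 * np.sum(np.exp(2 * enc_log_std) + enc_mean ** 2 - 1 - 2 * enc_log_std)
    # ELBO = E_q[log p(x|z)] - KL(q || p).
    return decode_log_like(dec_weights, z, image) - kl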
Example #4
    def batch_value_and_grad(weights, iter):
        # Cycle through minibatches and return the bound on the current one.
        iter = iter % len(batch_idxs)
        cur_data = train_images[batch_idxs[iter]]
        return lower_bound(weights, two_part_encode, decoder_log_like,
                           N_weights_enc, cur_data, samples_per_image,
                           latent_dimensions, rs)
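As in Example #1, this closure is the scalar objective that gets differentiated with respect to `weights`. With autograd the wiring is one line (a sketch; `initial_combined_weights` as in Example #1):

from autograd import grad

lb_grad = grad(batch_value_and_grad)      # gradient of the bound w.r.t. weights
g = lb_grad(initial_combined_weights, 0)  # gradient evaluated on the first minibatch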
Example #5
    def data_L(data):
        # Promote a single image to a (1, D) batch so lower_bound sees 2-D input.
        data = np.atleast_2d(data)
        return lower_bound(params, encoder, decoder_log_like, N_weights_enc,
                           data, samples_per_image, latent_dimensions, rs)
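`data_L` closes over `params` and the model configuration, so callers pass only data. A hypothetical usage, assuming `test_images` is an (N, D) array:

single_bound = data_L(test_images[0])     # np.atleast_2d promotes it to shape (1, D)
batch_bound = data_L(test_images[:100])   # the same call handles a batch unchanged
print('lower bound on the batch:', batch_bound)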