Example #1
import os

import tensorflow as tf  # TF 1.x API (tf.train, tf.summary)

import mnist_data

# FLAGS and the make_* model builders are defined elsewhere in the original source file.

def main(unused_argv):
    FLAGS.logdir = os.path.join(FLAGS.logdir, FLAGS.algo)
    g = tf.Graph()
    with g.as_default():

        data_batch, _, _ = mnist_data.get_mnist(batch_size=FLAGS.batch_size,
                                                split="train")

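        # Each make_* builder constructs a model on data_batch and returns an ELBO tensor (averaged below).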
        if FLAGS.algo == "nis_vae_proposal":
            print("Running NIS with VAE proposal")
            elbo = make_nis_with_vae_proposal(data_batch,
                                              K=FLAGS.K,
                                              vae_latent_dim=FLAGS.latent_dim,
                                              nis_hidden_sizes=[200, 100],
                                              q_hidden_sizes=[300, 300],
                                              p_x_hidden_sizes=[300, 300],
                                              scale_min=FLAGS.scale_min)
        elif FLAGS.algo == "nis_gaussian_proposal":
            print("Running NIS with Gaussian proposal")
            elbo = make_nis_with_gaussian_proposal(
                data_batch, K=FLAGS.K, hidden_layer_sizes=[200, 100])
        elif FLAGS.algo == "vae_nis_prior":
            print("Running VAE with NIS prior")
            elbo = make_vae_with_nis_prior(data_batch,
                                           latent_dim=FLAGS.latent_dim,
                                           K=FLAGS.K,
                                           nis_hidden_sizes=[200, 100],
                                           q_hidden_sizes=[300, 300],
                                           p_x_hidden_sizes=[300, 300],
                                           scale_min=FLAGS.scale_min)
        elif FLAGS.algo == "vae_gaussian_prior":
            print("Running VAE with gaussian prior")
            elbo = make_vae_with_gaussian_prior(data_batch,
                                                latent_dim=FLAGS.latent_dim,
                                                q_hidden_sizes=[300, 300],
                                                p_x_hidden_sizes=[300, 300],
                                                scale_min=1e-5)
        else:
            raise ValueError("Unknown algo: %s" % FLAGS.algo)

        # Finish constructing the graph
        elbo_avg = tf.reduce_mean(elbo)
        tf.summary.scalar("elbo", elbo_avg)
        global_step = tf.train.get_or_create_global_step()
        opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
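        # Maximize the ELBO by minimizing its negation.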
        grads = opt.compute_gradients(-elbo_avg)
        train_op = opt.apply_gradients(grads, global_step=global_step)

        log_hooks = make_log_hooks(global_step, elbo_avg)

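        # MonitoredTrainingSession owns checkpointing, summary writing, and the logging hooks.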
        with tf.train.MonitoredTrainingSession(
                master="",
                is_chief=True,
                hooks=log_hooks,
                checkpoint_dir=FLAGS.logdir,
                save_checkpoint_secs=120,
                save_summaries_steps=FLAGS.summarize_every,
                log_step_count_steps=FLAGS.summarize_every) as sess:
            cur_step = -1
            while cur_step <= FLAGS.max_steps and not sess.should_stop():
                _, cur_step = sess.run([train_op, global_step])
Example #2
import numpy as np
import logging
import mxnet as mx  # needed for mx.cpu() and mx.misc.FactorScheduler below
import mnist_data as data
from math import sqrt
from autoencoder import AutoEncoderModel

if __name__ == '__main__':
    lv = 1e-2  # lv/ln in CDL
    # set to INFO to see less information during training
    logging.basicConfig(level=logging.DEBUG)
    #ae_model = AutoEncoderModel(mx.gpu(0), [784,500,500,2000,10], pt_dropout=0.2,
    #    internal_act='relu', output_act='relu')
    ae_model = AutoEncoderModel(mx.cpu(2), [784,500,500,2000,10], pt_dropout=0.2,
        internal_act='relu', output_act='relu')

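    # First 60k images for training, the remaining 10k for validation.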
    X, _ = data.get_mnist()
    train_X = X[:60000]
    val_X = X[60000:]

    #ae_model.layerwise_pretrain(train_X, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
    #                         lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
    #V = np.zeros((train_X.shape[0],10))
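    # Random init in [0, 0.1) for the latent matrix V; lambda_v_rt holds sqrt(lv) for every entry.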
    V = np.random.rand(train_X.shape[0],10)/10
    lambda_v_rt = np.ones((train_X.shape[0],10))*sqrt(lv)
    ae_model.finetune(train_X, V, lambda_v_rt, 256,
            20, 'sgd', l_rate=0.1, decay=0.0,
            lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
    ae_model.save('mnist_pt.arg')
    ae_model.load('mnist_pt.arg')
    print "Training error:", ae_model.eval(train_X,V,lambda_v_rt)
    #print "Validation error:", ae_model.eval(val_X)
Example #3
from snn_multipleneurons_fast import *
import sys
import os
import time  # time.time() is used below but was never imported explicitly
from sklearn.svm import LinearSVC
from mnist_data import get_mnist
import argparse
from tensorboardX import SummaryWriter

from tqdm import tqdm

import warnings
warnings.filterwarnings("ignore")

run = input("Run folder: ")

x_train, x_test, y_train, y_test = get_mnist()

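# Linear SVM readout; dual=False is preferred when n_samples > n_features (as with MNIST pixels).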
clf_output = LinearSVC(tol=1e-6, dual=False, class_weight='balanced')

if run == 'raw-mnist':
    print("Doing MNIST")
    tic = time.time()
    clf_output.fit(x_train, y_train)
    train_score = clf_output.score(x_train, y_train)
    test_score = clf_output.score(x_test, y_test)
    print("Time elapsed", time.time() - tic)

    print("MNIST baseline:")
    print('Test Score: ', test_score)
    print('Train Score: ', train_score, flush=True)
else: