def test_mnist(self):
    """Train a small ladder network on semi-supervised MNIST for one epoch
    and assert that the best (lowest) training loss drops below 20.0.

    NOTE(review): legacy TensorFlow r0.x API throughout (initialize_all_variables,
    SummaryWriter, graph_def) and a Python 2 print statement near the end —
    this test predates TF 1.0 / Python 3.
    """
    import tensor_dynamic.data.input_data as mnist
    # Semi-supervised split: only 100 labeled examples are exposed to the loss.
    num_labeled = 100
    data = mnist.read_data_sets("../data/MNIST_data", n_labeled=num_labeled, one_hot=True)

    batch_size = 100
    num_epochs = 1
    num_examples = 60000
    # Python 2 integer division — yields an int iteration count (600 here).
    num_iter = (num_examples/batch_size) * num_epochs
    starter_learning_rate = 0.02

    inputs = tf.placeholder(tf.float32, shape=(None, 784))
    # No shape given: presumably fed (batch, 10) one-hot labels — TODO confirm.
    targets = tf.placeholder(tf.float32)

    with tf.Session() as s:
        s.as_default()
        # Ladder network stack: input -> 500-unit ladder layer -> 10-unit gamma
        # layer -> output layer. The float args (1000.0, 10.0, 0.1) are presumably
        # per-layer denoising cost weights — TODO confirm against LadderLayer.
        i = InputLayer(inputs)
        l1 = LadderLayer(i, 500, 1000.0, s)
        l2 = LadderGammaLayer(l1, 10, 10.0, s)
        ladder = LadderOutputLayer(l2, 0.1, s)

        loss = ladder.cost_all_layers_train(targets)
        learning_rate = tf.Variable(starter_learning_rate, trainable=False)
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)

        # Batch-norm moving statistics must be refreshed after each optimizer
        # step, so the assign ops are sequenced behind train_step.
        bn_updates = tf.group(*(l1.bn_assigns + l2.bn_assigns))
        with tf.control_dependencies([train_step]):
            train_step = tf.group(bn_updates)
        pred_cost = -tf.reduce_mean(tf.reduce_sum(targets * tf.log(tf.clip_by_value(ladder.activation_predict, 1e-10, 1.0)), 1))  # cost used for prediction

        correct_prediction = tf.equal(tf.argmax(ladder.activation_predict, 1), tf.argmax(targets, 1))  # no of correct predictions
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) * tf.constant(100.0)

        s.run(tf.initialize_all_variables())

        #print "init accuracy", s.run([accuracy], feed_dict={inputs: data.test.images, targets: data.test.labels})
        min_loss = 100000.
        writer = tf.train.SummaryWriter("/tmp/td", s.graph_def)
        writer.add_graph(s.graph_def)

        # NOTE(review): the loop index `i` shadows the InputLayer bound to `i`
        # above; harmless here because the layer object is no longer referenced.
        for i in range(num_iter):
            images, labels = data.train.next_batch(batch_size)
            _, loss_val = s.run([train_step, loss], feed_dict={inputs: images, targets: labels})
            if loss_val < min_loss:
                min_loss = loss_val
            print(i, loss_val)
            # print "acc", s.run([accuracy], feed_dict={inputs: data.test.images, targets: data.test.labels})

        #acc = s.run(accuracy, feed_dict={inputs: data.test.images, targets: data.test.labels})
        print "min loss", min_loss
        #print "final accuracy ", acc
        self.assertLess(min_loss, 20.0)
def test_accuracy_bug(self):
    """Regression test: trainer.accuracy() used to raise after a train() call.

    Builds a single sigmoid layer over MNIST, performs one training pass on
    the validation split, then checks accuracy() both runs and returns a
    percentage in the closed range [0, 100].
    """
    import tensor_dynamic.data.input_data as mnist

    data_sets = mnist.read_data_sets("../data/MNIST_data", one_hot=True)
    image_placeholder = tf.placeholder(tf.float32, shape=(None, 784))

    network = Layer(InputLayer(image_placeholder), 10, self.session,
                    non_liniarity=tf.sigmoid)
    trainer = CategoricalTrainer(network, 0.1)

    # this was throwing an exception
    trainer.train(data_sets.validation.images, data_sets.validation.labels)
    accuracy = trainer.accuracy(data_sets.validation.images,
                                data_sets.validation.labels)

    self.assertLessEqual(accuracy, 100.)
    self.assertGreaterEqual(accuracy, 0.)
def mnist_data(self):
    """Return the MNIST data set, loading it lazily on first access.

    The loaded data set is cached on ``self.MNIST_DATA`` so the (slow)
    read from disk happens at most once.
    """
    if self.MNIST_DATA is not None:
        # Cache hit: reuse the previously loaded data set.
        return self.MNIST_DATA

    # First access: import here so the dependency is only paid when needed.
    import tensor_dynamic.data.input_data as mnist
    self.MNIST_DATA = mnist.read_data_sets("../../tensor_dynamic/data/MNIST_data/", one_hot=True)
    return self.MNIST_DATA
# Script: builds a four-layer back-activating network with batch normalisation
# between every pair of layers. Each hidden layer has a single unit (width 1),
# sigmoid non-linearity, denoising noise, and a small unsupervised cost.
# NOTE(review): legacy pre-1.0 TensorFlow API (tf.Session used as a context
# at module level); chunk may continue past the last visible statement.
import tensorflow as tf
import tensor_dynamic.data.input_data as mnist
from tensor_dynamic.layers.back_weight_layer import BackWeightLayer
from tensor_dynamic.layers.batch_norm_layer import BatchNormLayer
from tensor_dynamic.layers.input_layer import InputLayer
from tensor_dynamic.layers.layer import Layer
from tensor_dynamic.train_policy import TrainPolicy
from tensor_dynamic.categorical_trainer import CategoricalTrainer

batch_size = 100

data = mnist.read_data_sets("../data/MNIST_data", one_hot=True, validation_size=5000)

with tf.Session() as sess:
    inputs = tf.placeholder(tf.float32, shape=(None, 784))
    # Shared hyper-parameters for all four Layer/BatchNormLayer pairs.
    bactivate = True          # enable backwards activation (reconstruction path)
    noise_std = 0.3           # input corruption noise for denoising
    beta = 0.5                # batch-norm shift initialiser
    gamma = 0.5               # batch-norm scale initialiser
    non_lin = tf.nn.sigmoid

    input_layer = InputLayer(inputs)
    # Stack: BN -> Layer, repeated four times; every hidden layer is width 1.
    bn1 = BatchNormLayer(input_layer, sess, beta=beta, gamma=gamma)
    net1 = Layer(bn1, 1, sess, non_liniarity=non_lin, bactivate=bactivate, unsupervised_cost=.001, noise_std=noise_std)
    bn2 = BatchNormLayer(net1, sess, beta=beta, gamma=gamma)
    net2 = Layer(bn2, 1, sess, non_liniarity=non_lin, bactivate=bactivate, unsupervised_cost=.001, noise_std=noise_std)
    bn3 = BatchNormLayer(net2, sess, beta=beta, gamma=gamma)
    net3 = Layer(bn3, 1, sess, non_liniarity=non_lin, bactivate=bactivate, unsupervised_cost=.001, noise_std=noise_std)
    bn4 = BatchNormLayer(net3, sess, beta=beta, gamma=gamma)
    net4 = Layer(bn4, 1, sess, non_liniarity=non_lin, bactivate=bactivate, unsupervised_cost=.001, noise_std=noise_std)
# Script: constructs a noisy-input ladder network over MNIST and sanity-checks
# the per-layer pre-activation widths (784 -> 500 -> 10).
# NOTE(review): legacy pre-1.0 TensorFlow; several values below (num_labeled,
# num_iter, learning_rate) are defined but not used in the visible statements —
# presumably used later in the file.
import tensorflow as tf
import tensor_dynamic.data.input_data as mnist
from tensor_dynamic.layers.input_layer import NoisyInputLayer
from tensor_dynamic.layers.ladder_layer import LadderLayer, LadderGammaLayer
from tensor_dynamic.layers.ladder_output_layer import LadderOutputLayer

# Semi-supervised split: only 100 labeled examples.
num_labeled = 100
data = mnist.read_data_sets("../data/MNIST_data", n_labeled=num_labeled, one_hot=True)

NOISE_STD = 0.3
batch_size = 100
num_epochs = 1
num_examples = 60000
num_iter = (num_examples/batch_size) * num_epochs  # Python 2 integer division
learning_rate = 0.1

inputs = tf.placeholder(tf.float32, shape=(None, 784))
# No shape given: presumably fed (batch, 10) one-hot labels — TODO confirm.
targets = tf.placeholder(tf.float32)

with tf.Session() as s:
    s.as_default()
    # Ladder stack: noisy input -> 500-unit ladder layer -> 10-unit gamma
    # layer -> output layer. Float args are presumably per-layer denoising
    # cost weights — TODO confirm against LadderLayer.
    i = NoisyInputLayer(inputs, NOISE_STD, s)
    l1 = LadderLayer(i, 500, 1000.0, s)
    l2 = LadderGammaLayer(l1, 10, 10.0, s)
    ladder = LadderOutputLayer(l2, 0.1, s)
    l3 = ladder  # alias kept from the original; both names refer to the output layer

    # Verify the static widths of each layer's (corrupted) pre-activations.
    assert int(i.z.get_shape()[-1]) == 784
    assert int(l1.z_corrupted.get_shape()[-1]) == 500
    assert int(l2.z_corrupted.get_shape()[-1]) == 10
# Import MINST data import tensor_dynamic.data.input_data as input_data mnist = input_data.read_data_sets("../data/MNIST_data", one_hot=True) import tensorflow as tf # Parameters learning_rate = 0.001 training_epochs = 15 batch_size = 100 display_step = 1 # Network Parameters n_hidden_1 = 256 # 1st layer num features n_hidden_2 = 256 # 2nd layer num features n_input = 784 # MNIST data input (img shape: 28*28) n_classes = 10 # MNIST total classes (0-9 digits) # tf Graph input x = tf.placeholder("float", [None, n_input]) y = tf.placeholder("float", [None, n_classes]) # Store layers weight & bias weights = { 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])), 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes])) } biases = { 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 'b2': tf.Variable(tf.random_normal([n_hidden_2])),