def test_train_xor(self):
        train_x = [[0.0, 1.0, -1.0, 0.0],
                   [1.0, 0.0, -1.0, 1.0],
                   [0.0, 1.0, -1.0, -1.0],
                   [-1.0, 0.5, 1.0, 0.0]]
        train_y = [[-1.0, 0.0],
                   [1.0, 1.0],
                   [0., -1.0],
                   [-1.0, 0.0]]
        targets = tf.placeholder('float', (None, 2))

        ladder = InputLayer(len(train_x[0]), self.session)
        ladder = LadderLayer(ladder, 6, 1000., self.session)
        ladder = LadderLayer(ladder, 6, 10., self.session)
        ladder = LadderGammaLayer(ladder, 2, 0.1, self.session)
        ladder = LadderOutputLayer(ladder, 0.1, self.session)

        cost = ladder.cost_all_layers_train(targets)
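        # the ladder cost above combines the supervised output cost with the per-layer denoising costs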
        train = tf.train.AdamOptimizer(0.1).minimize(cost)

        self.session.run(tf.initialize_all_variables())
        _, cost1 = self.session.run([train, cost], feed_dict={ladder.input_placeholder:train_x, targets:train_y})

        print(self.session.run([train, cost], feed_dict={ladder.input_placeholder:train_x, targets:train_y}))
        print(self.session.run([train, cost], feed_dict={ladder.input_placeholder:train_x, targets:train_y}))
        print(self.session.run([train, cost], feed_dict={ladder.input_placeholder:train_x, targets:train_y}))

        _, cost2 = self.session.run([train, cost], feed_dict={ladder.input_placeholder:train_x, targets:train_y})

        self.assertGreater(cost1, cost2, msg="Expected loss to reduce")
    def test_prune_layer(self):
        # create a layer and input/label data such that all but 1 output node is useless
        self.INPUT_NODES = 3
        self.OUTPUT_NODES = 1
        x = np.zeros((self.INPUT_NODES, self.OUTPUT_NODES), np.float32)
        for i in range(self.OUTPUT_NODES):
            x[0, i - 1] = 1.0
        y = np.zeros((self.OUTPUT_NODES, self.OUTPUT_NODES), np.float32)
        np.fill_diagonal(y, 1.)
        layer_1 = DuelStateReluLayer(InputLayer(self.INPUT_NODES), self.INPUT_NODES, session=self.session, weights=x,
                                     width_regularizer_constant=1e-2)
        layer_2 = HiddenLayer(layer_1, self.OUTPUT_NODES, weights=y, freeze=True)
        trainer = CategoricalTrainer(layer_2, 0.1)

        data_1 = [1.0] * self.INPUT_NODES
        data_2 = [0.0] * self.INPUT_NODES
        label_1 = [1.0] + [0.0] * (self.OUTPUT_NODES - 1)  # only the first node is correlated with the input
        label_2 = [0.0] * self.OUTPUT_NODES

        inputs = [data_1, data_2]
        labels = [label_1, label_2]

        for i in range(500):
            self.session.run([trainer._train],
                             feed_dict={layer_2.input_placeholder: inputs[:1],
                                        trainer._target_placeholder: labels[:1],
                                        trainer._learn_rate_placeholder: 0.05})
            self.session.run([trainer._train],
                             feed_dict={layer_2.input_placeholder: inputs[1:],
                                        trainer._target_placeholder: labels[1:],
                                        trainer._learn_rate_placeholder: 0.05})

        # layer should only have 1 active node
        self.assertGreater(layer_1.width()[0], DuelStateReluLayer.ACTIVE_THRESHOLD)
        self.assertEqual(layer_1.active_nodes(), 1)

        activation_pre_prune = self.session.run([layer_2.activation_predict],
                                                feed_dict={layer_1.input_placeholder: inputs})

        # after pruning the layer should have 2 nodes: the 1 active node plus 1 inactive node left behind
        layer_1.prune(inactive_nodes_to_leave=1)

        self.assertEqual(layer_1.output_nodes, 2)

        activation_post_prune = self.session.run([layer_2.activation_predict],
                                                 feed_dict={layer_1.input_placeholder: inputs})

        np.testing.assert_array_almost_equal(activation_pre_prune, activation_post_prune, decimal=2)
Example 3
    def test_resize_deep(self):
        bactivate = True
        net1 = InputLayer(784)
        bn1 = BatchNormLayer(net1, self.session)
        net2 = HiddenLayer(bn1, 8, self.session, bactivate=bactivate)
        bn2 = BatchNormLayer(net2, self.session)
        net2 = HiddenLayer(bn2, 6, self.session, bactivate=bactivate)
        bn3 = BatchNormLayer(net2, self.session)
        net3 = HiddenLayer(bn3, 4, self.session, bactivate=bactivate)
        output_net = HiddenLayer(net3, 2, self.session, bactivate=False)

        print(self.session.run(output_net.activation_predict, feed_dict={net1.input_placeholder: np.zeros(shape=(1, 784))}))

        net2.resize(net2.output_nodes + 1)
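        # after the resize, the surrounding batch-norm and downstream layers must accept the new width for the next forward pass to succeed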

        print(self.session.run(output_net.activation_predict, feed_dict={net1.input_placeholder: np.zeros(shape=(1, 784))}))
    def test_reconstruction_of_single_input(self):
        input_layer = InputLayer(1)
        layer = BackWeightLayer(input_layer, 1, non_liniarity=tf.nn.sigmoid, session=self.session, noise_std=0.3)

        cost = layer.unsupervised_cost_train()
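        # unsupervised_cost_train is the layer's reconstruction cost on its noise-corrupted input, so no labels are needed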
        optimizer = tf.train.AdamOptimizer(0.1).minimize(cost)

        self.session.run(tf.initialize_all_variables())

        data = np.random.normal(0.5, 0.5, size=[200, 1])

        for x in range(100):
            self.session.run([optimizer], feed_dict={input_layer.input_placeholder: data})

        result = self.session.run([cost], feed_dict={input_layer.input_placeholder: data})
        print(result)
Example 5
    def test_remove_layer_from_network(self):
        input_layer = InputLayer(self.mnist_data.features_shape)
        layer = HiddenLayer(input_layer, 10, session=self.session,
                            node_importance_func=node_importance_optimal_brain_damage)
        output = CategoricalOutputLayer(layer, self.mnist_data.labels_shape, regularizer_weighting=0.0001)

        activation = self.session.run(output.activation_predict,
                                      feed_dict={output.input_placeholder: self.mnist_data.train.features[:1]})

        layer.remove_layer_from_network()

        activation = self.session.run(output.activation_predict,
                                      feed_dict={output.input_placeholder: self.mnist_data.train.features[:1]})

        self.assertEqual(output.layer_number, 1)
        self.assertEqual(output.input_nodes, (784,))
Example 6
    def test_growing(self):
        input_layer = InputLayer(self.mnist_data.features_shape)
        layer = HiddenLayer(input_layer, 1, session=self.session,
                            node_importance_func=node_importance_optimal_brain_damage)
        output = CategoricalOutputLayer(layer, self.mnist_data.labels_shape, regularizer_weighting=0.0001)

        weights_hidden = layer._weights.eval()
        bias_hidden = layer._bias.eval()
        weights_output = output._weights.eval()

        layer.resize(2)

        new_weights_hidden = layer._weights.eval()
        new_bias_hidden = layer._bias.eval()
        new_weights_output = output._weights.eval()

        np.testing.assert_almost_equal(new_weights_output[0], weights_output[0] / 2)
Example 7
    def test_accuracy_bug(self):
        import tensor_dynamic.data.mnist_data as mnist
        import tensor_dynamic.data.data_set as ds
        import os

        data = mnist.get_mnist_data_set_collection(os.path.dirname(ds.__file__) + "/MNIST_data", one_hot=True)

        input_layer = InputLayer(data.features_shape)
        outputs = CategoricalOutputLayer(input_layer, data.labels_shape, self.session)

        outputs.train_till_convergence(data.test,
                                       learning_rate=0.2, continue_epochs=1)

        # this was throwing an exception
        accuracy = outputs.accuracy(data.test)
        self.assertLessEqual(accuracy, 100.)
        self.assertGreaterEqual(accuracy, 0.)
    def reconstruction_loss_for(self, output_nodes, data):
        bw_layer1 = BackWeightCandidateLayer(InputLayer(len(data[0])), output_nodes, non_liniarity=tf.nn.sigmoid,
                                             session=self.session,
                                             noise_std=0.3)

        cost = bw_layer1.unsupervised_cost_train()
        optimizer = tf.train.AdamOptimizer(0.1).minimize(cost)

        self.session.run(tf.initialize_all_variables())

        for i in range(5):
            for j in range(0, len(data) - 100, 100):
                self.session.run(optimizer, feed_dict={bw_layer1.input_placeholder: data[j:j + 100]})

        result = self.session.run(bw_layer1.unsupervised_cost_predict(),
                                  feed_dict={bw_layer1.input_placeholder: data})
        print("denoising with %s hidden layer had cost %s" % (output_nodes, result))
        return result
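    # A possible usage sketch (hypothetical node counts, data, and assertion, not part of the original tests):
    #
    #     def test_more_hidden_nodes_reconstruct_better(self):
    #         data = np.random.normal(0.5, 0.5, size=(500, 4)).tolist()
    #         self.assertLess(self.reconstruction_loss_for(8, data),
    #                         self.reconstruction_loss_for(2, data))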
Example 9
    def test_reconstruction_of_single_input(self):
        input_layer = InputLayer(1)
        layer = HiddenLayer(input_layer, 1, bactivate=True, session=self.session, layer_noise_std=0.3)
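        # bactivate=True gives the layer a backward (reconstruction) pass, which is trained below to reproduce the input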

        cost_train = tf.reduce_mean(
            tf.reduce_sum(tf.square(layer.bactivation_train - input_layer.activation_train), 1))
        cost_predict = tf.reduce_mean(
            tf.reduce_sum(tf.square(layer.bactivation_predict - input_layer.activation_predict), 1))
        optimizer = tf.train.AdamOptimizer(0.1).minimize(cost_train)

        self.session.run(tf.initialize_all_variables())

        data = np.random.normal(0.5, 0.5, size=[200, 1])

        for x in range(100):
            self.session.run([optimizer], feed_dict={input_layer.input_placeholder: data})

        result = self.session.run([cost_predict], feed_dict={input_layer.input_placeholder: data})
        print(result)
Example 10
    def test_train_xor(self):
        train_x = [[0.0, 1.0, -1.0, 0.0], [1.0, 0.0, -1.0, 1.0],
                   [0.0, 1.0, -1.0, -1.0], [-1.0, 0.5, 1.0, 0.0]]
        train_y = [[-1.0, 0.0], [1.0, 1.0], [0., -1.0], [-1.0, 0.0]]
        targets = tf.placeholder('float', (None, 2))

        ladder = InputLayer(len(train_x[0]), self.session)
        ladder = LadderLayer(ladder, 6, 1000., self.session)
        ladder = LadderLayer(ladder, 6, 10., self.session)
        ladder = LadderGammaLayer(ladder, 2, 0.1, self.session)
        ladder = LadderOutputLayer(ladder, 0.1, self.session)

        cost = ladder.cost_all_layers_train(targets)
        train = tf.train.AdamOptimizer(0.1).minimize(cost)

        self.session.run(tf.initialize_all_variables())
        _, cost1 = self.session.run([train, cost],
                                    feed_dict={
                                        ladder.input_placeholder: train_x,
                                        targets: train_y
                                    })

        print(self.session.run([train, cost],
                               feed_dict={
                                   ladder.input_placeholder: train_x,
                                   targets: train_y
                               }))
        print(self.session.run([train, cost],
                               feed_dict={
                                   ladder.input_placeholder: train_x,
                                   targets: train_y
                               }))
        print(self.session.run([train, cost],
                               feed_dict={
                                   ladder.input_placeholder: train_x,
                                   targets: train_y
                               }))

        _, cost2 = self.session.run([train, cost],
                                    feed_dict={
                                        ladder.input_placeholder: train_x,
                                        targets: train_y
                                    })

        self.assertGreater(cost1, cost2, msg="Expected loss to reduce")
    def reconstruction_loss_for(self, output_nodes):
        data = self.mnist_data
        bw_layer1 = BackWeightLayer(InputLayer(784), output_nodes, non_liniarity=tf.nn.sigmoid, session=self.session,
                                    noise_std=0.3)

        cost = bw_layer1.unsupervised_cost_train()
        optimizer = tf.train.AdamOptimizer(0.1).minimize(cost)

        self.session.run(tf.initialize_all_variables())

        end_epoch = data.train.epochs_completed + 5

        while data.train.epochs_completed <= end_epoch:
            train_x, train_y = data.train.next_batch(100)
            self.session.run(optimizer, feed_dict={bw_layer1.input_placeholder: train_x})

        result = self.session.run(bw_layer1.unsupervised_cost_predict(),
                                  feed_dict={bw_layer1.input_placeholder: data.train.features})
        print("denoising with %s hidden nodes had cost %s" % (output_nodes, result))
        return result
    def test_mnist(self):
        data = self.mnist_data
        input = InputLayer(self.MNIST_INPUT_NODES)
        d_1 = DuelStateReluLayer(input, 3, width_regularizer_constant=1e-7, width_binarizer_constant=1e-10,
                                 session=self.session)

        d_2 = DuelStateReluLayer(d_1, 3, width_regularizer_constant=1e-7, width_binarizer_constant=1e-10, )
        output = HiddenLayer(d_2, self.MNIST_OUTPUT_NODES)

        trainer = CategoricalTrainer(output, 0.1)
        end_epoch = data.train.epochs_completed + 20

        while data.train.epochs_completed <= end_epoch:
            train_x, train_y = data.train.next_batch(100)
            trainer.train(train_x, train_y)

        accuracy, cost = trainer.accuracy(data.test.features, data.test.labels)
        print(accuracy, cost)
        print("active nodes ", d_1.active_nodes())
        self.assertGreater(accuracy, 70.)
    def test_reshape(self):
        convolution_nodes = (4, 4, 8)
        input_vals = np.random.normal(size=(1, 20, 20, 3)).astype(np.float32)
        layer = ConvolutionalLayer(InputLayer((20, 20, 3)),
                                   convolution_nodes,
                                   session=self.session)

        result1 = self.session.run(
            layer.activation_predict,
            feed_dict={layer.input_placeholder: input_vals})

        layer.resize(9)
        result2 = self.session.run(
            layer.activation_predict,
            feed_dict={layer.input_placeholder: input_vals})

        print(result1)
        print(result2)

        self.assertEqual(result2.shape[3], 9)
Example 14
    def test_resize_with_batch_norm_and_2_layers_resize_3(self):
        input_layer = InputLayer(self.mnist_data.features_shape)
        layer1 = HiddenLayer(input_layer, 2, session=self.session, batch_normalize_input=True)
        layer2 = HiddenLayer(layer1, 3, session=self.session, batch_normalize_input=True)

        optimizer = tf.train.AdamOptimizer()
        loss = optimizer.minimize(layer2.activation_predict)
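        # the "loss" here is just the raw activation; the test only needs the optimizer's slot variables to exist across the resize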
        self.session.run(tf.initialize_variables(list(get_tf_optimizer_variables(optimizer))))
        self.session.run(loss,
                         feed_dict={input_layer.input_placeholder: self.mnist_data.train.features[:3],
                                    })

        layer1.resize(4)

        optimizer2 = tf.train.AdamOptimizer()
        loss2 = optimizer2.minimize(layer2.activation_predict)
        self.session.run(tf.initialize_variables(list(get_tf_optimizer_variables(optimizer2))))
        self.session.run(loss2,
                         feed_dict={input_layer.input_placeholder: self.mnist_data.train.features[:3],
                                    })
Example 15
    def test_resize_with_batch_norm_resize(self):
        input_layer = InputLayer(self.mnist_data.features_shape)
        layer = HiddenLayer(input_layer, 2, session=self.session, batch_normalize_input=True)
        output = CategoricalOutputLayer(layer, self.mnist_data.labels_shape, batch_normalize_input=False)

        # output.train_till_convergence(self.mnist_data.train, learning_rate=0.1)
        optimizer = tf.train.AdamOptimizer()
        loss = optimizer.minimize(output.activation_predict)
        self.session.run(tf.initialize_variables(list(get_tf_optimizer_variables(optimizer))))
        self.session.run(loss,
                         feed_dict={output.input_placeholder: self.mnist_data.train.features[:3],
                                    output.target_placeholder: self.mnist_data.train.labels[:3]})

        layer.resize(3)

        optimizer2 = tf.train.AdamOptimizer()
        loss2 = optimizer2.minimize(output.activation_predict)
        self.session.run(tf.initialize_variables(list(get_tf_optimizer_variables(optimizer2))))
        self.session.run(loss2,
                         feed_dict={output.input_placeholder: self.mnist_data.train.features[:3],
                                    output.target_placeholder: self.mnist_data.train.labels[:3]})
Example 16
    def test_reshape(self):
        input_vals = np.random.normal(size=(1, 2, 2, 1)).astype(np.float32)

        convolution_nodes = (2, 2, 2)
        input_p = tf.placeholder("float", (None, 2, 2, 1))
        layer = ConvolutionalLayer(InputLayer(input_p),
                                   convolution_nodes,
                                   session=self.session)
        flatten = FlattenLayer(layer, session=self.session)
        result1 = self.session.run(
            layer.activation_predict,
            feed_dict={flatten.input_placeholder: input_vals})

        layer.resize(3)
        result2 = self.session.run(
            layer.activation_predict,
            feed_dict={flatten.input_placeholder: input_vals})

        print(result1)
        print(result2)

        self.assertEqual(result2.shape[3], 3)
Example 17
    def test_adding_hidden_layer_with_resize(self):
        non_liniarity = tf.nn.relu
        regularizer_coeff = None
        layer = InputLayer(self.mnist_data.features_shape)

        layer = HiddenLayer(layer, 100, self.session, non_liniarity=non_liniarity,
                            batch_normalize_input=False)

        output = CategoricalOutputLayer(layer, self.mnist_data.labels_shape, self.session,
                                        batch_normalize_input=True,
                                        regularizer_weighting=regularizer_coeff)

        output.train_till_convergence(self.mnist_data.train, self.mnist_data.validation,
                                      learning_rate=.1)

        layer.add_intermediate_cloned_layer()
        layer.resize(110)

        self.session.run(output.activation_predict,
                         feed_dict={output.input_placeholder: self.mnist_data.train.features[:3],
                                    output.target_placeholder: self.mnist_data.train.labels[:3]})
Example 18
    def test_remove_unimportant_nodes_does_not_affect_test_error(self):
        data = self.mnist_data
        batch_normalize = False
        input_layer = InputLayer(data.features_shape, drop_out_prob=None)
        layer = HiddenLayer(input_layer, 800, session=self.session,
                            batch_normalize_input=batch_normalize,
                            # D.S TODO TEST
                            node_importance_func=node_importance_optimal_brain_damage)
        output = CategoricalOutputLayer(layer, data.labels_shape, batch_normalize_input=batch_normalize)

        output.train_till_convergence(data.train, data.test, learning_rate=0.001)

        _, _, target_loss_before_resize = output.evaluation_stats(data.test)  # Should this be on test or train?

        print(target_loss_before_resize)

        layer.resize(795, data_set_validation=data.test)

        _, _, target_loss_after_resize = output.evaluation_stats(data.test)

        print(target_loss_after_resize)

        self.assertAlmostEqual(target_loss_before_resize, target_loss_after_resize, delta=10.0)
Example 19
    def test_layer_noisy_input_activation(self):
        input_size = 100
        noise_std = 1.
        input_p = tf.placeholder("float", (None, input_size))
        layer = HiddenLayer(InputLayer(input_p), input_size,
                            weights=np.diag(np.ones(input_size, dtype=np.float32)),
                            bias=np.zeros(input_size, dtype=np.float32),
                            session=self.session,
                            non_liniarity=tf.identity,
                            layer_noise_std=noise_std)

        result_noisy = self.session.run(layer.activation_train,
                                        feed_dict={
                                            input_p: np.ones(input_size, dtype=np.float32).reshape((1, input_size))})

        self.assertAlmostEqual(result_noisy.std(), noise_std, delta=noise_std / 5.,
                               msg="the result std should be the noise_std")

        result_clean = self.session.run(layer.activation_predict, feed_dict={
            input_p: np.ones(input_size, dtype=np.float32).reshape((1, input_size))})

        self.assertAlmostEqual(result_clean.std(), 0., places=7,
                               msg="There should be no noise in the activation")
    def test_mnist_start_large(self):
        data = self.mnist_data

        input_layer = InputLayer(784)
        hidden_1 = DuelStateReluLayer(input_layer, 200, session=self.session, inactive_nodes_to_leave=200)
        output = HiddenLayer(hidden_1, self.MNIST_OUTPUT_NODES, session=self.session)
        trainer = CategoricalTrainer(output, 0.1)

        end_epoch = data.train.epochs_completed + 5

        print(trainer.accuracy(data.test.features, data.test.labels))

        while data.train.epochs_completed <= end_epoch:
            train_x, train_y = data.train.next_batch(100)
            trainer.train(train_x, train_y)

        accuracy, cost = trainer.accuracy(data.test.features, data.test.labels)
        print(accuracy, cost)
        # print(output.active_nodes())
        print(hidden_1.active_nodes())

        self.assertGreater(accuracy, 90.)
        # self.assertEqual(output.active_nodes(), self.MNIST_OUTPUT_NODES, msg='expect all output nodes to be active')
        self.assertLess(hidden_1.active_nodes(), hidden_1.output_nodes, msg='expect not all hidden nodes to be active')
Example 21
 def setUp(self):
     super(BaseLayerWrapper.BaseLayerTestCase, self).setUp()
     self._input_layer = InputLayer(self.INPUT_NODES)
Example 22
    def test_create_layer(self):
        input_p = tf.placeholder("float", (None, 10, 10, 4))
        layer = FlattenLayer(InputLayer(input_p), session=self.session)

        self.assertEqual(layer.activation_predict.get_shape().as_list(),
                         [None, 10 * 10 * 4])
Example 23
    def test_create_layer(self):
        output_nodes = 20
        input_p = tf.placeholder("float", (None, 10))
        layer = HiddenLayer(InputLayer(input_p), output_nodes, session=self.session)

        self.assertEqual(layer.activation_predict.get_shape().as_list(), [None, output_nodes])
Example 24
from tensor_dynamic.layers.max_pool_layer import MaxPoolLayer

data_set_collection = get_cifar_100_data_set_collection(validation_ratio=.15)
from tensor_dynamic.node_importance import node_importance_optimal_brain_damage, node_importance_by_removal, \
    node_importance_by_real_activation_from_input_layer_variance

# data_set_collection = get_mnist_data_set_collection(validation_ratio=.15)

for run_number in range(0, 30):
    with tf.Session() as session:
        non_liniarity = tf.nn.relu

        regularizer_coeff = 0.01
        noise = 1.
        last_layer = InputLayer(data_set_collection.features_shape,
                                # drop_out_prob=.5,
                                # layer_noise_std=1.
                                )

        last_layer = ConvolutionalLayer(
            last_layer, (4, 4, 3),
            session=session,
            node_importance_func=
            node_importance_by_real_activation_from_input_layer_variance,
            batch_normalize_input=True,
            layer_noise_std=noise)

        last_layer = MaxPoolLayer(last_layer)

        if len(data_set_collection.features_shape) > 1:
            last_layer = FlattenLayer(last_layer, session)
Example 25
def main(file_name_all="pruning_tests_noise_%s-%s-%s.csv" % (LAYER_NOISE_STD, start, end), file_name_avg="pruning_tests_avg_noise_%s-%s-%s.csv" % (LAYER_NOISE_STD, start, end)):
    data_set_collections = [get_mnist_data_set_collection(validation_ratio=.15),
                            get_cifar_100_data_set_collection(validation_ratio=.15)]
    methods = [node_importance_by_dummy_activation_from_input_layer,
               node_importance_by_real_activation_from_input_layer,
               node_importance_by_square_sum,
               node_importance_by_removal,
               node_importance_random,
               node_importance_optimal_brain_damage,
               node_importance_full_taylor_series,
               node_importance_by_real_activation_from_input_layer_variance,
               node_importance_error_derrivative,
               dummy_random_weights
               ]

    final_dict = defaultdict(lambda: [])
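    # final_dict maps each node-importance method name to the list of loss tuples recorded for it below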

    with open(file_name_all, 'w') as result_file:
        result_file.write(
            'method, data_set, before_prune_train, before_prune_validation, before_prune_test, after_prune_train, after_prune_validation, after_prune_test, after_converge_train, after_converge_validation, after_converge_test, converge_iterations\n')
        for data in data_set_collections:
            for _ in range(NUM_TRIES):
                tf.reset_default_graph()
                with tf.Session() as session:
                    input_layer = InputLayer(data.features_shape)

                    if len(data.features_shape) > 1:
                        input_layer = FlattenLayer(input_layer)

                    layer = HiddenLayer(input_layer, start, session=session,
                                        layer_noise_std=LAYER_NOISE_STD,
                                        node_importance_func=None,
                                        non_liniarity=tf.nn.relu,
                                        batch_normalize_input=True)
                    output = CategoricalOutputLayer(layer, data.labels_shape,
                                                    batch_normalize_input=True,
                                                    regularizer_weighting=0.01,
                                                    layer_noise_std=LAYER_NOISE_STD
                                                    )

                    output.train_till_convergence(data.train, data.validation, learning_rate=0.0001)

                    state = output.get_network_state()
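                    # the saved state lets every importance method below start from the same converged weights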

                    for method in methods:
                        output.set_network_state(state)
                        layer._node_importance_func = method

                        _, _, target_loss_test_before_resize_test = output.evaluation_stats(data.test)
                        _, _, target_loss_test_before_resize_validation = output.evaluation_stats(data.validation)
                        _, _, target_loss_test_before_resize_train = output.evaluation_stats(data.train)

                        no_splitting_or_pruning = method == dummy_random_weights
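                        # dummy_random_weights is the baseline case: the resize then skips node pruning and splitting entirely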

                        layer.resize(end, data_set_train=data.train,
                                     data_set_validation=data.validation,
                                     no_splitting_or_pruning=no_splitting_or_pruning)

                        _, _, target_loss_test_after_resize_test = output.evaluation_stats(data.test)
                        _, _, target_loss_test_after_resize_validation = output.evaluation_stats(data.validation)
                        _, _, target_loss_test_after_resize_train = output.evaluation_stats(data.train)

                        error, iterations = output.train_till_convergence(data.train, data.validation,
                                                                          learning_rate=0.0001)

                        _, _, after_converge_test = output.evaluation_stats(data.test)
                        _, _, after_converge_validation = output.evaluation_stats(data.validation)
                        _, _, after_converge_train = output.evaluation_stats(data.train)

                        final_dict[method.__name__].append((target_loss_test_before_resize_train,
                                                            target_loss_test_before_resize_validation,
                                                            target_loss_test_before_resize_test,
                                                            target_loss_test_after_resize_train,
                                                            target_loss_test_after_resize_validation,
                                                            target_loss_test_after_resize_test,
                                                            after_converge_train,
                                                            after_converge_validation,
                                                            after_converge_test))

                        result_file.write('%s,%s,%s,%s,%s,%s,%s,%s, %s, %s, %s, %s\n' % (
                            method.__name__, data.name, target_loss_test_before_resize_train,
                            target_loss_test_before_resize_validation,
                            target_loss_test_before_resize_test,
                            target_loss_test_after_resize_train,
                            target_loss_test_after_resize_validation,
                            target_loss_test_after_resize_test,
                            after_converge_train,
                            after_converge_validation,
                            after_converge_test,
                            iterations))
                        result_file.flush()

    with open(file_name_avg, "w") as file_avg:
        file_avg.write(
            'method, before_prune_train, before_prune_validation, before_prune_test, after_prune_train, after_prune_validation, after_prune_test, after_converge_train, after_converge_validation, after_converge_test, test_diff\n')
        for name, values in final_dict.items():
            v_len = float(len(values))
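            # next, take the column-wise mean of every recorded metric for this method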
            averages = tuple(sum(x[i] for x in values) / v_len for i in range(len(values[0])))
            averages = averages + (averages[2] - averages[-2],)
            file_avg.write('%s,%s,%s,%s,%s,%s,%s,%s, %s, %s, %s\n' % ((name,) + averages))
Example 26
    def test_mnist(self):
        import tensor_dynamic.data.mnist_data as mnist

        num_labeled = 100
        data = mnist.get_mnist_data_set_collection(
            "../data/MNIST_data",
            number_labeled_examples=num_labeled,
            one_hot=True)

        batch_size = 100
        num_epochs = 1
        num_examples = 60000
        num_iter = (num_examples // batch_size) * num_epochs
        starter_learning_rate = 0.02
        inputs = tf.placeholder(tf.float32, shape=(None, 784))
        targets = tf.placeholder(tf.float32)

        with tf.Session() as s:
            s.as_default()
            i = InputLayer(inputs)
            l1 = LadderLayer(i, 500, 1000.0, s)
            l2 = LadderGammaLayer(l1, 10, 10.0, s)
            ladder = LadderOutputLayer(l2, 0.1, s)

            loss = ladder.cost_all_layers_train(targets)
            learning_rate = tf.Variable(starter_learning_rate, trainable=False)
            train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)

            bn_updates = tf.group(*(l1.bn_assigns + l2.bn_assigns))
            with tf.control_dependencies([train_step]):
                train_step = tf.group(bn_updates)
            pred_cost = -tf.reduce_mean(
                tf.reduce_sum(
                    targets * tf.log(
                        tf.clip_by_value(ladder.activation_predict, 1e-10,
                                         1.0)), 1))  # cost used for prediction

            correct_prediction = tf.equal(
                tf.argmax(ladder.activation_predict, 1),
                tf.argmax(targets, 1))  # no of correct predictions
            accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                              "float")) * tf.constant(100.0)

            s.run(tf.initialize_all_variables())

            #print "init accuracy", s.run([accuracy], feed_dict={inputs: data.test.images, targets: data.test.labels})

            min_loss = 100000.

            writer = tf.train.SummaryWriter("/tmp/td", s.graph_def)
            writer.add_graph(s.graph_def)

            for i in range(num_iter):
                images, labels = data.train.next_batch(batch_size)
                _, loss_val = s.run([train_step, loss],
                                    feed_dict={
                                        inputs: images,
                                        targets: labels
                                    })

                if loss_val < min_loss:
                    min_loss = loss_val
                print(i, loss_val)

                # print "acc", s.run([accuracy], feed_dict={inputs: data.test.images, targets: data.test.labels})

            #acc = s.run(accuracy, feed_dict={inputs: data.test.images, targets: data.test.labels})
            print "min loss", min_loss
            #print "final accuracy ", acc
            self.assertLess(min_loss, 20.0)
Example 27
from tensor_dynamic.layers.convolutional_layer import ConvolutionalLayer
from tensor_dynamic.layers.flatten_layer import FlattenLayer
from tensor_dynamic.layers.input_layer import InputLayer
from tensor_dynamic.layers.hidden_layer import HiddenLayer
from tensor_dynamic.layers.max_pool_layer import MaxPoolLayer
from tensor_dynamic.layers.output_layer import OutputLayer
from tensor_dynamic.layers.binary_output_layer import BinaryOutputLayer
from tensor_dynamic.layers.categorical_output_layer import CategoricalOutputLayer

data_set_collection = get_two_spirals_data_set_collection()

with tf.Session() as session:
    non_liniarity = tf.nn.tanh

    regularizer_coeff = 0.001
    last_layer = InputLayer(data_set_collection.features_shape, session)

    last_layer = HiddenLayer(last_layer, 5, session, non_liniarity=non_liniarity)

    last_layer = HiddenLayer(last_layer, 5, session, non_liniarity=non_liniarity)

    last_layer = HiddenLayer(last_layer, 5, session, non_liniarity=non_liniarity)
    #
    # last_layer = Layer(last_layer, 300, session, non_liniarity=non_liniarity)
    #
    # last_layer = Layer(last_layer, 300, session, non_liniarity=non_liniarity)
    #
    # last_layer = Layer(last_layer, 300, session, non_liniarity=non_liniarity)

    output = BinaryOutputLayer(last_layer, session, regularizer_weighting=regularizer_coeff)
Example 28
batch_size = 100

data = mnist.get_mnist_data_set_collection("../data/MNIST_data",
                                           one_hot=True,
                                           validation_size=5000)

with tf.Session() as sess:
    inputs = tf.placeholder(tf.float32, shape=(None, 784))

    bactivate = True
    noise_std = 0.3
    beta = 0.5
    gamma = 0.5
    non_lin = tf.nn.sigmoid
    input_layer = InputLayer(inputs)
    bn1 = BatchNormLayer(input_layer, sess, beta=beta, gamma=gamma)
    net1 = HiddenLayer(bn1,
                       1,
                       sess,
                       non_liniarity=non_lin,
                       bactivate=bactivate,
                       unsupervised_cost=.001,
                       noise_std=noise_std)
    bn2 = BatchNormLayer(net1, sess, beta=beta, gamma=gamma)
    net2 = HiddenLayer(bn2,
                       1,
                       sess,
                       non_liniarity=non_lin,
                       bactivate=bactivate,
                       unsupervised_cost=.001,
Example 29
env = gym.make('CartPole-v0')

ACTIONS_COUNT = 2
FUTURE_REWARD_DISCOUNT = 0.9
LEARN_RATE = 0.01
STORE_SCORES_LEN = 20  # deque(maxlen=...) below needs an int
TOPOLOGY_UPDATE_EVERY_X = 50.
GAMES_PER_TRAINING = 3
INPUT_NODES = env.observation_space.shape[0]

HIDDEN_NODES = 1

session = tf.Session()

input_layer = InputLayer(INPUT_NODES, session)
hidden_layer = HiddenLayer(input_layer, HIDDEN_NODES, session)
output_layer = OutputLayer(hidden_layer, ACTIONS_COUNT, session)

best_state, best_score = output_layer.get_network_state(), -1

action_placeholder = output_layer.target_placeholder
advantage_placeholder = tf.placeholder("float", [None, 1])

policy_gradient = tf.reduce_mean(advantage_placeholder * action_placeholder *
                                 tf.log(output_layer.activation_predict))
actor_train_operation = tf.train.AdamOptimizer(LEARN_RATE).minimize(
    -policy_gradient)
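# the policy gradient above is a REINFORCE-style objective: advantage-weighted log-probability of the taken action, maximised by minimising its negation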

scores = deque(maxlen=STORE_SCORES_LEN)
Example 30
data = mnist.get_mnist_data_set_collection("../data/MNIST_data",
                                           number_labeled_examples=num_labeled,
                                           one_hot=True)

NOISE_STD = 0.3
batch_size = 100
num_epochs = 1
num_examples = 60000
num_iter = (num_examples // batch_size) * num_epochs
learning_rate = 0.1
inputs = tf.placeholder(tf.float32, shape=(None, 784))
targets = tf.placeholder(tf.float32)

with tf.Session() as s:
    s.as_default()
    i = InputLayer(inputs, layer_noise_std=NOISE_STD)
    l1 = LadderLayer(i, 500, 1000.0, s)
    l2 = LadderGammaLayer(l1, 10, 10.0, s)
    ladder = LadderOutputLayer(l2, 0.1, s)
    l3 = ladder

    assert int(i.z.get_shape()[-1]) == 784
    assert int(l1.z_corrupted.get_shape()[-1]) == 500
    assert int(l2.z_corrupted.get_shape()[-1]) == 10

    assert int(l3.z_est.get_shape()[-1]) == 10
    assert int(l2.z_est.get_shape()[-1]) == 500
    assert int(l1.z_est.get_shape()[-1]) == 784

    assert int(l1.mean_corrupted_unlabeled.get_shape()[0]) == 500
    assert int(l2.mean_corrupted_unlabeled.get_shape()[0]) == 10