def test_increase_layers_until_stop_decreasing_test_error(self):
        data = self.mnist_data
        input = InputLayer(784)
        hidden = Layer(input, HIDDEN_NOES, self.session, non_liniarity=tf.sigmoid, bactivate=False)
        output = Layer(hidden, 10, self.session, non_liniarity=tf.sigmoid, bactivate=False, supervised_cost=1.0)

        best_score = train_until_no_improvement_for_epochs(data, output, 3)

        for hidden_layer_count in range(1, 10):
            print("hidden_layers {0} best_score {1}".format(hidden_layer_count, best_score))

            candidate = output.clone()
            last_hidden_layer = candidate.last_layer.input_layer
            last_hidden_layer.add_intermediate_layer(
                lambda input_layer: Layer(
                    input_layer, HIDDEN_NOES, self.session, non_liniarity=tf.sigmoid, bactivate=False
                )
            )

            new_best_score = train_until_no_improvement_for_epochs(data, candidate, 3)

            if new_best_score > best_score:
                # failed to get improvement
                print("failed to get improvement with layer {0}".format(hidden_layer_count))
                break
            else:
                best_score = new_best_score
                output = candidate
# NOTE(review): web-scrape artifact ("Exemplo n.º 2" / stray "0") removed here;
# the following method comes from a different example and is unrelated to the one above.
    def test_clone(self):
        """Cloning a layered net yields a distinct object graph of equal size.

        Builds a Layer/BatchNormLayer stack, clones from the output layer,
        then checks the clone (and its input layer) are different objects
        from the originals while the total layer count is preserved.
        """
        inputs = tf.placeholder(tf.float32, shape=(None, 784))

        # Alternate Layer / BatchNormLayer; each local names a distinct layer
        # (the original rebound `net2`, which obscured the graph being built).
        net1 = InputLayer(inputs)
        bn1 = BatchNormLayer(net1, self.session)
        net2 = Layer(bn1, 8, self.session)
        bn2 = BatchNormLayer(net2, self.session)
        net3 = Layer(bn2, 6, self.session)
        bn3 = BatchNormLayer(net3, self.session)
        net4 = Layer(bn3, 4, self.session)
        output_net = Layer(net4, 2, self.session)

        cloned_net = output_net.clone(self.session)

        # assertNotEqual (not the deprecated `assertNotEquals` alias, removed
        # in Python 3.12).
        self.assertNotEqual(cloned_net, output_net)
        self.assertNotEqual(cloned_net.input_layer, output_net.input_layer)
        self.assertEqual(len(list(cloned_net.all_layers)), len(list(output_net.all_layers)))