def test_output(self):
        """The network output for a single biased identity layer must equal
        the manually-built graph: identity(matmul(x, W) + b)."""
        n_in = 10
        n_out = 20

        x = tf.placeholder(dtype=tf.float32, shape=[None, n_in])

        in_layer = Layer(n_units=n_in, activation=x)
        net = NeuralNetwork(in_layer)

        # attach an identity layer (with bias) on top of the input layer
        identity_layer = Layer(n_out, activation=tf.identity)
        net.add_layer(identity_layer, biased=True)

        # rebuild the same computation by hand from the network's parameters
        w = net.weights(0, 1)
        manual_out = identity_layer.activation(tf.add(tf.matmul(x, w), net.biases(1)))

        init_op = tf.initialize_all_variables()
        with tf.Session() as session:
            session.run(init_op)

            inputs = {x: np.ones((1, n_in), dtype=np.float32)}

            actual = session.run(net.output(), feed_dict=inputs)
            expected = session.run(manual_out, feed_dict=inputs)

            self.assertTrue(np.array_equal(actual, expected))
    def test_equivalent_networks(self):
        """
        Two networks that compute the same function must produce equal outputs.

            net_a = x -> div(x/4) -> sigmoid -> out
            net_b = x -> sigmoid -> out

        Both sigmoid layers use the same shared weight variable, and the
        div layer uses all-ones weights on an all-ones input, so the two
        graphs are numerically equivalent.
        """
        x = tf.ones([1, 4])
        in_layer = Layer(n_units=4, activation=x)
        ones_w = tf.ones([4, 4])
        w_shared = tf.Variable(normalised_weight_init(4, 4))

        # network a: ones-weights averaging layer followed by a sigmoid layer
        net_a = NeuralNetwork(in_layer)
        div_layer = Layer(4, lambda units: tf.div(units, 4))
        sig_layer_a = Layer(4, tf.nn.sigmoid)
        net_a.add_layer(div_layer, biased=False, shared_weights=ones_w)
        net_a.add_layer(sig_layer_a, biased=False, shared_weights=w_shared)

        self.assertEqual(net_a.graph.number_of_edges(), 2)
        self.assertEqual(net_a.size(), 3)

        # network b: single sigmoid layer reusing the same shared weights
        net_b = NeuralNetwork(Layer(4, x))
        sig_layer_b = Layer(4, tf.nn.sigmoid)
        net_b.add_layer(sig_layer_b, biased=False, shared_weights=w_shared)

        self.assertEqual(net_b.size(), 2)
        self.assertEqual(net_b.graph.number_of_edges(), 1)

        init_op = tf.initialize_all_variables()

        with tf.Session() as session:
            session.run(init_op)

            w_a = session.run(net_a.weights(1, 2))
            w_b = session.run(net_b.weights(0, 1))

            # shared weights should be the same in both networks
            self.assertTrue(np.array_equal(w_a, w_b))

            out_a = session.run(net_a.output())
            out_b = session.run(net_b.output())

            # outputs should be the same since the networks are equivalent
            self.assertTrue(np.array_equal(out_a, out_b))
# Example 3
# Example script: train a one-layer softmax classifier on MNIST with tensorx.
# NOTE(review): `mnist` is referenced below but not defined in this chunk —
# presumably loaded earlier via the TF examples input_data helper; confirm.
ss = tf.InteractiveSession()

# MNIST images are flattened to 784-dimensional float vectors.
n_inputs = 784
x = tf.placeholder(tf.float32, [None, n_inputs], name = "x")

# create neural network with tensorx
input_layer = Layer(n_units = n_inputs, activation = x)
network = NeuralNetwork(input_layer)
# 10 output units (one per digit class) with a softmax activation.
output_layer = Layer(10, tf.nn.softmax)
network.add_layer(output_layer, biased=True)

# train
target_output = tf.placeholder("float", shape=[None, 10])
# loss function
network_output = network.output()
# Cross-entropy loss; clipping keeps log() away from 0 to avoid NaN gradients.
cross_entropy = -tf.reduce_sum(target_output*tf.log(tf.clip_by_value(network_output,1e-50,1.0)))
train_step_rate = 0.003 # default: 0.003 -> accuracy ~ 0.9162 (step 999)
train_step = tf.train.GradientDescentOptimizer(train_step_rate).minimize(cross_entropy)

# test
# A prediction is correct when the argmax class matches the one-hot target.
correct_prediction = tf.equal(tf.argmax(network_output,1), tf.argmax(target_output,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# run train and test
tf.initialize_all_variables().run()

# Mini-batch SGD loop: 1000 steps of 100 examples each.
n_steps = 1000
for i in range(n_steps):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    feed = {x: batch_xs, target_output: batch_ys}
    # NOTE(review): the loop body appears truncated here — the
    # ss.run(train_step, feed_dict=feed) call presumably follows; verify
    # against the full original file.