def test_resize(self):
    """Resizing a layer by one node updates both the predict-activation
    shape and the layer's reported output_nodes count."""
    # NOTE(review): another test named test_resize appears later in this
    # file — if both live in the same TestCase class, one silently
    # shadows the other and never runs; consider renaming.
    output_nodes = 10
    input_p = tf.placeholder("float", (None, 10))
    layer = Layer(InputLayer(input_p), output_nodes, session=self.session)

    layer.resize(output_nodes + 1)

    # was a Python 2 print statement (syntax error on Python 3)
    print(layer._bias.get_shape())
    self.assertEqual(layer.activation_predict.get_shape().as_list(),
                     [None, output_nodes + 1])
    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(layer.output_nodes, output_nodes + 1)
def test_resize(self):
    """Resizing a mid-network layer that feeds a batch-norm layer must
    leave the downstream network runnable.

    The original test only printed the forward-pass results and could
    fail solely by raising; explicit assertions are added so a silent
    wrong-shape result is caught too.
    """
    inputs = tf.placeholder(tf.float32, shape=(None, 784))
    bactivate = True
    net1 = InputLayer(inputs)
    net2 = Layer(net1, 10, self.session, bactivate=bactivate)
    bn1 = BatchNormLayer(net2, self.session)
    output_net = Layer(bn1, 10, self.session, bactivate=False)

    zeros = np.zeros(shape=(1, 784))
    result_before = self.session.run(output_net.activation_predict,
                                     feed_dict={inputs: zeros})
    # output_net is built with 10 nodes, so one input row yields (1, 10)
    self.assertEqual(result_before.shape, (1, 10))

    net2.resize(net2.output_nodes + 1)

    # the network must still run end-to-end after the upstream resize
    result_after = self.session.run(output_net.activation_predict,
                                    feed_dict={inputs: zeros})
    self.assertIsNotNone(result_after)
def test_reshape(self):
    """resize(3) on a 2-node layer grows the predict activation to width 3."""
    output_nodes = 2
    input_p = tf.placeholder("float", (None, 2))
    # NOTE(review): weights has shape (1, 1) for a 2->2 layer — presumably
    # Layer broadcasts or reinitializes it; confirm against the Layer
    # implementation.
    layer = Layer(InputLayer(input_p), output_nodes, session=self.session,
                  weights=np.array([[100.0]], dtype=np.float32))

    result1 = self.session.run(layer.activation_predict,
                               feed_dict={layer.input_placeholder: [[1., 1.]]})
    layer.resize(3)
    result2 = self.session.run(layer.activation_predict,
                               feed_dict={layer.input_placeholder: [[1., 1.]]})

    print(result1)
    print(result2)
    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(len(result2[0]), 3)