    def test_copy(self):
        in_size = 3
        out_size = 3
        x = tf.placeholder(tf.float32, [None, in_size])
        net1 = Perceptron(
            name_or_scope="p1",
            input_tensor=x,
            input_size=in_size,
            output_size=out_size,
        )

        self.sess.run(tf.global_variables_initializer())

        net2 = net1.get_copy(name_or_scope="p2")
        input_value = np.random.rand(1, in_size)

        feed = {
            x: input_value,
        }

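        # Initialize again so that net2's newly created variables get values;
        # the two networks are initialized independently, so their outputs
        # should differ until the parameters are explicitly synced below.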
        self.sess.run(tf.global_variables_initializer())

        out1 = self.sess.run(net1.output, feed)
        out2 = self.sess.run(net2.output, feed)
        self.assertNpArraysNotAlmostEqual(out1, out2)

        net2.set_param_values(net1.get_param_values())
        out1 = self.sess.run(net1.output, feed)
        out2 = self.sess.run(net2.output, feed)
        self.assertNpArraysAlmostEqual(out1, out2)
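    # Builds an MLP: a stack of Perceptron hidden layers, each passed through
    # _process_layer (presumably applying activations and/or batch norm),
    # followed by a final linear projection to output_size.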
    def _create_network_internal(self, input_tensor=None):
        assert input_tensor is not None
        input_tensor = self._process_layer(input_tensor,
                                           scope_name="input_tensor")
        in_size = self.input_size
        for layer, next_size in enumerate(self.hidden_sizes):
            p = Perceptron(
                'p{0}'.format(layer),
                input_tensor,
                in_size,
                next_size,
                W_name=self.W_name,
                b_name=self.b_name,
                W_initializer=self.W_initializer,
                b_initializer=self.b_initializer,
                batch_norm_config=self._batch_norm_config,
            )
            input_tensor = self._add_subnetwork_and_get_output(p)
            input_tensor = self._process_layer(input_tensor)
            in_size = next_size
        return tf_util.linear(
            input_tensor,
            in_size,
            self.output_size,
            W_name=self.W_name,
            b_name=self.b_name,
            W_initializer=self.W_initializer,
            b_initializer=self.b_initializer,
        )
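    # A minimal usage sketch, assuming this method lives on an MLP-style
    # NeuralNetwork subclass (the class name and constructor below are
    # hypothetical, not taken from the snippets above):
    #
    #   x = tf.placeholder(tf.float32, [None, 3])
    #   mlp = MlpNetwork("mlp", input_tensor=x, input_size=3,
    #                    hidden_sizes=(32, 32), output_size=1)
    #   y = mlp.output  # output of the final tf_util.linear layer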
    def test_batch_norm_off_is_a_noop(self):
        in_size = 1
        out_size = 1
        W_name = "w"
        W_initializer = tf.constant_initializer(value=np.eye(1))
        b_name = "b"
        b_initializer = tf.constant_initializer(value=np.array([0]))
        input_layer = tf.placeholder(tf.float32, shape=(None, in_size))

        perceptron = Perceptron(
            "perceptron",
            input_layer,
            in_size,
            out_size,
            W_name=W_name,
            W_initializer=W_initializer,
            b_name=b_name,
            b_initializer=b_initializer,
            batch_norm_config=None,
        )

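        # With an identity weight, a zero bias, and batch norm disabled, the
        # perceptron should act as the identity map in both modes.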
        input_values = np.array([[-2], [2]])

        output = perceptron.output
        self.sess.run(tf.global_variables_initializer())
        values = self.sess.run(output, {input_layer: input_values})
        expected_values = np.array([[-2], [2]])
        self.assertNpArraysEqual(values, expected_values)

        training_output = perceptron.training_output
        training_values = self.sess.run(training_output,
                                        {input_layer: input_values})
        expected_training_values = np.array([[-2], [2]])
        self.assertNpArraysEqual(training_values, expected_training_values)
    def test_batch_norm_variables_are_saved(self):
        in_size = 1
        out_size = 1
        input_layer = tf.placeholder(tf.float32, shape=(None, in_size))

        perceptron = Perceptron(
            "perceptron",
            input_layer,
            in_size,
            out_size,
            batch_norm_config=BatchNormConfig(),
        )
        self.sess.run(tf.global_variables_initializer())

        params = perceptron.get_params()
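        # W and b, plus presumably the four batch norm variables (scale,
        # offset, population mean, and population variance).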
        self.assertEqual(6, len(params))
    def test_batch_norm_whitens_training_data(self):
        in_size = 1
        out_size = 1
        W_name = "w"
        W_initializer = tf.constant_initializer(value=np.eye(1))
        b_name = "b"
        b_initializer = tf.constant_initializer(value=np.array([0]))
        input_layer = tf.placeholder(tf.float32, shape=(None, in_size))

        perceptron = Perceptron(
            "perceptron",
            input_layer,
            in_size,
            out_size,
            W_name=W_name,
            W_initializer=W_initializer,
            b_name=b_name,
            b_initializer=b_initializer,
            batch_norm_config=BatchNormConfig(),
        )

        input_values = np.array([[-2, 2]]).T
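        # mean([-2, 2]) = 0 and variance([-2, 2]) = 4, so training-mode batch
        # norm should whiten the batch to approximately [-1, 1].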

        training_output = perceptron.training_output
        self.sess.run(tf.global_variables_initializer())
        training_values = self.sess.run(training_output,
                                        {input_layer: input_values})
        expected_training_values = np.array([[-1, 1]]).T
        self.assertNotEqual(perceptron.training_output, perceptron.output)
        self.assertNpArraysAlmostEqual(expected_training_values,
                                       training_values)
    def test_batch_norm_offset_and_scale_variables_change_correctly(self):
        in_size = 1
        out_size = 1
        input_layer = tf.placeholder(tf.float32, shape=(None, in_size))
        learning_rate = 0.5
        input_value = 0.75
        W_initializer = tf.constant_initializer(value=np.eye(1))

        scale_name = "test_scale"
        offset_name = "test_offset"
        perceptron = Perceptron(
            "perceptron",
            input_layer,
            in_size,
            out_size,
            W_initializer=W_initializer,
            batch_norm_config=BatchNormConfig(
                bn_scale_name=scale_name,
                bn_offset_name=offset_name,
            ),
        )
        self.sess.run(tf.global_variables_initializer())
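        # Take one gradient descent step on the eval output. Assuming the
        # population statistics start at mean 0 and variance 1, the normalized
        # activation equals the raw input, so d(output)/d(offset) = 1 and
        # d(output)/d(scale) = input_value.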
        sgd = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        self.sess.run(
            sgd.minimize(perceptron.output),
            feed_dict={input_layer: np.array([[input_value]])},
        )

        params = perceptron.get_params()
        scale_values = self.sess.run(
            [v for v in params if scale_name in v.name])
        assert len(scale_values) == 1
        scale_value = scale_values[0][0]
        offset_values = self.sess.run(
            [v for v in params if offset_name in v.name])
        assert len(offset_values) == 1
        offset_value = offset_values[0][0]

        # The gradient w.r.t. the offset is 1, so one gradient descent step
        # moves it from 0 to -learning_rate.
        self.assertAlmostEqual(offset_value, -learning_rate)
        # The gradient w.r.t. the scale is the normalized input, so the scale
        # moves from 1 to 1 - input_value * learning_rate.
        self.assertAlmostEqual(scale_value,
                               1 - input_value * learning_rate,
                               delta=1e-4)
    def test_randomize_variables(self):
        in_size = 5
        out_size = 1
        input_layer = tf.placeholder(tf.float32, shape=(None, in_size))

        perceptron = Perceptron(
            "perceptron",
            input_layer,
            in_size,
            out_size,
        )
        self.sess.run(tf.global_variables_initializer())

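        # After randomization, every parameter tensor should hold new values.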
        params = perceptron.get_params()
        old_values = self.sess.run(params)
        self.randomize_param_values(perceptron)
        new_values = self.sess.run(params)
        for old, new in zip(old_values, new_values):
            self.assertNpArraysNotAlmostEqual(old, new)
    def test_get_weight_tied_copy(self):
        in_size = 3
        out_size = 3
        net1_input = tf.placeholder(tf.float32, [None, in_size])
        net1 = Perceptron(
            name_or_scope="p1",
            input_tensor=net1_input,
            input_size=in_size,
            output_size=out_size,
        )

        self.sess.run(tf.global_variables_initializer())

        net2_input = tf.placeholder(tf.float32, [None, in_size])
        net2 = net1.get_weight_tied_copy(input_tensor=net2_input)
        input_value = np.random.rand(1, in_size)

        feed_1 = {
            net1_input: input_value,
        }
        feed_2 = {
            net2_input: input_value,
        }

        out1 = self.sess.run(net1.output, feed_1)
        out2 = self.sess.run(net2.output, feed_2)
        self.assertNpArraysAlmostEqual(out1, out2)

        # Output should be the same even after re-initializing parameters
        self.sess.run(tf.global_variables_initializer())

        out1 = self.sess.run(net1.output, feed_1)
        out2 = self.sess.run(net2.output, feed_2)
        self.assertNpArraysAlmostEqual(out1, out2)

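        # A weight-tied copy should share the exact same parameter variables,
        # not merely equal values.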
        params1 = net1.get_params_internal()
        params2 = net2.get_params_internal()
        self.assertEqual(params1, params2)
    def test_not_regularize_only_b(self):
        in_size = 5
        out_size = 1
        W_name = "w"
        b_name = "b"
        input_layer = tf.placeholder(tf.float32, shape=(1, in_size))

        perceptron = Perceptron(
            "perceptron",
            input_layer,
            in_size,
            out_size,
            W_name=W_name,
            b_name=b_name,
        )

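        # Requesting the non-regularizable parameters should return only the
        # bias; the weight matrix is excluded.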
        all_vars = perceptron.get_params_internal(regularizable=False)
        names = self.var_names(all_vars)

        expected_names = {
            "perceptron/b:0",
        }
        self.assertEqual(names, expected_names)
    def test_output_mode_switches(self):
        in_size = 1
        out_size = 1
        input_layer = tf.placeholder(tf.float32, shape=(None, in_size))

        perceptron = Perceptron(
            "perceptron",
            input_layer,
            in_size,
            out_size,
            batch_norm_config=BatchNormConfig(),
        )
        self.sess.run(tf.global_variables_initializer())
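        # With batch norm, the training and eval graphs differ; `output`
        # should always point at whichever mode is currently active.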
        training_output = perceptron.training_output
        eval_output = perceptron._eval_output
        self.assertNotEqual(training_output, eval_output)
        self.assertEqual(perceptron.output, eval_output)
        perceptron.switch_to_training_mode()
        self.assertEqual(perceptron.output, training_output)
        perceptron.switch_to_eval_mode()
        self.assertEqual(perceptron.output, eval_output)
        perceptron.switch_to_eval_mode()
        self.assertEqual(perceptron.output, eval_output)
    def test_batch_norm(self):
        in_size = 1
        out_size = 1
        W_name = "w"
        W_initializer = tf.constant_initializer(value=np.eye(1))
        b_name = "b"
        b_initializer = tf.constant_initializer(value=np.array([0]))
        input_layer = tf.placeholder(tf.float32, shape=(None, in_size))

        epsilon = 1e-5
        perceptron = Perceptron(
            "perceptron",
            input_layer,
            in_size,
            out_size,
            W_name=W_name,
            W_initializer=W_initializer,
            b_name=b_name,
            b_initializer=b_initializer,
            batch_norm_config=BatchNormConfig(epsilon=epsilon, decay=0.),
        )
        self.sess.run(tf.global_variables_initializer())

        input_values = np.array([[1, 2, 3, 4]]).T
        # variance([1, 2, 3, 4]) = 1.25
        # mean([1, 2, 3, 4]) = 2.5

        perceptron.switch_to_training_mode()
        self.sess.run(perceptron.batch_norm_update_stats_op,
                      {input_layer: input_values})
        perceptron.switch_to_eval_mode()

        expected_eval_values = np.array([[-2, 2]]).T
        expected_pop_mean = 2.5
        expected_pop_var = 1.25
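        # Eval mode normalizes with the population statistics, which with
        # decay=0 should exactly match the batch statistics above. Inverting
        # the normalization: feeding x = y * sqrt(var + eps) + mean should
        # yield y at the output.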
        eval_input_values = (
            expected_eval_values * np.sqrt(expected_pop_var + epsilon) +
            expected_pop_mean)
        eval_values = self.sess.run(perceptron.output,
                                    {input_layer: eval_input_values})
        self.assertNpArraysAlmostEqual(expected_eval_values, eval_values)