Example #1
    def testEmptyUpdateOps(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = batchnorm_classifier(tf_inputs)
            loss = losses.log_loss(tf_labels, tf_predictions)
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            train_op = training.create_train_op(loss, optimizer, update_ops=[])

            moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
            moving_variance = variables_lib.get_variables_by_name(
                'moving_variance')[0]

            with self.cached_session() as session:
                # Initialize all variables
                session.run(variables_lib2.global_variables_initializer())
                mean, variance = session.run([moving_mean, moving_variance])
                # After initialization moving_mean == 0 and moving_variance == 1.
                self.assertAllClose(mean, [0] * 4)
                self.assertAllClose(variance, [1] * 4)

                for _ in range(10):
                    session.run(train_op)

                mean = moving_mean.eval()
                variance = moving_variance.eval()

                # Since we skip update_ops, the moving_vars are not updated.
                self.assertAllClose(mean, [0] * 4)
                self.assertAllClose(variance, [1] * 4)
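
For contrast, here is a minimal self-contained sketch (TF 1.x; the toy inputs, labels, and one-layer classifier are invented for illustration, not taken from the test suite). With the default update_ops=None, create_train_op pulls the tf.GraphKeys.UPDATE_OPS collection itself, so the batch-norm moving statistics do change during training:

import numpy as np
import tensorflow as tf

# Toy data, invented for illustration.
x = tf.constant(np.random.rand(16, 4), dtype=tf.float32)
y = tf.constant(np.random.rand(16, 1), dtype=tf.float32)

# A batch-normalized one-layer classifier, similar in spirit to the
# batchnorm_classifier used by the test above.
predictions = tf.contrib.layers.fully_connected(
    x, 1, activation_fn=tf.sigmoid,
    normalizer_fn=tf.contrib.layers.batch_norm)
loss = tf.losses.log_loss(labels=y, predictions=predictions)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

# update_ops=None (the default) collects tf.GraphKeys.UPDATE_OPS, so each
# step of this train_op also updates moving_mean and moving_variance.
train_op = tf.contrib.training.create_train_op(loss, optimizer)
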
Example #2
    def testCreateVariables(self):
        height, width = 3, 3
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        normalization.instance_norm(images, center=True, scale=True)
        beta = contrib_variables.get_variables_by_name('beta')[0]
        gamma = contrib_variables.get_variables_by_name('gamma')[0]
        self.assertEqual('InstanceNorm/beta', beta.op.name)
        self.assertEqual('InstanceNorm/gamma', gamma.op.name)
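
As a hedged follow-on sketch (TF 1.x with tf.contrib): beta and gamma are per-channel parameters, so each has one element per input channel.

import tensorflow as tf

images = tf.random_uniform((5, 3, 3, 3), seed=1)
tf.contrib.layers.instance_norm(images, center=True, scale=True)

# One offset (beta) and one scale (gamma) per channel: shape (3,) here.
beta = [v for v in tf.global_variables()
        if v.op.name == 'InstanceNorm/beta'][0]
print(beta.shape)  # (3,)
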
Example #3
    def testReuseVariables(self):
        height, width = 3, 3
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        normalization.instance_norm(images, scale=True, scope='IN')
        normalization.instance_norm(images, scale=True, scope='IN', reuse=True)
        beta = contrib_variables.get_variables_by_name('beta')
        gamma = contrib_variables.get_variables_by_name('gamma')
        self.assertEqual(1, len(beta))
        self.assertEqual(1, len(gamma))
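
A hedged counterpart sketch showing why reuse=True matters: calling into the same explicit scope a second time without it makes variable_scope refuse to recreate the existing variables.

import tensorflow as tf

images = tf.random_uniform((5, 3, 3, 3), seed=1)
tf.contrib.layers.instance_norm(images, scale=True, scope='IN')
try:
    # Same scope but no reuse=True: recreating the gamma variable under
    # 'IN' is an error.
    tf.contrib.layers.instance_norm(images, scale=True, scope='IN')
except ValueError as err:
    print('expected:', err)
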
Example #4
    def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
        logdir1 = tempfile.mkdtemp('tmp_logs1')

        # First, train only the weights of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            weights = variables_lib2.get_variables_by_name('weights')

            train_op = learning.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=weights)

            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=200,
                                  log_every_n_steps=10)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Next, train the biases of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(1)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            biases = variables_lib2.get_variables_by_name('biases')

            train_op = learning.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=biases)

            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Finally, train both weights and biases to get a lower loss.
        with ops.Graph().as_default():
            random_seed.set_random_seed(2)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)
            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=400,
                                  log_every_n_steps=10)

            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
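
A minimal sketch of the mechanism (TF 1.x; the tiny linear model is invented for illustration): variables_to_train restricts which variables receive gradient updates, and the same subset can be built by filtering tf.trainable_variables() by name.

import tensorflow as tf

x = tf.constant([[1.0, 2.0]])
y = tf.constant([[1.0]])
predictions = tf.contrib.layers.fully_connected(x, 1, activation_fn=None)
loss = tf.losses.mean_squared_error(y, predictions)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

# fully_connected names its variables '.../weights' and '.../biases', so
# this filter mirrors get_variables_by_name('weights') in the test above.
weights = [v for v in tf.trainable_variables() if 'weights' in v.op.name]
train_op = tf.contrib.training.create_train_op(
    loss, optimizer, variables_to_train=weights)
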
Example #5
    def testCreateVariables_NHWC(self):
        height, width = 3, 3
        images = random_ops.random_uniform((5, height, width, 8), seed=1)
        normalization.group_norm(images, groups=4,
                                 channels_axis=-1, reduction_axes=(-3, -2),
                                 center=True, scale=True)
        beta = contrib_variables.get_variables_by_name('beta')[0]
        gamma = contrib_variables.get_variables_by_name('gamma')[0]
        self.assertEqual('GroupNorm/beta', beta.op.name)
        self.assertEqual('GroupNorm/gamma', gamma.op.name)
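
A hedged companion sketch for channels-first (NCHW) data, assuming the same group_norm API: the channel axis moves to -3 and the spatial reduction axes to (-2, -1).

import tensorflow as tf

# NCHW layout: 8 channels split into 4 groups of 2.
images = tf.random_uniform((5, 8, 3, 3), seed=1)
tf.contrib.layers.group_norm(images, groups=4,
                             channels_axis=-3, reduction_axes=(-2, -1),
                             center=True, scale=True)
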
Example #6
    def testCreateOpNoScaleCenter(self):
        height, width = 3, 3
        images = random_ops.random_uniform(
            (5, height, width, 3), dtype=dtypes.float64, seed=1)
        output = normalization.instance_norm(images, center=False, scale=False)
        self.assertStartsWith(
            output.op.name, 'InstanceNorm/instancenorm')
        self.assertListEqual([5, height, width, 3], output.shape.as_list())
        self.assertEqual(
            0, len(contrib_variables.get_variables_by_name('beta')))
        self.assertEqual(
            0, len(contrib_variables.get_variables_by_name('gamma')))
Example #7
    def testCreateOpNoScaleCenter(self):
        height, width, groups = 3, 3, 7
        images = random_ops.random_uniform(
            (5, height, width, 3 * groups), dtype=dtypes.float32, seed=1)
        output = normalization.group_norm(images, groups=groups, center=False,
                                          scale=False)
        self.assertListEqual([5, height, width, 3 * groups],
                             output.shape.as_list())
        self.assertEqual(
            0, len(contrib_variables.get_variables_by_name('beta')))
        self.assertEqual(
            0, len(contrib_variables.get_variables_by_name('gamma')))
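
A related hedged sketch: group_norm requires the channel count to be divisible by groups (which is why the test above sizes the channel axis as 3 * groups); otherwise it raises a ValueError.

import tensorflow as tf

images = tf.random_uniform((5, 3, 3, 10), seed=1)
try:
    # 10 channels cannot be split into 4 equal groups.
    tf.contrib.layers.group_norm(images, groups=4)
except ValueError as err:
    print('expected:', err)
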
Example #8
    def testUseUpdateOps(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)

            expected_mean = np.mean(self._inputs, axis=0)
            expected_var = np.var(self._inputs, axis=0)
            expected_var = self._addBesselsCorrection(16, expected_var)

            tf_predictions = BatchNormClassifier(tf_inputs)
            loss_ops.log_loss(tf_labels, tf_predictions)
            total_loss = loss_ops.get_total_loss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            moving_mean = variables_lib2.get_variables_by_name(
                'moving_mean')[0]
            moving_variance = variables_lib2.get_variables_by_name(
                'moving_variance')[0]

            with tf.Session() as sess:
                # Initialize all variables
                sess.run(variables_lib.global_variables_initializer())
                mean, variance = sess.run([moving_mean, moving_variance])
                # After initialization moving_mean == 0 and moving_variance == 1.
                self.assertAllClose(mean, [0] * 4)
                self.assertAllClose(variance, [1] * 4)

                for _ in range(10):
                    sess.run([train_op])
                mean = moving_mean.eval()
                variance = moving_variance.eval()
                # After 10 updates with decay 0.1 moving_mean == expected_mean and
                # moving_variance == expected_var.
                self.assertAllClose(mean, expected_mean)
                self.assertAllClose(variance, expected_var)
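
A hedged reconstruction of the _addBesselsCorrection helper referenced above (assumption: it rescales the population variance by n / (n - 1), since the fused batch-norm kernel stores the unbiased variance in the moving average):

def _addBesselsCorrection(sample_size, expected_var):
    # Bessel's correction: turn the biased (population) variance into the
    # unbiased (sample) estimate.
    correction_factor = sample_size / (sample_size - 1.0)
    return expected_var * correction_factor
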
Example #9
    def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
        logdir = tempfile.mkdtemp('tmp_logs3/')
        if gfile.Exists(logdir):  # For running on jenkins.
            gfile.DeleteRecursively(logdir)

        # First, train only the weights of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            weights = variables_lib.get_variables_by_name('weights')

            train_op = training.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=weights)

            saver = saver_lib.Saver()
            loss = training.train(
                train_op,
                logdir,
                hooks=[
                    basic_session_run_hooks.CheckpointSaverHook(logdir,
                                                                save_steps=200,
                                                                saver=saver),
                    basic_session_run_hooks.StopAtStepHook(num_steps=200),
                ],
                save_checkpoint_secs=None,
                save_summaries_steps=None)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Next, train the biases of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(1)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            biases = variables_lib.get_variables_by_name('biases')

            train_op = training.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=biases)

            saver = saver_lib.Saver()
            loss = training.train(
                train_op,
                logdir,
                hooks=[
                    basic_session_run_hooks.CheckpointSaverHook(logdir,
                                                                save_steps=300,
                                                                saver=saver),
                    basic_session_run_hooks.StopAtStepHook(num_steps=300),
                ],
                save_checkpoint_secs=None,
                save_summaries_steps=None)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Finally, train both weights and biases to get a lower loss.
        with ops.Graph().as_default():
            random_seed.set_random_seed(2)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = training.create_train_op(total_loss, optimizer)
            saver = saver_lib.Saver()
            loss = training.train(
                train_op,
                logdir,
                hooks=[
                    basic_session_run_hooks.StopAtStepHook(num_steps=400),
                ],
                save_checkpoint_secs=None,
                save_summaries_steps=None)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
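
For orientation, a minimal sketch of what training.train is doing under these hooks (assumption: it is essentially a loop over tf.train.MonitoredTrainingSession; the toy model is invented for illustration):

import tensorflow as tf

x = tf.constant([[1.0], [2.0]])
y = tf.constant([[2.0], [4.0]])
predictions = tf.contrib.layers.fully_connected(x, 1, activation_fn=None)
loss = tf.losses.mean_squared_error(y, predictions)
# create_train_op also creates and increments the global step that
# StopAtStepHook watches.
train_op = tf.contrib.training.create_train_op(
    loss, tf.train.GradientDescentOptimizer(learning_rate=0.01))

with tf.train.MonitoredTrainingSession(
        hooks=[tf.train.StopAtStepHook(num_steps=400)]) as sess:
    while not sess.should_stop():
        loss_value = sess.run(train_op)
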