Example #1
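The snippets on this page use module aliases (ops, random_seed, learning, loss_ops, variables_lib, variables_lib2, input_lib, saver_lib, summary, gradient_descent) without showing the imports. A plausible preamble, assuming the TensorFlow 1.x contrib layout these names suggest (not part of the original page):

# Assumed preamble; aliases inferred from the snippets below.
import os
import tempfile

import tensorflow as tf

from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.slim.python.slim import learning
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.summary import summary
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import saver as saver_lib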
    def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
        logdir1 = tempfile.mkdtemp('tmp_logs1')

        # First, train only the weights of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            weights = variables_lib2.get_variables_by_name('weights')

            train_op = learning.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=weights)

            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=200,
                                  log_every_n_steps=10)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Next, train the biases of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(1)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            biases = variables_lib2.get_variables_by_name('biases')

            train_op = learning.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=biases)

            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Finally, train both weights and biases to achieve a lower loss.
        with ops.Graph().as_default():
            random_seed.set_random_seed(2)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)
            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=400,
                                  log_every_n_steps=10)

            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
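self.ModelLoss() is a test helper that this page does not show. A minimal sketch, assuming the same logistic classifier and loss wiring used by the other examples here:

    # Sketch of the ModelLoss helper called above; the body is an
    # assumption reconstructed from the other examples on this page.
    def ModelLoss(self):
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_labels, tf_predictions)
        return loss_ops.get_total_loss()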
Example #2
    def testTrainWithEpochLimit(self):
        logdir = tempfile.mkdtemp('tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)
            tf_inputs_limited = input_lib.limit_epochs(tf_inputs,
                                                       num_epochs=300)
            tf_labels_limited = input_lib.limit_epochs(tf_labels,
                                                       num_epochs=300)

            tf_predictions = LogisticClassifier(tf_inputs_limited)
            loss_ops.log_loss(tf_labels_limited, tf_predictions)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op, logdir, log_every_n_steps=10)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
        self.assertTrue(
            os.path.isfile('{}/model.ckpt-300.index'.format(logdir)))
        self.assertTrue(
            os.path.isfile(
                '{}/model.ckpt-300.data-00000-of-00001'.format(logdir)))
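LogisticClassifier and the self._inputs / self._labels fixture are also not shown on this page. A minimal sketch (names, shapes, and values are assumptions): a single fully connected sigmoid unit over a small, learnable binary problem. Because the inputs form a single constant batch, one epoch is one step, so limit_epochs(..., num_epochs=300) ends training at step 300, which is why this test checks for a model.ckpt-300 checkpoint.

# Sketch of the omitted pieces; everything here is an assumption.
import numpy as np

def LogisticClassifier(inputs):
    # One fully connected sigmoid unit: plain logistic regression.
    return tf.contrib.layers.fully_connected(inputs, 1,
                                             activation_fn=tf.sigmoid)

class TrainTest(tf.test.TestCase):  # hypothetical class name
    def setUp(self):
        # A small, learnable problem: the active input column encodes
        # the label, so the loss can be driven close to zero.
        np.random.seed(0)
        self._inputs = np.zeros((16, 4), dtype=np.float32)
        self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
        for i in range(16):
            j = int(2 * self._labels[i] + np.random.randint(0, 2))
            self._inputs[i, j] = 1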
Example #3
    def testResumeTrainAchievesRoughlyTheSameLoss(self):
        logdir = tempfile.mkdtemp('tmp_logs')
        number_of_steps = [300, 301, 305]

        for i in range(len(number_of_steps)):
            with ops.Graph().as_default():
                random_seed.set_random_seed(i)
                tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
                tf_labels = tf.constant(self._labels, dtype=tf.float32)

                tf_predictions = LogisticClassifier(tf_inputs)
                loss_ops.log_loss(tf_labels, tf_predictions)
                total_loss = loss_ops.get_total_loss()

                optimizer = gradient_descent.GradientDescentOptimizer(
                    learning_rate=1.0)

                train_op = learning.create_train_op(total_loss, optimizer)

                loss = learning.train(train_op,
                                      logdir,
                                      number_of_steps=number_of_steps[i],
                                      log_every_n_steps=10)
                self.assertIsNotNone(loss)
                self.assertLess(loss, .015)
Example #4
    def testTrainWithTrace(self):
        logdir = tempfile.mkdtemp('tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_labels, tf_predictions)
            total_loss = loss_ops.get_total_loss()
            summary.scalar('total_loss', total_loss)

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op,
                                  logdir,
                                  number_of_steps=300,
                                  log_every_n_steps=10,
                                  trace_every_n_steps=100)
        self.assertIsNotNone(loss)
        for trace_step in [1, 101, 201]:
            trace_filename = 'tf_trace-%d.json' % (trace_step - 1)
            trace_filename_legacy = 'tf_trace-%d.json' % trace_step

            trace_paths = [
                os.path.join(logdir, f)
                for f in (trace_filename, trace_filename_legacy)
            ]
            # Note: with resource variables the traces are created at steps
            # 0/100/200; with legacy variables they are created at 1/101/201.
            self.assertTrue(any(os.path.isfile(path) for path in trace_paths),
                            trace_paths)
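The tf_trace-*.json files written by trace_every_n_steps use the Chrome trace format, so they can be loaded in chrome://tracing or inspected directly. A quick sanity check (a sketch, not part of the original test):

import json

with open(os.path.join(logdir, 'tf_trace-100.json')) as f:
    trace = json.load(f)
# Chrome trace format keeps a list of timed events under 'traceEvents'.
print(len(trace['traceEvents']))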
Example #5
    def testTrainWithInitFromCheckpoint(self):
        logdir1 = tempfile.mkdtemp('tmp_logs1')
        logdir2 = tempfile.mkdtemp('tmp_logs2')

        # First, train the model one step (make sure the error is high).
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            train_op = self.create_train_op()
            loss = learning.train(train_op, logdir1, number_of_steps=1)
            self.assertGreater(loss, .5)

        # Next, train the model to convergence.
        with ops.Graph().as_default():
            random_seed.set_random_seed(1)
            train_op = self.create_train_op()
            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .02)

        # Finally, advance the model a single step and validate that the loss is
        # still low.
        with ops.Graph().as_default():
            random_seed.set_random_seed(2)
            train_op = self.create_train_op()

            model_variables = variables_lib.global_variables()
            model_path = os.path.join(logdir1, 'model.ckpt-300')

            init_op = variables_lib.global_variables_initializer()
            op, init_feed_dict = variables_lib2.assign_from_checkpoint(
                model_path, model_variables)

            def InitAssignFn(sess):
                sess.run(op, init_feed_dict)

            loss = learning.train(train_op,
                                  logdir2,
                                  number_of_steps=1,
                                  init_op=init_op,
                                  init_fn=InitAssignFn)

            self.assertIsNotNone(loss)
            self.assertLess(loss, .02)
Example #6
    def testTrainWithInitFromFn(self):
        logdir1 = tempfile.mkdtemp('tmp_logs1')
        logdir2 = tempfile.mkdtemp('tmp_logs2')

        # First, train the model one step (make sure the error is high).
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            train_op = self.create_train_op()
            loss = learning.train(train_op, logdir1, number_of_steps=1)
            self.assertGreater(loss, .5)

        # Next, train the model to convergence.
        with ops.Graph().as_default():
            random_seed.set_random_seed(1)
            train_op = self.create_train_op()
            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)

        # Finally, advance the model a single step and validate that the loss is
        # still low.
        with ops.Graph().as_default():
            random_seed.set_random_seed(2)
            train_op = self.create_train_op()

            model_variables = variables_lib.global_variables()
            model_path = os.path.join(logdir1, 'model.ckpt-300')
            saver = saver_lib.Saver(model_variables)

            def RestoreFn(sess):
                saver.restore(sess, model_path)

            loss = learning.train(train_op,
                                  logdir2,
                                  number_of_steps=1,
                                  init_fn=RestoreFn)

            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
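Examples #5 and #6 hand-roll checkpoint initialization in two ways: assign_from_checkpoint plus an explicit init_op, and a raw Saver.restore. The same contrib framework module also provides assign_from_checkpoint_fn, which collapses either pattern into a single call; a sketch, assuming the variables_lib2 alias used above:

# Compact alternative to the InitAssignFn/RestoreFn patterns above.
model_path = os.path.join(logdir1, 'model.ckpt-300')
init_fn = variables_lib2.assign_from_checkpoint_fn(model_path,
                                                   model_variables)
loss = learning.train(train_op,
                      logdir2,
                      number_of_steps=1,
                      init_fn=init_fn)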
Example #7
    def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_labels, tf_predictions)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            with self.assertRaises(ValueError):
                learning.train(train_op,
                               None,
                               number_of_steps=300,
                               trace_every_n_steps=10)
Example #8
    def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
        logdir = tempfile.mkdtemp('tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_labels, tf_predictions)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            with self.assertRaises(RuntimeError):
                learning.train(train_op,
                               logdir,
                               init_op=None,
                               number_of_steps=300)
Example #9
    def testTrainWithAlteredGradients(self):
        # Use the same learning rate but different gradient multipliers
        # to train two models. The model with the larger effective learning
        # rate (i.e., learning_rate * gradient_multiplier) should reach a
        # smaller training loss.
        logdir1 = tempfile.mkdtemp('tmp_logs1')
        logdir2 = tempfile.mkdtemp('tmp_logs2')

        multipliers = [1., 1000.]
        number_of_steps = 10
        losses = []
        learning_rate = 0.001

        # First, train the model with the smaller effective learning rate.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            train_op = self.create_train_op(learning_rate=learning_rate,
                                            gradient_multiplier=multipliers[0])
            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=number_of_steps)
            losses.append(loss)
            self.assertGreater(loss, .5)

        # Second, train the model with the larger effective learning rate.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            train_op = self.create_train_op(learning_rate=learning_rate,
                                            gradient_multiplier=multipliers[1])
            loss = learning.train(train_op,
                                  logdir2,
                                  number_of_steps=number_of_steps)
            losses.append(loss)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .5)

        # The loss of the model trained with the larger effective learning
        # rate should be smaller.
        self.assertGreater(losses[0], losses[1])
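Examples #5, #6, and #9 call self.create_train_op(), a helper not shown on this page. A plausible reconstruction (an assumption pieced together from the other snippets; slim's create_train_op does accept a gradient_multipliers dict):

    # Sketch of the create_train_op helper; the body is an assumption.
    def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_labels, tf_predictions)
        total_loss = loss_ops.get_total_loss()

        optimizer = gradient_descent.GradientDescentOptimizer(
            learning_rate=learning_rate)

        # Scale every trainable variable's gradient by the same factor.
        if gradient_multiplier != 1.0:
            variables = variables_lib.trainable_variables()
            gradient_multipliers = {v: gradient_multiplier for v in variables}
        else:
            gradient_multipliers = None

        return learning.create_train_op(
            total_loss, optimizer, gradient_multipliers=gradient_multipliers)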
Example #10
    def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
        logdir = tempfile.mkdtemp('tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_labels, tf_predictions)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op,
                                  logdir,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
Example #11
    def testTrainWithSessionConfig(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_labels, tf_predictions)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            session_config = tf.ConfigProto(allow_soft_placement=True)
            loss = learning.train(train_op,
                                  None,
                                  number_of_steps=300,
                                  log_every_n_steps=10,
                                  session_config=session_config)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
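The session_config argument is passed through to the session that learning.train creates, so any ConfigProto field applies. For example, on-demand GPU memory growth can be requested the same way (a usage sketch, not from the original):

session_config = tf.ConfigProto(allow_soft_placement=True)
# Grow GPU memory on demand instead of reserving it all up front.
session_config.gpu_options.allow_growth = True
loss = learning.train(train_op,
                      None,
                      number_of_steps=300,
                      session_config=session_config)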