Example #1
    def test_end2end_checkpoint(self):
        """Takes real GAN (trained for 1 step) and evaluate it."""
        workdir = os.path.join(tf.test.get_temp_dir(), self.id())
        tf.logging.info("Workdir: %s" % workdir)
        options = {
            "gan_type": "GAN",
            "dataset": "fake",
            "training_steps": 1,
            "save_checkpoint_steps": 10,
            "learning_rate": 0.001,
            "discriminator_normalization": consts.NO_NORMALIZATION,
            "eval_test_samples": 50,
        }
        gan_lib.run_with_options(options, workdir)
        fake_inception = self._create_fake_inception_graph()

        eval_gan_lib.RunTaskEval(options,
                                 workdir,
                                 inception_graph=fake_inception.as_graph_def())

        rows = self._get_scores(workdir)

        self.assertEqual(1, len(rows))
        # The FID score should exist (and be quite large).
        self.assertGreater(rows[0]["fid_score"], 100.0)
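All three examples rely on a `_get_scores` helper that is not shown. A minimal sketch of what it might look like, assuming `RunTaskEval` writes its results to a CSV file in the work directory (the "scores.csv" file name is an assumption, and `csv` must be imported alongside `os` and TensorFlow 1.x as `tf`):

    def _get_scores(self, workdir):
        """Reads the results CSV and returns its rows as a list of dicts."""
        # The "scores.csv" file name is an assumption, not the library's API.
        csv_path = os.path.join(workdir, "scores.csv")
        with tf.gfile.Open(csv_path) as csv_file:
            return list(csv.DictReader(csv_file))

`_create_fake_inception_graph` is presumably a small stand-in graph with the same input and output tensor names as the real Inception model, so the FID computation can run without downloading the actual network.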
Example #2
    def test_csv_writing(self):
        """Verifies that results are correctly written to final CSV file."""
        workdir = os.path.join(tf.test.get_temp_dir(), self.id())
        checkpoint_path = os.path.join(workdir, "checkpoint/")
        tf.gfile.MakeDirs(checkpoint_path)

        options = {
            "gan_type": "GAN",
            "dataset": "fake",
            "discriminator_normalization": consts.NO_NORMALIZATION,
            "learning_rate": 0.001,
        }
        # Create 10 checkpoints.
        with tf.Graph().as_default():
            tf.get_variable("foo", shape=[1])
            saver = tf.train.Saver(max_to_keep=1000)
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                for x in range(10):
                    saver.save(sess, checkpoint_path, global_step=x)

        with mock.patch.object(eval_gan_lib,
                               "RunCheckpointEval",
                               autospec=True) as mock_cls:
            result_dict = {"inception_score": 12.0, "train_d_loss": 1.3}
            mock_cls.return_value = result_dict
            eval_gan_lib.RunTaskEval(options, workdir, inception_graph=None)
        rows = self._get_scores(workdir)
        self.assertEqual(10, len(rows))
        self.assertNear(float(rows[0]["inception_score"]), 12.0, 0.01)
        self.assertNear(float(rows[1]["train_d_loss"]), 1.3, 0.01)
        self.assertNear(float(rows[1]["test_accuracy"]), -1.0, 0.01)
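The mocked `RunCheckpointEval` returns only `inception_score` and `train_d_loss`, yet the test expects `test_accuracy` to equal -1.0, so metrics missing from a checkpoint's result dict must be written out with a -1.0 default. A minimal sketch of that filling step (`fill_missing_metrics` is a hypothetical helper, not eval_gan_lib's API):

def fill_missing_metrics(result_dict, all_metric_names, default=-1.0):
    """Returns a full CSV row; metrics absent from result_dict get the default."""
    return {name: result_dict.get(name, default) for name in all_metric_names}

For example, {"inception_score": 12.0} with the metric list ["inception_score", "test_accuracy"] yields {"inception_score": 12.0, "test_accuracy": -1.0}, matching the last assertion above.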
Example #3
    def test_csv_append(self):
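        """Verifies that new results are appended to the existing CSV file."""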
        workdir = os.path.join(tf.test.get_temp_dir(), self.id())
        checkpoint_path = os.path.join(workdir, "checkpoint/")
        tf.gfile.MakeDirs(checkpoint_path)

        options = {
            "gan_type": "GAN",
            "dataset": "fake",
            "discriminator_normalization": consts.NO_NORMALIZATION,
            "learning_rate": 0.001,
        }

        # Start by creating the first 2 checkpoints.
        with tf.Graph().as_default():
            tf.get_variable("foo", shape=[1])
            saver = tf.train.Saver(max_to_keep=1000)
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                for x in range(2):
                    saver.save(sess, checkpoint_path, global_step=x)

                with mock.patch.object(eval_gan_lib,
                                       "RunCheckpointEval",
                                       autospec=True) as mock_cls:
                    mock_cls.return_value = {
                        "inception_score": 12.0,
                        "train_d_loss": 1.3
                    }
                    eval_gan_lib.RunTaskEval(options,
                                             workdir,
                                             inception_graph=None)

                    rows = self._get_scores(workdir)
                    self.assertEqual(2, len(rows))
                    self.assertNear(float(rows[0]["inception_score"]), 12.0,
                                    0.01)
                    self.assertNear(float(rows[1]["train_d_loss"]), 1.3, 0.01)
                    self.assertNear(float(rows[1]["test_accuracy"]), -1.0,
                                    0.01)

                    # Now create 2 more checkpoints (global steps 3 and 4).
                    for x in range(3, 5):
                        saver.save(sess, checkpoint_path, global_step=x)
                    mock_cls.return_value = {
                        "inception_score": 14.0,
                        "train_d_loss": 1.5
                    }
                    eval_gan_lib.RunTaskEval(options,
                                             workdir,
                                             inception_graph=None)
                    rows = self._get_scores(workdir)
                    self.assertEqual(4, len(rows))
                    # Old scores should stay intact.
                    self.assertNear(float(rows[0]["inception_score"]), 12.0,
                                    0.01)
                    self.assertNear(float(rows[1]["train_d_loss"]), 1.3, 0.01)
                    self.assertNear(float(rows[1]["test_accuracy"]), -1.0,
                                    0.01)
                    # New entries should have new values.
                    self.assertNear(float(rows[2]["inception_score"]), 14.0,
                                    0.01)
                    self.assertNear(float(rows[3]["train_d_loss"]), 1.5, 0.01)

                    self.assertNotIn("new_metric", rows[0])

                    # Now assume that metric names have changed.
                    with mock.patch.object(eval_gan_lib,
                                           "MultiscaleSSIMTask",
                                           autospec=True) as mock_task:
                        mock_task.return_value.MetricsList.return_value = [
                            "ms_ssim", "new_metric"
                        ]
                        # Now create 2 more checkpoints (global steps 5 and 6).
                        for x in range(5, 7):
                            saver.save(sess, checkpoint_path, global_step=x)
                        mock_cls.return_value = {
                            "inception_score": 16.0,
                            "train_d_loss": 1.7,
                            "new_metric": 20.0
                        }
                        eval_gan_lib.RunTaskEval(options,
                                                 workdir,
                                                 inception_graph=None)
                        rows = self._get_scores(workdir)
                        self.assertEqual(6, len(rows))

                        # As the CSV header has changed, all results should
                        # have been recomputed.
                        for x in range(6):
                            self.assertNear(float(rows[x]["inception_score"]),
                                            16.0, 0.01)
                            self.assertNear(float(rows[x]["new_metric"]), 20.0,
                                            0.01)
                            self.assertNear(float(rows[x]["test_accuracy"]),
                                            -1.0, 0.01)
                            self.assertNear(float(rows[x]["train_d_loss"]),
                                            1.7, 0.01)
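The final block relies on `RunTaskEval` noticing that the metric list, and therefore the CSV header, has changed, and recomputing every row from scratch. A minimal sketch of such a header check (`needs_recompute` is a hypothetical helper; the real eval_gan_lib logic may differ):

def needs_recompute(csv_path, current_fieldnames):
    """True if an existing CSV was written with a different header."""
    if not tf.gfile.Exists(csv_path):
        return False  # No previous results; nothing to invalidate.
    with tf.gfile.Open(csv_path) as csv_file:
        existing = csv.DictReader(csv_file).fieldnames
    return list(existing) != list(current_fieldnames)

When the header check fires, the evaluator discards the old rows and re-evaluates all checkpoints, which is why every row above (including the first two) ends up with the new inception_score of 16.0.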