Example #1
    def test_save_restore(self):
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        m = metrics.Mean()
        checkpoint = tf.train.Checkpoint(mean=m)
        self.evaluate(tf.compat.v1.variables_initializer(m.variables))

        # update state
        self.evaluate(m(100.0))
        self.evaluate(m(200.0))

        # save checkpoint and then add an update
        save_path = checkpoint.save(checkpoint_prefix)
        self.evaluate(m(1000.0))

        # restore to the same checkpoint mean object
        checkpoint.restore(save_path).assert_consumed().run_restore_ops()
        self.evaluate(m(300.0))
        self.assertEqual(200.0, self.evaluate(m.result()))

        # restore to a different checkpoint mean object
        restore_mean = metrics.Mean()
        restore_checkpoint = tf.train.Checkpoint(mean=restore_mean)
        status = restore_checkpoint.restore(save_path)
        restore_update = restore_mean(300.0)
        status.assert_consumed().run_restore_ops()
        self.evaluate(restore_update)
        self.assertEqual(200.0, self.evaluate(restore_mean.result()))
        self.assertEqual(3, self.evaluate(restore_mean.count))
Example #2
    def test_multiple_instances(self):
        m = metrics.Mean()
        m2 = metrics.Mean()

        assert m.name == 'mean'
        assert m2.name == 'mean'

        # check initial state
        assert K.eval(m.total) == 0
        assert K.eval(m.count) == 0
        assert K.eval(m2.total) == 0
        assert K.eval(m2.count) == 0

        # check __call__()
        assert K.eval(m(100)) == 100
        assert K.eval(m.total) == 100
        assert K.eval(m.count) == 1
        assert K.eval(m2.total) == 0
        assert K.eval(m2.count) == 0

        assert K.eval(m2([63, 10])) == 36.5
        assert K.eval(m2.total) == 73
        assert K.eval(m2.count) == 2
        assert K.eval(m.result()) == 100
        assert K.eval(m.total) == 100
        assert K.eval(m.count) == 1
Example #3
def create_mean_metric(value, name=None):
    # Importing keras at module level would pull in base_layer and then this
    # module, and Metric depends on base_layer, which would create a cyclic
    # dependency; import locally instead.
    from keras import metrics as metrics_module

    metric_obj = metrics_module.Mean(name=name, dtype=value.dtype)
    return metric_obj, metric_obj(value)
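
A hypothetical usage sketch (not from the original source) of the helper above, assuming eager execution: the helper returns the Mean object together with the result tensor of the first update. The name "running_mean" below is arbitrary.

import tensorflow as tf

metric_obj, result = create_mean_metric(tf.constant(3.0), name="running_mean")
print(float(result))               # 3.0 -- mean after the single initial update
metric_obj(tf.constant(5.0))       # later calls keep accumulating state
print(float(metric_obj.result()))  # 4.0 == (3.0 + 5.0) / 2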
Example #4
    def test_mean(self):
        m = metrics.Mean(name='my_mean')

        # check config
        assert m.name == 'my_mean'
        assert m.stateful
        assert m.dtype == 'float32'
        assert len(m.weights) == 2

        # check initial state
        assert K.eval(m.total) == 0
        assert K.eval(m.count) == 0

        # check __call__()
        assert K.eval(m(100)) == 100
        assert K.eval(m.total) == 100
        assert K.eval(m.count) == 1

        # check update_state() and result()
        result = m([1, 5])
        assert np.isclose(K.eval(result), 106. / 3)
        assert K.eval(m.total) == 106  # 100 + 1 + 5
        assert K.eval(m.count) == 3

        # check reset_states()
        m.reset_states()
        assert K.eval(m.total) == 0
        assert K.eval(m.count) == 0

        # Check save and restore config
        m2 = metrics.Mean.from_config(m.get_config())
        assert m2.name == 'my_mean'
        assert m2.stateful
        assert m2.dtype == 'float32'
        assert len(m2.weights) == 2
Example #5
    def test_mean_with_sample_weight(self):
        m = metrics.Mean(dtype='float64')
        assert m.dtype == 'float64'

        # check scalar weight
        result_t = m(100, sample_weight=0.5)
        assert K.eval(result_t) == 50. / 0.5
        assert K.eval(m.total) == 50
        assert K.eval(m.count) == 0.5

        # check non-scalar weights whose rank matches the values rank
        result_t = m([1, 5], sample_weight=[1, 0.2])
        result = K.eval(result_t)
        assert np.isclose(result, 52. / 1.7)
        assert np.isclose(K.eval(m.total), 52)  # 50 + 1 + 5 * 0.2
        assert np.isclose(K.eval(m.count), 1.7)  # 0.5 + 1.2

        # check weights broadcast
        result_t = m([1, 2], sample_weight=0.5)
        assert np.isclose(K.eval(result_t), 53.5 / 2.7, rtol=3)
        assert np.isclose(K.eval(m.total), 53.5, rtol=3)  # 52 + 0.5 + 1
        assert np.isclose(K.eval(m.count), 2.7, rtol=3)  # 1.7 + 0.5 + 0.5

        # check weights squeeze
        result_t = m([1, 5], sample_weight=[[1], [0.2]])
        assert np.isclose(K.eval(result_t), 55.5 / 3.9, rtol=3)
        assert np.isclose(K.eval(m.total), 55.5, rtol=3)  # 53.5 + 1 + 1
        assert np.isclose(K.eval(m.count), 3.9, rtol=3)  # 2.7 + 1.2

        # check weights expand
        result_t = m([[1], [5]], sample_weight=[1, 0.2])
        assert np.isclose(K.eval(result_t), 57.5 / 5.1, rtol=3)
        assert np.isclose(K.eval(m.total), 57.5, rtol=3)  # 55.5 + 1 + 1
        assert np.isclose(K.eval(m.count), 5.1, rtol=3)  # 3.9 + 1.2
Example #6
    def test_reset_state_existing_metric_before_built(self):
        metric = metrics_mod.Mean()
        metric.update_state([2.0, 4.0])
        self.assertEqual(metric.result().numpy(), 3.0)

        metric_container = compile_utils.MetricsContainer(metric)
        metric_container.reset_state()
        self.assertEqual(metric.result().numpy(), 0.0)
Example #7
    def test_multiple_keras_metrics_experimental_run(self, distribution):
        with distribution.scope():
            loss_metric = metrics.Mean("loss", dtype=np.float32)
            loss_metric_2 = metrics.Mean("loss_2", dtype=np.float32)

        @tf.function
        def train_step():
            def step_fn():
                loss = tf.constant(5.0, dtype=np.float32)
                loss_metric.update_state(loss)
                loss_metric_2.update_state(loss)

            distribution.run(step_fn)

        train_step()
        self.assertEqual(loss_metric.result().numpy(),
                         loss_metric_2.result().numpy())
        self.assertEqual(loss_metric.result().numpy(), 5.0)
Example #8
    def test_update_keras_metric_outside_strategy_scope_cross_replica(
            self, distribution):
        metric = metrics.Mean("test_metric", dtype=np.float32)

        with distribution.scope():
            for i in range(10):
                metric.update_state(i)

        # This should be the mean of the integers 0-9, which have a sum of 45
        # and a count of 10, giving a mean of 4.5.
        self.assertEqual(metric.result().numpy(), 4.5)
Example #9
  def _create_metrics(self):
    """Creates per-output loss metrics, but only for multi-output Models."""
    if len(self._output_names) == 1:
      self._per_output_metrics = [None]
    else:
      self._per_output_metrics = []
      for loss_obj, output_name in zip(self._losses, self._output_names):
        if loss_obj is None:
          self._per_output_metrics.append(None)
        else:
          self._per_output_metrics.append(
              metrics_mod.Mean(output_name + '_loss'))
Example #10
    def test_function_wrapped_reset_state(self):
        m = metrics.Mean(name="my_mean")

        # check reset_state in function.
        @tf.function
        def reset_in_fn():
            m.reset_state()
            return m.update_state(100)

        for _ in range(5):
            self.evaluate(reset_in_fn())
        self.assertEqual(self.evaluate(m.count), 1)
Example #11
  def __init__(self, losses, loss_weights=None, output_names=None):
    super(LossesContainer, self).__init__(output_names=output_names)

    # Keep user-supplied values untouched for recompiling and serialization.
    self._user_losses = losses
    self._user_loss_weights = loss_weights

    self._losses = losses
    self._loss_weights = loss_weights
    self._per_output_metrics = None  # Per-output losses become metrics.
    self._loss_metric = metrics_mod.Mean(name='loss')  # Total loss.
    self._built = False
Example #12
    def test_multiple_instances(self):
        m = metrics.Mean()
        m2 = metrics.Mean()

        self.assertEqual(m.name, "mean")
        self.assertEqual(m2.name, "mean")

        self.assertEqual(
            [v.name for v in m.variables],
            test_utils.get_expected_metric_variable_names(["total", "count"]),
        )
        self.assertEqual(
            [v.name for v in m2.variables],
            test_utils.get_expected_metric_variable_names(["total", "count"],
                                                          name_suffix="_1"),
        )

        self.evaluate(tf.compat.v1.variables_initializer(m.variables))
        self.evaluate(tf.compat.v1.variables_initializer(m2.variables))

        # check initial state
        self.assertEqual(self.evaluate(m.total), 0)
        self.assertEqual(self.evaluate(m.count), 0)
        self.assertEqual(self.evaluate(m2.total), 0)
        self.assertEqual(self.evaluate(m2.count), 0)

        # check __call__()
        self.assertEqual(self.evaluate(m(100)), 100)
        self.assertEqual(self.evaluate(m.total), 100)
        self.assertEqual(self.evaluate(m.count), 1)
        self.assertEqual(self.evaluate(m2.total), 0)
        self.assertEqual(self.evaluate(m2.count), 0)

        self.assertEqual(self.evaluate(m2([63, 10])), 36.5)
        self.assertEqual(self.evaluate(m2.total), 73)
        self.assertEqual(self.evaluate(m2.count), 2)
        self.assertEqual(self.evaluate(m.result()), 100)
        self.assertEqual(self.evaluate(m.total), 100)
        self.assertEqual(self.evaluate(m.count), 1)
Example #13
    def test_deepcopy_of_metrics(self):
        m = metrics.Mean(name="my_mean")

        m.reset_state()
        m.update_state(100)
        m_copied = copy.deepcopy(m)
        m_copied.update_state(200)

        self.assertEqual(self.evaluate(m.result()), 100)
        self.assertEqual(self.evaluate(m_copied.result()), 150)

        m.reset_state()

        self.assertEqual(self.evaluate(m.result()), 0)
        self.assertEqual(self.evaluate(m_copied.result()), 150)
Example #14
    def test_duplicated_metric_instance(self):
        mean_obj = metrics_mod.Mean()
        metric = mean_obj
        with self.assertRaisesRegex(ValueError, "Found duplicated metrics"):
            compile_utils.MetricsContainer(metrics=metric,
                                           weighted_metrics=metric)

        # duplicated string should be fine
        metric = "acc"
        compile_utils.MetricsContainer(metrics=metric, weighted_metrics=metric)

        # complicated structure
        metric = [mean_obj, "acc"]
        weighted_metric = {"output1": mean_obj, "output2": "acc"}
        with self.assertRaisesRegex(ValueError, "Found duplicated metrics"):
            compile_utils.MetricsContainer(metrics=metric,
                                           weighted_metrics=weighted_metric)
Example #15
  def test_update_keras_metric_declared_in_strategy_scope(self, distribution):
    with distribution.scope():
      metric = metrics.Mean("test_metric", dtype=np.float32)

    dataset = tf.data.Dataset.range(10).batch(2)
    dataset = distribution.experimental_distribute_dataset(dataset)

    @tf.function
    def step_fn(i):
      metric.update_state(i)

    for i in dataset:
      distribution.run(step_fn, args=(i,))

    # This should be the mean of the integers 0-9, which have a sum of 45
    # and a count of 10, giving a mean of 4.5.
    self.assertEqual(metric.result().numpy(), 4.5)
Example #16
    def test_duplicated_metric_instance(self):
        mean_obj = metrics_mod.Mean()
        metric = mean_obj
        with self.assertRaisesRegex(ValueError, 'Found duplicated metrics'):
            compile_utils.MetricsContainer(metrics=metric,
                                           weighted_metrics=metric)

        # duplicated string should be fine
        metric = 'acc'
        compile_utils.MetricsContainer(metrics=metric, weighted_metrics=metric)

        # complicated structure
        metric = [mean_obj, 'acc']
        weighted_metric = {'output1': mean_obj, 'output2': 'acc'}
        with self.assertRaisesRegex(ValueError, 'Found duplicated metrics'):
            compile_utils.MetricsContainer(metrics=metric,
                                           weighted_metrics=weighted_metric)
Example #17
    def test_mean_with_sample_weight(self):
        m = metrics.Mean(dtype=tf.float64)
        self.assertEqual(m.dtype, tf.float64)
        self.evaluate(tf.compat.v1.variables_initializer(m.variables))

        # check scalar weight
        result_t = m(100, sample_weight=0.5)
        self.assertEqual(self.evaluate(result_t), 50 / 0.5)
        self.assertEqual(self.evaluate(m.total), 50)
        self.assertEqual(self.evaluate(m.count), 0.5)

        # check non-scalar weights whose rank matches the values rank
        result_t = m([1, 5], sample_weight=[1, 0.2])
        result = self.evaluate(result_t)
        self.assertAlmostEqual(result, 52 / 1.7, 2)
        self.assertAlmostEqual(self.evaluate(m.total), 52,
                               2)  # 50 + 1 + 5 * 0.2
        self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2)  # 0.5 + 1.2

        # check weights broadcast
        result_t = m([1, 2], sample_weight=0.5)
        self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2)
        self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2)  # 52 + 0.5 + 1
        self.assertAlmostEqual(self.evaluate(m.count), 2.7,
                               2)  # 1.7 + 0.5 + 0.5

        # check weights squeeze
        result_t = m([1, 5], sample_weight=[[1], [0.2]])
        self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2)
        self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2)  # 53.5 + 1 + 1
        self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2)  # 2.7 + 1.2

        # check weights expand
        result_t = m([[1], [5]], sample_weight=[1, 0.2])
        self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2)
        self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2)  # 55.5 + 1 + 1
        self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2)  # 3.9 + 1.2

        # check values reduced to the dimensions of weight
        result_t = m([[[1.0, 2.0], [3.0, 2.0], [0.5, 4.0]]],
                     sample_weight=[0.5])
        result = np.round(self.evaluate(result_t), decimals=2)  # 58.5 / 5.6
        self.assertEqual(result, 10.45)
        self.assertEqual(np.round(self.evaluate(m.total), decimals=2), 58.54)
        self.assertEqual(np.round(self.evaluate(m.count), decimals=2), 5.6)
Example #18
    def test_update_keras_metrics_dynamic_shape(self, distribution):
        with distribution.scope():
            metric = metrics.Mean("test_metric", dtype=np.float32)

        dataset = tf.data.Dataset.range(10).batch(2, drop_remainder=False)

        @tf.function
        def train_fn(dataset):
            weights = tf.constant([0.1, 0.1])

            def step_fn(i):
                metric.update_state(i, weights)

            for i in dataset:
                distribution.run(step_fn, args=(i, ))

        train_fn(dataset)

        # This should be the mean of the integers 0-9, which have a sum of 45
        # and a count of 10, giving a mean of 4.5.
        self.assertEqual(metric.result().numpy(), 4.5)
Example #19
    def test_mean_graph_with_placeholder(self):
        with tf.compat.v1.get_default_graph().as_default(), \
                self.cached_session() as sess:
            m = metrics.Mean()
            v = tf.compat.v1.placeholder(tf.float32)
            w = tf.compat.v1.placeholder(tf.float32)
            self.evaluate(tf.compat.v1.variables_initializer(m.variables))

            # check __call__()
            result_t = m(v, sample_weight=w)
            result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
            self.assertEqual(self.evaluate(m.total), 50)
            self.assertEqual(self.evaluate(m.count), 0.5)
            self.assertEqual(result, 50 / 0.5)

            # check update_state() and result()
            result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
            self.assertAlmostEqual(self.evaluate(m.total), 52,
                                   2)  # 50 + 1 + 5 * 0.2
            self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2)  # 0.5 + 1.2
            self.assertAlmostEqual(result, 52 / 1.7, 2)
Example #20
    def test_mean(self):
        m = metrics.Mean(name="my_mean")

        # check config
        self.assertEqual(m.name, "my_mean")
        self.assertTrue(m.stateful)
        self.assertEqual(m.dtype, tf.float32)
        self.assertEqual(len(m.variables), 2)
        self.evaluate(tf.compat.v1.variables_initializer(m.variables))

        # check initial state
        self.assertEqual(self.evaluate(m.total), 0)
        self.assertEqual(self.evaluate(m.count), 0)

        # check __call__()
        self.assertEqual(self.evaluate(m(100)), 100)
        self.assertEqual(self.evaluate(m.total), 100)
        self.assertEqual(self.evaluate(m.count), 1)

        # check update_state() and result() + state accumulation + tensor input
        update_op = m.update_state(
            [tf.convert_to_tensor(1),
             tf.convert_to_tensor(5)])
        self.evaluate(update_op)
        self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
        self.assertEqual(self.evaluate(m.total), 106)  # 100 + 1 + 5
        self.assertEqual(self.evaluate(m.count), 3)

        # check reset_state()
        m.reset_state()
        self.assertEqual(self.evaluate(m.total), 0)
        self.assertEqual(self.evaluate(m.count), 0)

        # Check save and restore config
        m2 = metrics.Mean.from_config(m.get_config())
        self.assertEqual(m2.name, "my_mean")
        self.assertTrue(m2.stateful)
        self.assertEqual(m2.dtype, tf.float32)
        self.assertEqual(len(m2.variables), 2)
Example #21
# (X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# X_train, X_test = X_train / 255.0, X_test / 255.0

# X_train = X_train[..., tf.newaxis].astype(np.float32)
# X_test = X_test[..., tf.newaxis].astype(np.float32)

# Build tf.data Dataset pipelines from the X and Y arrays
train_ds = tf.data.Dataset.from_tensor_slices((X_train, Y_train)).shuffle(10000).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((X_test, Y_test)).batch(batch_size)

model = ResNet()

loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = optimizers.Adam()

train_loss = metrics.Mean(name='train_loss')
train_accuracy = metrics.SparseCategoricalAccuracy(name='train_accuracy')

test_loss = metrics.Mean(name='test_loss')
test_accuracy = metrics.SparseCategoricalAccuracy(name='test_accuracy')

for epoch in range(epoches):
    for images, labels in train_ds:
        train_step(model, images, labels, loss_object, optimizer, train_loss, train_accuracy)
    
    for test_images, test_labels in test_ds:
        test_step(model, test_images, test_labels, loss_object, test_loss, test_accuracy)
    
    template = "Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}"
    print(template.format(epoch+1,
                          train_loss.result(),
                          train_accuracy.result(),
                          test_loss.result(),
                          test_accuracy.result()))

    # The Mean and accuracy metrics accumulate state across calls; reset them
    # so each epoch reports per-epoch averages rather than a running total.
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()
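
The train_step and test_step helpers called in the loop above are not included in this snippet. A minimal sketch of what they might look like, assuming the standard tf.function custom-training-loop pattern; the signatures simply mirror the call sites above, and the gradient logic is an assumption rather than code from the original source.

import tensorflow as tf


@tf.function
def train_step(model, images, labels, loss_object, optimizer,
               train_loss, train_accuracy):
    with tf.GradientTape() as tape:
        predictions = model(images, training=True)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # The Mean metric folds each batch loss into a running average.
    train_loss(loss)
    train_accuracy(labels, predictions)


@tf.function
def test_step(model, images, labels, loss_object, test_loss, test_accuracy):
    predictions = model(images, training=False)
    loss = loss_object(labels, predictions)
    test_loss(loss)
    test_accuracy(labels, predictions)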
Example #22
    def __init__(self, **kwargs):
        super(MyModel, self).__init__(**kwargs)
        self.mean_obj = metrics.Mean(name='my_mean_obj')
Example #23
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.mean_obj = metrics.Mean(name="my_mean_obj")
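
These __init__ fragments come from a subclassed model that keeps a Mean metric as an attribute. A minimal, hypothetical sketch of how such an attribute is typically driven from call(); only the __init__ mirrors the snippet above, the rest is an assumption.

import tensorflow as tf
from tensorflow.keras import metrics


class MyModel(tf.keras.Model):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.mean_obj = metrics.Mean(name="my_mean_obj")

    def call(self, inputs):
        # Update the stateful Mean with every batch that passes through;
        # its running total and count persist across calls.
        self.mean_obj.update_state(inputs)
        return inputs


model = MyModel()
model(tf.constant([1.0, 3.0]))
model(tf.constant([5.0]))
print(float(model.mean_obj.result()))  # 3.0 -- the mean of 1, 3 and 5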