Example 1
    def testReduceVar(self):
        x = np.array([[0, 0, 0], [0, 0, 0]], "float32")
        self.assertAllClose(self.evaluate(math_ops.reduce_variance(x)), 0)
        self.assertAllClose(self.evaluate(math_ops.reduce_variance(x, axis=0)),
                            [0, 0, 0])

        x = np.array([[0, 2, 1, 1], [1, 2, 0, 1]], "float32")
        self.assertAllClose(self.evaluate(math_ops.reduce_variance(x)), 0.5)
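Note that reduce_variance computes the population variance (division by N, matching NumPy's default ddof=0), which is why the scalar result above is 0.5 rather than the sample variance 4/7. A minimal eager-mode sketch of that equivalence, assuming TF 2.x and the tf.math namespace:

import numpy as np
import tensorflow as tf

x = np.array([[0, 2, 1, 1], [1, 2, 0, 1]], "float32")
# Population variance over all elements, matching np.var (ddof=0).
assert np.isclose(tf.math.reduce_variance(x).numpy(), np.var(x))  # 0.5
# Per-column variance, matching np.var along axis 0.
assert np.allclose(tf.math.reduce_variance(x, axis=0).numpy(), np.var(x, axis=0))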
Example 3
 def _input_statistics_test_template(self,
                                     stat_object,
                                     num_features,
                                     dtype,
                                     warmup_iterations=0,
                                     rtol=1e-6,
                                     data_length=4):
     graph = ops.Graph()
     with graph.as_default():
         data_length_range = math_ops.range(data_length, dtype=dtype)
         num_features_range = math_ops.range(num_features, dtype=dtype)
         times = 2 * data_length_range[None, :] - 3
         values = (data_length_range[:, None] +
                   num_features_range[None, :])[None, ...]
         features = {
             TrainEvalFeatures.TIMES: times,
             TrainEvalFeatures.VALUES: values,
         }
         statistics = stat_object.initialize_graph(features=features)
         with self.session(graph=graph) as session:
             variables.global_variables_initializer().run()
             coordinator = coordinator_lib.Coordinator()
             queue_runner_impl.start_queue_runners(session,
                                                   coord=coordinator)
             for _ in range(warmup_iterations):
                 # A control dependency should ensure that, for queue-based statistics,
                 # a use of any statistic is preceded by an update of all adaptive
                 # statistics.
                 self.evaluate(statistics.total_observation_count)
             self.assertAllClose(
                  num_features_range +
                 math_ops.reduce_mean(data_length_range)[None],
                 self.evaluate(statistics.series_start_moments.mean),
                 rtol=rtol)
             self.assertAllClose(
                 array_ops.tile(
                     math_ops.reduce_variance(data_length_range)[None],
                     [num_features]),
                 self.evaluate(statistics.series_start_moments.variance),
                 rtol=rtol)
             self.assertAllClose(
                 math_ops.reduce_mean(values[0], axis=0),
                 self.evaluate(statistics.overall_feature_moments.mean),
                 rtol=rtol)
             self.assertAllClose(
                 math_ops.reduce_variance(values[0], axis=0),
                 self.evaluate(statistics.overall_feature_moments.variance),
                 rtol=rtol)
             self.assertAllClose(-3,
                                 self.evaluate(statistics.start_time),
                                 rtol=rtol)
             self.assertAllClose(data_length,
                                 self.evaluate(
                                     statistics.total_observation_count),
                                 rtol=rtol)
             coordinator.request_stop()
             coordinator.join()
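The moments asserted above have simple closed forms, since values[0][t, f] = t + f: both the series-start and the overall per-feature moments reduce to a shifted mean and a tiled variance of the time range. A hedged NumPy sketch (not from the original test), assuming data_length=4 and num_features=2:

import numpy as np

data_length, num_features = 4, 2
t = np.arange(data_length, dtype=np.float64)   # data_length_range
f = np.arange(num_features, dtype=np.float64)  # num_features_range
values = t[:, None] + f[None, :]               # values[0] in the test

expected_mean = f + t.mean()                          # [1.5, 2.5]
expected_variance = np.tile(np.var(t), num_features)  # [1.25, 1.25]
assert np.allclose(values.mean(axis=0), expected_mean)
assert np.allclose(values.var(axis=0), expected_variance)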
Example 4
    def test_step(self, data):
        if isinstance(data, tuple):
            data = data[0]
        features = self.srmConv2D(data)
        z_mean, z_log_var, z = self.encoder(features)
        reconstruction = self.decoder(z)

        L2 = squared_difference(features, reconstruction)
        error = tf.reduce_mean(L2, axis=-1)

        threshold = otsu(error)

        sigma = reduce_variance(error, axis=[1, 2])
        mean_0, sigma_b = dicriminative_error(error, threshold)

        reconstruction_loss = mean_0 + 5 * (1 - sigma_b / sigma)
        reconstruction_loss = tf.reduce_mean(reconstruction_loss)

        kl_loss = -0.5 * tf.reduce_mean(1 + z_log_var - tf.square(z_mean) -
                                        tf.exp(z_log_var))

        total_loss = reconstruction_loss + kl_loss

        return {
            "loss": total_loss,
            "reconstruction_loss": reconstruction_loss,
            "kl_loss": kl_loss,
        }
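The otsu and dicriminative_error helpers are not shown on this page. The loss term 5 * (1 - sigma_b / sigma) rewards a large between-class to total variance ratio of the error map, which is the separability criterion Otsu's method maximizes. For reference, a hypothetical NumPy sketch of classic Otsu thresholding, one plausible reading of what otsu(error) computes:

import numpy as np

def otsu_sketch(error, bins=256):
    # Histogram the per-pixel errors and normalize to probabilities.
    hist, edges = np.histogram(np.asarray(error).ravel(), bins=bins)
    p = hist / hist.sum()
    centers = (edges[:-1] + edges[1:]) / 2.0
    w0 = np.cumsum(p)        # weight of the class below each candidate threshold
    w1 = 1.0 - w0            # weight of the class above it
    cum_mean = np.cumsum(p * centers)
    mu0 = cum_mean / np.where(w0 > 0, w0, 1.0)
    mu1 = (cum_mean[-1] - cum_mean) / np.where(w1 > 0, w1, 1.0)
    sigma_b = w0 * w1 * (mu0 - mu1) ** 2  # between-class variance per candidate
    return centers[np.argmax(sigma_b)]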
Example 5
    def train_step(self, data):
        with tf.GradientTape() as tape:
            features = self.srmConv2D(data)
            z_mean, z_log_var, z = self.encoder(features)
            reconstruction = self.decoder(z)

            L2 = squared_difference(features, reconstruction)
            error = tf.reduce_mean(L2, axis=-1)

            with tape.stop_recording():
                threshold = otsu(error)

            sigma = reduce_variance(error, axis=[1, 2])
            mean_0, sigma_b = dicriminative_error(error, threshold)

            reconstruction_loss = mean_0 + 5 * (1 - sigma_b / sigma)
            reconstruction_loss = tf.reduce_mean(reconstruction_loss)

            kl_loss = -0.5 * tf.reduce_mean(1 + z_log_var - tf.square(z_mean) -
                                            tf.exp(z_log_var))

            total_loss = reconstruction_loss + kl_loss
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return {
            "loss": total_loss,
            "reconstruction_loss": reconstruction_loss,
            "kl_loss": kl_loss,
        }
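The training step differs from test_step only in the GradientTape and the tape.stop_recording() block: the Otsu threshold is still computed from the error map, but it is excluded from the trace, so backpropagation treats it as a constant. A minimal sketch of that semantics (unrelated to the model above):

import tensorflow as tf

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x * x
    with tape.stop_recording():
        c = tf.reduce_max(y)  # executed, but not recorded on the tape
    loss = y / c
# The gradient flows through y only; c is held fixed at its value (9.0).
print(tape.gradient(loss, x))  # 2 * x / 9 = 0.666...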
Example 6
 def testReduceVarComplex(self):
   # Ensure that complex values are handled consistently with NumPy.
   complex_ys = [([0 - 1j, 0 + 1j], dtypes.float64),
                 (np.array([0 - 1j, 0 + 1j], "complex64"), dtypes.float32),
                 (np.array([0 - 1j, 0 + 1j], "complex128"), dtypes.float64)]
   for y, dtype in complex_ys:
     y_result = math_ops.reduce_variance(y)
     self.assertEqual(np.var(y), 1.0)
     self.assertEqual(self.evaluate(y_result), 1.0)
     self.assertEqual(y_result.dtype, dtype)
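For complex input, the variance is the mean squared magnitude of the deviations, E[|y - mean(y)|^2], so NumPy and reduce_variance both return a real result in the corresponding float dtype. A quick NumPy check:

import numpy as np

y = np.array([0 - 1j, 0 + 1j], "complex128")
variance = np.mean(np.abs(y - y.mean()) ** 2)  # |-1j|**2 and |1j|**2 average to 1.0
assert variance == 1.0 and variance.dtype == np.float64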
Example 7
  def testRandomNormalVariance(self):
     for dtype in self._random_types() & self.float_types:
         with self.session():
             with self.test_scope():
                 normal = random_ops.random_normal([1024],
                                                   dtype=dtype,
                                                   mean=2.3,
                                                   stddev=2.0)
                 variance = math_ops.reduce_variance(normal)
                 x = self.evaluate(variance)
                 self.assertAllClose(x, 4.0, rtol=1e-1, atol=1e-1)
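With 1024 draws from N(2.3, 2.0**2), the sample variance concentrates near stddev**2 = 4.0 with a standard error of roughly sigma**2 * sqrt(2/n), about 0.18, which is why the loose rtol/atol of 1e-1 is appropriate here. A NumPy sanity sketch:

import numpy as np

rng = np.random.default_rng(seed=0)
samples = rng.normal(loc=2.3, scale=2.0, size=1024)
print(np.var(samples))  # close to 4.0, typically within a few tenths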
Example 8
  def testReduceVar(self):
    x = np.array([[0, 0, 0], [0, 0, 0]], "float32")
    self.assertAllClose(self.evaluate(math_ops.reduce_variance(x)), 0)
    self.assertAllClose(
        self.evaluate(math_ops.reduce_variance(x, axis=0)), [0, 0, 0])

    x = [[1, 2, 1, 1], [1, 1, 0, 1]]
    with self.assertRaisesRegexp(TypeError, "must be either real or complex"):
      math_ops.reduce_variance(x)

    x = [[1., 2., 1., 1.], [1., 1., 0., 1.]]
    self.assertEqual(self.evaluate(math_ops.reduce_variance(x)), 0.25)
    x_np = np.array(x)
    self.assertEqual(np.var(x_np), 0.25)
    self.assertEqual(self.evaluate(math_ops.reduce_variance(x_np)), 0.25)