@classmethod
def setUpClass(cls):
  # Pick the StatsAggregator implementation that matches the running mode:
  # V2 when TF 2.x behavior is enabled, V1 (graph) otherwise.
  if tf2.enabled():
    stats_aggregator._DEFAULT_MAX_QUEUE = 0  # pylint: disable=protected-access
    stats_aggregator.StatsAggregator = stats_aggregator.StatsAggregatorV2
    # TODO(b/116314787): add graph mode support for StatsAggregatorV2.
  else:
    stats_aggregator.StatsAggregator = stats_aggregator.StatsAggregatorV1
    # V1 tests run in both graph and eager modes.
    return test_util.run_all_in_graph_and_eager_modes(cls)
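For orientation, a minimal sketch of what the selected aggregator is for, assuming the experimental tf.data stats API of the TensorFlow releases these tests target (the API was removed in later releases); the dataset and tag name here are arbitrary:

import tensorflow as tf

# Attach a StatsAggregator to a dataset so latency statistics are collected.
aggregator = tf.data.experimental.StatsAggregator()

dataset = tf.data.Dataset.range(100).apply(
    tf.data.experimental.latency_stats("record_latency"))

options = tf.data.Options()
options.experimental_stats.aggregator = aggregator  # route stats here
dataset = dataset.with_options(options)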
Example #3
        for dtype in valid_dtypes:
            tensor = tf.ones(tensor_size, dtype=dtype)

            tensor_compressed, ctx = compression.compress(tensor)
            # fp16 compression casts floating-point tensors to float16
            self.assertEqual(tensor_compressed.dtype, tf.float16)

            tensor_decompressed = compression.decompress(
                tensor_compressed, ctx)
            self.assertEqual(tensor_decompressed.dtype, dtype)

            actual = self.evaluate(tensor_decompressed)
            expected = np.ones(tensor_size)
            err = np.linalg.norm(expected - actual)
            self.assertLess(err, 0.00000001)

        for dtype in invalid_dtypes:
            tensor = tf.ones(tensor_size, dtype=dtype)

            tensor_compressed, ctx = compression.compress(tensor)
            self.assertEqual(tensor_compressed.dtype, dtype)

            tensor_decompressed = compression.decompress(
                tensor_compressed, ctx)
            self.assertEqual(tensor_decompressed.dtype, dtype)

            actual = self.evaluate(tensor_decompressed)
            expected = np.ones(tensor_size)
            err = np.linalg.norm(expected - actual)
            self.assertLess(err, 0.00000001)


if _has_eager:
    from tensorflow.python.framework.test_util import run_all_in_graph_and_eager_modes
    run_all_in_graph_and_eager_modes(MPITests)

if __name__ == '__main__':
    tf.test.main()
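Outside the test harness, the compression object exercised above is normally handed to Horovod's communication wrappers rather than called directly; a minimal sketch, assuming hvd.init() is run under an MPI/Gloo launcher:

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
compression = hvd.Compression.fp16  # floats travel as float16, then cast back

tensor = tf.ones([17, 17, 17], dtype=tf.float32)
averaged = hvd.allreduce(tensor, compression=compression)

# The same object can compress every gradient of an optimizer:
# opt = hvd.DistributedOptimizer(opt, compression=compression)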
Example #4
        local_rank = hvd.local_rank()  # used to select the GPU below
        size = hvd.size()

        def allreduce_grad_test(self, dtype, dim):
            tensor = self.random_uniform([5] * dim, -100, 100, dtype=dtype)
            averaged = hvd.allreduce(tensor, average=True)

            grad_ys = tf.ones([5] * dim, dtype=dtype)
            grad = tf.gradients(averaged, tensor, grad_ys)[0]
            return grad

        # As of TensorFlow v1.9, gradients are not supported on
        # integer tensors
        dtypes = [tf.float32, tf.float64]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            with tf.device("/gpu:%d" % local_rank):
                grad = tf.function(allreduce_grad_test,
                                   jit_compile=True)(self, dtype, dim)
                grad_out = self.evaluate(grad)
            expected = np.ones([5] * dim)
            err = np.linalg.norm(expected - grad_out)
            self.assertLess(
                err, 0.00000001, "gradient %s differs from expected %s, "
                "error: %s" % (grad_out, expected, str(err)))


from tensorflow.python.framework.test_util import run_all_in_graph_and_eager_modes
run_all_in_graph_and_eager_modes(XLATests)

if __name__ == '__main__':
    tf.test.main()
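The same pattern outside the test class, sketched for TF 2.5+ where tf.function accepts jit_compile= (earlier releases spelled it experimental_compile=); averaged_grad is a name made up for this sketch:

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()

@tf.function(jit_compile=True)  # XLA-compile the allreduce and its gradient
def averaged_grad(tensor):
    with tf.GradientTape() as tape:
        tape.watch(tensor)
        averaged = hvd.allreduce(tensor, average=True)
    # Gradient of the (implicitly summed) output, i.e. grad_ys of all ones
    return tape.gradient(averaged, tensor)

grad = averaged_grad(tf.random.uniform([5, 5], -100, 100))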
Example #5
            # Modify without committing: restore() discards the changes
            self.assign(vars1, weights2)
            state.batch = 21
            state.epoch = 11

            state.restore()

            for w1, w2 in zip(self.evaluate(vars1), weights1):
                self.assertAllClose(w1, w2)
            assert state.batch == 20
            assert state.epoch == 10

            # Partially modify then commit
            self.assign(vars1, weights2)
            state.batch = 21
            state.epoch = 11

            state.commit()
            state.restore()

            for w1, w2 in zip(self.evaluate(vars1), weights2):
                self.assertAllClose(w1, w2)
            assert state.batch == 21
            assert state.epoch == 11


if _has_eager:
    from tensorflow.python.framework.test_util import run_all_in_graph_and_eager_modes
    run_all_in_graph_and_eager_modes(TensorFlowTests)

if __name__ == '__main__':
    tf.test.main()
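For context, a sketch of how commit()/restore() fit into an elastic training loop, following the horovod.elastic pattern; model, optimizer, train_one_batch, NUM_EPOCHS, and BATCHES_PER_EPOCH are placeholders, not names from the tests above:

import horovod.tensorflow as hvd

@hvd.elastic.run
def train(state):
    # After a worker failure, surviving workers restart here from the
    # last committed state.
    for state.epoch in range(state.epoch, NUM_EPOCHS):              # placeholder bound
        for state.batch in range(state.batch, BATCHES_PER_EPOCH):   # placeholder bound
            train_one_batch(state)  # placeholder training step
            if state.batch % 10 == 0:
                state.commit()  # synced checkpoint; restore() rolls back to it
        state.batch = 0

# Extra kwargs (batch, epoch) become synchronized attributes of the state.
state = hvd.elastic.TensorFlowKerasState(model, optimizer, batch=0, epoch=0)
train(state)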