Example #1
import os
import random
from contextlib import contextmanager
from typing import Generator

import numpy as np
import tensorflow as tf

# TensorFlow-internal modules used below (not part of the public API).
from tensorflow.python.eager import context
from tensorflow.python.framework import config, ops


@contextmanager
def tensorflow_random_state(seed: int) -> Generator[None, None, None]:
    # Save values
    origin_gpu_det = os.environ.get("TF_DETERMINISTIC_OPS", None)
    orig_random_state = random.getstate()
    orig_np_random_state = np.random.get_state()
    if context.executing_eagerly():
        tf_random_seed = context.global_seed()
    else:
        tf_random_seed = ops.get_default_graph().seed

    determinism_enabled = config.is_op_determinism_enabled()
    config.enable_op_determinism()

    # Set values
    os.environ["TF_DETERMINISTIC_OPS"] = "1"
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)

    yield

    # Reset values
    if origin_gpu_det is not None:
        os.environ["TF_DETERMINISTIC_OPS"] = origin_gpu_det
    else:
        os.environ.pop("TF_DETERMINISTIC_OPS")
    random.setstate(orig_random_state)
    np.random.set_state(orig_np_random_state)
    tf.random.set_seed(tf_random_seed)
    if not determinism_enabled:
        config.disable_op_determinism()
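A quick usage sketch (hypothetical; it assumes the context manager above is applied via contextlib, as its signature and yield suggest): inside the with block every RNG source is seeded and op determinism is on, and the previous state is restored on exit.

with tensorflow_random_state(seed=42):
    # Python's random, NumPy, and TensorFlow are all seeded here and TF op
    # determinism is enabled, so this draw is reproducible across runs.
    x = tf.random.uniform([3])
# Outside the block, the saved RNG states and the previous op-determinism
# setting are back in effect.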
Example #2
 def _testDilationGradDeterminismError(self, use_gpu):
     if use_gpu and test.is_gpu_available(cuda_only=True):
         try:
             config.enable_op_determinism()
             with self.assertRaisesRegexp(
                     errors_impl.UnimplementedError,
                     "Determinism is not yet supported "
                     "for Dilation2DBackpropInput."):
                 self._ConstructAndTestGradient(image_shape=[1, 3, 3, 1],
                                                kernel_shape=[1, 1, 1],
                                                strides=[1, 1],
                                                rates=[1, 1],
                                                padding="VALID",
                                                use_gpu=use_gpu)
         finally:
             config.disable_op_determinism()
     else:
         try:
             config.enable_op_determinism()
             self._ConstructAndTestGradient(image_shape=[1, 3, 3, 1],
                                            kernel_shape=[1, 1, 1],
                                            strides=[1, 1],
                                            rates=[1, 1],
                                            padding="VALID",
                                            use_gpu=use_gpu)
         finally:
             config.disable_op_determinism()
Example #3
 def testDeterminism(self):
     # This does not test any ops are deterministic, because that is tested by
     # many kernel tests.
     try:
         config.disable_op_determinism()
         self.assertFalse(config.is_op_determinism_enabled())
         config.enable_op_determinism()
         self.assertTrue(config.is_op_determinism_enabled())
     finally:
         config.disable_op_determinism()
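Several of these test examples wrap enable_op_determinism() in the same try/finally shape. A minimal sketch of a reusable helper for that pattern, using the same TF-internal config module the tests import (the helper name op_determinism is hypothetical):

import contextlib

from tensorflow.python.framework import config


@contextlib.contextmanager
def op_determinism():
    """Enable op determinism for the duration of a with block."""
    already_enabled = config.is_op_determinism_enabled()
    config.enable_op_determinism()
    try:
        yield
    finally:
        # Only switch determinism off if it was off before entering the block.
        if not already_enabled:
            config.disable_op_determinism()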
Example #4
    def testSuccessAfterError(self):
        # Force an error on the TruncatedNormal kernel.
        config.enable_op_determinism()
        with self.assertRaisesRegex(
                errors.InvalidArgumentError,
                "When determinism is enabled, random ops must have a seed specified"
        ):
            self.evaluate(
                gen_random_ops.truncated_normal((1, ), dtypes.float32))
        config.disable_op_determinism()

        # Ensure the StdDev of the TruncatedNormal works as intended.
        self.testStdDev()
Example #5
 def test_bincount_determinism_error(self):
     num_samples = 10000
     np.random.seed(42)
     arr = np.random.randint(0, 1000, num_samples)
     try:
         config.enable_op_determinism()
         with test_util.use_gpu():
             if test_util.is_gpu_available(cuda_only=True):
                 with self.assertRaisesRegexp(
                         errors_impl.UnimplementedError,
                         "Determinism is not yet "
                         "supported for Bincount."):
                     self.evaluate(bincount_ops.bincount(arr, None))
     finally:
         config.disable_op_determinism()
Example #6
 def testDeterministicOpsErrors(self):
     try:
         config.enable_op_determinism()
         random.set_global_generator(None)
         with self.assertRaisesWithPredicateMatch(
                 RuntimeError,
                 '"get_global_generator" cannot be called if determinism is enabled'
         ):
             random.get_global_generator()
         random.set_global_generator(random.Generator.from_seed(50))
         random.get_global_generator()
         with self.assertRaisesWithPredicateMatch(
                 RuntimeError,
                 '"from_non_deterministic_state" cannot be called when determinism '
                 "is enabled."):
             random.Generator.from_non_deterministic_state()
     finally:
         config.disable_op_determinism()
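Example #6 relies on the fact that a generator created from an explicit seed is still allowed while determinism is enabled. A minimal standalone sketch of that pattern using only public APIs (TF 2.9 or newer assumed; note there is no public call to turn determinism back off, so this is whole-program usage):

import tensorflow as tf

tf.config.experimental.enable_op_determinism()

# A seeded tf.random.Generator works under determinism and yields the same
# sequence on every run.
gen = tf.random.Generator.from_seed(50)
print(gen.normal([3]))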
Example #7
 def setUp(self):
     super().setUp()
     random_seed.set_random_seed(None)
     config.enable_op_determinism()
Example #8
                # gradient injector
                bias_gradients = gradients_impl.gradients(
                    gradient_injector_output,
                    bias_val,
                    grad_ys=None,
                    colocate_gradients_with_ops=True)[0]
                for i in range(repeat_count):
                    feed_dict = {
                        upstream_gradients: self._randomNDArray(output_shape)
                    }
                    result_a = bias_gradients.eval(feed_dict=feed_dict)
                    result_b = bias_gradients.eval(feed_dict=feed_dict)
                    self.assertAllEqual(result_a, result_b)

    # TODO(duncanriach): Re-enable the following three tests for the error checks
    #   after deterministic functionality is implemented at the CUDA kernel level.
    def testInputDims(self):
        pass

    def testBiasVec(self):
        pass

    def testBiasInputsMatch(self):
        pass


if __name__ == '__main__':
    # TODO(reedwm): Merge this file with bias_op_base.py and bias_op_test.py
    config.enable_op_determinism()
    test.main()
Example #9
 def setUp(self):
     super().setUp()
     config.enable_op_determinism()
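Taken together, the fixtures above amount to the usual reproducibility recipe: seed every RNG source, then enable deterministic op implementations. A minimal end-to-end sketch with public APIs only (TF 2.9 or newer assumed):

import tensorflow as tf

tf.keras.utils.set_random_seed(1)              # seeds Python, NumPy, and TF
tf.config.experimental.enable_op_determinism()

# With the global seed set and determinism enabled, ops like this one return
# the same values on every run on the same hardware/software stack.
print(tf.random.uniform([2, 2]))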