Example #1
def testDeterminism(self):
    # This does not test that any ops are deterministic, because that is
    # tested by many kernel tests.
    try:
        config.disable_op_determinism()
        self.assertFalse(config.is_op_determinism_enabled())
        config.enable_op_determinism()
        self.assertTrue(config.is_op_determinism_enabled())
    finally:
        config.disable_op_determinism()
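A minimal usage sketch of the toggles exercised above, assuming the public TF 2.x API (`config.enable_op_determinism` is exported as `tf.config.experimental.enable_op_determinism`):

import tensorflow as tf

# Seed the global RNG and enable deterministic kernels; with both in place,
# repeated runs of the same program produce identical op results.
tf.random.set_seed(1)
tf.config.experimental.enable_op_determinism()

x = tf.random.uniform([2, 2])  # reproducible across runs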
Example #2
@contextmanager  # from contextlib; lets the generator below be used as a `with` block
def tensorflow_random_state(seed: int) -> Generator[None, None, None]:
    # Save values
    origin_gpu_det = os.environ.get("TF_DETERMINISTIC_OPS", None)
    orig_random_state = random.getstate()
    orig_np_random_state = np.random.get_state()
    if context.executing_eagerly():
        tf_random_seed = context.global_seed()
    else:
        tf_random_seed = ops.get_default_graph().seed

    determinism_enabled = config.is_op_determinism_enabled()
    config.enable_op_determinism()

    # Set values
    os.environ["TF_DETERMINISTIC_OPS"] = "1"
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)

    yield

    # Reset values
    if origin_gpu_det is not None:
        os.environ["TF_DETERMINISTIC_OPS"] = origin_gpu_det
    else:
        os.environ.pop("TF_DETERMINISTIC_OPS")
    random.setstate(orig_random_state)
    np.random.set_state(orig_np_random_state)
    tf.random.set_seed(tf_random_seed)
    if not determinism_enabled:
        config.disable_op_determinism()
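For reference, a short usage sketch of the snippet above (assuming the `contextmanager` decorator added here):

import tensorflow as tf

with tensorflow_random_state(seed=42):
    # Inside the block, Python/NumPy/TF seeds are fixed and op determinism is
    # enabled; the previous RNG state is restored when the block exits.
    noise = tf.random.normal([3])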
Example #3
    def testSecondGradient(self):
        with self.session() as sess:
            labels = constant_op.constant([3, 0, 1], name="labels")
            logits = constant_op.constant(
                [0.3, 0.4, 0.1, 1.2, 0.1, 1.9, 0.1, 0.7, 0.8, 0.2, 1.3, 1.3],
                shape=[3, 4],
                dtype=dtypes.float64,
                name="logits")

            def xent_grad(logits):
                with backprop_lib.GradientTape() as tape:
                    tape.watch(logits)
                    return tape.gradient(
                        nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
                            labels=labels, logits=logits, name="xent"),
                        [logits])[0]

            analytical, numerical = gradient_checker_v2.compute_gradient(
                xent_grad, [logits])

            if (not context.executing_eagerly()
                    and not config.is_op_determinism_enabled()):
                # Check that the second derivative is calculated.
                # (This is equivalent to a `BatchMatMul` op being present in the
                # graph, because of how the xentropy grad is implemented.)
                op_names = [
                    op.op_def.name for op in sess.graph.get_operations()
                    if op.op_def
                ]
                self.assertIn("BatchMatMulV2", op_names)

        tol = 5e-8
        self.assertAllClose(analytical, numerical, atol=tol, rtol=tol)
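The same second derivative can also be taken eagerly with nested tapes; a hedged sketch using the public `tf.GradientTape` API (values copied from the constants above):

import tensorflow as tf

labels = tf.constant([3, 0, 1])
logits = tf.constant([[0.3, 0.4, 0.1, 1.2],
                      [0.1, 1.9, 0.1, 0.7],
                      [0.8, 0.2, 1.3, 1.3]], dtype=tf.float64)

with tf.GradientTape() as outer_tape:
    outer_tape.watch(logits)
    with tf.GradientTape() as inner_tape:
        inner_tape.watch(logits)
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits)
    first_grad = inner_tape.gradient(loss, logits)
# Gradient of the (summed) first derivative, i.e. the second derivative.
second_grad = outer_tape.gradient(first_grad, logits)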
Example #4
    def testSecondGradient(self):
        with self.cached_session() as sess:
            labels = constant_op.constant([
                0.0, 0.0, 1.0 / 3, 0.0, 1.0 / 3, 0.0, 0.0, 0.0, 0.0, 0.5 / 3,
                0.0, 0.5 / 3
            ],
                                          shape=[12],
                                          dtype=dtypes.float64,
                                          name="labels")
            logits = constant_op.constant(
                [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
                shape=[12],
                dtype=dtypes.float64,
                name="logits")
            x = nn_ops.softmax_cross_entropy_with_logits(labels=labels,
                                                         logits=logits,
                                                         name="xent")
            loss = math_ops.reduce_sum(x)

            gradients = gradients_impl.gradients(loss, [logits])[0]

            err = gradient_checker.compute_gradient_error(
                logits, [12], gradients, [12])

            if not config.is_op_determinism_enabled():
                # Check how the second derivative is calculated.
                # (It is equivalent to a `BatchMatMul` op being in the graph
                # because of the implementation in SoftmaxCrossEntropyWithLogitsGrad.)
                op_names = [
                    op.op_def.name for op in sess.graph.get_operations()
                    if op.op_def
                ]
                self.assertIn("BatchMatMulV2", op_names)

        self.assertLess(err, 5e-8)
Example #5
def get_seed(op_seed):
    """Returns the local seeds an operation should use given an op-specific seed.

  Given operation-specific seed, `op_seed`, this helper function returns two
  seeds derived from graph-level and op-level seeds. Many random operations
  internally use the two seeds to allow user to change the seed globally for a
  graph, or for only specific operations.

  For details on how the graph-level seed interacts with op seeds, see
  `tf.compat.v1.random.set_random_seed`.

  Args:
    op_seed: integer.

  Returns:
    A tuple of two integers that should be used for the local seed of this
    operation.
  """
    eager = context.executing_eagerly()

    if eager:
        global_seed = context.global_seed()
    else:
        global_seed = ops.get_default_graph().seed

    if global_seed is not None:
        if op_seed is None:
            # pylint: disable=protected-access
            if hasattr(ops.get_default_graph(), '_seed_used'):
                ops.get_default_graph()._seed_used = True
            if eager:
                op_seed = context.internal_operation_seed()
            else:
                op_seed = _graph_to_seed_dict.setdefault(
                    ops.get_default_graph(), 0)
                _graph_to_seed_dict[ops.get_default_graph()] += 1

        seeds = _truncate_seed(global_seed), _truncate_seed(op_seed)
    else:
        if op_seed is not None:
            seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)
        else:
            seeds = None, None

    if seeds == (None, None) and config.is_op_determinism_enabled():
        raise RuntimeError(  # pylint: disable=g-doc-exception
            'Random ops require a seed to be set when determinism is enabled. '
            'Please set a seed before running the op, e.g. by calling '
            'tf.random.set_seed(1).')

    # Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would
    # be unexpected since Python docs say nondeterminism is (None, None).
    if seeds == (0, 0):
        return (0, _MAXINT32)
    return seeds
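A sketch of the error path described above, assuming the public `tf.config.experimental.enable_op_determinism` / `tf.random.set_seed` API:

import tensorflow as tf

tf.config.experimental.enable_op_determinism()

try:
    tf.random.uniform([2])  # no global or op seed -> get_seed() sees (None, None)
except RuntimeError as e:
    print(e)  # "Random ops require a seed to be set when determinism is enabled..."

tf.random.set_seed(1)        # sets the global (graph-level) seed
ok = tf.random.uniform([2])  # get_seed() now returns derived integer seeds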
Example #6
def get_global_generator():
  """Retrieves the global generator.

  This function will create the global generator the first time it is called,
  and the generator will be placed at the default device at that time, so one
  needs to be careful when this function is first called. Using a generator
  placed on a less-ideal device will incur performance regression.

  Returns:
    The global `tf.random.Generator` object.
  """
  global global_generator
  if global_generator is None:
    if config.is_op_determinism_enabled():
      raise RuntimeError('"get_global_generator" cannot be called if '  # pylint: disable=g-doc-exception
                         "determinism is enabled, unless "
                         '"set_global_generator" has already been called. '
                         'Please call "set_global_generator" first.')
    with ops.init_scope():
      global_generator = Generator.from_non_deterministic_state()
  return global_generator
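With determinism enabled, the global generator therefore has to be installed explicitly before it is retrieved; a sketch using the public names (`tf.random.set_global_generator` and `tf.random.Generator.from_seed` assumed):

import tensorflow as tf

tf.config.experimental.enable_op_determinism()

# Install a deterministically-seeded generator so that get_global_generator()
# never has to create one from non-deterministic state.
tf.random.set_global_generator(tf.random.Generator.from_seed(1234))
g = tf.random.get_global_generator()
sample = g.normal(shape=[2, 2])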
Example #7
  @classmethod
  def from_non_deterministic_state(cls, alg=None):
    """Creates a generator by non-deterministically initializing its state.

    The source of the non-determinism will be platform- and time-dependent.

    Args:
      alg: (optional) the RNG algorithm. If None, it will be auto-selected. See
        `__init__` for its possible values.

    Returns:
      The new generator.
    """
    if config.is_op_determinism_enabled():
      raise RuntimeError('"from_non_deterministic_state" cannot be called when '  # pylint: disable=g-doc-exception
                         "determinism is enabled.")
    if alg is None:
      # TODO(b/170668986): more sophisticated algorithm selection
      alg = DEFAULT_ALGORITHM
    alg = stateless_random_ops.convert_alg_to_int(alg)
    state = non_deterministic_ints(shape=[_get_state_size(alg)],
                                   dtype=SEED_TYPE)
    return cls(state=state, alg=alg)
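Conversely, when determinism is not required, the non-deterministic constructor can be used directly; a brief sketch:

import tensorflow as tf

# Only valid while op determinism is disabled; otherwise the RuntimeError
# above is raised.
g = tf.random.Generator.from_non_deterministic_state()
print(g.uniform([3]))  # different on every run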
Example #8
  def testGradRandomBoxes(self):
    """Test that the gradient is correct for randomly generated boxes.

    The mapping is piecewise differentiable with respect to the box coordinates.
    The points where the function is not differentiable are those which are
    mapped to image pixels, i.e., the normalized y coordinates in
    np.linspace(0, 1, image_height) and normalized x coordinates in
    np.linspace(0, 1, image_width). Make sure that the box coordinates are
    sufficiently far away from those rectangular grid centers that are points of
    discontinuity, so that the finite difference Jacobian is close to the
    computed one.
    """
    np.random.seed(1)  # Make it reproducible.
    delta = 1e-3
    radius = 2 * delta
    low, high = -0.5, 1.5  # Also covers the case of extrapolation.

    image_height = 4
    for image_width in range(1, 3):
      for crop_height in range(1, 3):
        for crop_width in range(2, 4):
          for depth in range(1, 3):
            for num_boxes in range(1, 3):

              batch = num_boxes
              image_shape = [batch, image_height, image_width, depth]
              crop_size = [crop_height, crop_width]

              image = np.arange(0, batch * image_height * image_width *
                                depth).reshape(image_shape).astype(np.float32)
              boxes = []
              for _ in range(num_boxes):
                # pylint: disable=unbalanced-tuple-unpacking
                y1, y2 = self._randomUniformAvoidAnchors(
                    low, high, np.linspace(0, 1, image_height), radius, 2)
                x1, x2 = self._randomUniformAvoidAnchors(
                    low, high, np.linspace(0, 1, image_width), radius, 2)
                # pylint: enable=unbalanced-tuple-unpacking
                boxes.append([y1, x1, y2, x2])

              boxes = np.array(boxes, dtype=np.float32)
              box_ind = np.arange(batch, dtype=np.int32)

              image_tensor = constant_op.constant(image, shape=image_shape)
              boxes_tensor = constant_op.constant(boxes, shape=[num_boxes, 4])
              box_ind_tensor = constant_op.constant(box_ind, shape=[num_boxes])

              def crop_resize(image_tensor, boxes_tensor):
                # pylint: disable=cell-var-from-loop
                return image_ops.crop_and_resize(
                    image_tensor, boxes_tensor, box_ind_tensor,
                    constant_op.constant(crop_size, shape=[2]))

              with test_util.device(use_gpu=True):
                with self.cached_session():
                  # pylint: disable=cell-var-from-loop
                  if (config.is_op_determinism_enabled() and
                      test_util.is_gpu_available()):
                    with self.assertRaises(errors_impl.UnimplementedError):
                      gradient_checker_v2.compute_gradient(
                          lambda x: crop_resize(x, boxes_tensor),
                          [image_tensor])
                    with self.assertRaises(errors_impl.UnimplementedError):
                      gradient_checker_v2.compute_gradient(
                          lambda x: crop_resize(image_tensor, x),
                          [boxes_tensor])
                  else:
                    err1 = gradient_checker_v2.max_error(
                        *gradient_checker_v2.compute_gradient(
                            lambda x: crop_resize(x, boxes_tensor),
                            [image_tensor]))
                    err2 = gradient_checker_v2.max_error(
                        *gradient_checker_v2.compute_gradient(
                            lambda x: crop_resize(image_tensor, x),
                            [boxes_tensor]))
                    err = max(err1, err2)
                    self.assertLess(err, 2e-3)
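`self._randomUniformAvoidAnchors` is not shown above; the sketch below is a hypothetical reconstruction of the sampling idea described in the docstring (the name, signature, and rejection strategy are assumptions inferred from the call sites):

import numpy as np

def random_uniform_avoid_anchors(low, high, anchors, radius, num_samples):
    # Hypothetical helper: draw uniform samples in [low, high], rejecting any
    # value that lands within `radius` of an anchor point (a point where
    # crop_and_resize is not differentiable), and return them sorted.
    samples = []
    while len(samples) < num_samples:
        value = np.random.uniform(low, high)
        if np.all(np.abs(np.asarray(anchors) - value) > radius):
            samples.append(value)
    return np.sort(samples)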