Example #1
    def _testDoNotAccumulateInvariants(self):
        push_op = ("TensorListPushBack"
                   if control_flow_v2_toggles.control_flow_v2_enabled() else
                   "StackPushV2")

        # Tests that loop invariants, i.e., tensors that are "captured" by the
        # while loop but not passed as loop variables, are not accumulated in
        # the gradient computation.
        v = constant_op.constant(5.0, name="v")

        r = control_flow_ops.while_loop(lambda _: True,
                                        lambda x: v * x, [1.0],
                                        maximum_iterations=5)

        output = gradients_impl.gradients(r, v)[0]
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        train_op.append(output)

        g = GetOptimizedGraph()
        # The gradient of v * x requires the values of both v and x. Since v is
        # a loop invariant it is not accumulated, so there is just one
        # accumulator (for x).
        self.assertLen([n for n in g.node if n.op == push_op], 1)
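GetOptimizedGraph() is not defined in this snippet. A minimal sketch of what such a helper could do is shown below; the specific Grappler rewrite settings are assumptions, not the original code, but the calls themselves (meta_graph.create_meta_graph_def and tf_optimizer.OptimizeGraph) are existing TensorFlow internals.

from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.grappler import tf_optimizer


def GetOptimizedGraph():
  # Export the default graph and run Grappler over it, so the test can count
  # which accumulator (stack/list push) ops survive optimization.
  mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
  config = config_pb2.ConfigProto()
  config.graph_options.rewrite_options.CopyFrom(
      rewriter_config_pb2.RewriterConfig(
          constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
          memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL))
  return tf_optimizer.OptimizeGraph(config, mg)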
Example #2
  def testTensorArrayWithCondResetByExternalCaptureBreaks(self):

    if control_flow_v2_toggles.control_flow_v2_enabled():
      self.skipTest("v1 only test")

    empty_ta = tensor_array_ops.TensorArray(
        size=0, element_shape=[], dtype=dtypes.int64, dynamic_size=True)

    def scan_fn(ta, x):
      updated = ta.write(ta.size(), x)
      # Here, capture empty_ta from outside the function.  However, it may be
      # either a TF1-style TensorArray or an Eager-style TensorArray.
      next_iter = control_flow_ops.cond(
          math_ops.equal(x % 3, 0), lambda: empty_ta, lambda: updated)
      return (next_iter, updated.stack())

    start = empty_ta
    start = start.write(0, -1)

    with self.assertRaisesRegex(
        NotImplementedError,
        r"construct a new TensorArray inside the function"):
      dataset_ops.Dataset.range(6).scan(initial_state=start, scan_func=scan_fn)
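The expected error message points at constructing the TensorArray inside the traced function instead of capturing it. A hedged sketch of that pattern follows; scan_fn_ok is a hypothetical name, and this assumes a cond branch may return a TensorArray built inside the function (the original test only demonstrates the failing, externally-captured variant).

def scan_fn_ok(ta, x):
  updated = ta.write(ta.size(), x)
  # Build the "reset" TensorArray inside the function rather than capturing
  # empty_ta from the enclosing graph.
  reset = tensor_array_ops.TensorArray(
      size=0, element_shape=[], dtype=dtypes.int64, dynamic_size=True)
  next_iter = control_flow_ops.cond(
      math_ops.equal(x % 3, 0), lambda: reset, lambda: updated)
  return (next_iter, updated.stack())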
Example #3
    def testCriticalSectionWithControlFlow(self, outer_cond, inner_cond):
        if (not context.executing_eagerly()
                and control_flow_v2_toggles.control_flow_v2_enabled()):
            self.skipTest("b/135070612")
        cs = critical_section_ops.CriticalSection(shared_name="cs")
        v = resource_variable_ops.ResourceVariable(0.0, name="v")
        num_concurrent = 100

        # pylint: disable=cell-var-from-loop
        def fn(a, b):
            c = v.read_value()

            def true_fn():
                with ops.control_dependencies([c]):
                    nv = v.assign_add(a * b)
                    with ops.control_dependencies([nv]):
                        return array_ops.identity(c)

            return control_flow_ops.cond(array_ops.identity(inner_cond),
                                         true_fn, lambda: c)

        def execute():
            return cs.execute(lambda: fn(1.0, 2.0))

        r = [
            control_flow_ops.cond(array_ops.identity(outer_cond), execute,
                                  v.read_value) for _ in range(num_concurrent)
        ]
        # pylint: enable=cell-var-from-loop

        self.evaluate(v.initializer)
        r_value = self.evaluate(r)
        if inner_cond and outer_cond:
            self.assertAllClose([2.0 * i for i in range(num_concurrent)],
                                sorted(r_value))
        else:
            self.assertAllClose([0] * num_concurrent, r_value)
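For reference, the guarantee this test exercises is that cs.execute serializes the wrapped function. A minimal standalone sketch, separate from the test above (example_cs and increment are illustrative names, not from the original):

cs = critical_section_ops.CriticalSection(shared_name="example_cs")
counter = resource_variable_ops.ResourceVariable(0.0, name="counter")

def increment():
  # Read-modify-write on the shared variable; execute() holds the critical
  # section's lock, so concurrent calls cannot interleave.
  return counter.assign_add(1.0)

result = cs.execute(increment)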
Example #4
  def testDynamicLossScaleWithSlots(self, strategy_fn):
    strategy_obj = strategy_fn()
    if (isinstance(strategy_obj, mirrored_strategy.MirroredStrategy) and
        control_flow_v2_toggles.control_flow_v2_enabled() and
        not context.executing_eagerly()):
      self.skipTest('b/138667997')
    with strategy_obj.scope() as strategy:
      var = variables.Variable([1.0, 2.0])
      # An SGD optimizer with momentum has slot variables.
      opt = gradient_descent.SGD(1.0, momentum=1.)
      initial_loss_scale = 2.
      loss_scale = loss_scale_module.DynamicLossScale(
          initial_loss_scale=initial_loss_scale, increment_period=1,
          multiplier=4)
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
      loss = lambda: var / strategy.num_replicas_in_sync
      run_fn = lambda: opt.minimize(loss, var_list=[var])
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(variables.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      # The momentum accumulator starts at 0 and the gradient is 1. The
      # accumulator is incremented by the gradient, so it is now 1. Then the
      # accumulator is subtracted from the variable, so the variable decreases
      # by 1.
      self.assertAllClose([0.0, 1.0], self.evaluate(var))
      self.assertEqual(self.evaluate(opt.loss_scale()), initial_loss_scale * 4)

      run_op = strategy.experimental_run(run_fn)
      self._run_if_in_graph_mode(run_op)
      # The momentum accumulator was 1 before this step and the gradient is 1.
      # The accumulator is incremented by the gradient, so it is now 2. Then the
      # accumulator is subtracted from the variable, so the variable decreases
      # by 2.
      self.assertAllClose([-2., -1.], self.evaluate(var))
      self.assertEqual(self.evaluate(opt.loss_scale()),
                       initial_loss_scale * 16)
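A worked trace of the numbers the assertions above expect, following the update described in the comments (accumulator += gradient, then variable -= accumulator; the gradient of var / num_replicas_in_sync summed across replicas is 1):

# step 1: accum = 0 + 1 = 1  ->  var = [1.0, 2.0] - 1 = [0.0, 1.0]
#         loss scale grows by multiplier=4 every step: 2 * 4 = 8
# step 2: accum = 1 + 1 = 2  ->  var = [0.0, 1.0] - 2 = [-2.0, -1.0]
#         loss scale: 8 * 4 = 32, i.e. initial_loss_scale * 16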
Example #5
  def setUp(self):
    self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
    control_flow_v2_toggles.enable_control_flow_v2()
    super(WhileV2Test, self).setUp()
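The setUp above records the previous toggle state in self._enabled. A hypothetical matching tearDown, not shown in the original snippet, that restores the state could look like this:

  def tearDown(self):
    # Restore the toggle saved in setUp so other test classes are unaffected.
    # Assumption: disable_control_flow_v2() undoes enable_control_flow_v2();
    # this tearDown is a sketch, not part of the original test.
    if not self._enabled:
      control_flow_v2_toggles.disable_control_flow_v2()
    super(WhileV2Test, self).tearDown()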