Example #1
    def _worker_fn(task_type, task_id, num_gpus):
      del num_gpus
      tf_config = {
          "cluster": self._cluster_spec,
          "task": {
              "type": task_type,
              "index": task_id
          }
      }
      with context.graph_mode(), lock, test.mock.patch.dict(
          "os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
        strategy = strategy_cls()
      with context.graph_mode(), strategy.scope(), self.cached_session(
          target="grpc://" + self._cluster_spec[task_type][task_id]) as sess:
        if tf2.enabled():
          dataset_fn = lambda _: dataset_ops.DatasetV2.range(5).batch(2)
        else:
          dataset_fn = lambda _: dataset_ops.Dataset.range(5).batch(2)
        if (input_type == "dataset" and strategy_cls is
            collective_all_reduce_strategy.CollectiveAllReduceStrategy):
          # Autosharded
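          # range(5).batch(2) yields batches [0, 1], [2, 3], [4]; auto-sharding
          # hands them out round-robin across the two input pipelines, and
          # worker 1's trailing empty batch is the get_next_as_optional padding.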
          if task_id == 0:
            expected_values = [[[0, 1]], [[4]]]
          else:
            expected_values = [[[2, 3]], [[]]]

          # input_context is for between-graph auto-sharding.
          input_context = distribute_lib.InputContext(
              num_input_pipelines=2,
              input_pipeline_id=task_id,
              num_replicas_in_sync=2)
        else:
          expected_values = [[[0, 1]], [[2, 3]], [[4]]]
          input_context = None

        self._test_input_iteration(
            input_type,
            api_type,
            iteration_type,
            dataset_fn,
            [("/job:%s/task:%d" %
              (task_type, task_id), strategy.extended.worker_devices)],
            expected_values,
            strategy,
            sess=sess,
            enable_get_next_as_optional=True,
            input_context=input_context)
        return True
Example #2
  def testInitializableIterator(self):
    with context.graph_mode():
      devices = ["/device:CPU:0"]
      # Using random input since that is only allowed with initializable
      # iterator.
      dataset = dataset_ops.Dataset.from_tensor_slices(
          random_ops.random_uniform((10,)))

      per_device_dataset = values.PerDeviceDataset(
          dataset, devices, prefetch_on_device=False)
      iterator = per_device_dataset.make_initializable_iterator()

      self.evaluate(iterator.initializer)
      next_element = iterator.get_next()
      for _ in range(10):
        self.evaluate(next_element)

      # Should fail after the input is finished.
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)

      # After re-initializing the iterator, should be able to iterate again.
      self.evaluate(iterator.initializer)
      for _ in range(10):
        self.evaluate(next_element)
Example #3
 def testDataDistributionNoAutoShard(self):
   worker_devices, devices = self._cpu_devices()
   with context.graph_mode():
     dataset_fn = lambda: dataset_ops.Dataset.range(4)
     self._test_dataset(dataset_fn, worker_devices, devices,
                        [[0, 0], [1, 1], [2, 2], [3, 3]],
                        auto_shard=False)
Example #4
  def test_training_no_default(self):

    with context.graph_mode():
      model = TrainingNoDefaultModel()
      arg = array_ops.ones([1, 1])
      model(arg, True)
      six.assertCountEqual(self, [arg], model.inputs)
Example #5
 def testDataDistributionOneDevicePerWorker(self):
   self.skipTest("Temporarily disabled.")
   worker_device_map, devices = self._cpu_devices()
   with context.graph_mode():
     dataset_fn = lambda: dataset_ops.Dataset.range(8)
     self._test_dataset(dataset_fn, worker_device_map, devices,
                        [[0, 1], [2, 3], [4, 5], [6, 7]])
Example #6
def _defun_internal(name, func, args, kwds):
  """Defines and returns graph-mode version of func."""
  graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
  with context.graph_mode():
    captures = {}
    tmp_graph = CapturingGraph(captures)
    # Inherit the graph key, since this is used for matching variables in
    # optimizers.
    tmp_graph._graph_key = graph_key  # pylint: disable=protected-access
    # Copy the graph collections to ensure summaries and other things work. This
    # lets the function access (but not mutate) collections of the containing
    # graph, such as the global step and the summary writer collections.
    curr_graph = ops.get_default_graph()
    for collection in curr_graph.collections:
      tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
          collection)
    with tmp_graph.as_default():
      func_inputs = _get_defun_inputs(args)

      with capture_tensors(captures):
        this_tape = tape.push_new_tape()
        try:
          func_outputs = func(*func_inputs, **kwds)
        finally:
          tape.pop_tape(this_tape)
        variables = this_tape.watched_variables()

        # Returning a closed-over tensor as an output does not trigger a
        # call to convert_to_tensor, so we manually capture all such tensors.
        outputs_list = _flatten(func_outputs)
        func_def_outputs = [
            _convert_to_graph_tensor(x) for x in outputs_list if x is not None
        ]

      ids = list(sorted(captures.keys()))
      if ids:
        extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
      else:
        extra_inputs = []
        extra_placeholders = []
      output_shapes = tuple(
          x.shape if isinstance(x, ops.Tensor) else None
          for x in outputs_list)

  flat_inputs = [x for x in nest.flatten(func_inputs)
                 if isinstance(x, ops.Tensor)]
  all_inputs = flat_inputs + list(extra_placeholders)
  all_ignored_ops = frozenset(x.op for x in all_inputs)
  fname = _inference_name(name)
  operations = tuple(x for x in tmp_graph.get_operations()
                     if x not in all_ignored_ops)
  # Register any other functions defined in the graph
  # TODO(ashankar): Oh lord, forgive me for this lint travesty.
  if context.in_eager_mode():
    for f in tmp_graph._functions.values():  # pylint: disable=protected-access
      # TODO(ashankar): What about the gradient registry?
      _register(f._c_func)  # pylint: disable=protected-access
  return GraphModeFunction(
      fname, all_inputs, extra_inputs, tmp_graph, operations, func_def_outputs,
      func_outputs, output_shapes, variables)
Example #7
def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
  """Creates a variable handle with information to do shape inference."""
  container = ops.get_default_graph()._container  # pylint: disable=protected-access
  if container is None:
    container = ""
  handle = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                   shared_name=shared_name,
                                                   name=name,
                                                   container=container)
  if graph_mode:
    return handle

  # We do not want two distinct ResourceVariable objects for the same
  # underlying resource in the runtime.
  # When in eager mode, explicitly ensure so here. When in graph mode, it's
  # ensured by always generating different variable names.
  exists = gen_resource_variable_ops.var_is_initialized_op(handle)
  if exists:
    raise ValueError("variable object with name '%s' already created. Use "
                     "get_variable() if reuse is desired." %
                     shared_name)
  with context.graph_mode(), ops.Graph().as_default():
    h = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                shared_name=shared_name,
                                                name=name,
                                                container=container)

    # Tensor._handle_data contains information for the shape-inference code to
    # know the shape and dtype of the variable pointed to by a handle. Since
    # shape inference doesn't run in eager mode we copy this data here for when
    # the handle is captured by an eager mode function.
    handle._handle_data = h._handle_data  # pylint: disable=protected-access
  return handle
Example #8
 def testAllV2SummaryOps(self):
   logdir = self.get_temp_dir()
   def define_ops():
     result = []
     # TF 2.0 summary ops
     result.append(summary_ops.write('write', 1, step=0))
     result.append(summary_ops.write_raw_pb(b'', step=0, name='raw_pb'))
     # TF 1.x tf.contrib.summary ops
     result.append(summary_ops.generic('tensor', 1, step=1))
     result.append(summary_ops.scalar('scalar', 2.0, step=1))
     result.append(summary_ops.histogram('histogram', [1.0], step=1))
     result.append(summary_ops.image('image', [[[[1.0]]]], step=1))
     result.append(summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1))
     return result
   with context.graph_mode():
     ops_without_writer = define_ops()
     with summary_ops.create_file_writer_v2(logdir).as_default():
       with summary_ops.record_if(True):
         ops_recording_on = define_ops()
       with summary_ops.record_if(False):
         ops_recording_off = define_ops()
     # We should be collecting all ops defined with a default writer present,
     # regardless of whether recording was set on or off, but not those defined
     # without a writer at all.
     del ops_without_writer
     expected_ops = ops_recording_on + ops_recording_off
     self.assertCountEqual(expected_ops, summary_ops.all_v2_summary_ops())
Example #9
  def decorated(self, **kwargs):
    """A wrapped test method that treats some arguments in a special way."""
    mode = kwargs.pop("mode", "graph")

    distribution = kwargs.get("distribution", None)
    required_tpu = kwargs.pop("required_tpu", False)
    required_gpus = kwargs.pop("required_gpus", None)

    if distribution:
      assert required_gpus is None, (
          "Do not use `required_gpus` and `distribution` together.")
      assert required_tpu is False, (
          "Do not use `required_tpu` and `distribution` together.")
      required_gpus = distribution.required_gpus
      required_tpu = distribution.required_tpu

    if required_tpu and not TPU_TEST:
      self.skipTest("Test requires a TPU, but it's not available.")
    if not required_tpu and TPU_TEST:
      self.skipTest("Test that doesn't require a TPU.")

    if not required_gpus:
      if GPU_TEST:
        self.skipTest("Test that doesn't require GPUs.")
    elif context.num_gpus() < required_gpus:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(required_gpus, context.num_gpus()))

    # At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
    # that the user might have specified.  `kwargs` still has `mode`, which
    # the test is allowed to accept or ignore.
    requested_arguments = tf_inspect.getfullargspec(test_method).args
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_arguments + ["mode"]))
    if missing_arguments:
      raise ValueError("The test is missing arguments {} .".format(
          missing_arguments))

    kwargs_to_pass = {}
    for arg in requested_arguments:
      if arg == "self":
        kwargs_to_pass[arg] = self
      else:
        kwargs_to_pass[arg] = kwargs[arg]

    if mode == "eager":
      with ops.Graph().as_default(), context.eager_mode():
        if distribution:
          kwargs_to_pass["distribution"] = distribution.strategy
        test_method(**kwargs_to_pass)
    elif mode == "graph":
      with ops.Graph().as_default(), context.graph_mode():
        if distribution:
          kwargs_to_pass["distribution"] = distribution.strategy
        test_method(**kwargs_to_pass)
    else:
      raise ValueError(
          "'mode' has to be either 'eager' or 'graph' and not {}".format(
              mode))
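The wrapper above matches the shape of the test `combinations` framework used in TF's distribute tests; a hedged usage sketch follows (the decorator spelling and the test body are illustrative assumptions, not taken from the snippet):

  # Hypothetical usage: `mode` and `required_gpus` are consumed by the
  # wrapper above, which then runs the test under graph or eager mode.
  @combinations.generate(
      combinations.combine(mode=["graph", "eager"], required_gpus=2))
  def testSomething(self):
    self.assertTrue(True)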
Example #10
  def testCreatorStacksAreThreadLocal(self):
    devices = ["/device:CPU:0", "/device:GPU:0"]
    dist = mirrored_strategy.MirroredStrategy(devices)

    def model_fn(device_id):
      assert isinstance(device_id, int)
      def thread_creator_fn(next_creator, *args, **kwargs):
        return next_creator(*args, **kwargs) + ":thread_" + str(device_id)

      with variable_scope.variable_creator_scope(thread_creator_fn):
        # Create a variable in this scope.
        v = variable_scope.variable(1.0)

        # This will pause the current thread, and execute the other thread.
        distribute_lib.get_tower_context().merge_call(lambda _: _)
      return v

    def main_thread_creator(next_creator, *args, **kwargs):
      # We are not using the underlying next_creator for test purposes.
      del next_creator, args, kwargs
      return "main_thread"

    with context.graph_mode(), \
        dist.scope(), \
        variable_scope.variable_creator_scope(main_thread_creator):
      result = dist.call_for_each_tower(model_fn, dist.worker_device_index)
      result = dist.unwrap(result)
      expected = ["main_thread:thread_0", "main_thread:thread_1"]
      self.assertEqual(expected, result)
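The creator chaining above can be exercised outside a distribution strategy; a minimal sketch, assuming the same variable_scope import as the test:

    # Each creator receives the next creator in the chain and may transform
    # its result; this is how thread_creator_fn composes with
    # main_thread_creator in the test above.
    def logging_creator(next_creator, *args, **kwargs):
      v = next_creator(*args, **kwargs)
      print("created:", v)
      return v

    with variable_scope.variable_creator_scope(logging_creator):
      v = variable_scope.variable(1.0)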
Example #11
  def testCondNested(self):
    with context.graph_mode(), self.test_session():
      v = resource_variable_ops.ResourceVariable(1.0)
      variables.global_variables_initializer().run()
      p = array_ops.placeholder(dtype=dtypes.bool)
      q = array_ops.placeholder(dtype=dtypes.bool)
      with function.AutomaticControlDependencies() as c:

        def true_fn():
          v.assign(v + 1, name='true')
          return 1.0

        def false_fn():

          def inner_true_fn():
            v.assign(v * 2, name='false_true')
            return 2.0

          def inner_false_fn():
            v.assign(v * 3, name='false_false')
            return 3.0

          control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
          return 1.0

        control_flow_ops.cond(p, true_fn, false_fn)
        with ops.name_scope('final'):
          val = v.read_value()
        val = c.mark_as_return(val)
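      # The four feeds below run against the same variable in one session, so
      # the expected values compound: v starts at 1.0, then 1*3=3 (p=F, q=F),
      # 3*2=6 (p=F, q=T), 6+1=7 (p=T, q=T), and 7+1=8 (p=T, q=F).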
      self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0)
      self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0)
      self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
      self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
Example #12
 def testWrite_usingDefaultStepVariable_fromLegacyGraph(self):
   logdir = self.get_temp_dir()
   try:
     with context.graph_mode():
       writer = summary_ops.create_file_writer(logdir)
       mystep = variables.Variable(0, dtype=dtypes.int64)
       summary_ops.set_step(mystep)
       with writer.as_default():
         write_op = summary_ops.write('tag', 1.0)
       first_assign_op = mystep.assign_add(1)
       second_assign_op = mystep.assign(10)
       with self.cached_session() as sess:
         sess.run(writer.init())
         sess.run(mystep.initializer)
         sess.run(write_op)
         sess.run(first_assign_op)
         sess.run(write_op)
         sess.run(second_assign_op)
         sess.run(write_op)
         sess.run(writer.flush())
     events = events_from_logdir(logdir)
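     # events[0] is the writer's file-version record, so the three writes
     # appear at events[1..3].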
     self.assertEqual(4, len(events))
     self.assertEqual(0, events[1].step)
     self.assertEqual(1, events[2].step)
     self.assertEqual(10, events[3].step)
   finally:
     # Reset to default state for other tests.
     summary_ops.set_step(None)
Example #13
  def testSerialize(self):
    # pylint: disable=g-import-not-at-top
    try:
      import portpicker
    except ImportError:
      return
    with context.graph_mode():
      worker_port = portpicker.pick_unused_port()
      ps_port = portpicker.pick_unused_port()
      cluster_dict = {
          "worker": ["localhost:%s" % worker_port],
          "ps": ["localhost:%s" % ps_port]
      }
      cs = server_lib.ClusterSpec(cluster_dict)

      worker = server_lib.Server(
          cs, job_name="worker", protocol="grpc", task_index=0, start=True)
      unused_ps = server_lib.Server(
          cs, job_name="ps", protocol="grpc", task_index=0, start=True)
      with ops.Graph().as_default(), session.Session(target=worker.target):
        with ops.device("/job:worker"):
          t = constant_op.constant([[1.0], [2.0]])
          l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
        with ops.device("/job:ps"):
          l_ps = array_ops.identity(l)
          l_ps, e = list_ops.tensor_list_pop_back(
              l_ps, element_dtype=dtypes.float32)
        with ops.device("/job:worker"):
          worker_e = array_ops.identity(e)
        self.assertAllEqual(worker_e.eval(), [2.0])
Example #14
 def testTrace_cannotExportTraceInGraphMode(self):
   with test.mock.patch.object(logging, 'warn') as mock_log:
     with context.graph_mode():
       summary_ops.trace_export(name='foo', step=1)
     self.assertRegexpMatches(
         str(mock_log.call_args),
         'Can only export trace while executing eagerly.')
Example #15
def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
  """Creates a variable handle with information to do shape inference."""
  container = ops.get_default_graph()._container  # pylint: disable=protected-access
  if container is None:
    container = ""
  handle = resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                               shared_name=shared_name,
                                               name=name,
                                               container=container)
  if graph_mode:
    return handle

  with context.graph_mode(), ops.Graph().as_default() as graph:
    h = resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                            shared_name=shared_name,
                                            name=name,
                                            container=container)

    # Tensor._handle_data contains information for the shape-inference code to
    # know the shape and dtype of the variable pointed to by a handle. Since
    # shape inference doesn't run in eager mode we copy this data here for when
    # the handle is captured by an eager mode function.
    # pylint: disable=protected-access
    handle._handle_data = resource_variable_ops.get_resource_handle_data(h)
    # pylint: enable=protected-access
  # Clean up op->graph->op reference cycles.
  ops.dismantle_graph(graph)
  return handle
Example #16
  def testTowerLocalVariableUpdate(self):
    with context.graph_mode():

      def model_fn():
        tower_context = distribute_lib.get_tower_context()
        with tower_context.tower_local_var_scope("sum"):
          v_sum = variable_scope.variable(1.0)
        self.assertTrue(isinstance(v_sum, values.TowerLocalVariable))
        return v_sum

      dist = mirrored_strategy.MirroredStrategy(
          ["/device:GPU:0", "/device:GPU:1"])

      def update(var, value):
        return var.assign(value)

      with dist.scope():
        ret_v_sum = dist.call_for_each_tower(model_fn, run_concurrently=False)
        update_ops = dist.unwrap(dist.update(ret_v_sum, update, 5.0))

        # Initialize variables.
        self.evaluate(variables.global_variables_initializer())
        # Assert that the aggregated value of the tower local vars is the sum of
        # the individual values before running the update ops.
        self.assertEqual(1.0, self.evaluate(
            ret_v_sum.get(dist._devices[0]).read_value()))
        self.assertEqual(2.0, self.evaluate(ret_v_sum))

        # Apply updates.
        self.evaluate(update_ops)
        # Assert that the aggregated value of the tower local vars is the sum of
        # the individual values after running the update ops.
        self.assertEqual(5.0, self.evaluate(
            ret_v_sum.get(dist._devices[0]).read_value()))
        self.assertEqual(10.0, self.evaluate(ret_v_sum))
Example #17
  def test_training_no_default(self):

    with context.graph_mode():
      model = TrainingNoDefaultModel()
      arg = array_ops.ones([1, 1])
      model(arg, True)
      self.assertEqual(len(model.inputs), 1)
Example #18
def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
  """Creates a variable handle with information to do shape inference."""
  container = ops.get_default_graph()._container  # pylint: disable=protected-access
  if container is None:
    container = ""
  handle = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                   shared_name=shared_name,
                                                   name=name,
                                                   container=container)
  if graph_mode:
    return handle

  # We do not want two distinct ResourceVariable objects for the same
  # underlying resource in the runtime.
  # When in eager mode, explicitly ensure so here. When in graph mode, it's
  # ensured by always generating different variable names.
  exists = gen_resource_variable_ops.var_is_initialized_op(handle)
  if exists:
    raise ValueError("variable object with name '%s' already created. Use "
                     "get_variable() if reuse is desired." %
                     shared_name)
  with context.graph_mode(), ops.Graph().as_default() as graph:
    h = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                shared_name=shared_name,
                                                name=name,
                                                container=container)

    # Tensor._handle_data contains information for the shape-inference code to
    # know the shape and dtype of the variable pointed to by a handle. Since
    # shape inference doesn't run in eager mode we copy this data here for when
    # the handle is captured by an eager mode function.
    # pylint: disable=protected-access
    if ops._USE_C_SHAPES:
      handle._handle_data = get_resource_handle_data(h)
    else:
      if h._handle_data is None:
        ops.set_shape_and_handle_data_for_outputs(h.op)
      handle._handle_data = h._handle_data
    # pylint: enable=protected-access

  # Clean up our reference cycles to avoid making the garbage collector run.
  # pylint: disable=protected-access
  # OrderedDict, constructed on Graph creation, makes a simple reference loop
  # and hides it in an __attribute in some Python versions. We don't need to
  # throw an error if we can't find it, but if we do find it we can break the
  # loop to avoid creating work for the garbage collector.
  problematic_cycle = graph._functions.__dict__.get("_OrderedDict__root", None)
  # pylint: enable=protected-access
  if problematic_cycle:
    try:
      del problematic_cycle[0][:]
    except TypeError:
      # This is probably not one of the problematic Python versions. Continue
      # with the rest of our cleanup.
      pass
  # Now clean up our own reference cycles by clearing all of the attributes for
  # the Graph and op we created.
  h.__dict__ = {}
  graph.__dict__ = {}
  return handle
Example #19
  def testNameScopeWithGetVariable(self):
    def in_cross_tower(_):
      c = variable_scope.get_variable("c", [1])
      return c

    def model_fn():
      b = variable_scope.get_variable("b", [1])
      with ops.name_scope("foo"):
        c = distribute_lib.get_tower_context().merge_call(in_cross_tower)
      return b, c

    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])

    with context.graph_mode(), dist.scope():
      with ops.name_scope("main"):
        a = variable_scope.get_variable("a", [1])
        result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = dist.unwrap(a)
      b0, b1 = dist.unwrap(result_b)
      c0, c1 = dist.unwrap(result_c)
      self.assertEqual("a:0", a0.name)
      self.assertEqual("a/replica_1:0", a1.name)
      self.assertEqual("b:0", b0.name)
      self.assertEqual("b/replica_1:0", b1.name)
      self.assertEqual("c:0", c0.name)
      self.assertEqual("c/replica_1:0", c1.name)
Example #20
 def testUsageGraph(self):
   """Expected usage when graph building."""
   with context.graph_mode():
     num_training_steps = 10
     checkpoint_directory = self.get_temp_dir()
     checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
     for training_continuation in range(3):
       with ops.Graph().as_default():
         model = MyModel()
         optimizer = adam.AdamOptimizer(0.001)
         root = util.Checkpoint(
             optimizer=optimizer, model=model,
             global_step=training_util.get_or_create_global_step())
         input_value = constant_op.constant([[3.]])
         train_op = optimizer.minimize(
             model(input_value),
             global_step=root.global_step)
         checkpoint_path = checkpoint_management.latest_checkpoint(
             checkpoint_directory)
         with self.session(graph=ops.get_default_graph()) as session:
           status = root.restore(save_path=checkpoint_path)
           status.initialize_or_restore(session=session)
           if checkpoint_path is None:
             self.assertEqual(0, training_continuation)
             with self.assertRaises(AssertionError):
               status.assert_consumed()
           else:
             status.assert_consumed()
           for _ in range(num_training_steps):
             session.run(train_op)
           root.save(file_prefix=checkpoint_prefix, session=session)
           self.assertEqual((training_continuation + 1) * num_training_steps,
                            session.run(root.global_step))
           self.assertEqual(training_continuation + 1,
                            session.run(root.save_counter))
Example #21
    def decorated(self):
      """Decorated the test method."""
      with context.graph_mode():
        with self.test_session(graph, config, use_gpu, force_gpu):
          f(self)

      if reset_test:
        # This decorator runs the wrapped test twice.
        # Reset the test environment between runs.
        self.tearDown()
        self.setUp()

      def run_eager_mode():
        if force_gpu:
          gpu_name = gpu_device_name()
          if not gpu_name:
            gpu_name = "/device:GPU:0"
          with context.device(gpu_name):
            f(self)
        elif use_gpu:
          # TODO(xpan): Support softplacement and gpu by default when available.
          f(self)
        else:
          with context.device("/device:CPU:0"):
            f(self)

      eager_graph = graph or ops.Graph()
      with context.eager_mode():
        with eager_graph.as_default():
          run_eager_mode()
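This decorator body has the shape of TF's run_in_graph_and_eager_modes test helper; a hedged usage sketch (the decorator name is assumed from that helper, and the test body is illustrative):

  # Hypothetical usage: the wrapped body runs twice, first in a graph-mode
  # test session and then eagerly; self.evaluate() works in both modes.
  @test_util.run_in_graph_and_eager_modes()
  def testAddition(self):
    x = constant_op.constant(1.0)
    self.assertAllEqual(2.0, self.evaluate(x + x))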
Example #22
    def decorated(self):
      """Decorated the test method."""
      with context.graph_mode():
        with self.test_session(graph, config, use_gpu, force_gpu):
          f(self)

      def run_eager_mode():
        if force_gpu:
          gpu_name = gpu_device_name()
          if not gpu_name:
            gpu_name = "/device:GPU:0"
          with context.device(gpu_name):
            f(self)
        elif use_gpu:
          # TODO(xpan): Support softplacement and gpu by default when available.
          f(self)
        else:
          with context.device("/device:CPU:0"):
            f(self)

      with context.eager_mode():
        if graph is None:
          run_eager_mode()
        else:
          with graph.as_default():
            run_eager_mode()
Example #23
 def _compute_backprop(self):
   """Computes the backprop function object for this function."""
   self._has_backprop = True
   with self._graph.as_default(), context.graph_mode():
     c = _CapturingContext()
     with c:
       filtered_outputs = [x for x in self._returns if x is not None]
       self._out_grad_placeholders = [
           graph_placeholder(x.dtype, x.shape) for x in filtered_outputs]
       in_gradients = gradients_impl.gradients(
           filtered_outputs,
           self._input_placeholders,
           grad_ys=self._out_grad_placeholders)
       shapes = tuple(x.shape for x in in_gradients if x is not None)
   captures = list(sorted(c.captured_tensors, key=lambda x: x.name))
   forward_name = _forward_name(self._func_name)
   self._forward_fdef = _EagerDefinedFunction(
       forward_name, self._graph, self._ops, self._input_placeholders,
       filtered_outputs + captures)
   backward_outputs = tuple(x for x in in_gradients if x is not None)
   all_inputs = self._out_grad_placeholders + captures
   # Excluding input ops from the body as we do not intend to execute these
   # operations when the function is executed.
   all_ignored_ops = frozenset(x.op for x in all_inputs)
   # Enforce a deterministic order of operations in the generated graph. This
   # means rerunning the function-defining code will always define the same
   # function, which is useful if we serialize this etc.
   function_def_ops = tuple(x
                            for x in sorted(c.known_ops, key=lambda x: x.name)
                            if x not in all_ignored_ops)
   bname = _backward_name(self._func_name)
   self._backward_function = GraphModeFunction(
       bname, all_inputs, [], self._graph, function_def_ops,
       backward_outputs, in_gradients, shapes)
Example #24
  def testNamedTupleEstimatorSpec(self):
    with context.graph_mode(), ops.Graph().as_default():
      devices = []
      created_estimator_specs = []

      for device_id in range(3):
        spec = model_fn_lib.EstimatorSpec(
            mode=model_fn_lib.ModeKeys.TRAIN,
            loss=constant_op.constant(device_id / 2),
            train_op=array_ops.identity(constant_op.constant(device_id)))
        devices.append(_device_str(device_id))
        created_estimator_specs.append(spec)

      device_map = values.ReplicaDeviceMap(devices)
      merged_estimator_spec = values.regroup(
          device_map, created_estimator_specs)

      self.assertTrue(
          isinstance(merged_estimator_spec, model_fn_lib.EstimatorSpec))
      self.assertEqual(model_fn_lib.ModeKeys.TRAIN, merged_estimator_spec.mode)
      for device_id in range(3):
        d = _device_str(device_id)
        self.assertEqual(created_estimator_specs[device_id].loss,
                         merged_estimator_spec.loss.get(d))
        self.assertEqual(created_estimator_specs[device_id].train_op,
                         merged_estimator_spec.train_op.get(d))
        # Scaffold is populated by `EstimatorSpec.__new__`.
        self.assertEqual(created_estimator_specs[device_id].scaffold,
                         merged_estimator_spec.scaffold.get(d))
        # Also test that we can undo the merge using select_replica()
        self.assertEqual(created_estimator_specs[device_id],
                         values.select_replica(device_id,
                                               merged_estimator_spec))
Example #25
  def testGraphOpNames(self):
    """Network operation names should match variable naming."""

    def _check_op_prefixes(expected_prefix, checked_ops):
      for operation in ops.get_default_graph().get_operations():
        if operation.name == "ignore":
          continue
        if operation.name in checked_ops:
          continue
        checked_ops.add(operation.name)
        self.assertStartsWith(expected_start=expected_prefix,
                              actual=operation.name)
        self.assertNotIn("my_network", operation.name[len(expected_prefix):])
        self.assertNotIn("dense", operation.name[len(expected_prefix):])

    with context.graph_mode():
      net = MyNetwork()
      zero = constant_op.constant([[0.]], name="ignore")
      net(zero)
      checked_ops = set()
      _check_op_prefixes(expected_prefix="my_network/dense/",
                         checked_ops=checked_ops)
      net.net2 = net.track_layer(MyNetwork())
      net.net2(zero)
      _check_op_prefixes(expected_prefix="my_network/my_network/dense/",
                         checked_ops=checked_ops)
      MyNetwork()(zero)
      _check_op_prefixes(expected_prefix="my_network_1/dense/",
                         checked_ops=checked_ops)
Example #26
  def testAggregateGradients(self):

    def fn(x):
      ind1 = constant_op.constant(np.array([0, 1]))
      ind2 = constant_op.constant(np.array([2, 3]))
      ind3 = constant_op.constant(np.array([1, 3]))
      # A mixture of IndexedSlices and dense tensor to aggregate.
      g1 = embedding_ops.embedding_lookup(x, ind1)
      g2 = embedding_ops.embedding_lookup(x, ind2)
      g3 = embedding_ops.embedding_lookup(x, ind3)
      g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
      return g1 * g2 * g3 * g4

    var_np = np.random.rand(4, 2).astype(np.float32)
    var = constant_op.constant(var_np)
    grad = backprop.gradients_function(fn, [0])(var)[0]
    grad = ops.convert_to_tensor(grad).numpy()

    with context.graph_mode(), self.test_session():
      tf_var = array_ops.constant(var_np, dtypes.float32)
      tf_ind1 = array_ops.constant([0, 1])
      tf_ind2 = array_ops.constant([2, 3])
      tf_ind3 = array_ops.constant([1, 3])
      tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
      tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
      tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
      tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
      tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
      tf_grad = gradients.gradients(tf_y, [tf_var])[0]

      tf_dense_grad = math_ops.unsorted_segment_sum(
          tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])

      self.assertAllClose(grad, tf_dense_grad.eval())
Example #27
  def test_build_standardized_signature_def_classify_classes_only(self):
    """Tests classification with one output tensor."""
    with context.graph_mode():
      input_tensors = {
          'input-1':
              array_ops.placeholder(
                  dtypes.string, 1, name='input-tensor-1')
      }
      classes = array_ops.placeholder(dtypes.string, 1, name='output-tensor-1')

      export_output = export_output_lib.ClassificationOutput(classes=classes)
      actual_signature_def = export_output.as_signature_def(input_tensors)

      expected_signature_def = meta_graph_pb2.SignatureDef()
      shape = tensor_shape_pb2.TensorShapeProto(
          dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
      dtype_string = types_pb2.DataType.Value('DT_STRING')
      expected_signature_def.inputs[
          signature_constants.CLASSIFY_INPUTS].CopyFrom(
              meta_graph_pb2.TensorInfo(name='input-tensor-1:0',
                                        dtype=dtype_string,
                                        tensor_shape=shape))
      expected_signature_def.outputs[
          signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
              meta_graph_pb2.TensorInfo(name='output-tensor-1:0',
                                        dtype=dtype_string,
                                        tensor_shape=shape))

      expected_signature_def.method_name = (
          signature_constants.CLASSIFY_METHOD_NAME)
      self.assertEqual(actual_signature_def, expected_signature_def)
Example #28
    def decorated(self, **kwargs):
      """Decorated the test method."""
      with context.graph_mode():
        with self.test_session(graph, config, use_gpu, force_gpu):
          f(self, **kwargs)

      if reset_test:
        # This decorator runs the wrapped test twice.
        # Reset the test environment between runs.
        self.tearDown()
        self.setUp()

      def run_eager_mode(self, **kwargs):
        if force_gpu:
          gpu_name = gpu_device_name()
          if not gpu_name:
            gpu_name = "/device:GPU:0"
          with context.device(gpu_name):
            f(self, **kwargs)
        elif use_gpu:
          # TODO(xpan): Support softplacement and gpu by default when available.
          f(self, **kwargs)
        else:
          with context.device("/device:CPU:0"):
            f(self, **kwargs)

      if assert_no_eager_garbage:
        run_eager_mode = assert_no_new_tensors(
            assert_no_garbage_created(run_eager_mode))

      with context.eager_mode():
        with IsolateTest():
          run_eager_mode(self, **kwargs)
Example #29
 def _compute_backprop(self):
   """Computes the backprop function object for this function."""
   self._has_backprop = True
   with self._graph.as_default(), context.graph_mode():
     c = _CapturingContext()
     with c:
       filtered_outputs = [
           x for x in self._returns if x is not None
       ]
       self._out_grad_placeholders = [
           graph_placeholder(x.dtype, x.shape) for x in filtered_outputs
       ]
       in_gradients = gradients_impl.gradients(
           filtered_outputs,
           self._input_placeholders,
           grad_ys=self._out_grad_placeholders)
       shapes = [x.shape for x in in_gradients if x is not None]
   captures = list(sorted(c.captured_tensors, key=lambda x: x.name))
   forward_function_def = make_function_def(
       self._graph, self._ops, self._input_placeholders,
       filtered_outputs + captures)
   self._forward_fdef = _DefinedFunction(forward_function_def)
   _register_with_name(_forward_name(self._func_name), forward_function_def)
   backward_outputs = [x for x in in_gradients if x is not None]
   all_inputs = self._out_grad_placeholders + captures
   backward_function_def = make_function_def(
       self._graph, [x.op for x in self._out_grad_placeholders
                    ] + list(sorted(c.known_ops, key=lambda x: x.name)),
       all_inputs, backward_outputs)
   _register_with_name(_backward_name(self._func_name), backward_function_def)
   self._backward_function = _GraphModeFunction(
       all_inputs, [], backward_function_def, self._graph, c.known_ops,
       in_gradients, _map_sequence_obj_to_idx(backward_outputs), shapes)
Example #30
 def benchmark_keras_model_functional_fit_graph_mode_with_profiler(self):
   profiler.start()
   with context.graph_mode():
     model = make_keras_model(initializer="glorot_uniform")
     self._benchmark_keras_model_fit(model)
   result = profiler.stop()
   assert result is not None
Example #31
  def test_head_export_outputs(self, head):
    with context.graph_mode():
      tf_compat.v1.train.create_global_step()
      ensemble_builder = _HeadEnsembleBuilder(head)
      builder = _IterationBuilder(
          _FakeCandidateBuilder(),
          _FakeSubnetworkManager(),
          ensemble_builder,
          summary_maker=_ScopedSummary,
          ensemblers=[_FakeEnsembler()],
          max_steps=10)
      features = [[1., -1., 0.]]
      labels = [1]
      mode = tf.estimator.ModeKeys.PREDICT
      subnetwork_builders = [_FakeBuilder("test")]
      iteration = builder.build_iteration(
          base_global_step=0,
          iteration_number=0,
          ensemble_candidates=[
              EnsembleCandidate("test", subnetwork_builders, [tf.Variable(1.)])
          ],
          subnetwork_builders=subnetwork_builders,
          features=features,
          labels=labels,
          config=tf.estimator.RunConfig(model_dir=self.test_subdirectory),
          mode=mode)

      # Compare iteration outputs with default head outputs.
      spec = head.create_estimator_spec(
          features=features, labels=labels, mode=mode, logits=[[.5]])
      self.assertEqual(
          len(spec.export_outputs),
          len(iteration.estimator_spec.export_outputs))
      for key in spec.export_outputs:
        if isinstance(spec.export_outputs[key],
                      tf.estimator.export.RegressionOutput):
          self.assertAlmostEqual(
              self.evaluate(spec.export_outputs[key].value),
              self.evaluate(iteration.estimator_spec.export_outputs[key].value))
          continue
        if isinstance(spec.export_outputs[key],
                      tf.estimator.export.ClassificationOutput):
          self.assertAllClose(
              self.evaluate(spec.export_outputs[key].scores),
              self.evaluate(
                  iteration.estimator_spec.export_outputs[key].scores))
          self.assertAllEqual(
              self.evaluate(spec.export_outputs[key].classes),
              self.evaluate(
                  iteration.estimator_spec.export_outputs[key].classes))
          continue
        if isinstance(spec.export_outputs[key],
                      tf.estimator.export.PredictOutput):
          if "classes" in spec.export_outputs[key].outputs:
            # Verify string Tensor outputs separately.
            self.assertAllEqual(
                self.evaluate(spec.export_outputs[key].outputs["classes"]),
                self.evaluate(iteration.estimator_spec.export_outputs[key]
                              .outputs["classes"]))
            del spec.export_outputs[key].outputs["classes"]
            del iteration.estimator_spec.export_outputs[key].outputs["classes"]
          if "all_classes" in spec.export_outputs[key].outputs:
            # Verify string Tensor outputs separately.
            self.assertAllEqual(
                self.evaluate(spec.export_outputs[key].outputs["all_classes"]),
                self.evaluate(iteration.estimator_spec.export_outputs[key]
                              .outputs["all_classes"]))
            del spec.export_outputs[key].outputs["all_classes"]
            del iteration.estimator_spec.export_outputs[key].outputs[
                "all_classes"]
          self.assertAllClose(
              self.evaluate(spec.export_outputs[key].outputs),
              self.evaluate(
                  iteration.estimator_spec.export_outputs[key].outputs))
          continue
        self.fail("Invalid export_output for {}.".format(key))
Example #32
 def _create_definition_if_needed(self):
   """Creates the function definition if it's not created yet."""
   with context.graph_mode():
     self._create_definition_if_needed_impl()
Example #33
  def test_mean_ensembler(self,
                          multi_head=False,
                          add_mean_last_layer_predictions=False,
                          diff_last_layer_shapes=False):
    with context.graph_mode():
      ensembler = ensemble.MeanEnsembler(
          add_mean_last_layer_predictions=add_mean_last_layer_predictions)
      last_layer_dims = [3, 3]
      if diff_last_layer_shapes:
        last_layer_dims = [3, 5]
      if multi_head:
        subnetworks = [
            self._build_subnetwork(
                multi_head=multi_head, last_layer_dim=last_layer_dim)
            for last_layer_dim in last_layer_dims
        ]
      else:
        subnetworks = [
            self._build_subnetwork(last_layer_dim=last_layer_dim)
            for last_layer_dim in last_layer_dims
        ]

      if diff_last_layer_shapes:
        with self.assertRaisesRegexp(
            ValueError, r'Shape of \`last_layer\` tensors must be same'):
          built_ensemble = ensembler.build_ensemble(
              subnetworks=subnetworks,
              previous_ensemble_subnetworks=None,
              features=None,
              labels=None,
              logits_dimension=None,
              training=None,
              iteration_step=None,
              summary=None,
              previous_ensemble=None,
              previous_iteration_checkpoint=None)
        return
      built_ensemble = ensembler.build_ensemble(
          subnetworks=subnetworks,
          previous_ensemble_subnetworks=None,
          features=None,
          labels=None,
          logits_dimension=None,
          training=None,
          iteration_step=None,
          summary=None,
          previous_ensemble=None,
          previous_iteration_checkpoint=None)

      with self.test_session() as sess:
        sess.run(tf_compat.v1.global_variables_initializer())
        got_logits = sess.run(built_ensemble.logits)

        if add_mean_last_layer_predictions:
          got_predictions = sess.run(built_ensemble.predictions)

        logits = sess.run([s.logits for s in subnetworks])
        last_layer = sess.run([s.last_layer for s in subnetworks])
        if not multi_head:
          expected_logits = np.mean(logits, axis=0)
          expected_predictions = {
              ensemble.MeanEnsemble.MEAN_LAST_LAYER: np.mean(
                  last_layer, axis=0)
          }
        else:
          expected_logits = {
              head_name: np.mean([s[head_name] for s in logits
                                 ], axis=0) for head_name in multi_head
          }
          expected_predictions = {
              '{}_{}'.format(ensemble.MeanEnsemble.MEAN_LAST_LAYER, head_name):
              np.mean([s[head_name] for s in last_layer], axis=0)
              for head_name in multi_head
          }

        self.assertAllClose(expected_logits, got_logits)
        if add_mean_last_layer_predictions:
          self.assertAllClose(expected_predictions, got_predictions)
Example #34
    def _make_metrics(self,
                      metric_fn,
                      mode=tf.estimator.ModeKeys.EVAL,
                      multi_head=False,
                      sess=None):

        with context.graph_mode():
            if multi_head:
                head = multi_head_lib.MultiHead(heads=[
                    binary_class_head.BinaryClassHead(
                        name="head1", loss_reduction=tf_compat.SUM),
                    binary_class_head.BinaryClassHead(
                        name="head2", loss_reduction=tf_compat.SUM)
                ])
                labels = {
                    "head1": tf.constant([0, 1]),
                    "head2": tf.constant([0, 1])
                }
            else:
                head = binary_class_head.BinaryClassHead(
                    loss_reduction=tf_compat.SUM)
                labels = tf.constant([0, 1])
            features = {"x": tf.constant([[1.], [2.]])}
            builder = _EnsembleBuilder(head, metric_fn=metric_fn)
            subnetwork_manager = _SubnetworkManager(head, metric_fn=metric_fn)
            subnetwork_builder = _Builder(lambda unused0, unused1: tf.no_op(),
                                          lambda unused0, unused1: tf.no_op(),
                                          use_logits_last_layer=True)

            subnetwork_spec = subnetwork_manager.build_subnetwork_spec(
                name="test",
                subnetwork_builder=subnetwork_builder,
                summary=_FakeSummary(),
                features=features,
                mode=mode,
                labels=labels)
            ensemble_spec = builder.build_ensemble_spec(
                name="test",
                candidate=EnsembleCandidate("foo", [subnetwork_builder], None),
                ensembler=ComplexityRegularizedEnsembler(
                    mixture_weight_type=MixtureWeightType.SCALAR),
                subnetwork_specs=[subnetwork_spec],
                summary=_FakeSummary(),
                features=features,
                iteration_number=0,
                labels=labels,
                mode=mode)
            subnetwork_metric_ops = subnetwork_spec.eval_metrics.eval_metrics_ops(
            )
            ensemble_metric_ops = ensemble_spec.eval_metrics.eval_metrics_ops()
            evaluate = self.evaluate
            if sess is not None:
                evaluate = sess.run
            evaluate((tf_compat.v1.global_variables_initializer(),
                      tf_compat.v1.local_variables_initializer()))
            evaluate((subnetwork_metric_ops, ensemble_metric_ops))
            # Return the idempotent tensor part of the (tensor, op) metrics tuple.
            return {
                k: evaluate(subnetwork_metric_ops[k][0])
                for k in subnetwork_metric_ops
            }, {
                k: evaluate(ensemble_metric_ops[k][0])
                for k in ensemble_metric_ops
            }
Example #35
 def test_build_train_op_no_op(self):
   with context.graph_mode():
     train_op = ensemble.ComplexityRegularizedEnsembler().build_train_op(
         *[None] * 7)  # arguments unused
     self.assertEqual(train_op.type, tf.no_op().type)
Example #36
 def testNoMemoryLeak_graphMode(self):
     logdir = self.get_temp_dir()
     with context.graph_mode(), ops.Graph().as_default():
         summary_ops.create_file_writer_v2(logdir)
Example #37
    def testMultipleGraphsNonSlotVariables(self):
        with context.graph_mode():
            checkpoint_directory = self.get_temp_dir()
            checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
            optimizer = adam.AdamOptimizer(0.001)
            # Construct a model in one graph
            first_graph = ops.Graph()
            first_session = session_lib.Session(graph=first_graph)
            with first_graph.as_default(), first_session.as_default():
                first_variable = resource_variable_ops.ResourceVariable([1.])
                first_root_trackable = trackable_utils.Checkpoint(
                    optimizer=optimizer, variable=first_variable)
                train_op = optimizer.minimize(first_variable.read_value)
                self.evaluate(
                    trackable_utils.gather_initializers(first_root_trackable))
                self.evaluate(train_op)
                self.evaluate(first_variable.assign([1.]))
                self.evaluate(
                    optimizer.get_slot(var=first_variable,
                                       name="m").assign([2.]))
                beta1_power, _ = optimizer._get_beta_accumulators()
                self.evaluate(beta1_power.assign(3.))

            # Save and load in a second graph
            second_graph = ops.Graph()
            with second_graph.as_default(), session_lib.Session(
                    graph=second_graph):
                second_variable = resource_variable_ops.ResourceVariable([1.])
                second_root_trackable = trackable_utils.Checkpoint(
                    optimizer=optimizer, variable=second_variable)
                train_op = optimizer.minimize(second_variable.read_value)
                second_root_trackable.restore(None).initialize_or_restore()
                self.evaluate(train_op)
                self.evaluate(second_variable.assign([4.]))
                self.evaluate(
                    optimizer.get_slot(var=second_variable,
                                       name="m").assign([5.]))
                beta1_power, _ = optimizer._get_beta_accumulators()
                self.evaluate(beta1_power.assign(6.))
                save_path = second_root_trackable.save(checkpoint_prefix)
                self.evaluate(second_variable.assign([7.]))
                self.evaluate(
                    optimizer.get_slot(var=second_variable,
                                       name="m").assign([8.]))
                beta1_power, _ = optimizer._get_beta_accumulators()
                self.assertAllEqual(6., self.evaluate(beta1_power))
                status = second_root_trackable.restore(save_path)
                status.assert_consumed().run_restore_ops()
                self.assertAllEqual([4.], self.evaluate(second_variable))
                self.assertAllEqual([5.],
                                    self.evaluate(
                                        optimizer.get_slot(var=second_variable,
                                                           name="m")))
                beta1_power, _ = optimizer._get_beta_accumulators()
                self.assertAllEqual(6., self.evaluate(beta1_power))

            # Check that the first graph is unmolested
            with first_graph.as_default(), first_session.as_default():
                self.assertAllEqual([1.], self.evaluate(first_variable))
                self.assertAllEqual([2.],
                                    self.evaluate(
                                        optimizer.get_slot(var=first_variable,
                                                           name="m")))
                beta1_power, _ = optimizer._get_beta_accumulators()
                self.assertAllEqual(3., self.evaluate(beta1_power))
Example #38
 def decorated(self, **kwargs):
   with context.graph_mode():
     with self.test_session(use_gpu=use_gpu):
       f(self, **kwargs)
Example #39
@pytest.fixture
def graph_mode():
    """pytest fixture for running test in graph mode"""
    with context.graph_mode():
        yield
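A minimal usage sketch, assuming the fixture above is collected from a conftest.py; the test name and assertion are illustrative:

import tensorflow as tf

def test_ops_are_symbolic(graph_mode):
    # While the fixture is active, the eager context reports graph mode.
    assert not tf.executing_eagerly()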
Example #40
  def test_materialize_subnetwork_reports(self,
                                          input_fn,
                                          subnetwork_reports_fn,
                                          steps,
                                          iteration_number=0,
                                          included_subnetwork_names=None,
                                          want_materialized_reports=None):
    with context.graph_mode():
      tf.constant(0.)  # dummy op so that the session graph is never empty.
      features, labels = input_fn()
      subnetwork_reports = subnetwork_reports_fn(features, labels)
      with self.test_session() as sess:
        sess.run(tf_compat.v1.initializers.local_variables())
        report_materializer = ReportMaterializer(input_fn=input_fn, steps=steps)
        materialized_reports = (
            report_materializer.materialize_subnetwork_reports(
                sess, iteration_number, subnetwork_reports,
                included_subnetwork_names))
        self.assertEqual(
            len(want_materialized_reports), len(materialized_reports))
        materialized_reports_dict = {
            blrm.name: blrm for blrm in materialized_reports
        }
        for want_materialized_report in want_materialized_reports:
          materialized_report = (
              materialized_reports_dict[want_materialized_report.name])
          self.assertEqual(iteration_number,
                           materialized_report.iteration_number)
          self.assertEqual(
              set(want_materialized_report.hparams.keys()),
              set(materialized_report.hparams.keys()))
          for hparam_key, want_hparam in (
              want_materialized_report.hparams.items()):
            if isinstance(want_hparam, float):
              self.assertAllClose(want_hparam,
                                  materialized_report.hparams[hparam_key])
            else:
              self.assertEqual(want_hparam,
                               materialized_report.hparams[hparam_key])

          self.assertSetEqual(
              set(want_materialized_report.attributes.keys()),
              set(materialized_report.attributes.keys()))
          for attribute_key, want_attribute in (
              want_materialized_report.attributes.items()):
            if isinstance(want_attribute, float):
              self.assertAllClose(
                  want_attribute,
                  decode(materialized_report.attributes[attribute_key]))
            else:
              self.assertEqual(
                  want_attribute,
                  decode(materialized_report.attributes[attribute_key]))

          self.assertSetEqual(
              set(want_materialized_report.metrics.keys()),
              set(materialized_report.metrics.keys()))
          for metric_key, want_metric in (
              want_materialized_report.metrics.items()):
            if isinstance(want_metric, float):
              self.assertAllClose(
                  want_metric, decode(materialized_report.metrics[metric_key]))
            else:
              self.assertEqual(want_metric,
                               decode(materialized_report.metrics[metric_key]))
Example #41
 def testTrace_cannotEnableTraceInGraphMode(self):
     with test.mock.patch.object(logging, 'warn') as mock_log:
         with context.graph_mode():
             summary_ops.trace_on(graph=True, profiler=False)
         self.assertRegexpMatches(str(mock_log.call_args),
                                  'Must enable trace in eager mode.')
Example #42
 def testTwoDevicesPerWorker(self, input_type):
     worker_devices = self._cpu_and_one_gpu_devices()
     with context.graph_mode(), self.cached_session() as sess:
         dataset_fn = lambda _: dataset_ops.Dataset.range(4)
         self._test_iterator(input_type, dataset_fn, worker_devices,
                             [[0, 1, 0, 1], [2, 3, 2, 3]], sess)
Example #43
def _v1_graph_test(f, test_or_class, config, *args, **kwargs):
    with context.graph_mode(), testing_utils.run_eagerly_scope(False):
        with test_or_class.test_session(use_gpu=True, config=config):
            f(test_or_class, *args, **kwargs)
Ejemplo n.º 44
 def benchmark_performance_graph(self):
     with context.graph_mode(), session_lib.Session(config=_config):
         self._benchmark_performance_with_standard_cudnn_impl()
Example No. 45
 def testOneDevicePerWorker(self, input_type):
     worker_devices = self._cpu_devices()
     with context.graph_mode(), self.cached_session() as sess:
         dataset_fn = lambda _: dataset_ops.Dataset.range(4)
         self._test_iterator(input_type, dataset_fn, worker_devices,
                             [[0, 0], [1, 1], [2, 2], [3, 3]], sess)
Example No. 46
  def test_build_ensemble(self,
                          mixture_weight_type=ensemble.MixtureWeightType.SCALAR,
                          mixture_weight_initializer=None,
                          warm_start_mixture_weights=False,
                          adanet_lambda=0.,
                          adanet_beta=0.,
                          multi_head=None,
                          use_bias=False,
                          num_subnetworks=1,
                          num_previous_ensemble_subnetworks=0,
                          expected_complexity_regularization=0.,
                          expected_summary_scalars=None,
                          name=None):
    with context.graph_mode():
      model_dir = None
      if warm_start_mixture_weights:
        model_dir = 'fake_checkpoint_dir'
      ensembler = ensemble.ComplexityRegularizedEnsembler(
          optimizer=self._optimizer,
          mixture_weight_type=mixture_weight_type,
          mixture_weight_initializer=mixture_weight_initializer,
          warm_start_mixture_weights=warm_start_mixture_weights,
          model_dir=model_dir,
          adanet_lambda=adanet_lambda,
          adanet_beta=adanet_beta,
          use_bias=use_bias,
          name=name)

      if name:
        self.assertEqual(ensembler.name, name)
      else:
        self.assertEqual(ensembler.name, 'complexity_regularized')

      with tf_compat.v1.variable_scope('dummy_adanet_scope_iteration_0'):
        previous_ensemble_subnetworks_all = [
            self._build_subnetwork(multi_head),
            self._build_subnetwork(multi_head)
        ]

        previous_ensemble = self._build_easy_ensemble(
            previous_ensemble_subnetworks_all)

      with tf_compat.v1.variable_scope('dummy_adanet_scope_iteration_1'):
        subnetworks_pool = [
            self._build_subnetwork(multi_head),
            self._build_subnetwork(multi_head),
        ]

        subnetworks = subnetworks_pool[:num_subnetworks]

        previous_ensemble_subnetworks = previous_ensemble_subnetworks_all[
            :num_previous_ensemble_subnetworks]

        self.summary.clear_scalars()

        built_ensemble = ensembler.build_ensemble(
            subnetworks=subnetworks,
            previous_ensemble_subnetworks=previous_ensemble_subnetworks,
            features=None,
            labels=None,
            logits_dimension=None,
            training=None,
            iteration_step=None,
            summary=self.summary,
            previous_ensemble=previous_ensemble)

        with self.test_session() as sess:
          sess.run(tf_compat.v1.global_variables_initializer())

          summary_scalars, complexity_regularization = sess.run(
              (self.summary.scalars, built_ensemble.complexity_regularization))

          if expected_summary_scalars:
            for key, want_scalar in expected_summary_scalars.items():
              self.assertAllClose(want_scalar, summary_scalars[key])

          self.assertEqual(
              [l.subnetwork for l in built_ensemble.weighted_subnetworks],
              previous_ensemble_subnetworks + subnetworks)

          self.assertAllClose(expected_complexity_regularization,
                              complexity_regularization)
          self.assertIsNotNone(sess.run(built_ensemble.logits))
Example No. 47
def _graph_callable_internal(func, shape_and_dtypes):
  """Defines and returns a template version of func.

  Under the hood we make two function objects, each wrapping a different
  version of the graph-mode code. One version immediately runs variable
  initialization before making the variable's Tensors available for use,
  while the other version replaces the Variables with placeholders that
  become function arguments and receive the current variable's value.

  The limitations in (2) and (4) exist because this does not implement a
  graph-mode Variable class with a convert_to_tensor(as_ref=True) method
  and an initialized_value method. This is fixable.

  Args:
    func: The tfe Python function to compile.
    shape_and_dtypes: A possibly nested list or tuple of ShapeAndDtype objects.

  Raises:
    ValueError: If any one of func's outputs is not a Tensor.

  Returns:
    Callable graph object.
  """
  container = tf_ops.get_default_graph()._container  # pylint: disable=protected-access
  graph_key = tf_ops.get_default_graph()._graph_key  # pylint: disable=protected-access
  with context.graph_mode():
    # This graph will store both the initialization and the call version of the
    # wrapped function. It will later be used by the backprop code to build the
    # backprop graph, if necessary.
    tmp_graph = function.CapturingGraph()
    # Inherit the graph key from the original graph to ensure optimizers don't
    # misbehave.
    tmp_graph._container = container  # pylint: disable=protected-access
    tmp_graph._graph_key = graph_key  # pylint: disable=protected-access
    with tmp_graph.as_default():
      # Placeholders for the non-variable inputs.
      func_inputs = _get_graph_callable_inputs(shape_and_dtypes)
      func_num_args = len(tf_inspect.getfullargspec(func).args)
      if len(func_inputs) != func_num_args:
        raise TypeError("The number of arguments accepted by the decorated "
                        "function `%s` (%d) must match the number of "
                        "ShapeAndDtype objects passed to the graph_callable() "
                        "decorator (%d)." %
                        (func.__name__, func_num_args, len(func_inputs)))

      # First call the function to generate a graph which can initialize all
      # variables. As a side-effect this will populate the variable capturing
      # scope's view of which variables exist.
      variable_captures = _VariableCapturingScope()
      with variable_captures.initializing_scope(
          ), function.AutomaticControlDependencies() as a:
        func_outputs = func(*func_inputs)
        outputs_list = nest.flatten(func_outputs)
        for i, x in enumerate(outputs_list):
          if x is not None:
            outputs_list[i] = a.mark_as_return(x)
      if len(outputs_list) == 1 and outputs_list[0] is None:
        outputs_list = []
      output_shapes = [x.shape for x in outputs_list]
      if not all(isinstance(x, tf_ops.Tensor) for x in outputs_list):
        raise ValueError("Found non-tensor output in %s" % str(outputs_list))
      initializing_operations = tmp_graph.get_operations()

      # Call the function again, now replacing usages of variables with
      # placeholders. This assumes the variable capturing scope created above
      # knows about all variables.
      tmp_graph.clear_resource_control_flow_state()
      with variable_captures.capturing_scope(
          ), function.AutomaticControlDependencies() as a:
        captured_outputs = func(*func_inputs)
      captured_outlist = nest.flatten(captured_outputs)
      for i, x in enumerate(captured_outlist):
        if x is not None:
          captured_outlist[i] = a.mark_as_return(x)
      capturing_operations = tmp_graph.get_operations()[
          len(initializing_operations):]

  sorted_variables = sorted(variable_captures.variables.values(),
                            key=lambda x: x.name)

  extra_inputs = list(tmp_graph.captures.keys())
  extra_placeholders = list(tmp_graph.captures.values())

  flat_inputs = [x for x in nest.flatten(func_inputs)
                 if isinstance(x, tf_ops.Tensor)]
  placeholder_inputs = flat_inputs + extra_placeholders

  func_def_outputs = [x for x in outputs_list if isinstance(x, tf_ops.Tensor)]
  initialization_name = function._inference_name(func.__name__)  # pylint: disable=protected-access
  # TODO(ashankar): Oh lord, forgive me for this lint travesty.
  # Also, what about the gradient registry of these functions? Those need to be
  # addressed as well.
  for f in tmp_graph._functions.values():  # pylint: disable=protected-access
    function._register(f._c_func.func)  # pylint: disable=protected-access
  initializer_function = function.GraphModeFunction(
      initialization_name,
      placeholder_inputs,
      extra_inputs,
      tmp_graph,
      initializing_operations,
      func_def_outputs,
      func_outputs,
      output_shapes)

  capture_func_def_outputs = [
      x for x in captured_outlist if isinstance(x, tf_ops.Tensor)]
  captured_function_name = function._inference_name(func.__name__)  # pylint: disable=protected-access
  captured_function = function.GraphModeFunction(
      captured_function_name,
      placeholder_inputs,
      extra_inputs,
      tmp_graph,
      capturing_operations,
      capture_func_def_outputs,
      captured_outputs,
      output_shapes,
      variables=[x.variable for x in sorted_variables])

  return _InitializingFunctionObject(captured_function, initializer_function,
                                     shape_and_dtypes)
Example No. 48
 def testGraphMode(self):
     graph = ops.Graph()
     with graph.as_default(), context.graph_mode():
         array_ops.placeholder(dtypes.int32)
     self.assertLen(graph.get_operations(), 1)
Example No. 49
def graph_mode():
    """pytest fixture for running test in graph mode"""
    with context.graph_mode():
        with tf.compat.v1.Session().as_default():
            yield
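A short sketch of how a test module might consume this fixture; the `@pytest.fixture` registration (possibly dropped during extraction) and the test body are assumptions:

import pytest
import tensorflow as tf
from tensorflow.python.eager import context

@pytest.fixture  # registration assumed; the snippet above omits the decorator
def graph_mode():
    """pytest fixture for running test in graph mode"""
    with context.graph_mode():
        with tf.compat.v1.Session().as_default():
            yield

def test_constant_runs_in_graph_mode(graph_mode):
    c = tf.constant(3)  # builds a graph node instead of executing eagerly
    assert tf.compat.v1.get_default_session().run(c) == 3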
Example No. 50
def _defun_internal(name, func, args, kwds):
    """Defines and returns graph-mode version of func."""
    graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    with context.graph_mode():
        captures = {}
        tmp_graph = CapturingGraph(captures)
        # Inherit the graph key, since this is used for matching variables in
        # optimizers.
        tmp_graph._graph_key = graph_key  # pylint: disable=protected-access
        # Copy the graph collections to ensure summaries and other things work. This
        # lets the function access (but not mutate) collections of the containing
        # graph, such as the global step and the summary writer collections.
        curr_graph = ops.get_default_graph()
        for collection in curr_graph.collections:
            tmp_graph.get_collection_ref(
                collection)[:] = curr_graph.get_collection(collection)
        with tmp_graph.as_default(), AutomaticControlDependencies() as a:
            func_inputs = _get_defun_inputs(args)

            def convert(x):
                if x is None:
                    return None
                x = ops.convert_to_tensor_or_indexed_slices(x)
                x = a.mark_as_return(x)
                return x

            with capture_tensors(captures):
                this_tape = tape.push_new_tape()
                try:
                    func_outputs = func(*func_inputs, **kwds)
                    func_outputs = nest.map_structure(convert, func_outputs)
                finally:
                    tape.pop_tape(this_tape)
                variables = this_tape.watched_variables()

                # Returning a closed-over tensor as an output does not trigger a
                # call to convert_to_tensor, so we manually capture all such tensors.
                outputs_list = _flatten(func_outputs)
                func_def_outputs = [
                    _convert_to_graph_tensor(x) for x in outputs_list
                    if x is not None
                ]

            ids = list(sorted(captures.keys()))
            if ids:
                extra_inputs, extra_placeholders = zip(
                    *[captures[x] for x in ids])
            else:
                extra_inputs = []
                extra_placeholders = []
            output_shapes = tuple(
                x.shape if isinstance(x, ops.Tensor) else None
                for x in outputs_list)

    flat_inputs = [
        x for x in nest.flatten(func_inputs) if isinstance(x, ops.Tensor)
    ]
    all_inputs = flat_inputs + list(extra_placeholders)
    all_ignored_ops = frozenset(x.op for x in all_inputs)
    fname = _inference_name(name)
    operations = tuple(x for x in tmp_graph.get_operations()
                       if x not in all_ignored_ops)
    # Register any other functions defined in the graph
    # TODO(ashankar): Oh lord, forgive me for this lint travesty.
    if context.executing_eagerly():
        for f in tmp_graph._functions.values():  # pylint: disable=protected-access
            # TODO(ashankar): What about the gradient registry?
            _register(f._c_func)  # pylint: disable=protected-access
    return GraphModeFunction(fname, all_inputs, extra_inputs, tmp_graph,
                             operations, func_def_outputs, func_outputs,
                             output_shapes, variables)
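For context, a sketch of the public entry point a helper like this would sit behind; the contrib-era `tfe.defun` spelling is an assumption:

import tensorflow as tf
import tensorflow.contrib.eager as tfe  # import path is an assumption

def matmul_fn(a, b):
    return tf.matmul(a, b)

# defun traces matmul_fn into a graph function once (via machinery like
# _defun_internal above) and reuses the traced graph on later calls.
compiled = tfe.defun(matmul_fn)
result = compiled(tf.ones([2, 2]), tf.ones([2, 2]))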
Example No. 51
 def testDataDistributionOneDevicePerWorker(self):
     worker_device_map, devices = self._cpu_devices()
     with context.graph_mode():
         dataset_fn = lambda: dataset_ops.Dataset.range(8)
         self._test_dataset(dataset_fn, worker_device_map, devices,
                            [[0, 1], [2, 3], [4, 5], [6, 7]])
Example No. 52
    def decorated(self, **kwargs):
        """A wrapped test method that treats some arguments in a special way."""
        mode = kwargs.pop("mode", "graph")

        distribution = kwargs.get("distribution", None)
        required_tpu = kwargs.pop("required_tpu", False)
        required_gpus = kwargs.pop("required_gpus", None)

        if distribution:
            assert required_gpus is None, (
                "Do not use `required_gpus` and `distribution` together.")
            assert required_tpu is False, (
                "Do not use `required_tpu` and `distribution` together.")
            required_gpus = distribution.required_gpus
            required_tpu = distribution.required_tpu

        if required_tpu and not TPU_TEST:
            self.skipTest("Test requires a TPU, but it's not available.")
        if not required_tpu and TPU_TEST:
            self.skipTest("Test that doesn't require a TPU.")

        if not required_gpus:
            if GPU_TEST:
                self.skipTest("Test that doesn't require GPUs.")
        elif context.num_gpus() < required_gpus:
            # TODO(priyag): Consider allowing tests in graph mode using soft
            # placement.
            self.skipTest(
                "{} GPUs are not available for this test. {} GPUs are available"
                .format(required_gpus, context.num_gpus()))

        # At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
        # that the user might have specified.  `kwargs` still has `mode`, which
        # the test is allowed to accept or ignore.
        requested_arguments = tf_inspect.getfullargspec(test_method).args
        missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
            set(requested_arguments + ["mode"]))
        if missing_arguments:
            raise ValueError(
                "The test is missing arguments {} .".format(missing_arguments))

        kwargs_to_pass = {}
        for arg in requested_arguments:
            if arg == "self":
                kwargs_to_pass[arg] = self
            else:
                kwargs_to_pass[arg] = kwargs[arg]

        if mode == "eager":
            with context.eager_mode():
                if distribution:
                    kwargs_to_pass["distribution"] = distribution.strategy
                test_method(**kwargs_to_pass)
        elif mode == "graph":
            with ops.Graph().as_default(), context.graph_mode():
                if distribution:
                    kwargs_to_pass["distribution"] = distribution.strategy
                test_method(**kwargs_to_pass)
        else:
            raise ValueError(
                "'mode' has to be either 'eager' or 'graph' and not {}".format(
                    mode))
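A hedged sketch of how a test opts into this wrapper, assuming the `combinations.generate`/`combinations.combine` helpers from the distribute test-combinations module:

from absl.testing import parameterized
from tensorflow.python.distribute import combinations  # module path is an assumption
from tensorflow.python.platform import test

class ModeDispatchTest(test.TestCase, parameterized.TestCase):

    @combinations.generate(combinations.combine(mode=["graph", "eager"]))
    def test_runs_in_both_modes(self, mode):
        # The wrapper above pops "mode", enters the matching graph/eager
        # context, and forwards "mode" here because this signature asks for it.
        self.assertIn(mode, ("graph", "eager"))

if __name__ == "__main__":
    test.main()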
Example No. 53
 def benchmark_keras_model_sequential_fit_graph_mode(self):
     with context.graph_mode():
         model = make_sequential_keras_model(initializer="glorot_uniform")
         self._benchmark_keras_model_fit(model)
Example No. 54
    def _test_minimize_loss_graph(self,
                                  d,
                                  soft_placement=False,
                                  learning_rate=0.2):
        config = config_pb2.ConfigProto()
        config.allow_soft_placement = soft_placement
        config.gpu_options.per_process_gpu_memory_fraction = 0.3
        with context.graph_mode(), \
             ops.Graph().as_default(), \
             self.cached_session(config=config) as sess, \
             d.scope():
            l = core.Dense(1, use_bias=False)

            def loss(x):
                # TODO(josh11b): What if this constant was instead a captured
                # value?  Would it need to be a value that has been passed
                # through d.broadcast()?
                y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
                return y * y

            grad_fn = backprop.implicit_grad(loss)

            def update(v, g):
                return v.assign_sub(learning_rate * g)

            one = d.broadcast(constant_op.constant([[1.]]))

            def step():
                """Perform one optimization step."""
                # Run forward & backward to get gradients, variables list.
                g_v = d.extended.call_for_each_replica(grad_fn, args=(one, ))

                # Update the variables using the gradients and the update() function.
                before_list = []
                after_list = []
                for g, v in g_v:
                    fetched = d.extended.read_var(v)
                    before_list.append(fetched)
                    with ops.control_dependencies([fetched]):
                        g = d.extended.reduce_to(reduce_util.ReduceOp.SUM,
                                                 g,
                                                 destinations=v)
                        with ops.control_dependencies(
                                d.extended.update(v,
                                                  update,
                                                  args=(g, ),
                                                  group=False)):
                            after_list.append(d.extended.read_var(v))
                return before_list, after_list

            before_out, after_out = step()
            variables.global_variables_initializer().run()
            for i in range(10):
                b, a = sess.run((before_out, after_out))
                if i == 0:
                    before, = b
                after, = a

            error_before = abs(before - 1)
            error_after = abs(after - 1)
            # Error should go down
            self.assertLess(error_after, error_before)
Example No. 55
 def test_invalid_forward_pass_in_graph_mode(self):
     with context.graph_mode():
         inputs = keras.Input((3, ))
         with self.assertRaisesRegexp(ValueError,
                                      'You did something wrong!'):
             _ = InvalidLayer()(inputs)
Example No. 56
    def test_generate_candidates(self,
                                 want_names,
                                 want_subnetwork_losses,
                                 want_mixture_weight_losses,
                                 want_complexities,
                                 learn_mixture_weights=False,
                                 initial_num_layers=0,
                                 previous_ensemble=None):
        feature_columns = [tf.feature_column.numeric_column("x")]
        generator = simple_dnn.Generator(
            feature_columns=feature_columns,
            optimizer=tf.compat.v1.train.GradientDescentOptimizer(.1),
            layer_size=3,
            initial_num_layers=initial_num_layers,
            learn_mixture_weights=learn_mixture_weights,
            seed=42)
        with context.graph_mode(), tf.Graph().as_default() as g:
            iteration_step = tf.compat.v1.train.create_global_step()
            features = {"x": [[1.], [2.]]}
            labels = tf.constant([[0.], [1.]])
            names = []
            subnetwork_losses = []
            mixture_weight_losses = []
            complexities = []
            for builder in generator.generate_candidates(
                    previous_ensemble,
                    # The following arguments are not used by
                    # simple_dnn.BuilderGenerator's generate_candidates.
                    iteration_number=0,
                    previous_ensemble_reports=[],
                    all_reports=[]):
                names.append(builder.name)

                # 1. Build subnetwork graph.
                subnetwork = builder.build_subnetwork(
                    features,
                    logits_dimension=1,
                    training=True,
                    iteration_step=iteration_step,
                    summary=tf.summary,
                    previous_ensemble=previous_ensemble)

                # 2. Build subnetwork train ops.
                subnetwork_loss = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=subnetwork.logits, labels=labels))
                subnetwork_train_op = builder.build_subnetwork_train_op(
                    subnetwork,
                    subnetwork_loss,
                    var_list=None,
                    labels=labels,
                    iteration_step=iteration_step,
                    summary=tf.summary,
                    previous_ensemble=None)

                # 3. Build mixture weight train ops.

                # Stop gradients, since mixture weights should not propagate
                # beyond the top layer.
                subnetwork_logits = tf.stop_gradient(subnetwork.logits)

                # Mixture weight will initialize to a one-valued scalar.
                mixture_weight_logits = tf.compat.v1.layers.dense(
                    subnetwork_logits,
                    units=1,
                    use_bias=False,
                    kernel_initializer=tf.ones_initializer())
                mixture_weight_loss = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=mixture_weight_logits, labels=labels))
                mixture_weight_train_op = builder.build_mixture_weights_train_op(
                    mixture_weight_loss,
                    var_list=None,
                    labels=labels,
                    logits=mixture_weight_logits,
                    iteration_step=iteration_step,
                    summary=tf.summary)

                with self.test_session(graph=g) as sess:
                    sess.run(tf.compat.v1.global_variables_initializer())
                    sess.run(subnetwork_train_op)
                    sess.run(mixture_weight_train_op)
                    subnetwork_losses.append(sess.run(subnetwork_loss))
                    mixture_weight_losses.append(sess.run(mixture_weight_loss))
                    complexities.append(sess.run(subnetwork.complexity))

        self.assertEqual(want_names, names)
        self.assertAllClose(want_subnetwork_losses,
                            subnetwork_losses,
                            atol=1e-3)
        self.assertAllClose(want_mixture_weight_losses,
                            mixture_weight_losses,
                            atol=1e-3)
        self.assertAllClose(want_complexities, complexities, atol=1e-3)
Example No. 57
 def benchmark_keras_model_functional_fit_graph_mode_with_profiler(self):
     profiler.start("")
     with context.graph_mode():
         model = make_keras_model(initializer="glorot_uniform")
         self._benchmark_keras_model_fit(model)
     profiler.stop(save=False)
Example No. 58
 def benchmark_keras_model_subclassed_fit_graph_mode(self):
     with context.graph_mode():
         model = SubclassedKerasModel(initializer="glorot_uniform")
         self._benchmark_keras_model_fit(model)
Example No. 59
  def test_build_iteration(self,
                           ensemble_builder,
                           subnetwork_builders,
                           features,
                           labels,
                           want_predictions,
                           want_best_candidate_index,
                           want_eval_metric_ops=(),
                           previous_iteration=None,
                           want_loss=None,
                           want_export_outputs=None,
                           mode=tf.estimator.ModeKeys.TRAIN,
                           summary_maker=_ScopedSummary,
                           want_chief_hooks=False):
    with context.graph_mode():
      tf_compat.v1.train.create_global_step()
      builder = _IterationBuilder(
          _FakeCandidateBuilder(),
          _FakeSubnetworkManager(),
          ensemble_builder,
          summary_maker=summary_maker,
          ensemblers=[_FakeEnsembler()],
          max_steps=1)
      iteration = builder.build_iteration(
          base_global_step=0,
          iteration_number=0,
          ensemble_candidates=[
              EnsembleCandidate(b.name, [b], None) for b in subnetwork_builders
          ],
          previous_iteration=previous_iteration()
          if previous_iteration else None,
          subnetwork_builders=subnetwork_builders,
          features=features(),
          labels=labels(),
          mode=mode,
          config=tf.estimator.RunConfig(model_dir=self.test_subdirectory))
      init = tf.group(tf_compat.v1.global_variables_initializer(),
                      tf_compat.v1.local_variables_initializer())
      self.evaluate(init)
      estimator_spec = iteration.estimator_spec
      if want_chief_hooks:
        self.assertNotEmpty(iteration.estimator_spec.training_chief_hooks)
      self.assertAllClose(
          want_predictions,
          self.evaluate(estimator_spec.predictions),
          atol=1e-3)
      # A default architecture metric is always included, even if we don't
      # specify one.
      eval_metric_ops = estimator_spec.eval_metric_ops
      if "architecture/adanet/ensembles" in eval_metric_ops:
        del eval_metric_ops["architecture/adanet/ensembles"]
      self.assertEqual(set(want_eval_metric_ops), set(eval_metric_ops.keys()))

      self.assertEqual(want_best_candidate_index,
                       self.evaluate(iteration.best_candidate_index))
      if mode == tf.estimator.ModeKeys.PREDICT:
        self.assertIsNotNone(estimator_spec.export_outputs)
        self.assertAllClose(
            want_export_outputs,
            self.evaluate(
                _export_output_tensors(estimator_spec.export_outputs)),
            atol=1e-3)
        self.assertIsNone(iteration.estimator_spec.train_op)
        self.assertIsNone(iteration.estimator_spec.loss)
        self.assertIsNotNone(want_export_outputs)
        return

      self.assertAlmostEqual(
          want_loss, self.evaluate(iteration.estimator_spec.loss), places=3)
      self.assertIsNone(iteration.estimator_spec.export_outputs)
      if mode == tf.estimator.ModeKeys.TRAIN:
        self.evaluate(iteration.estimator_spec.train_op)
Example No. 60
    def _test_minimize_loss_graph(self,
                                  d,
                                  soft_placement=False,
                                  learning_rate=0.2):
        config = config_pb2.ConfigProto()
        config.allow_soft_placement = soft_placement
        config.gpu_options.per_process_gpu_memory_fraction = 0.3
        with context.graph_mode(), \
             ops.Graph().as_default(), \
             self.cached_session(config=config) as sess, \
             d.scope():
            kernel = create_variable_like_keras_layer(name="kernel",
                                                      shape=(1, 1),
                                                      dtype=dtypes.float32)

            def loss(x):
                y = array_ops.reshape(gen_math_ops.mat_mul(x, kernel),
                                      []) - array_ops.identity(1.)
                return y * y

            grad_fn = backprop.implicit_grad(loss)

            def update(v, g):
                return v.assign_sub(learning_rate * g)

            one = array_ops.identity([[1.]])

            def step():
                """Perform one optimization step."""
                # Run forward & backward to get gradients, variables list.
                g_v = d.extended.call_for_each_replica(grad_fn, args=(one, ))

                # Update the variables using the gradients and the update() function.
                before_list = []
                after_list = []
                for g, v in g_v:
                    fetched = d.extended.read_var(v)
                    before_list.append(fetched)
                    with ops.control_dependencies([fetched]):
                        g = d.extended.reduce_to(reduce_util.ReduceOp.SUM,
                                                 g,
                                                 destinations=v)
                        with ops.control_dependencies(
                                d.extended.update(v,
                                                  update,
                                                  args=(g, ),
                                                  group=False)):
                            after_list.append(d.extended.read_var(v))
                return before_list, after_list

            before_out, after_out = step()
            variables.global_variables_initializer().run()
            for i in range(10):
                b, a = sess.run((before_out, after_out))
                if i == 0:
                    before, = b
                after, = a

            error_before = abs(before - 1)
            error_after = abs(after - 1)
            # Error should go down
            self.assertLess(error_after, error_before)