def test_run_all_keras_modes_with_all_model_types_annotate_class_2(self):
    l = []

    @keras_parameterized.run_with_all_model_types
    class ExampleTest(keras_parameterized.TestCase):

      def runTest(self):
        pass

      @keras_parameterized.run_all_keras_modes
      @parameterized.named_parameters(dict(testcase_name="_arg",
                                           arg=True))
      def testBody(self, arg):
        mode = "eager" if context.executing_eagerly() else "graph"
        should_run_eagerly = testing_utils.should_run_eagerly()
        l.append((mode, should_run_eagerly, testing_utils.get_model_type()))

    e = ExampleTest()
    e.testBody_arg_v2_eager_functional()
    e.testBody_arg_v2_function_functional()
    e.testBody_arg_v2_eager_sequential()
    e.testBody_arg_v2_function_sequential()
    e.testBody_arg_v2_eager_subclass()
    e.testBody_arg_v2_function_subclass()

    if not tf2.enabled():
      e.testBody_arg_v1_graph_functional()
      e.testBody_arg_v1_graph_sequential()
      e.testBody_arg_v1_graph_subclass()

    expected_combinations = {
        ("eager", True, "functional"),
        ("eager", False, "functional"),
        ("eager", True, "sequential"),
        ("eager", False, "sequential"),
        ("eager", True, "subclass"),
        ("eager", False, "subclass"),
    }

    if not tf2.enabled():
      expected_combinations = expected_combinations.union({
          ("graph", False, "functional"),
          ("graph", False, "sequential"),
          ("graph", False, "subclass"),
      })

    self.assertLen(l, len(expected_combinations))
    self.assertEqual(set(l), expected_combinations)

    ts = unittest.makeSuite(ExampleTest)
    res = unittest.TestResult()
    ts.run(res)

    self.assertLen(l, len(expected_combinations) * 2)
  def test_run_all_keras_modes_extra_params(self):
    l = []

    class ExampleTest(keras_parameterized.TestCase):

      def runTest(self):
        pass

      @keras_parameterized.run_all_keras_modes
      @parameterized.named_parameters(
          [dict(testcase_name="_0", with_brackets=True),
           dict(testcase_name="_1", with_brackets=False)])
      def testBody(self, with_brackets):
        mode = "eager" if context.executing_eagerly() else "graph"
        with_brackets = "with_brackets" if with_brackets else "without_brackets"
        should_run_eagerly = testing_utils.should_run_eagerly()
        l.append((with_brackets, mode, should_run_eagerly))

    e = ExampleTest()
    if not tf2.enabled():
      e.testBody_0_v1_graph()
      e.testBody_1_v1_graph()

    e.testBody_0_v2_eager()
    e.testBody_0_v2_function()
    e.testBody_1_v2_eager()
    e.testBody_1_v2_function()

    expected_combinations = {
        ("with_brackets", "eager", True),
        ("with_brackets", "eager", False),
        ("without_brackets", "eager", True),
        ("without_brackets", "eager", False),
    }

    if not tf2.enabled():
      expected_combinations = expected_combinations.union({
          ("with_brackets", "graph", False),
          ("without_brackets", "graph", False),
      })

    self.assertLen(l, len(expected_combinations))
    self.assertEqual(set(l), expected_combinations)

    ts = unittest.makeSuite(ExampleTest)
    res = unittest.TestResult()
    ts.run(res)

    self.assertLen(l, len(expected_combinations) * 2)
  def test_optimizer_errors(self):
    opt = 1
    if tf2.enabled():
      expected_regex = ('"opt" must be an instance of a '
                        'tf.keras.optimizers.Optimizer, but got')
    else:
      expected_regex = ('"opt" must be an instance of a tf.train.Optimizer or '
                        'a tf.keras.optimizers.Optimizer, but got')
    with self.assertRaisesRegexp(ValueError, expected_regex):
      enable_mixed_precision_graph_rewrite(opt)
    self.assertFalse(config.get_optimizer_experimental_options()
                     .get('auto_mixed_precision', False))

    opt = gradient_descent_v1.GradientDescentOptimizer(1.0)
    opt = loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer(opt,
                                                                   'dynamic')
    with self.assertRaisesRegexp(ValueError,
                                 '"opt" must not already be an instance of a '
                                 'MixedPrecisionLossScaleOptimizer.'):
      enable_mixed_precision_graph_rewrite(opt)
    self.assertFalse(config.get_optimizer_experimental_options()
                     .get('auto_mixed_precision', False))

    opt = gradient_descent_v2.SGD(1.0)
    opt = loss_scale_optimizer_v2.LossScaleOptimizer(opt, 'dynamic')
    with self.assertRaisesRegexp(ValueError,
                                 '"opt" must not already be an instance of a '
                                 'LossScaleOptimizer.'):
      enable_mixed_precision_graph_rewrite(opt)
    self.assertFalse(config.get_optimizer_experimental_options()
                     .get('auto_mixed_precision', False))
Example 4
def deserialize(config, custom_objects=None):
  """Instantiates a layer from a config dictionary.

  Arguments:
      config: dict of the form {'class_name': str, 'config': dict}
      custom_objects: dict mapping class names (or function names)
          of custom (non-Keras) objects to class/functions

  Returns:
      Layer instance (may be Model, Sequential, Network, Layer...)
  """
  from tensorflow.python.keras import models  # pylint: disable=g-import-not-at-top
  globs = globals()  # All layers.
  globs['Network'] = models.Network
  globs['Model'] = models.Model
  globs['Sequential'] = models.Sequential
  layer_class_name = config['class_name']
  if layer_class_name in _DESERIALIZATION_TABLE:
    version = 'v2' if tf2.enabled() else 'v1'
    config['class_name'] = _DESERIALIZATION_TABLE[layer_class_name][version]

  return deserialize_keras_object(
      config,
      module_objects=globs,
      custom_objects=custom_objects,
      printable_module_name='layer')
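
A brief usage sketch of this helper (illustrative, not part of the original file): the config dict follows the standard {'class_name': ..., 'config': ...} layout described in the docstring, and round-trips a Dense layer through serialize/deserialize.

from tensorflow.python.keras import layers as keras_layers

# Serialize a layer to a config dict, then rebuild it with the helper above.
dense = keras_layers.Dense(4, activation='relu')
config = keras_layers.serialize(dense)  # {'class_name': 'Dense', 'config': {...}}
restored = keras_layers.deserialize(config)
assert isinstance(restored, keras_layers.Dense)
assert restored.units == 4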
Example 5
def get_expected_metric_variable_names(var_names, name_suffix=''):
  """Returns expected metric variable names given names and prefix/suffix."""
  if tf2.enabled() or context.executing_eagerly():
    # In V2, and in V1 eager mode, variable names are not made unique.
    return [n + ':0' for n in var_names]
  # In V1 graph mode variable names are made unique using a suffix.
  return [n + name_suffix + ':0' for n in var_names]
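
A small, hedged illustration of the two branches above (the variable names and the '_1' suffix are made up for this sketch):

names = get_expected_metric_variable_names(['total', 'count'], name_suffix='_1')
# V2, or V1 with eager execution:  ['total:0', 'count:0']
# V1 graph mode:                   ['total_1:0', 'count_1:0']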
Example 6
  def testBatchSplitting(self, input_type, api_type, iteration_type,
                         split_batch_by, distribution,
                         enable_get_next_as_optional):
    worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])]
    batch_size = 10
    if tf2.enabled():
      dataset_fn = lambda _: dataset_ops.DatasetV2.range(100).batch(batch_size)
    else:
      dataset_fn = lambda _: dataset_ops.Dataset.range(100).batch(batch_size)

    updated_batch_size = (
        batch_size // split_batch_by if split_batch_by else batch_size)
    expected_values = [[range(i, i+updated_batch_size),
                        range(i+updated_batch_size, i+2*updated_batch_size)]
                       for i in range(0, 100, updated_batch_size*2)]

    self._test_input_iteration(
        input_type,
        api_type,
        iteration_type,
        dataset_fn,
        worker_device_pairs,
        expected_values,
        distribution,
        sess=None,
        split_batch_by=split_batch_by,
        enable_get_next_as_optional=True)
Example 7
 def testUnevenDatasetBatches(self, input_type, api_type, iteration_type):
   strategy = mirrored_strategy.MirroredStrategy(
       devices=(self._cpu_and_one_gpu_devices()[0][1] +
                self._cpu_and_one_gpu_devices()[1][1]),
       cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(
           ["/job:worker/task:0", "/job:worker/task:1"], 2))
   worker_devices = self._cpu_and_one_gpu_devices()
   with context.graph_mode(), strategy.scope(), self.cached_session() as sess:
     if tf2.enabled():
       dataset_fn = lambda _: dataset_ops.DatasetV2.range(9).batch(2)
     else:
       dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch(2)
     if input_type == "dataset":
       # Autosharded
       expected_values = [[[0, 1], [4, 5], [2, 3], [6, 7]], [[8], [], [], []]]
     else:
       expected_values = [[[0, 1], [2, 3], [0, 1], [2, 3]],
                          [[4, 5], [6, 7], [4, 5], [6, 7]], [[8], [], [8], []]]
     self._test_input_iteration(
         input_type,
         api_type,
         iteration_type,
         dataset_fn,
         worker_devices,
         expected_values,
         strategy,
         sess=sess,
         enable_get_next_as_optional=True)
Example 8
  def testOneDevicePerWorker(self, input_type, api_type, iteration_type,
                             enable_get_next_as_optional):
    strategy = mirrored_strategy.MirroredStrategy(
        devices=(self._cpu_devices()[0][1] + self._cpu_devices()[1][1]),
        cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(
            ["/job:worker/task:0", "/job:worker/task:1"], 1))
    worker_devices = self._cpu_devices()
    with context.graph_mode(), strategy.scope(), self.cached_session() as sess:
      if tf2.enabled():
        dataset_fn = lambda _: dataset_ops.DatasetV2.range(4)
      else:
        dataset_fn = lambda _: dataset_ops.Dataset.range(4)

      if input_type == "dataset":
        # Autosharded
        expected_values = [[0, 1], [2, 3]]
      else:
        expected_values = [[0, 0], [1, 1], [2, 2], [3, 3]]
      self._test_input_iteration(
          input_type,
          api_type,
          iteration_type,
          dataset_fn,
          worker_devices,
          expected_values,
          strategy,
          sess=sess,
          enable_get_next_as_optional=enable_get_next_as_optional)
Example 9
  def testGradientFloat16(self):

    def grad(x):
      with backprop.GradientTape() as tape:
        tape.watch(x)
        y = nn_ops.l2_loss(nn_ops.relu(x))
      return tape.gradient(y, x)

    def f():
      with test_util.use_gpu():
        # Randomly construct a 1D shape from [1, 40)
        shape = random_ops.random_uniform([1],
                                          minval=1,
                                          maxval=40,
                                          dtype=dtypes.int32)
        x32 = random_ops.random_uniform(shape, minval=-1, maxval=1)
        x16 = math_ops.cast(x32, dtype=dtypes.float16)
        return grad(x32), grad(x16)

    # We're going to ensure that the fp16 and fp32 gradients
    # are "close" to each other for ~100 random values.
    #
    # In TensorFlow 1.x, invoking f() (without eager execution enabled)
    # would construct a graph. Instead of constructing a graph with O(100) nodes,
    # we construct a single graph to be executed ~100 times in a Session.
    if not tf2.enabled():
      d32_tensor, d16_tensor = f()
      with self.cached_session() as sess:
        f = lambda: sess.run([d32_tensor, d16_tensor])

    # Repeat the experiment 100 times. Tensor shapes and values are randomly
    # generated for each run.
    for _ in xrange(100):
      d32, d16 = f()
      self.assertAllClose(d32, d16, atol=3e-4)
 def setUpClass(cls):
   if tf2.enabled():
     stats_aggregator._DEFAULT_MAX_QUEUE = 0  # pylint: disable=protected-access
     stats_aggregator.StatsAggregator = stats_aggregator.StatsAggregatorV2
     # TODO(b/116314787): add graph mode support for StatsAggregatorV2.
   else:
     stats_aggregator.StatsAggregator = stats_aggregator.StatsAggregatorV1
     return test_util.run_all_in_graph_and_eager_modes(cls)
Example 11
 def dataset_fn(ctx):
   del ctx
   if tf2.enabled():
     dataset1 = dataset_ops.DatasetV2.range(4)
     dataset2 = dataset_ops.DatasetV2.range(4).map(lambda x: x**2)
     return dataset_ops.DatasetV2.zip((dataset1, dataset2))
   else:
     dataset1 = dataset_ops.Dataset.range(4)
     dataset2 = dataset_ops.Dataset.range(4).map(lambda x: x**2)
     return dataset_ops.Dataset.zip((dataset1, dataset2))
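
A quick, hedged sanity check of what the zipped dataset yields, assuming the TF2/eager branch; the expected pairs follow directly from range(4) and the x**2 map.

for x, x_squared in dataset_fn(None):
  print(int(x), int(x_squared))  # prints 0 0, 1 1, 2 4, 3 9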
  def test_run_all_keras_modes(self):
    l = []

    class ExampleTest(keras_parameterized.TestCase):

      def runTest(self):
        pass

      @keras_parameterized.run_all_keras_modes
      def testBody(self):
        mode = "eager" if context.executing_eagerly() else "graph"
        should_run_eagerly = testing_utils.should_run_eagerly()
        l.append((mode, should_run_eagerly))

    e = ExampleTest()
    if not tf2.enabled():
      e.testBody_v1_graph()
    e.testBody_v2_eager()
    e.testBody_v2_function()

    if not tf2.enabled():
      self.assertLen(l, 3)
      self.assertAllEqual(l, [
          ("graph", False),
          ("eager", True),
          ("eager", False),
      ])

      ts = unittest.makeSuite(ExampleTest)
      res = unittest.TestResult()
      ts.run(res)
      self.assertLen(l, 6)
    else:
      self.assertLen(l, 2)
      self.assertAllEqual(l, [
          ("eager", True),
          ("eager", False),
      ])

      ts = unittest.makeSuite(ExampleTest)
      res = unittest.TestResult()
      ts.run(res)
      self.assertLen(l, 4)
 def assertStatisticsHasSum(self,
                            handle,
                            tag,
                            expected_value,
                            num_events=-1,
                            offset=0):
   if tf2.enabled():
     self._assertEventHasSum(handle, tag, expected_value, num_events, offset)
   else:
     self._assertSummaryHasSum(handle, tag, expected_value)
Example 14
 def test_singleton_list(self):
   shape = tensor_shape.TensorShape([])
   fn_true = lambda: [constant_op.constant(1)]
   fn_false = lambda: [constant_op.constant(3)]
   # Non-strict cond is only available in v1
   if not tf2.enabled():
     self._testShape(fn_true, fn_false, shape)
     self._testReturnValues(fn_true, fn_false, 1, 3)
   self._testShape(fn_true, fn_false, [shape], strict=True)
   self._testReturnValues(fn_true, fn_false, [1], [3], strict=True)
Example 15
 def test_serialize_deserialize_gru(self, layer):
   gru = layer(5, return_sequences=True)
   config = keras.layers.serialize(gru)
   self.assertEqual(config['class_name'], 'GRU')
   new_layer = keras.layers.deserialize(config)
   self.assertEqual(new_layer.units, 5)
   self.assertEqual(new_layer.return_sequences, True)
   if tf2.enabled():
     self.assertIsInstance(new_layer, keras.layers.UnifiedGRU)
   else:
     self.assertIsInstance(new_layer, keras.layers.GRU)
Example 16
def run_main(_):
  """Main in toco_convert.py."""
  if tf2.enabled():
    parser = _get_tf2_parser()
  else:
    parser = _get_tf1_parser()

  tflite_flags, unparsed = parser.parse_known_args(args=sys.argv[1:])

  if tf2.enabled():
    _convert_tf2_model(tflite_flags)
  else:
    try:
      _check_tf1_flags(tflite_flags, unparsed)
    except ValueError as e:
      parser.print_usage()
      file_name = os.path.basename(sys.argv[0])
      sys.stderr.write("{0}: error: {1}\n".format(file_name, str(e)))
      sys.exit(1)
    _convert_tf1_model(tflite_flags)
Example 17
 def test_serialize_deserialize_lstm(self, layer):
   lstm = layer(5, return_sequences=True)
   config = keras.layers.serialize(lstm)
   self.assertEqual(config['class_name'], 'LSTM')
   new_layer = keras.layers.deserialize(config)
   self.assertEqual(new_layer.units, 5)
   self.assertEqual(new_layer.return_sequences, True)
   if tf2.enabled():
     self.assertIsInstance(new_layer, keras.layers.UnifiedLSTM)
   else:
     self.assertIsInstance(new_layer, keras.layers.LSTM)
Example 18
 def test_singleton_tuple(self):
   shape = tensor_shape.TensorShape([])
   fn_true = lambda: (constant_op.constant(1),)
   fn_false = lambda: (constant_op.constant(3),)
   # Non-strict cond is only available in v1
   if not tf2.enabled():
     self._testShape(fn_true, fn_false, shape)
     self._testReturnValues(fn_true, fn_false, 1, 3)
   self._testShape(fn_true, fn_false, (shape,), strict=True)
   self._testReturnValues(fn_true, fn_false, (1,), (3,),
                          strict=True)
Example 19
  def testTwoDevicesOneGPUOneCPU(self, input_type, api_type, iteration_type):
    worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])]
    if tf2.enabled():
      dataset_fn = lambda _: dataset_ops.DatasetV2.range(10)
    else:
      dataset_fn = lambda _: dataset_ops.Dataset.range(10)

    expected_values = [[i, i+1] for i in range(0, 10, 2)]

    self._test_input_iteration(input_type, api_type, iteration_type, dataset_fn,
                               worker_device_pairs, expected_values)
 def assertStatisticsHasRange(self,
                              handle,
                              tag,
                              min_value,
                              max_value,
                              num_events=-1,
                              offset=0):
   if tf2.enabled():
     self._assertEventHasRange(handle, tag, min_value, max_value, num_events,
                               offset)
   else:
     self._assertSummaryHasRange(handle, tag, min_value, max_value)
Example 21
  def testUnevenDatasetBatches(self, input_type, api_type, iteration_type):
    worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])]
    if tf2.enabled():
      dataset_fn = lambda _: dataset_ops.DatasetV2.range(9).batch(2)
    else:
      dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch(2)

    # The last global batch only contains data for one replica.
    expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], []]]
    self._test_input_iteration(input_type, api_type, iteration_type, dataset_fn,
                               worker_device_pairs, expected_values,
                               enable_get_next_as_optional=True)
 def assertStatisticsHasCount(self,
                              handle,
                              tag,
                              count,
                              num_events=-1,
                              greater_than=False,
                              offset=0):
   if tf2.enabled():
     self._assertEventHasCount(handle, tag, count, num_events, greater_than,
                               offset)
   else:
     self._assertSummaryHasCount(handle, tag, count, greater_than)
Example 23
def _ProcessNewOps(graph):
  """Processes the newly-added TF_Operations in `graph`."""
  # Maps from a node to the names of the ops it's colocated with, if colocation
  # is specified in the attributes.
  colocation_pairs = {}

  for new_op in graph._add_new_tf_operations(compute_devices=False):  # pylint: disable=protected-access
    original_device = new_op.device
    new_op._set_device('')  # pylint: disable=protected-access
    colocation_names = _GetColocationNames(new_op)
    if colocation_names:
      colocation_pairs[new_op] = colocation_names
      # Don't set a device for this op, since colocation constraints override
      # device functions and the original device. Note that this op's device may
      # still be set by the loop below.
      # TODO(skyewm): why does it override the original device?
    else:
      with _MaybeDevice(original_device):
        graph._apply_device_functions(new_op)  # pylint: disable=protected-access

  # The following loop populates the device field of ops that are colocated
  # with another op.  This is implied by the colocation attribute, but we
  # propagate the device field for completeness.
  for op, coloc_op_list in colocation_pairs.items():
    coloc_device = None
    # Find any device in the list of colocated ops that has a device, if one
    # exists.  We assume that if multiple ops have devices, they refer to the
    # same device.  Otherwise, a runtime error will occur since the colocation
    # property cannot be guaranteed.  Note in TF2 colocations have been removed
    # from the public API and will be considered a hint, so there is no runtime
    # error.
    #
    # One possible improvement is to try to check for compatibility of all
    # devices in this list at import time here, which would require
    # implementing a compatibility function for device specs in python.
    for coloc_op_name in coloc_op_list:
      try:
        coloc_op = graph._get_operation_by_name_unsafe(coloc_op_name)  # pylint: disable=protected-access
      except KeyError:
        # Do not error in TF2 if the colocation cannot be guaranteed
        if tf2.enabled() or control_flow_util.EnableControlFlowV2(graph):
          continue

        raise ValueError('Specified colocation to an op that '
                         'does not exist during import: %s in %s' %
                         (coloc_op_name, op.name))
      if coloc_op.device:
        coloc_device = pydev.DeviceSpec.from_string(coloc_op.device)
        break
    if coloc_device:
      op._set_device(coloc_device)  # pylint: disable=protected-access
Example 24
 def test_serialize_deserialize_batchnorm(self):
   layer = keras.layers.BatchNormalization(
       momentum=0.9, beta_initializer='zeros', gamma_regularizer='l2')
   config = keras.layers.serialize(layer)
   self.assertEqual(config['class_name'], 'BatchNormalization')
   new_layer = keras.layers.deserialize(config)
   self.assertEqual(new_layer.momentum, 0.9)
   if tf2.enabled():
     self.assertEqual(new_layer.beta_initializer.__class__,
                      keras.initializers.ZerosV2)
   else:
     self.assertEqual(new_layer.beta_initializer.__class__,
                      keras.initializers.Zeros)
   self.assertEqual(new_layer.gamma_regularizer.__class__,
                    keras.regularizers.L1L2)
 def test_serialize_deserialize(self):
   layer = keras.layers.Dense(
       3, activation='relu', kernel_initializer='ones', bias_regularizer='l2')
   config = keras.layers.serialize(layer)
   new_layer = keras.layers.deserialize(config)
   self.assertEqual(new_layer.activation, keras.activations.relu)
   self.assertEqual(new_layer.bias_regularizer.__class__,
                    keras.regularizers.L1L2)
   if tf2.enabled():
     self.assertEqual(new_layer.kernel_initializer.__class__,
                      keras.initializers.OnesV2)
   else:
     self.assertEqual(new_layer.kernel_initializer.__class__,
                      keras.initializers.Ones)
   self.assertEqual(new_layer.units, 3)
Example 26
    def _worker_fn(task_type, task_id, num_gpus):
      del num_gpus
      tf_config = {
          "cluster": self._cluster_spec,
          "task": {
              "type": task_type,
              "index": task_id
          }
      }
      with context.graph_mode(), lock, test.mock.patch.dict(
          "os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
        strategy = strategy_cls()
      with context.graph_mode(), strategy.scope(), self.cached_session(
          target="grpc://" + self._cluster_spec[task_type][task_id]) as sess:
        if tf2.enabled():
          dataset_fn = lambda _: dataset_ops.DatasetV2.range(5).batch(2)
        else:
          dataset_fn = lambda _: dataset_ops.Dataset.range(5).batch(2)
        if (input_type == "dataset" and strategy_cls is
            collective_all_reduce_strategy.CollectiveAllReduceStrategy):
          # Autosharded
          if task_id == 0:
            expected_values = [[[0, 1]], [[4]]]
          else:
            expected_values = [[[2, 3]], [[]]]

          # input_context is for between-graph auto-sharding.
          input_context = distribute_lib.InputContext(
              num_input_pipelines=2,
              input_pipeline_id=task_id,
              num_replicas_in_sync=2)
        else:
          expected_values = [[[0, 1]], [[2, 3]], [[4]]]
          input_context = None

        self._test_input_iteration(
            input_type,
            api_type,
            iteration_type,
            dataset_fn,
            [("/job:%s/task:%d" %
              (task_type, task_id), strategy.extended.worker_devices)],
            expected_values,
            strategy,
            sess=sess,
            enable_get_next_as_optional=True,
            input_context=input_context)
        return True
Example 27
  def testTwoDevicesPerWorker(self, input_type, api_type, iteration_type):
    worker_devices = self._cpu_and_one_gpu_devices()
    with context.graph_mode(), self.cached_session() as sess:
      if tf2.enabled():
        dataset_fn = lambda _: dataset_ops.DatasetV2.range(4)
      else:
        dataset_fn = lambda _: dataset_ops.Dataset.range(4)

      if input_type == "dataset":
        # Autosharded
        expected_values = [[0, 2, 1, 3]]
      else:
        expected_values = [[0, 1, 0, 1], [2, 3, 2, 3]]
      self._test_input_iteration(input_type, api_type, iteration_type,
                                 dataset_fn, worker_devices,
                                 expected_values, sess)
Example 28
 def testUnevenDatasetBatches(self, input_type, api_type, iteration_type):
   worker_devices = self._cpu_and_one_gpu_devices()
   with context.graph_mode(), self.cached_session() as sess:
     if tf2.enabled():
       dataset_fn = lambda _: dataset_ops.DatasetV2.range(9).batch(2)
     else:
       dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch(2)
     if input_type == "dataset":
       # Autosharded
       expected_values = [[[0, 1], [4, 5], [2, 3], [6, 7]], [[8], [], [], []]]
     else:
       expected_values = [[[0, 1], [2, 3], [0, 1], [2, 3]],
                          [[4, 5], [6, 7], [4, 5], [6, 7]], [[8], [], [8], []]]
     self._test_input_iteration(input_type, api_type, iteration_type,
                                dataset_fn, worker_devices, expected_values,
                                sess, enable_get_next_as_optional=True)
Example 29
def deserialize(config, custom_objects=None):
  """Return an `Initializer` object from its config."""
  if tf2.enabled():
    # Class names are the same for V1 and V2 but the V2 classes
    # are aliased in this file so we need to grab them directly
    # from `init_ops_v2`.
    module_objects = {
        obj_name: getattr(init_ops_v2, obj_name)
        for obj_name in dir(init_ops_v2)
    }
  else:
    module_objects = globals()
  return deserialize_keras_object(
      config,
      module_objects=module_objects,
      custom_objects=custom_objects,
      printable_module_name='initializer')
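
A minimal usage sketch (the config dict is made up for illustration): rebuilding an initializer from a {'class_name': ..., 'config': ...} dict with the helper above.

config = {'class_name': 'Ones', 'config': {}}
init = deserialize(config)
# Resolves to init_ops_v2.Ones when tf2.enabled(), and to the V1 class otherwise.
values = init(shape=(2, 2))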
Example 30
  def testOneDeviceCPU(self, input_type, api_type, iteration_type, distribution,
                       enable_get_next_as_optional):
    worker_device_pairs = [("", ["/device:CPU:0"])]
    if tf2.enabled():
      dataset_fn = lambda _: dataset_ops.DatasetV2.range(10)
    else:
      dataset_fn = lambda _: dataset_ops.Dataset.range(10)

    expected_values = [[i] for i in range(10)]

    self._test_input_iteration(
        input_type,
        api_type,
        iteration_type,
        dataset_fn,
        worker_device_pairs,
        expected_values,
        distribution,
        enable_get_next_as_optional=enable_get_next_as_optional)
Example 31
def save_model(model,
               filepath,
               overwrite=True,
               include_optimizer=True,
               save_format=None,
               signatures=None,
               options=None):
    """Saves a model as a TensorFlow SavedModel or HDF5 file.

  The saved model contains:
      - the model's configuration (topology)
      - the model's weights
      - the model's optimizer's state (if any)

  Thus the saved model can be reinstantiated in
  the exact same state, without any of the code
  used for model definition or training.

  Note that the model weights may have different scoped names after being
  loaded. Scoped names include the model/layer names, such as
  "dense_1/kernel:0"`. It is recommended that you use the layer properties to
  access specific variables, e.g. `model.get_layer("dense_1").kernel`.

  _SavedModel serialization_

  The SavedModel serialization path uses `tf.saved_model.save` to save the model
  and all trackable objects attached to the model (e.g. layers and variables).
  `@tf.function`-decorated methods are also saved. Additional trackable objects
  and functions are added to the SavedModel to allow the model to be
  loaded back as a Keras Model object.

  Arguments:
      model: Keras model instance to be saved.
      filepath: One of the following:
        - String or `pathlib.Path` object, path where to save the model
        - `h5py.File` object where to save the model
      overwrite: Whether we should overwrite any existing model at the target
        location, or instead ask the user with a manual prompt.
      include_optimizer: If True, save the optimizer's state together with the model.
      save_format: Either 'tf' or 'h5', indicating whether to save the model
        to Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and 'h5'
        in TF 1.X.
      signatures: Signatures to save with the SavedModel. Applicable to the 'tf'
        format only. Please see the `signatures` argument in
        `tf.saved_model.save` for details.
      options: Optional `tf.saved_model.SaveOptions` object that specifies
        options for saving to SavedModel.

  Raises:
      ImportError: If save format is hdf5, and h5py is not available.
  """
    from tensorflow.python.keras.engine import sequential  # pylint: disable=g-import-not-at-top

    default_format = 'tf' if tf2.enabled() else 'h5'
    save_format = save_format or default_format

    if sys.version_info >= (3, 4) and isinstance(filepath, pathlib.Path):
        filepath = str(filepath)

    if (save_format == 'h5'
            or (h5py is not None and isinstance(filepath, h5py.File))
            or os.path.splitext(filepath)[1] in _HDF5_EXTENSIONS):
        # TODO(b/130258301): add utility method for detecting model type.
        if (not model._is_graph_network and  # pylint:disable=protected-access
                not isinstance(model, sequential.Sequential)):
            raise NotImplementedError(
                'Saving the model to HDF5 format requires the model to be a '
                'Functional model or a Sequential model. It does not work for '
                'subclassed models, because such models are defined via the body of '
                'a Python method, which isn\'t safely serializable. Consider saving '
                'to the Tensorflow SavedModel format (by setting save_format="tf") '
                'or using `save_weights`.')
        hdf5_format.save_model_to_hdf5(model, filepath, overwrite,
                                       include_optimizer)
    else:
        saved_model_save.save(model, filepath, overwrite, include_optimizer,
                              signatures, options)
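
A short usage sketch through the public tf.keras API, which routes into this function; the file paths are illustrative only.

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(3,))])
model.save('/tmp/example_model')     # SavedModel ('tf') format under TF 2.x
model.save('/tmp/example_model.h5')  # HDF5 format, selected by the extension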
Example 32
    def decorated(self, *args, **kwargs):
      if not tf2.enabled():
        self.skipTest('Test is only compatible with v2')

      return f(self, *args, **kwargs)
  def __init__(self,
               proc_func,
               cluster_spec,
               rpc_layer=None,
               max_run_time=None,
               grpc_fail_fast=None,
               stream_stdout=True,
               list_stdout=False,
               args=None,
               kwargs=None):
    """Creates a multi-process runner.

    Args:
      proc_func: Function to be run on child processes. This will be run on
        processes for all task types.
      cluster_spec: Dict for cluster spec. The following is an example of
        a cluster with three workers and two ps's.
        {"worker": ["worker0.example.com:2222",
                    "worker1.example.com:2222",
                    "worker2.example.com:2222"],
         "ps": ["ps0.example.com:2222",
                "ps1.example.com:2222"]}
      rpc_layer: RPC layer to use. Default value is 'grpc+loas'.
      max_run_time: If set, child processes are forced to exit at approximately
        this many seconds after `start` is called. We achieve this through the
        `signal.alarm()` api. Note that this is best effort at the Python level,
        since the Python signal handler does not run while lower-level C/C++
        code is executing, so the exit can be delayed arbitrarily long.
      grpc_fail_fast: Whether GRPC connection between processes should fail
        without retrying. Defaults to None, in which case the environment
        variable is not explicitly set.
      stream_stdout: True if the output/error from the subprocesses should be
        streamed to be printed in parent process' log. Defaults to True.
      list_stdout: True if the output/error from the subprocesses should be
        collected to be attached to the resulting `MultiProcessRunnerResult`
        returned from `MultiProcessRunner.join()`. If True, the list of stdout
        can be retrieved via `MultiProcessRunnerResult.stdout` attribute.
        Defaults to False.
      args: Positional arguments to be sent to functions run on processes.
      kwargs: Keyword arguments to be sent to functions run on processes.

    Raises:
      RuntimeError: if `multi_process_runner.test_main()` is not called.
      ValueError: if there is more than one chief in the `cluster_spec`.
    """
    assert cluster_spec is not None
    if 'chief' in cluster_spec and len(cluster_spec['chief']) > 1:
      raise ValueError('If chief exists in the cluster, there must be at most '
                       'one chief. Current `cluster_spec` has {} chiefs.'
                       .format(len(cluster_spec['chief'])))

    assert callable(proc_func)

    if not multi_process_lib.using_context_manager():
      raise RuntimeError('`multi_process_runner` is not initialized. '
                         'Please call `multi_process_runner.test_main()` '
                         'within `if __name__ == \'__main__\':` block '
                         'in your python module to properly initialize '
                         '`multi_process_runner`.')

    self._proc_func = proc_func
    self._cluster_spec = cluster_spec
    self._rpc_layer = rpc_layer
    self._max_run_time = max_run_time
    self._grpc_fail_fast = grpc_fail_fast
    self._stream_stdout = stream_stdout
    # TODO(rchao): Revisit list_stdout argument to consider other solution.
    self._list_stdout = list_stdout
    self._dependence_on_chief = True
    self._args = args or ()
    self._kwargs = kwargs or {}

    self._outstanding_subprocess_count = 0

    # Child processes should have the same v2 and eager behavior.
    self._v2_enabled = tf2.enabled()
    self._executing_eagerly = context.executing_eagerly()

    # This flag will be set to True once terminate_all() is called.
    self._all_forced_terminated = False
Example 34
 def _build_graph_network_for_inferred_shape(self,
                                             input_shape,
                                             input_dtype=None):
     if input_shape is None or not self.layers:
         return
     if not tf2.enabled() or not ops.executing_eagerly_outside_functions():
         # This behavior is disabled in V1 or when eager execution is disabled.
         return
     if (not self._has_explicit_input_shape
             and not self._use_legacy_deferred_behavior):
         # Determine whether the input shape is novel, i.e. whether the model
         # should be rebuilt.
         input_shape = tuple(input_shape)
         if self._inferred_input_shape is None:
             new_shape = input_shape
         else:
             new_shape = relax_input_shape(self._inferred_input_shape,
                                           input_shape)
         if (new_shape is not None
                 and new_shape != self._inferred_input_shape):
             # A novel shape has been received: we need to rebuild the model.
             # In case we are inside a graph function, we step out of it.
             with ops.init_scope():
                 inputs = input_layer.Input(batch_shape=new_shape,
                                            dtype=input_dtype,
                                            name=self.layers[0].name +
                                            '_input')
                 layer_input = inputs
                 created_nodes = set()
                 for layer in self.layers:
                     # Clear nodes previously created via this method. This prevents
                     # node accumulation and ensures that e.g. `layer.output` is
                     # always connected to `model.inputs`
                     # (this is important e.g. for the feature extraction use case).
                     # We don't just do `layer._inbound_nodes = []` in order
                     # not to break shared layers added to Sequential models (which is
                     # technically illegal as per the `add()` docstring,
                     # but wasn't previously disabled).
                     clear_previously_created_nodes(layer,
                                                    self._created_nodes)
                     try:
                         # Create Functional API connection by calling the current layer
                         layer_output = layer(layer_input)
                     except:  # pylint:disable=bare-except
                         # Functional API calls may fail for a number of reasons:
                         # 1) The layer may be buggy. In this case it will be easier for
                         # the user to debug if we fail on the first call on concrete data,
                         # instead of our own call on a symbolic input.
                         # 2) The layer is dynamic (graph-incompatible) and hasn't
                         # overridden `compute_output_shape`. In this case, it is
                         # impossible to build a graph network.
                         # 3) The layer is otherwise incompatible with the Functional API
                         # (e.g. this is the case for some probabilistic layers that rely
                         # on hacks and that do not return tensors).
                         # In all these cases, we should avoid creating a graph network
                         # (or we simply can't).
                         self._use_legacy_deferred_behavior = True
                         return
                     if len(nest.flatten(layer_output)) != 1:
                         raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
                     # Keep track of nodes just created above
                     track_nodes_created_by_last_call(layer, created_nodes)
                     layer_input = layer_output
                     outputs = layer_output
                 self._created_nodes = created_nodes
                 try:
                     # Initialize a graph Network. This call will never fail for
                     # a stack of valid Keras layers.
                     # However some users have layers that are fundamentally incompatible
                     # with the Functional API, which do not return tensors. In this
                     # case, we fall back to the legacy deferred behavior.
                     # TODO(fchollet): consider raising here, as we should not be
                     # supporting such layers.
                     self._init_graph_network(inputs,
                                              outputs,
                                              name=self.name)
                     self._graph_initialized = True
                 except:  # pylint:disable=bare-except
                     self._use_legacy_deferred_behavior = True
             self._inferred_input_shape = new_shape
 def setUp(self):
     super().setUp()
     if (not tf2.enabled() or tft_test_case.is_external_environment()
             and version.parse(tf.version.VERSION) < version.parse('2.3')):
         raise tft_test_case.SkipTest(
             'This test requires TF version >= 2.3')
Example 36
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer as loss_scale_optimizer_v2
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import gradient_descent as gradient_descent_v1
from tensorflow.python.training.experimental import loss_scale_optimizer as loss_scale_optimizer_v1
from tensorflow.python.training.experimental import mixed_precision
from tensorflow.python.training.experimental import mixed_precision_global_state


if tf2.enabled():
  enable_mixed_precision_graph_rewrite = (
      mixed_precision.enable_mixed_precision_graph_rewrite)
else:
  enable_mixed_precision_graph_rewrite = (
      mixed_precision.enable_mixed_precision_graph_rewrite_v1)
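
# A hedged usage sketch (not in the original test file): the function selected
# above wraps an optimizer so that Grappler's auto_mixed_precision pass rewrites
# any graph that uses it. 'dynamic' loss scaling is the documented default.
def _example_graph_rewrite_usage():
  sgd = gradient_descent_v2.SGD(1.0)
  return enable_mixed_precision_graph_rewrite(sgd, loss_scale='dynamic')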


class MixedPrecisionTest(test.TestCase, parameterized.TestCase):

  IGNORE_PERF_VAR = 'TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE'

  def setUp(self):
    super(MixedPrecisionTest, self).setUp()
    # Enable the tests to be run on pre-Volta GPUs by telling the grappler pass
    # to ignore performance and always transform the graph.
Example 37
    def __init__(self,
                 proc_func,
                 cluster_spec,
                 rpc_layer=None,
                 max_run_time=None,
                 grpc_fail_fast=None,
                 stream_stdout=True,
                 list_stdout=False,
                 use_dill_for_args=True,
                 daemon=False,
                 dependence_on_chief=True,
                 auto_restart=False,
                 args=None,
                 kwargs=None):
        """Creates a multi-process runner.

    Args:
      proc_func: Function to be run on child processes. This will be run on
        processes for all task types.
      cluster_spec: Dict for cluster spec. The following is an example of
        a cluster with three workers and two ps's.
        {"worker": ["worker0.example.com:2222",
                    "worker1.example.com:2222",
                    "worker2.example.com:2222"],
         "ps": ["ps0.example.com:2222",
                "ps1.example.com:2222"]}
      rpc_layer: RPC layer to use. Default value is 'grpc'.
      max_run_time: If set, child processes are forced to exit at approximately
        this many seconds after `start` is called. We achieve this through the
        `signal.alarm()` api. Note that this is best effort at the Python level,
        since the Python signal handler does not run while lower-level C/C++
        code is executing, so the exit can be delayed arbitrarily long.
        If any of the child processes are still running when `max_run_time` is
        up, they will be force-terminated and an `UnexpectedSubprocessExitError`
        may be raised from `join()`.
      grpc_fail_fast: Whether GRPC connection between processes should fail
        without retrying. Defaults to None, in which case the environment
        variable is not explicitly set.
      stream_stdout: True if the output/error from the subprocesses should be
        streamed to be printed in parent process' log. Defaults to True.
      list_stdout: True if the output/error from the subprocesses should be
        collected to be attached to the resulting `MultiProcessRunnerResult`
        returned from `MultiProcessRunner.join()`. If True, the list of stdout
        can be retrieved via `MultiProcessRunnerResult.stdout` attribute.
        Defaults to False.
      use_dill_for_args: Whether to use dill to pickle `args` and `kwargs`. dill
        can pickle more objects, but doesn't work with types in
        `multiprocessing` library like `Mutex`.
      daemon: Whether to start processes as daemons.
      dependence_on_chief: Whether to terminate the cluster if the chief exits.
        If auto_restart is True, it only terminates the cluster if the chief
        exits with a zero exit code.
      auto_restart: Whether to automatically restart processes that exit with
        non-zero exit code.
      args: Positional arguments to be sent to functions run on processes.
      kwargs: Keyword arguments to be sent to functions run on processes.

    Raises:
      RuntimeError: if `multi_process_runner.test_main()` is not called.
      ValueError: if there is more than one chief in the `cluster_spec`.
    """
        assert cluster_spec is not None
        if 'chief' in cluster_spec and len(cluster_spec['chief']) > 1:
            raise ValueError(
                'If chief exists in the cluster, there must be at most '
                'one chief. Current `cluster_spec` has {} chiefs.'.format(
                    len(cluster_spec['chief'])))
        if not multi_process_lib.initialized():
            raise RuntimeError(
                '`multi_process_runner` is not initialized. '
                'Please call `multi_process_runner.test_main()` '
                'within `if __name__ == \'__main__\':` block '
                'in your python module to properly initialize '
                '`multi_process_runner`.')
        if not callable(proc_func):
            raise ValueError('proc_func is not a callable')

        self._proc_func = proc_func
        self._cluster_spec = cluster_spec
        self._rpc_layer = rpc_layer or 'grpc'
        self._max_run_time = max_run_time
        self._grpc_fail_fast = grpc_fail_fast
        self._stream_stdout = stream_stdout
        # TODO(rchao): Revisit list_stdout argument to consider other solution.
        self._list_stdout = list_stdout
        self._dependence_on_chief = dependence_on_chief
        self._use_dill_for_args = use_dill_for_args
        self._daemon = daemon
        self._auto_restart = auto_restart
        self._args = args or ()
        self._kwargs = kwargs or {}

        # Child processes should have the same v2 and eager behavior.
        self._v2_enabled = tf2.enabled()
        self._executing_eagerly = context.executing_eagerly()

        self._joined = False
        self._process_lock = threading.Lock()
        # Guarded by self._process_lock.
        self._processes = {}
        # Record which processes have been terminated. Due to a bug in Python<3.7,
        # terminated processes return a 255 exit code, which would otherwise
        # cause an exception in join().
        # https://bugs.python.org/issue30589
        # Guarded by self._process_lock.
        self._terminated = set()
        self._reading_threads = []

        self._manager = manager()
        self._process_status_queue = self._manager.Queue()
        self._parent_to_sub_queue = self._manager.Queue()
        parties = sum(
            len(addresses) for addresses in self._cluster_spec.values())
        self._barrier = self._manager.Barrier(parties)

        # We use a queue to collect outputs from worker processes since it's thread
        # safe.
        self._streaming_queue = self._manager.Queue()

        self._watchdog_thread = None
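
A hedged usage sketch of the runner defined above; the worker function and cluster spec are made up, and start()/join() follow the runner's public method names (join() returns the MultiProcessRunnerResult mentioned in the docstring).

def example_proc_func():
  print('running inside a worker subprocess')

runner = MultiProcessRunner(
    example_proc_func,
    cluster_spec={'worker': ['localhost:12345', 'localhost:23456']},
    list_stdout=True)
runner.start()
result = runner.join()  # result.stdout holds the collected subprocess output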
def v2_dtype_behavior_enabled():
    """Returns True if the V2 dtype behavior is enabled."""
    if V2_DTYPE_BEHAVIOR is None:
        return tf2.enabled()
    return V2_DTYPE_BEHAVIOR
def run_all_keras_modes(test_or_class=None,
                        config=None,
                        always_skip_v1=False,
                        always_skip_eager=False):
    """Execute the decorated test with all keras execution modes.

  This decorator is intended to be applied either to individual test methods in
  a `keras_parameterized.TestCase` class, or directly to a test class that
  extends it. Doing so will cause the contents of the individual test
  method (or all test methods in the class) to be executed multiple times -
  once executing in legacy graph mode, once running eagerly and with
  `should_run_eagerly` returning True, and once running eagerly with
  `should_run_eagerly` returning False.

  If Tensorflow v2 behavior is enabled, legacy graph mode will be skipped, and
  the test will only run twice.

  Note: if stacking this decorator with absl.testing's parameterized decorators,
  those should be at the bottom of the stack.

  For example, consider the following unittest:

  ```python
  class MyTests(testing_utils.KerasTestCase):

    @testing_utils.run_all_keras_modes
    def test_foo(self):
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
      optimizer = RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      metrics = ['mae']
      model.compile(optimizer, loss, metrics=metrics,
                    run_eagerly=testing_utils.should_run_eagerly())

      inputs = np.zeros((10, 3))
      targets = np.zeros((10, 4))
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)

  if __name__ == "__main__":
    tf.test.main()
  ```

  This test will try compiling & fitting the small functional mlp using all
  three Keras execution modes.

  Args:
    test_or_class: test method or class to be annotated. If None,
      this method returns a decorator that can be applied to a test method or
      test class. If it is not None this returns the decorator applied to the
      test or class.
    config: An optional config_pb2.ConfigProto to use to configure the
      session when executing graphs.
    always_skip_v1: If True, does not try running the legacy graph mode even
      when Tensorflow v2 behavior is not enabled.
    always_skip_eager: If True, does not execute the decorated test
      with eager execution modes.

  Returns:
    Returns a decorator that will run the decorated test method multiple times.

  Raises:
    ImportError: If abseil parameterized is not installed or not included as
      a target dependency.
  """

    params = [('_v2_function', 'v2_function')]
    if not always_skip_eager:
        params.append(('_v2_eager', 'v2_eager'))
    if not (always_skip_v1 or tf2.enabled()):
        params.append(('_v1_graph', 'v1_graph'))

    def single_method_decorator(f):
        """Decorator that constructs the test cases."""

        # Use named_parameters so it can be individually run from the command line
        @parameterized.named_parameters(*params)
        @functools.wraps(f)
        def decorated(self, run_mode, *args, **kwargs):
            """A run of a single test case w/ specified run mode."""
            if run_mode == 'v1_graph':
                _v1_graph_test(f, self, config, *args, **kwargs)
            elif run_mode == 'v2_function':
                _v2_graph_functions_test(f, self, *args, **kwargs)
            elif run_mode == 'v2_eager':
                _v2_eager_test(f, self, *args, **kwargs)
            else:
                raise ValueError('Unknown run mode %s' % run_mode)

        return decorated

    return _test_or_class_decorator(test_or_class, single_method_decorator)
  def test_run_all_keras_modes_with_all_model_types_annotate_class_2(self):
    l = []

    @keras_parameterized.run_with_all_model_types
    class ExampleTest(keras_parameterized.TestCase):

      def runTest(self):
        pass

      @keras_parameterized.run_all_keras_modes
      @parameterized.named_parameters(dict(testcase_name="_arg",
                                           arg=True))
      def testBody(self, arg):
        mode = "eager" if context.executing_eagerly() else "graph"
        should_run_eagerly = testing_utils.should_run_eagerly()
        should_run_tf_function = testing_utils.should_run_tf_function()
        l.append((mode, should_run_eagerly, should_run_tf_function,
                  testing_utils.get_model_type()))

    e = ExampleTest()
    e.testBody_arg_v2_eager_functional()
    e.testBody_arg_v2_funcgraph_functional()
    e.testBody_arg_v2_function_functional()
    e.testBody_arg_v2_eager_sequential()
    e.testBody_arg_v2_funcgraph_sequential()
    e.testBody_arg_v2_function_sequential()
    e.testBody_arg_v2_eager_subclass()
    e.testBody_arg_v2_funcgraph_subclass()
    e.testBody_arg_v2_function_subclass()

    if not tf2.enabled():
      e.testBody_arg_v1_session_functional()
      e.testBody_arg_v1_session_sequential()
      e.testBody_arg_v1_session_subclass()

    expected_combinations = {
        ("eager", True, True, "functional"),
        ("eager", False, False, "functional"),
        ("eager", False, True, "functional"),
        ("eager", True, True, "sequential"),
        ("eager", False, False, "sequential"),
        ("eager", False, True, "sequential"),
        ("eager", True, True, "subclass"),
        ("eager", False, False, "subclass"),
        ("eager", False, True, "subclass"),
    }

    if not tf2.enabled():
      expected_combinations = expected_combinations.union({
          ("graph", False, False, "functional"),
          ("graph", False, False, "sequential"),
          ("graph", False, False, "subclass"),
      })

    self.assertLen(l, len(expected_combinations))
    self.assertEqual(set(l), expected_combinations)

    ts = unittest.makeSuite(ExampleTest)
    res = unittest.TestResult()
    ts.run(res)

    self.assertLen(l, len(expected_combinations) * 2)
Example 41
def is_v2_behavior_enabled():
  """Returns if user called tf.enable_v2_behavior."""

  # Since there is no actual tf.is_v2_behavior enabled, check that the
  # settings were enabled.
  return tf2.enabled()
def block(inputs,
          activation_fn=swish,
          drop_rate=0.,
          name='',
          filters_in=32,
          filters_out=16,
          kernel_size=3,
          strides=1,
          expand_ratio=1,
          se_ratio=0.,
          id_skip=True):
    """A mobile inverted residual block.
    # Arguments
        inputs: input tensor.
        activation_fn: activation function.
        drop_rate: float between 0 and 1, fraction of the input units to drop.
        name: string, block label.
        filters_in: integer, the number of input filters.
        filters_out: integer, the number of output filters.
        kernel_size: integer, the dimension of the convolution window.
        strides: integer, the stride of the convolution.
        expand_ratio: integer, scaling coefficient for the input filters.
        se_ratio: float between 0 and 1, fraction to squeeze the input filters.
        id_skip: boolean.
    # Returns
        output tensor for the block.
    """
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1

    # Expansion phase
    filters = filters_in * expand_ratio
    if expand_ratio != 1:
        x = Conv2D(filters,
                   1,
                   padding='same',
                   use_bias=False,
                   kernel_initializer=CONV_KERNEL_INITIALIZER,
                   name=name + 'expand_conv')(inputs)
        x = BatchNormalization(axis=bn_axis, name=name + 'expand_bn')(x)
        x = Activation(activation_fn, name=name + 'expand_activation')(x)
    else:
        x = inputs

    # Depthwise Convolution
    if strides == 2:
        x = ZeroPadding2D(padding=correct_pad(K, x, kernel_size),
                          name=name + 'dwconv_pad')(x)
        conv_pad = 'valid'
    else:
        conv_pad = 'same'
    x = DepthwiseConv2D(kernel_size,
                        strides=strides,
                        padding=conv_pad,
                        use_bias=False,
                        depthwise_initializer=CONV_KERNEL_INITIALIZER,
                        name=name + 'dwconv')(x)
    x = BatchNormalization(axis=bn_axis, name=name + 'bn')(x)
    x = Activation(activation_fn, name=name + 'activation')(x)

    # Squeeze and Excitation phase
    if 0 < se_ratio <= 1:
        filters_se = max(1, int(filters_in * se_ratio))
        se = GlobalAveragePooling2D(name=name + 'se_squeeze')(x)
        se = Reshape((1, 1, filters), name=name + 'se_reshape')(se)
        se = Conv2D(filters_se,
                    1,
                    padding='same',
                    activation=activation_fn,
                    kernel_initializer=CONV_KERNEL_INITIALIZER,
                    name=name + 'se_reduce')(se)
        se = Conv2D(filters,
                    1,
                    padding='same',
                    activation='sigmoid',
                    kernel_initializer=CONV_KERNEL_INITIALIZER,
                    name=name + 'se_expand')(se)
        if K.backend() == 'theano':
            # For the Theano backend, we have to explicitly make
            # the excitation weights broadcastable.
            se = Lambda(
                lambda x: K.pattern_broadcast(x, [True, True, True, False]),
                output_shape=lambda input_shape: input_shape,
                name=name + 'se_broadcast')(se)
        x = multiply([x, se], name=name + 'se_excite')

    # Output phase
    x = Conv2D(filters_out,
               1,
               padding='same',
               use_bias=False,
               kernel_initializer=CONV_KERNEL_INITIALIZER,
               name=name + 'project_conv')(x)
    x = BatchNormalization(axis=bn_axis, name=name + 'project_bn')(x)
    if id_skip and strides == 1 and filters_in == filters_out:
        if drop_rate > 0:
            if tf2.enabled():
                x = Dropout(drop_rate,
                            noise_shape=(None, 1, 1, 1),
                            name=name + 'drop')(x)
            else:
                # Without V2 behavior, fall back to plain dropout and omit the
                # broadcastable noise_shape.
                x = Dropout(drop_rate,
                            name=name + 'drop')(x)
        x = add([x, inputs], name=name + 'add')

    return x
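A minimal usage sketch for block(): it wires a single block into a tiny functional model. This assumes tf.keras as the Keras implementation and that the module-level symbols block() references (swish, CONV_KERNEL_INITIALIZER, correct_pad and the layer classes) are in scope; the input shape and hyperparameters below are illustrative, not taken from any specific EfficientNet variant.

from tensorflow.keras import Input, Model

# Illustrative values only: a 112x112x32 feature map passed through one block
# with squeeze-and-excitation enabled. Because filters_in != filters_out, the
# identity skip (and therefore the dropout path) is not used here.
inputs = Input(shape=(112, 112, 32))
outputs = block(inputs,
                name='block1a_',
                filters_in=32,
                filters_out=16,
                kernel_size=3,
                strides=1,
                expand_ratio=1,
                se_ratio=0.25,
                drop_rate=0.2)
model = Model(inputs, outputs)
model.summary()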
Example n. 43
    def testUnevenDatasetBatchesBetweenGraph(self, input_type, api_type,
                                             iteration_type, strategy_cls):
        if api_type == "wrap_into_dataset" and input_type == "input_fn":
            self.skipTest("unsupported test combination.")
        if tf2.enabled():
            # The V2 tests are skipped since we don't support creating an
            # iterator for DistributedDataset in graph mode.
            self.skipTest("unsupported test combination")
        # Environment variable is global, we need locking when patching TF_CONFIG.
        lock = threading.Lock()

        def _worker_fn(task_type, task_id, num_gpus):
            del num_gpus
            tf_config = {
                "cluster": self._cluster_spec,
                "task": {
                    "type": task_type,
                    "index": task_id
                }
            }
            with context.graph_mode(), lock, test.mock.patch.dict(
                    "os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
                strategy = strategy_cls()
            with context.graph_mode(), strategy.scope(), self.cached_session(
                    target="grpc://" +
                    self._cluster_spec[task_type][task_id]) as sess:
                if tf2.enabled():
                    dataset_fn = lambda _: dataset_ops.DatasetV2.range(5).batch(2)
                else:
                    dataset_fn = lambda _: dataset_ops.Dataset.range(5).batch(2)
                dataset_or_input_fn = self._create_dataset_or_input_fn(
                    input_type, dataset_fn)
                if (input_type == "dataset" and strategy_cls is
                        collective_all_reduce_strategy.CollectiveAllReduceStrategy):
                    # Autosharded
                    if task_id == 0:
                        expected_values = [[[0, 1]], [[4]]]
                    else:
                        expected_values = [[[2, 3]], [[]]]

                    # input_context is for between-graph auto-sharding.
                    input_context = distribute_lib.InputContext(
                        num_input_pipelines=2,
                        input_pipeline_id=task_id,
                        num_replicas_in_sync=2)
                else:
                    expected_values = [[[0, 1]], [[2, 3]], [[4]]]
                    input_context = None

                strategy.extended.experimental_enable_get_next_as_optional = True
                self._test_input_iteration(
                    input_type,
                    api_type,
                    iteration_type,
                    dataset_or_input_fn,
                    [("/job:%s/task:%d" %
                      (task_type, task_id), strategy.extended.worker_devices)],
                    expected_values,
                    strategy,
                    sess=sess,
                    input_context=input_context)

        self._run_between_graph_clients(_worker_fn, self._cluster_spec, 0)
Example n. 44
    def testRaggedSparse(self, distribution, input_type, drop_remainder,
                         defun):
        """Test with `RaggedTensor`s and `SparseTensor`s."""
        if not tf2.enabled():
            self.skipTest("Only V2 is supported.")

        distribution.extended.experimental_enable_get_next_as_optional = True
        global_batch_size = 8

        def dataset_fn(ctx=None):
            ctx = ctx or distribute_lib.InputContext()
            batch_size = ctx.get_per_replica_batch_size(global_batch_size)
            # Use 20 which isn't divisible by 8 to test partial batch behavior.
            row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
            ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
                np.repeat(np.arange(20, dtype=np.float32), row_lengths),
                row_lengths)
            dataset = dataset_ops.DatasetV2.from_tensor_slices({
                "dense": ragged_tensor.to_tensor(),
                "ragged": ragged_tensor,
                "sparse": ragged_tensor.to_sparse(),
            })
            dataset = dataset.shard(ctx.num_input_pipelines,
                                    ctx.input_pipeline_id)
            return dataset.batch(batch_size, drop_remainder=drop_remainder)

        dataset_or_input_fn = self._create_dataset_or_input_fn(
            input_type, dataset_fn)
        dataset = self._wrap_dataset(input_type, dataset_or_input_fn,
                                     distribution.extended._input_workers,
                                     len(distribution.extended.worker_devices),
                                     distribution)
        # Assert that the tensors are rebatched and sparsity is preserved.
        per_replica_batch = defun(lambda x: next(iter(x)))(dataset)
        self.assertAllEqual(
            values.select_replica(0, per_replica_batch["dense"]),
            [[0., 0., 0.], [1., 0., 0.], [2., 2., 0.], [3., 3., 3.]])
        self.assertAllEqual(
            values.select_replica(1, per_replica_batch["dense"]),
            [[0., 0., 0.], [5., 0., 0.], [6., 6., 0.], [7., 7., 7.]])
        # Transitively check the ragged and sparse tensors by densification.
        for i in range(2):
            self.assertLen(
                values.select_replica(i, per_replica_batch["ragged"]).values,
                6)
            self.assertAllEqual(
                values.select_replica(i,
                                      per_replica_batch["ragged"]).to_tensor(),
                values.select_replica(i, per_replica_batch["dense"]))
            self.assertLen(
                values.select_replica(i, per_replica_batch["sparse"]).indices,
                6)
            self.assertAllEqual(
                sparse_ops.sparse_tensor_to_dense(
                    values.select_replica(i, per_replica_batch["sparse"])),
                values.select_replica(i, per_replica_batch["dense"]))
        # Iterate through all the batches and sum them up.
        def sum_batch(per_replica_features):
            """Sums the `PerReplica` values in the `per_replica_features` map."""
            def map_fn(per_replica_values):
                # SparseTensor replicas are summed over their .values; dense
                # and ragged replicas are summed directly.
                if all(map(sparse_tensor.is_sparse, per_replica_values.values)):
                    reduce_fn = lambda x: math_ops.reduce_sum(x.values)
                else:
                    reduce_fn = math_ops.reduce_sum
                per_replica_sums = distribution.experimental_run_v2(
                    reduce_fn, (per_replica_values,))
                return distribution.reduce(reduce_util.ReduceOp.SUM,
                                           per_replica_sums,
                                           axis=None)

            return nest.map_structure(map_fn, per_replica_features)

        def _reduce(state, batch):
            sums = sum_batch(batch)
            return {name: value + sums[name] for name, value in state.items()}

        def sum_for_loop(dataset):
            sums = {"dense": 0., "ragged": 0., "sparse": 0.}
            for batch in dataset:
                sums = _reduce(sums, batch)
            return sums

        def sum_while_loop(iterator, reduce_fn):
            sums = {"dense": 0., "ragged": 0., "sparse": 0.}
            while True:
                try:
                    sums = reduce_fn(sums, iterator)
                except (StopIteration, errors.OutOfRangeError):
                    return sums

        sums = sum_while_loop(
            iter(dataset),
            defun(lambda state, iterator: _reduce(state, next(iterator))))
        self.assertDictEqual(sums, defun(sum_for_loop)(dataset))
        self.assertAllEqual(
            nest.flatten(sums),
            # When there's no partial batch, the sum is smaller.
            [200. if input_type == "dataset" and drop_remainder else 310.] * 3)
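For reference, the ragged construction used above can be reproduced standalone with the public tf.RaggedTensor API (a sketch under that assumption, independent of the distribution machinery); it shows why the first per-replica dense batch is [[0, 0, 0], [1, 0, 0], [2, 2, 0], [3, 3, 3]].

import numpy as np
import tensorflow as tf

# Same construction as dataset_fn above: row i holds the value i repeated
# row_lengths[i] times, with row_lengths cycling through 0, 1, 2, 3.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
values = np.repeat(np.arange(20, dtype=np.float32), row_lengths)
ragged = tf.RaggedTensor.from_row_lengths(values, row_lengths)

# Densifying pads the short rows with zeros, so the first four rows match the
# expected first per-replica batch asserted above.
print(ragged[:4].to_tensor())
# [[0. 0. 0.]
#  [1. 0. 0.]
#  [2. 2. 0.]
#  [3. 3. 3.]]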
Example n. 45
    def __init__(self,
                 proc_func,
                 cluster_spec,
                 max_run_time=None,
                 capture_std_stream=False,
                 grpc_fail_fast=False,
                 args=None,
                 kwargs=None):
        """Creates a multi-process runner.

    Args:
      proc_func: Function to be run on child processes. This will be run on
        processes for all task types.
      cluster_spec: Dict for cluster spec. The following is an example of a
        cluster with three workers and two ps's.
        {"worker": ["worker0.example.com:2222",
                    "worker1.example.com:2222",
                    "worker2.example.com:2222"],
         "ps": ["ps0.example.com:2222",
                "ps1.example.com:2222"]}
      max_run_time: If set, child processes are forced to exit at approximately
        this many seconds after `start` is called. This is achieved through the
        `signal.alarm()` API. Note that this is best effort at the Python level,
        since the Python signal handler does not get executed while lower-level
        C/C++ code is running, so termination can be delayed arbitrarily long.
      capture_std_stream: Boolean, whether the messages streamed to stdout and
        stderr in subprocesses are captured.
      grpc_fail_fast: Whether GRPC connection between processes should fail
        without retrying. Defaults to False.
      args: Positional arguments to be sent to functions run on processes.
      kwargs: Keyword arguments to be sent to functions run on processes.

    Raises:
      RuntimeError: if `multi_process_runner.test_main()` is not called.
      ValueError: if there is more than one chief in the `cluster_spec`.
    """
        assert cluster_spec is not None
        if 'chief' in cluster_spec and len(cluster_spec['chief']) > 1:
            raise ValueError(
                'If chief exists in the cluster, there must be at most '
                'one chief. Current `cluster_spec` has {} chiefs.'.format(
                    len(cluster_spec['chief'])))

        assert callable(proc_func)

        if not multi_process_lib.using_context_manager():
            raise RuntimeError(
                '`multi_process_runner` is not initialized. '
                'Please call `multi_process_runner.test_main()` '
                'within `if __name__ == \'__main__\':` block '
                'in your python module to properly initialize '
                '`multi_process_runner`.')

        self._proc_func = proc_func
        self._cluster_spec = cluster_spec
        self._max_run_time = max_run_time
        self._capture_std_stream = capture_std_stream
        self._grpc_fail_fast = grpc_fail_fast
        self._args = args or ()
        self._kwargs = kwargs or {}
        self._outstanding_subprocess_count = 0

        # Child processes should have the same v2 and eager behavior.
        self._v2_enabled = tf2.enabled()
        self._executing_eagerly = context.executing_eagerly()
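A hedged usage sketch of this constructor. The module path and class name (multi_process_runner.MultiProcessRunner) and the start() call are assumptions inferred from the docstring rather than verified API.

from tensorflow.python.distribute import multi_process_runner


def proc_func():
  # Runs in every child process, for every task type in cluster_spec.
  print('hello from a task')


def run_example():
  cluster_spec = {
      'worker': ['worker0.example.com:2222', 'worker1.example.com:2222'],
      'ps': ['ps0.example.com:2222'],
  }
  runner = multi_process_runner.MultiProcessRunner(
      proc_func,
      cluster_spec,
      max_run_time=60,          # children get signal.alarm() roughly 60s after start
      capture_std_stream=True)  # capture stdout/stderr of the subprocesses
  runner.start()


if __name__ == '__main__':
  # Per the docstring, test_main() must run in the __main__ block so that the
  # runner is properly initialized before it is constructed; run_example()
  # would be invoked from a test executed under test_main().
  multi_process_runner.test_main()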
Example n. 46
 def assertStatisticsContains(self, handle, tag, num_events=-1, offset=0):
     if tf2.enabled():
         self._assertEventContains(handle, tag, num_events, offset)
     else:
         self._assertSummaryContains(handle, tag)
Example n. 47
 def creator(*args, **kwargs):
     if tf2.enabled():
         return tf2_cls(*args, **kwargs)
     return tf1_cls(*args, **kwargs)
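The closure above is a small version-dispatch pattern: construct the V2 class when tf.enable_v2_behavior is in effect, otherwise fall back to the V1 class. A sketch of the same pattern as a reusable factory (make_versioned_factory is a hypothetical name, not an existing helper):

from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops


def make_versioned_factory(tf1_callable, tf2_callable):
  """Returns a creator that dispatches on tf2.enabled() at call time."""
  def creator(*args, **kwargs):
    if tf2.enabled():
      return tf2_callable(*args, **kwargs)
    return tf1_callable(*args, **kwargs)
  return creator


# For example, pick between the V1 and V2 range datasets when called.
make_range = make_versioned_factory(dataset_ops.Dataset.range,
                                    dataset_ops.DatasetV2.range)
dataset = make_range(10)  # DatasetV2 under V2 behavior, Dataset (V1) otherwise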
Example n. 48
                        min_iters=25,
                        name="SVD_gpu_{shape}".format(shape=shape_))


if __name__ == "__main__":
    for compute_uv in False, True:
        for full_matrices in False, True:
            for dtype in np.float32, np.float64, np.complex64, np.complex128:
                for rows in 1, 2, 5, 10, 32, 100:
                    for cols in 1, 2, 5, 10, 32, 100:
                        for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
                            shape = batch_dims + (rows, cols)
                            # TF2 does not support placeholders under eager
                            # execution, so that case is skipped.
                            for use_static_shape in set([True, tf2.enabled()]):
                                name = "%s_%s_static_shape_%s__compute_uv_%s_full_%s" % (
                                    dtype.__name__, "_".join(map(
                                        str, shape)), use_static_shape,
                                    compute_uv, full_matrices)
                                _AddTest(
                                    SvdOpTest, "Svd", name,
                                    _GetSvdOpTest(dtype, shape,
                                                  use_static_shape, compute_uv,
                                                  full_matrices))
    for compute_uv in False, True:
        for full_matrices in False, True:
            dtypes = ([np.float32, np.float64] +
                      [np.complex64, np.complex128] * (not compute_uv))
            for dtype in dtypes:
                mat_shapes = [(10, 11), (11, 10), (11, 11), (2, 2, 2, 3)]
Example n. 49
def populate_deserializable_objects():
  """Populates dict ALL_OBJECTS with every built-in initializer.
  """
  global LOCAL
  if not hasattr(LOCAL, 'ALL_OBJECTS'):
    LOCAL.ALL_OBJECTS = {}
    LOCAL.GENERATED_WITH_V2 = None

  if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 == tf2.enabled():
    # Objects dict is already generated for the proper TF version:
    # do nothing.
    return

  LOCAL.ALL_OBJECTS = {}
  LOCAL.GENERATED_WITH_V2 = tf2.enabled()

  # Compatibility aliases (need to exist in both V1 and V2).
  LOCAL.ALL_OBJECTS['ConstantV2'] = initializers_v2.Constant
  LOCAL.ALL_OBJECTS['GlorotNormalV2'] = initializers_v2.GlorotNormal
  LOCAL.ALL_OBJECTS['GlorotUniformV2'] = initializers_v2.GlorotUniform
  LOCAL.ALL_OBJECTS['HeNormalV2'] = initializers_v2.HeNormal
  LOCAL.ALL_OBJECTS['HeUniformV2'] = initializers_v2.HeUniform
  LOCAL.ALL_OBJECTS['IdentityV2'] = initializers_v2.Identity
  LOCAL.ALL_OBJECTS['LecunNormalV2'] = initializers_v2.LecunNormal
  LOCAL.ALL_OBJECTS['LecunUniformV2'] = initializers_v2.LecunUniform
  LOCAL.ALL_OBJECTS['OnesV2'] = initializers_v2.Ones
  LOCAL.ALL_OBJECTS['OrthogonalV2'] = initializers_v2.Orthogonal
  LOCAL.ALL_OBJECTS['RandomNormalV2'] = initializers_v2.RandomNormal
  LOCAL.ALL_OBJECTS['RandomUniformV2'] = initializers_v2.RandomUniform
  LOCAL.ALL_OBJECTS['TruncatedNormalV2'] = initializers_v2.TruncatedNormal
  LOCAL.ALL_OBJECTS['VarianceScalingV2'] = initializers_v2.VarianceScaling
  LOCAL.ALL_OBJECTS['ZerosV2'] = initializers_v2.Zeros

  # Out of an abundance of caution we also include these aliases that have
  # a non-zero probability of having been included in saved configs in the past.
  LOCAL.ALL_OBJECTS['glorot_normalV2'] = initializers_v2.GlorotNormal
  LOCAL.ALL_OBJECTS['glorot_uniformV2'] = initializers_v2.GlorotUniform
  LOCAL.ALL_OBJECTS['he_normalV2'] = initializers_v2.HeNormal
  LOCAL.ALL_OBJECTS['he_uniformV2'] = initializers_v2.HeUniform
  LOCAL.ALL_OBJECTS['lecun_normalV2'] = initializers_v2.LecunNormal
  LOCAL.ALL_OBJECTS['lecun_uniformV2'] = initializers_v2.LecunUniform

  if tf2.enabled():
    # For V2, entries are generated automatically based on the content of
    # initializers_v2.py.
    v2_objs = {}
    base_cls = initializers_v2.Initializer
    generic_utils.populate_dict_with_module_objects(
        v2_objs,
        [initializers_v2],
        obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))
    for key, value in v2_objs.items():
      LOCAL.ALL_OBJECTS[key] = value
      # Functional aliases.
      LOCAL.ALL_OBJECTS[generic_utils.to_snake_case(key)] = value
  else:
    # V1 initializers.
    v1_objs = {
        'Constant': init_ops.Constant,
        'GlorotNormal': init_ops.GlorotNormal,
        'GlorotUniform': init_ops.GlorotUniform,
        'Identity': init_ops.Identity,
        'Ones': init_ops.Ones,
        'Orthogonal': init_ops.Orthogonal,
        'VarianceScaling': init_ops.VarianceScaling,
        'Zeros': init_ops.Zeros,
        'HeNormal': initializers_v1.HeNormal,
        'HeUniform': initializers_v1.HeUniform,
        'LecunNormal': initializers_v1.LecunNormal,
        'LecunUniform': initializers_v1.LecunUniform,
        'RandomNormal': initializers_v1.RandomNormal,
        'RandomUniform': initializers_v1.RandomUniform,
        'TruncatedNormal': initializers_v1.TruncatedNormal,
    }
    for key, value in v1_objs.items():
      LOCAL.ALL_OBJECTS[key] = value
      # Functional aliases.
      LOCAL.ALL_OBJECTS[generic_utils.to_snake_case(key)] = value

  # More compatibility aliases.
  LOCAL.ALL_OBJECTS['normal'] = LOCAL.ALL_OBJECTS['random_normal']
  LOCAL.ALL_OBJECTS['uniform'] = LOCAL.ALL_OBJECTS['random_uniform']
  LOCAL.ALL_OBJECTS['one'] = LOCAL.ALL_OBJECTS['ones']
  LOCAL.ALL_OBJECTS['zero'] = LOCAL.ALL_OBJECTS['zeros']
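A name-based deserializer would consult the table built above. The helper below is a hypothetical sketch (get_initializer is not defined in this module) showing how a string identifier, including the snake_case and legacy aliases registered above, could be resolved against LOCAL.ALL_OBJECTS:

def get_initializer(identifier):
  """Hypothetical lookup helper: resolves a name via LOCAL.ALL_OBJECTS."""
  populate_deserializable_objects()  # rebuild the table if tf2.enabled() changed
  if isinstance(identifier, str):
    if identifier not in LOCAL.ALL_OBJECTS:
      raise ValueError('Unknown initializer: %s' % identifier)
    obj = LOCAL.ALL_OBJECTS[identifier]
    # Entries are classes; instantiate with defaults for convenience.
    return obj() if isinstance(obj, type) else obj
  return identifier  # already an initializer instance


# 'GlorotUniform', 'glorot_uniform' and the legacy 'uniform' (random_uniform)
# alias all resolve through the same table.
init = get_initializer('glorot_uniform')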
Example n. 50
# limitations under the License.
# ==============================================================================
"""Utility functions for control flow.

This file is necessary to avoid cyclic dependencies between ops.py and
control_flow_ops.py.
"""

import os
import traceback

from tensorflow.python import tf2
from tensorflow.python.platform import tf_logging as logging

ENABLE_CONTROL_FLOW_V2 = (
    (tf2.enabled() and os.getenv("TF_ENABLE_CONTROL_FLOW_V2") != "0")
    or os.getenv("TF_ENABLE_CONTROL_FLOW_V2", "0") != "0"
    or os.getenv("TF_ENABLE_COND_V2", "0") != "0"
    or os.getenv("TF_ENABLE_WHILE_V2", "0") != "0"
    or os.getenv("TF_ENABLE_TENSOR_ARRAY_V2", "0") != "0")


# TODO(b/137793122): Remove this.
def enable_control_flow_v2():  # pylint: disable=invalid-name
    """Use control flow v2.

  Do not use this symbol. This will be removed.
  """
    global ENABLE_CONTROL_FLOW_V2
    ENABLE_CONTROL_FLOW_V2 = True
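The module-level expression above folds several opt-in paths into one flag. As an illustration only (the helper name and the asserts are mine, not part of the module), the same precedence can be written as a standalone function and exercised directly:

import os


def control_flow_v2_enabled(tf2_behavior_enabled, environ=None):
    """Mirrors ENABLE_CONTROL_FLOW_V2: on when TF2 behavior is enabled and not
    explicitly opted out, or when any opt-in variable is set to a non-"0" value.
    """
    environ = os.environ if environ is None else environ
    return ((tf2_behavior_enabled
             and environ.get("TF_ENABLE_CONTROL_FLOW_V2") != "0")
            or environ.get("TF_ENABLE_CONTROL_FLOW_V2", "0") != "0"
            or environ.get("TF_ENABLE_COND_V2", "0") != "0"
            or environ.get("TF_ENABLE_WHILE_V2", "0") != "0"
            or environ.get("TF_ENABLE_TENSOR_ARRAY_V2", "0") != "0")


assert control_flow_v2_enabled(True, {}) is True
assert control_flow_v2_enabled(True, {"TF_ENABLE_CONTROL_FLOW_V2": "0"}) is False
assert control_flow_v2_enabled(False, {}) is False
assert control_flow_v2_enabled(False, {"TF_ENABLE_COND_V2": "1"}) is True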
Example n. 51
import threading

from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export

GRAPH_MODE = 0
EAGER_MODE = 1

default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE

# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}

_MAXINT32 = 2**31 - 1

DEVICE_PLACEMENT_EXPLICIT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
    pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
SYNC = 0
Example n. 52
 def _v2_behavior(self):
   if _TENSORSHAPE_V2_OVERRIDE is None:
     return tf2.enabled()
   return _TENSORSHAPE_V2_OVERRIDE
Example n. 53
def get_dataset_from_tensor_slices(inp_array):
    dataset = dataset_ops.DatasetV2.from_tensor_slices(inp_array)
    # TODO(b/138326910): Remove Dataset V1 version once bug resolved.
    if not tf2.enabled():
        dataset = dataset_ops.Dataset.from_tensor_slices(inp_array)
    return dataset
Example n. 54
 def setUpClass(cls):
   if tf2.enabled():
     dataset_ops.Dataset = dataset_ops.DatasetV2
   else:
     dataset_ops.Dataset = dataset_ops.DatasetV1
Example n. 55
from tensorflow.python.util.all_util import make_all
from tensorflow.python.util.tf_export import tf_export

# Eager execution
from tensorflow.python.eager.context import executing_eagerly
from tensorflow.python.eager.remote import connect_to_remote_host
from tensorflow.python.eager.def_function import function
from tensorflow.python.framework.ops import enable_eager_execution

# Check whether TF2_BEHAVIOR is turned on.
from tensorflow.python.eager import monitoring as _monitoring
from tensorflow.python import tf2 as _tf2
_tf2_gauge = _monitoring.BoolGauge(
    '/tensorflow/api/tf2_enable', 'Environment variable TF2_BEHAVIOR is set.')
_tf2_gauge.get_cell().set(_tf2.enabled())

# Necessary for the symbols in this module to be taken into account by
# the namespace management system (API decorators).
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell

# TensorFlow Debugger (tfdbg).
from tensorflow.python.debug.lib import check_numerics_callback
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.ops import gen_debug_ops

# DLPack
from tensorflow.python.dlpack.dlpack import from_dlpack
from tensorflow.python.dlpack.dlpack import to_dlpack
Example n. 56
    def testSaveAndLoadSavedModelExport(self, model_builder,
                                        uses_learning_phase, optimizer_cls,
                                        train_before_export):
        optimizer = None if optimizer_cls is None else optimizer_cls()

        saved_model_dir = self._save_model_dir()

        np.random.seed(130)
        input_arr = np.random.random((1, 3))
        target_arr = np.random.random((1, 3))

        model = model_builder(uses_learning_phase)
        if optimizer is not None:
            model.compile(loss='mse', optimizer=optimizer, metrics=['mae'])
            if train_before_export:
                model.train_on_batch(input_arr, target_arr)

            ref_loss, ref_mae = model.evaluate(input_arr, target_arr)

        ref_predict = model.predict(input_arr)

        # Export SavedModel
        keras_saved_model.export_saved_model(model, saved_model_dir)

        input_name = model.input_names[0]
        output_name = model.output_names[0]
        target_name = output_name + '_target'

        # Load predict graph, and test predictions
        with session.Session(graph=ops.Graph()) as sess:
            inputs, outputs, _ = load_model(sess, saved_model_dir,
                                            mode_keys.ModeKeys.PREDICT)

            predictions = sess.run(outputs[output_name],
                                   {inputs[input_name]: input_arr})
            self.assertAllClose(ref_predict, predictions, atol=1e-05)

        if optimizer:
            # Load eval graph, and test predictions, loss and metric values
            with session.Session(graph=ops.Graph()) as sess:
                inputs, outputs, _ = load_model(sess, saved_model_dir,
                                                mode_keys.ModeKeys.TEST)

                # First obtain the loss and predictions, and run the metric update op by
                # feeding in the inputs and targets.
                metrics_name = 'mae' if tf2.enabled() else 'mean_absolute_error'
                metrics_update_op_key = 'metrics/' + metrics_name + '/update_op'
                metrics_value_op_key = 'metrics/' + metrics_name + '/value'

                loss, predictions, _ = sess.run(
                    (outputs['loss'], outputs['predictions/' + output_name],
                     outputs[metrics_update_op_key]), {
                         inputs[input_name]: input_arr,
                         inputs[target_name]: target_arr
                     })

                # The metric value should be run after the update op, to ensure that it
                # reflects the correct value.
                metric_value = sess.run(outputs[metrics_value_op_key])

                self.assertEqual(int(train_before_export),
                                 sess.run(training_module.get_global_step()))
                self.assertAllClose(ref_loss, loss, atol=1e-05)
                self.assertAllClose(ref_mae, metric_value, atol=1e-05)
                self.assertAllClose(ref_predict, predictions, atol=1e-05)

            # Load train graph, and check for the train op, and prediction values
            with session.Session(graph=ops.Graph()) as sess:
                inputs, outputs, meta_graph_def = load_model(
                    sess, saved_model_dir, mode_keys.ModeKeys.TRAIN)
                self.assertEqual(int(train_before_export),
                                 sess.run(training_module.get_global_step()))
                self.assertIn('loss', outputs)
                self.assertIn(metrics_update_op_key, outputs)
                self.assertIn(metrics_value_op_key, outputs)
                self.assertIn('predictions/' + output_name, outputs)

                # Train for a step
                train_op = loader_impl.get_train_op(meta_graph_def)
                train_outputs, _ = sess.run([outputs, train_op], {
                    inputs[input_name]: input_arr,
                    inputs[target_name]: target_arr
                })
                self.assertEqual(
                    int(train_before_export) + 1,
                    sess.run(training_module.get_global_step()))

                if uses_learning_phase:
                    self.assertAllClose([[0, 0, 0]],
                                        train_outputs['predictions/' +
                                                      output_name],
                                        atol=1e-05)
                else:
                    self.assertNotAllClose([[0, 0, 0]],
                                           train_outputs['predictions/' +
                                                         output_name],
                                           atol=1e-05)
Example n. 57
 def _get_dataset(self):
     if tf2.enabled():
         return dataset_ops.DatasetV2.range(10).batch(2)
     else:
         return dataset_ops.Dataset.range(10).batch(2)
Example n. 58
File: misc.py Project: deargen/cmg
def is_v2():
    """Returns whether it is v2."""
    return tf2_internal.enabled()
Example n. 59
 def _run(self, flags_str, should_succeed):
     if tf2.enabled():
         flags_str += ' --enable_v1_converter'
     super(TfLiteConvertV1Test, self)._run(flags_str, should_succeed)
Example n. 60
def save_model(model,
               filepath,
               overwrite=True,
               include_optimizer=True,
               save_format=None):
    """Saves a model as a TensorFlow SavedModel or HDF5 file.

  The saved model contains:
      - the model's configuration (topology)
      - the model's weights
      - the model's optimizer's state (if any)

  Thus the saved model can be reinstantiated in
  the exact same state, without any of the code
  used for model definition or training.

  _SavedModel serialization_ (not yet added)

  The SavedModel serialization path uses `tf.saved_model.save` to save the model
  and all trackable objects attached to the model (e.g. layers and variables).
  `@tf.function`-decorated methods are also saved. Additional trackable objects
  and functions are added to the SavedModel to allow the model to be
  loaded back as a Keras Model object.

  Arguments:
      model: Keras model instance to be saved.
      filepath: One of the following:
        - String, path where to save the model
        - `h5py.File` object where to save the model
      overwrite: Whether we should overwrite any existing model at the target
        location, or instead ask the user with a manual prompt.
      include_optimizer: If True, save the optimizer's state together with the
        model.
      save_format: Either 'tf' or 'h5', indicating whether to save the model
        to the TensorFlow SavedModel format or HDF5. The 'tf' option is
        currently disabled, and will be enabled when Keras SavedModel export is
        no longer experimental. (The experimental function is
        tf.keras.experimental.export_saved_model.)

  Raises:
      ImportError: If save format is hdf5, and h5py is not available.
  """
    from tensorflow.python.keras.engine import sequential  # pylint: disable=g-import-not-at-top

    if (not tf2.enabled() and not ops.executing_eagerly_outside_functions()
            and save_format == 'tf'):
        raise NotImplementedError(
            'Saving the model as SavedModel is not supported in TensorFlow 1.X '
            'graph mode. Please enable eager execution or use the "h5" save '
            'format.')

    if _KERAS_SAVED_MODEL_STILL_EXPERIMENTAL and save_format == 'tf':
        raise NotImplementedError(
            'Saving the model as SavedModel is still in experimental stages. '
            'Please use tf.keras.experimental.export_saved_model, or use '
            'save_format="h5" to save to HDF5.')

    # TODO(kathywu): Remove this when Keras SavedModel is not experimental.
    save_format = 'h5'

    if (save_format == 'h5'
            or (h5py is not None and isinstance(filepath, h5py.File))
            or os.path.splitext(filepath)[1] in _HDF5_EXTENSIONS):
        # TODO(b/130258301): add utility method for detecting model type.
        if (not model._is_graph_network and  # pylint:disable=protected-access
                not isinstance(model, sequential.Sequential)):
            raise NotImplementedError(
                'Saving the model to HDF5 format requires the model to be a '
                'Functional model or a Sequential model. It does not work for '
                'subclassed models, because such models are defined via the body of '
                'a Python method, which isn\'t safely serializable. Consider saving '
                'to the Tensorflow SavedModel format (by setting save_format="tf") '
                'or using `save_weights`.')
        hdf5_format.save_model_to_hdf5(model, filepath, overwrite,
                                       include_optimizer)
        return