Example #1
  def wrapper(*args, **kwargs):
    try:
      return fn(*args, **kwargs)
    finally:
      # Tear down the global eager context and rebuild it from scratch, so
      # state (devices, caches) does not leak out of the wrapped call, then
      # re-enable eager execution on the fresh context.
      del context._context
      context._context = context.Context()
      ops.enable_eager_execution()
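The fragment above is the inner function of a decorator. A minimal sketch of the enclosing decorator and its use, assuming the imports shown (the name reset_context_after is illustrative, not from the source):

import functools

from tensorflow.python.eager import context
from tensorflow.python.framework import ops


def reset_context_after(fn):  # Illustrative name, not from the source.
  """Decorator: runs `fn`, then rebuilds the global eager context."""
  @functools.wraps(fn)
  def wrapper(*args, **kwargs):
    try:
      return fn(*args, **kwargs)
    finally:
      del context._context
      context._context = context.Context()
      ops.enable_eager_execution()
  return wrapper


@reset_context_after
def run_some_ops():  # Hypothetical function; the context is reset after it runs.
  pass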
Example #2
  def setUp(self):
    # Enabling eager execution should take effect immediately.
    ops.enable_eager_execution()
    self.assertTrue(context.executing_eagerly())

    # Calling enable eager execution a second time should not cause an error.
    ops.enable_eager_execution()
    self.assertTrue(context.executing_eagerly())
Example #3
def enable_v2_behavior():
  """Enables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before `Tensors`,
  `Graphs`, or other structures have been created, and before devices have been
  initialized). It switches all global behaviors that are different between
  TensorFlow 1.x and 2.x to behave as intended for 2.x.

  This function is called in the main TensorFlow `__init__.py` file, so users
  should not need to call it, except during complex migrations.
  """
  tf2.enable()  # Switches TensorArrayV2 and control flow V2
  ops.enable_eager_execution()
  tensor_shape.enable_v2_tensorshape()  # Also switched by tf2
  variable_scope.enable_resource_variables()
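As the docstring says, this must run before any tensors, graphs, or devices exist. A minimal sketch of invoking it through the public API, which wraps the function above:

import tensorflow as tf

# Call before creating any Tensors, Graphs, or Sessions.
tf.compat.v1.enable_v2_behavior()

print(tf.executing_eagerly())  # True: eager execution is one of the v2 behaviors.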
Example #4
def main(_):
  if flags.FLAGS.enable_eager:
    ops.enable_eager_execution()
    logging.info('Eager execution enabled for MNIST Multi-Worker.')
  else:
    logging.info('Eager execution not enabled for MNIST Multi-Worker.')

  # Build the train and eval datasets from the MNIST data.
  train_ds, eval_ds = get_input_datasets()

  if flags.FLAGS.distribution_strategy == 'multi_worker_mirrored':
    # MultiWorkerMirroredStrategy for multi-worker distributed MNIST training.
    strategy = collective_strategy.CollectiveAllReduceStrategy()
  else:
    raise ValueError('Only the `multi_worker_mirrored` strategy is supported '
                     'in the Keras MNIST example at this time. The strategy '
                     'passed in is %s' % flags.FLAGS.distribution_strategy)

  # Create and compile the model under the distribution strategy scope.
  # `fit`, `evaluate`, and `predict` will be distributed based on the strategy
  # the model was compiled with.
  with strategy.scope():
    model = get_model()
    optimizer = rmsprop.RMSProp(learning_rate=0.001)
    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=optimizer,
        metrics=['accuracy'])

  # Train the model with the train dataset.
  tensorboard_callback = keras.callbacks.TensorBoard(
      log_dir=flags.FLAGS.model_dir)
  model.fit(
      x=train_ds,
      epochs=20,
      steps_per_epoch=468,
      callbacks=[tensorboard_callback])

  # Evaluate the model with the eval dataset.
  score = model.evaluate(eval_ds, steps=10, verbose=0)
  logging.info('Test loss: {}'.format(score[0]))
  logging.info('Test accuracy: {}'.format(score[1]))
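This script reads enable_eager, distribution_strategy, and model_dir from flags.FLAGS, but their definitions are not shown. A plausible sketch of those definitions using absl (assumed, not from the source):

from absl import flags

flags.DEFINE_boolean('enable_eager', False,
                     'Whether to enable eager execution before training.')
flags.DEFINE_string('distribution_strategy', 'multi_worker_mirrored',
                    'The distribution strategy to use.')
flags.DEFINE_string('model_dir', '/tmp/mnist_multi_worker',
                    'Directory for TensorBoard logs and checkpoints.')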
Example #5
  def testOptimization(self):
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))
    dataset = dataset.skip(0)  # This should be optimized away by noop elimination.
    dataset = dataset.cache()

    options = dataset_ops.Options()
    options.experimental_optimization.noop_elimination = True
    dataset = dataset.with_options(options)

    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"])

    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      for i in range(0, 10, 2):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.assertEqual(i, self.evaluate(elem_on_1))
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)


if __name__ == "__main__":
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 1}))
  test.main()
Example #6
def main(argv=None):  # pylint: disable=function-redefined
  _ops.enable_eager_execution()
  _test.main(argv)
Example #7
class MultiDeviceTest(xla_test.XLATestCase):
  """Test running TPU computation on more than one core."""

  def testBasic(self):
    if not multiple_tpus():
      self.skipTest('MultiDeviceTest requires multiple TPU devices.')

    # Compute 10 on TPU core 0
    with ops.device('device:TPU:0'):
      two = constant_op.constant(2)
      five = constant_op.constant(5)
      ten = two * five
      self.assertAllEqual(10, ten)

    # Compute 6 on TPU core 1
    with ops.device('device:TPU:1'):
      two = constant_op.constant(2)
      three = constant_op.constant(3)
      six = two * three
      self.assertAllEqual(6, six)

    # Copy 10 and 6 to CPU and sum them
    self.assertAllEqual(16, ten + six)


if __name__ == '__main__':
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(log_device_placement=True))
  googletest.main()
Example #8
  """Test that layers and models produce the correct tensor types."""

  # In v1 graph there are only symbolic tensors.
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  @parameterized.named_parameters(*OUTPUT_TEST_CASES)
  def test_layer_outputs(self, layer_to_test, input_shape, layer_kwargs):
    layer = layer_to_test(**layer_kwargs)

    input_data = np.ones(shape=(2,) + input_shape, dtype=np.float32)
    layer_result = layer(input_data)

    inp = keras.layers.Input(shape=input_shape, batch_size=2)
    model = keras.models.Model(inp, layer_to_test(**layer_kwargs)(inp))
    model_result = model(input_data)

    for x in [layer_result, model_result]:
      if not isinstance(x, ops.Tensor):
        raise ValueError('Tensor or EagerTensor expected, got type {}'
                         .format(type(x)))

      if isinstance(x, ops.EagerTensor) != context.executing_eagerly():
        expected_type = (ops.EagerTensor if context.executing_eagerly()
                         else ops.Tensor)
        raise ValueError('Expected type {}, got type {}'
                         .format(expected_type, type(x)))


if __name__ == '__main__':
  ops.enable_eager_execution()
  test.main()
Example #9
def main(argv=None):
  _ops.enable_eager_execution()
  _test.main(argv)
Example #10

class MultiDeviceTest(xla_test.XLATestCase):
    """Test running TPU computation on more than one core."""
    def testBasic(self):
        if not multiple_tpus():
            self.skipTest('MultiDeviceTest requires multiple TPU devices.')

        # Compute 10 on TPU core 0
        with ops.device('device:TPU:0'):
            two = constant_op.constant(2)
            five = constant_op.constant(5)
            ten = two * five
            self.assertAllEqual(10, ten)

        # Compute 6 on TPU core 1
        with ops.device('device:TPU:1'):
            two = constant_op.constant(2)
            three = constant_op.constant(3)
            six = two * three
            self.assertAllEqual(6, six)

        # Copy 10 and 6 to CPU and sum them
        self.assertAllEqual(16, ten + six)


if __name__ == '__main__':
    ops.enable_eager_execution(config=config_pb2.ConfigProto(
        log_device_placement=True))
    googletest.main()
Example #11
        dataset = dataset.with_options(options)
        self.assertDatasetProduces(dataset, range(1, 11))

    def testErrorWithoutPrefetch(self):
        """The rewrite fails if there is no prefetch() in the pipeline."""
        dataset = dataset_ops.Dataset.range(10)
        options = dataset_ops.Options()
        options.experimental_slack = True
        dataset = dataset.with_options(options)
        with self.assertRaises(errors.InvalidArgumentError):
            get_next = self.getNext(dataset)
            self.evaluate(get_next())

    def testErrorWithInvalidDataset(self):
        """With a nested dataset op after prefetch, the rewrite should fail."""
        dataset = dataset_ops.Dataset.range(10)
        dataset = dataset.prefetch(1)
        dataset = dataset.flat_map(dataset_ops.Dataset.from_tensors)
        options = dataset_ops.Options()
        options.experimental_slack = True
        dataset = dataset.with_options(options)
        with self.assertRaises(errors.InvalidArgumentError):
            get_next = self.getNext(dataset)
            self.evaluate(get_next())


if __name__ == "__main__":
    ops.enable_eager_execution(config=config_pb2.ConfigProto(
        device_count={"CPU": 3}))
    test.main()
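The two failing tests above outline the supported shape of a slack pipeline: the option needs a prefetch as the final transformation, with no dataset-producing ops after it. A minimal passing pipeline under that assumption:

dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)  # Slack is introduced into this terminal prefetch.
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)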
Example #12
import numpy as np
import tensorflow as tf

# The graph-mode version of this demo, kept as a string so it does not run:
"""
a = tf.constant(np.array([1., 2., 3.]))
print(type(a))

b = tf.constant(np.array([4., 5., 6.]))
c = tf.tensordot(a, b, 1)
print(type(c))

print(c)

session = tf.compat.v1.Session()
output = session.run(c)
session.close()
print(output)
"""

tf.compat.v1.enable_eager_execution()
print(tf.executing_eagerly())

x = [[4]]
m = tf.matmul(x, x)
print("Result: {}".format(m))

a = tf.constant(np.array([1., 2., 3.]))
print(type(a))

print(a.numpy())

b = tf.constant(np.array([4., 5., 6.]))
c = tf.tensordot(a, b, 1)
print(type(c))
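The point of the demo: once eager execution is on, ops run immediately and produce concrete EagerTensors, so no Session.run is needed to read values. For example:

t = tf.tensordot(tf.constant([1., 2.]), tf.constant([3., 4.]), 1)
print(t.numpy())  # 11.0 -- available immediately, no Session required.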
Example #13
def main(argv=None):
    _ops.enable_eager_execution()
    _test.main(argv)
Example #14
    @function.defun
    def train():
      v = resource_variable_ops.ResourceVariable(1.0)
      grad = backprop.implicit_grad(loss)(v)
      optimizer.apply_gradients(grad)
      return v.read_value()

    train()

  def testOptimizerInDefunWithCapturedVariable(self):
    v = resource_variable_ops.ResourceVariable(1.0)
    def loss():
      return v**2

    optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)

    @function.defun
    def train():
      grad = backprop.implicit_grad(loss)()
      optimizer.apply_gradients(grad)

    train()
    self.assertEqual(v.numpy(), -1.0)


if __name__ == '__main__':
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={'CPU': 3}))
  test.main()
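For readers unfamiliar with backprop.implicit_grad: it wraps a loss function and returns the (gradient, variable) pairs for every trainable variable the loss touches, which is why its result feeds directly into apply_gradients above. A minimal eager sketch, assuming eager execution is already enabled:

from tensorflow.python.eager import backprop
from tensorflow.python.ops import resource_variable_ops

v = resource_variable_ops.ResourceVariable(3.0)

def loss():
  return v ** 2

grads_and_vars = backprop.implicit_grad(loss)()
# A list of (gradient, variable) pairs; here the gradient is 2 * v = 6.0.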
Example #15
  def testConnectToClusterInGraphModeWillFail(self):
    ops.disable_eager_execution()
    with self.assertRaises(ValueError):
      remote.connect_to_cluster(self._cluster_resolver)
    ops.enable_eager_execution()
Example #16
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=RMSPropOptimizer(learning_rate=0.001))
    x = np.ones((100, 4), dtype=np.float32)
    np.random.seed(123)
    y = np.random.randint(0, 1, size=(100, 1))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    history = model.fit(iterator, epochs=1, steps_per_epoch=10)
    self.assertEqual(np.around(history.history['loss'][-1], decimals=4), 0.6173)

  def test_no_loss_in_call(self):

    class HasLoss(keras.layers.Layer):

      def call(self, x):
        self.add_loss(x)
        return x

    layer = HasLoss()
    with self.assertRaises(RuntimeError):
      layer(1.)

    with ops.Graph().as_default():
      layer(1.)

if __name__ == '__main__':
  ops.enable_eager_execution()
  test.main()
Example #17
def main(argv=None):  # pylint: disable=function-redefined
  _ops.enable_eager_execution()
  _test.main(argv)