Example #1
  def test_subscribe_tensors_on_different_devices(self):
    """Side effect ops are added with the same device of the subscribed op."""
    c1 = constant_op.constant(10)
    c2 = constant_op.constant(20)

    with ops.device('cpu:0'):
      add = math_ops.add(c1, c2)

    with ops.device('cpu:1'):
      mul = math_ops.multiply(c1, c2)

    def sub(t):
      return t

    add_sub = subscribe.subscribe(
        add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))

    mul_sub = subscribe.subscribe(
        mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))

    # Expect the identity tensors injected by subscribe to have been created
    # on the same device as their original tensors.
    self.assertNotEqual(add_sub.device, mul_sub.device)
    self.assertEqual(add.device, add_sub.device)
    self.assertEqual(mul.device, mul_sub.device)
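Note: subscribe works by splicing an identity op into the graph next to the
subscribed tensor. As rough orientation (an illustrative sketch, not the
library's actual implementation), the hand-written equivalent looks like
this, using the same module aliases as the examples:

  c = constant_op.constant(10)
  side_effect = script_ops.py_func(lambda t: t, [c], [c.dtype])
  with ops.control_dependencies(side_effect):
    c_sub = array_ops.identity(c)  # evaluating c_sub also runs the py_func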
Example #2
  def testCaching(self):
    """Confirm caching of control output is recalculated between calls."""
    a = constant_op.constant(1)
    b = constant_op.constant(2)
    with ops.control_dependencies([a]):
      c = constant_op.constant(42)

    shared = {}

    def sub(t):
      shared[t] = shared.get(t, 0) + 1
      return t

    a = subscribe.subscribe(a,
                            lambda t: script_ops.py_func(sub, [t], [t.dtype]))

    with ops.control_dependencies([b]):
      d = constant_op.constant(11)

    # If subscribe used stale cached control_outputs, evaluating d would not
    # trigger the new subscription on b.
    b = subscribe.subscribe(b,
                            lambda t: script_ops.py_func(sub, [t], [t.dtype]))

    with self.cached_session() as sess:
      c_out = self.evaluate([c])
      d_out = self.evaluate([d])

    self.assertEqual(c_out, [42])
    self.assertEqual(d_out, [11])
    self.assertEqual(shared, {2: 1, 1: 1})
Example #3
 def testLarge(self):
   with self.test_session() as sess:
     x = array_ops.zeros([1000000], dtype=np.float32)
     y = script_ops.py_func(lambda x: x + 1, [x], [dtypes.float32])
     z = script_ops.py_func(lambda x: x * 2, [x], [dtypes.float32])
     for _ in range(100):
       sess.run([y[0].op, z[0].op])
Example #4
 def testGradientFunction(self):
   # Input to tf.py_func is necessary; otherwise get_gradient_function()
   # returns None by default.
   a = constant_op.constant(0)
   x, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64])
   y, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64], stateful=False)
   self.assertEqual(None, ops.get_gradient_function(x.op))
   self.assertEqual(None, ops.get_gradient_function(y.op))
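Since py_func ops register no gradient, a common workaround (a hedged sketch,
not part of the test above; tf here is tensorflow.compat.v1) is to supply the
gradient by hand with tf.custom_gradient:

  import numpy as np
  import tensorflow.compat.v1 as tf

  @tf.custom_gradient
  def numpy_square(x):
    y = tf.py_func(lambda v: np.square(v), [x], x.dtype)
    y.set_shape(x.shape)  # py_func drops static shape information
    def grad(dy):
      return dy * 2.0 * x  # analytic gradient supplied manually
    return y, grad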
Example #5
  def testStrings(self):

    def read_fixed_length_numpy_strings():
      return np.array([b" there"])

    def read_and_return_strings(x, y):
      return x + y

    with self.test_session():
      x = constant_op.constant([b"hello", b"hi"], dtypes.string)
      y, = script_ops.py_func(read_fixed_length_numpy_strings, [],
                              [dtypes.string])
      z, = script_ops.py_func(read_and_return_strings, [x, y],
                              [dtypes.string])
      self.assertListEqual(list(z.eval()), [b"hello there", b"hi there"])
Example #6
 def make_graphs():
   for _ in range(1000):
     g = ops.Graph()
     with g.as_default():
       c = constant_op.constant([1.], dtypes.float32)
       _ = script_ops.py_func(lambda x: x + 1, [c], [dtypes.float32])
       _ = script_ops.eager_py_func(lambda x: x + 1, [c], [dtypes.float32])
       # These ops hold a reference to 'c', which holds a reference to the
       # graph. Check that the functions are deleted even though the graph
       # is referenced from them (see #18292).
       _ = script_ops.py_func(lambda x: x + c.shape[0], [c], [dtypes.float32])
       _ = script_ops.eager_py_func(lambda x: x + c.shape[0], [c], [dtypes.float32])
Example #7
  def testObjectArraysAreConvertedToBytes(self):

    def read_object_array():
      return np.array([b" there", u" ya"], dtype=object)

    def read_and_return_strings(x, y):
      return x + y

    with self.test_session():
      x = constant_op.constant(["hello", "hi"], dtypes.string)
      y, = script_ops.py_func(read_object_array, [],
                              [dtypes.string])
      z, = script_ops.py_func(read_and_return_strings, [x, y], [dtypes.string])
      self.assertListEqual(list(z.eval()), [b"hello there", b"hi ya"])
Example #8
def _compute_vmeasure_score(labels, predictions):
  vmeasure_score = math_ops.cast(
      script_ops.py_func(
          metrics.v_measure_score, [labels, predictions], [dtypes.float64],
          name='vmeasure'),
      dtypes.float32)
  return math_ops.maximum(0.0, vmeasure_score)
Example #9
def _compute_nmi_score(labels, predictions):
  return math_ops.cast(
      script_ops.py_func(
          metrics.normalized_mutual_info_score, [labels, predictions],
          [dtypes.float64],
          name='nmi'),
      dtypes.float32)
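A hypothetical usage sketch of these wrappers (the tensors below are
assumptions, not part of the original code): both take integer cluster
assignments and return a float32 score computed by scikit-learn inside the
py_func:

  labels = constant_op.constant([0, 0, 1, 1], dtypes.int64)
  predictions = constant_op.constant([1, 1, 0, 0], dtypes.int64)
  nmi = _compute_nmi_score(labels, predictions)  # float32 tensor
  vmeasure = _compute_vmeasure_score(labels, predictions)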
Example #10
  def testSideEffect(self):
    a = constant_op.constant(1)
    b = constant_op.constant(1)
    c = math_ops.add(a, b)
    with ops.control_dependencies([c]):
      d = constant_op.constant(42)
    n = math_ops.negative(c)

    shared = []

    def sub(t):
      shared.append(t)
      return t

    c = subscribe.subscribe(c,
                            lambda t: script_ops.py_func(sub, [t], [t.dtype]))

    with self.test_session() as sess:
      c_out = sess.run([c])
      n_out = sess.run([n])
      d_out = sess.run([d])

    self.assertEqual(n_out, [-2])
    self.assertEqual(c_out, [2])
    self.assertEqual(d_out, [42])
    # The py_func side effect ran once per session.run call: c, n, and d all
    # depend on the subscribed tensor (d via its control dependency).
    self.assertEqual(shared, [2, 2, 2])
Example #11
  def _testExceptionHandling(self, py_exp, tf_exp, eager=False):

    def inner_exception():
      raise py_exp("blah")  # pylint: disable=not-callable

    def raise_exception():
      inner_exception()

    expected_regexp = r": blah.*"               # Error at the top
    expected_regexp += r"in raise_exception.*"  # Stacktrace outer
    expected_regexp += r"in inner_exception.*"  # Stacktrace inner
    expected_regexp += r": blah"                # Stacktrace of raise
    def expected_error_check(exception):
      return re.search(expected_regexp, str(exception), re.DOTALL)

    if eager:
      if context.executing_eagerly():
        with self.assertRaisesWithPredicateMatch(tf_exp, expected_error_check):
          f = script_ops.eager_py_func(raise_exception, [], [])
        return
      else:
        f = script_ops.eager_py_func(raise_exception, [], [])
    else:
      f = script_ops.py_func(raise_exception, [], [])

    with self.test_session():
      with self.assertRaisesWithPredicateMatch(tf_exp, expected_error_check):
        self.evaluate(f)
Example #12
  def testOverrideThreadPool(self):

    def get_thread_id(_):
      # Python creates a dummy thread object to represent the current
      # thread when called from an "alien" thread (such as a
      # `PrivateThreadPool` thread in this case). It does not include
      # the TensorFlow-given display name, but it has a unique
      # identifier that maps one-to-one with the underlying OS thread.
      return np.array(threading.current_thread().ident).astype(np.int64)

    for num_threads in [1, 2, 4, 8, 16]:

      dataset = (
          Dataset.range(1000).map(
              lambda x: script_ops.py_func(get_thread_id, [x], dtypes.int64),
              num_parallel_calls=32).apply(unique.unique()))

      dataset = threadpool.override_threadpool(
          dataset,
          threadpool.PrivateThreadPool(
              num_threads, display_name='private_thread_pool_%d' % num_threads))

      thread_ids = []
      for next_element in datasets.Iterator(dataset):
        thread_ids.append(next_element)
      self.assertEqual(len(thread_ids), len(set(thread_ids)))
      self.assertGreater(len(thread_ids), 0)
      # NOTE(mrry): We don't control the thread pool scheduling, and
      # so cannot guarantee that all of the threads in the pool will
      # perform work.
      self.assertLessEqual(len(thread_ids), num_threads)
Example #13
def _batch_to_patches(batch, patches_per_image, patch_size):
  """Extract patches from a batch.

  Args:
      batch: (tensor) The batch of images (batch, height, width, channels).
      patches_per_image: (int) Number of patches to extract per image.
      patch_size: (int) Size of the patches (size, size, channels) to extract.
  Returns:
      Tensor (batch*patches_per_image, patch_size, patch_size, channels) of
      patches.
  """

  def py_func_random_patches(batch):
    """Numpy wrapper."""
    batch_size, height, width, channels = batch.shape
    patch_count = patches_per_image * batch_size
    hs = patch_size // 2
    # Randomly pick patches.
    patch_id, y, x, chan = np.ogrid[0:patch_count, -hs:hs + 1, -hs:hs + 1, 0:3]
    img_id = patch_id // patches_per_image
    # pylint: disable=g-no-augmented-assignment
    # Need explicit addition for broadcast to work properly.
    y = y + np.random.randint(hs, height - hs, size=(patch_count, 1, 1, 1))
    x = x + np.random.randint(hs, width - hs, size=(patch_count, 1, 1, 1))
    # pylint: enable=g-no-augmented-assignment
    idx = ((img_id * height + y) * width + x) * channels + chan
    patches = batch.flat[idx]
    return patches

  patches = script_ops.py_func(
      py_func_random_patches, [batch], batch.dtype, stateful=False)
  return patches
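A hypothetical usage sketch (the shapes below are assumptions): with an odd
patch_size the helper returns batch * patches_per_image square patches drawn
at random centers:

  images = array_ops.zeros([16, 64, 64, 3], dtype=dtypes.float32)
  patches = _batch_to_patches(images, patches_per_image=4, patch_size=9)
  # patches has shape (16 * 4, 9, 9, 3); note that the hard-coded 0:3
  # channel grid in py_func_random_patches assumes 3-channel input.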
Example #14
  def testMapAndBatchOutOfRangeError(self, threshold, numa_aware):

    def raising_py_fn(i):
      if i == threshold:
        raise StopIteration()
      elif i > threshold:
        raise RuntimeError("Alternate error; you shouldn't see me! (i: %s)" % i)
      else:
        return i

    dataset = dataset_ops.Dataset.range(100).apply(
        batching.map_and_batch(
            lambda x: script_ops.py_func(raising_py_fn, [x], dtypes.int64),
            batch_size=10))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(threshold // 10):
        self.assertAllEqual([i * 10 + j for j in range(10)],
                            self.evaluate(get_next))
      if threshold % 10 != 0:
        self.assertAllEqual(
            [threshold // 10 * 10 + j for j in range(threshold % 10)],
            self.evaluate(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next)
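The test relies on the py_func runtime translating StopIteration into
errors.OutOfRangeError, the signal tf.data uses for end-of-sequence. A
minimal standalone sketch of that mapping (assuming a TF1 session via
session_lib.Session, as in other examples here):

  def stop(_):
    raise StopIteration()

  value = script_ops.py_func(stop, [constant_op.constant(0)], dtypes.int64)
  with session_lib.Session() as sess:
    try:
      sess.run(value)
    except errors.OutOfRangeError:
      pass  # the Python StopIteration surfaced as OutOfRangeError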
Example #15
  def _testNumThreadsHelper(self, num_threads, override_threadpool_fn):

    def get_thread_id(_):
      # Python creates a dummy thread object to represent the current
      # thread when called from an "alien" thread (such as a
      # `PrivateThreadPool` thread in this case). It does not include
      # the TensorFlow-given display name, but it has a unique
      # identifier that maps one-to-one with the underlying OS thread.
      return np.array(threading.current_thread().ident).astype(np.int64)

    dataset = (
        dataset_ops.Dataset.range(1000).map(
            lambda x: script_ops.py_func(get_thread_id, [x], dtypes.int64),
            num_parallel_calls=32).apply(unique.unique()))
    dataset = override_threadpool_fn(dataset)
    next_element = self.getNext(dataset, requires_initialization=True)

    thread_ids = []
    try:
      while True:
        thread_ids.append(self.evaluate(next_element()))
    except errors.OutOfRangeError:
      pass
    self.assertLen(thread_ids, len(set(thread_ids)))
    self.assertNotEmpty(thread_ids)
    if num_threads:
      # NOTE(mrry): We don't control the thread pool scheduling, and
      # so cannot guarantee that all of the threads in the pool will
      # perform work.
      self.assertLessEqual(len(thread_ids), num_threads)
Example #16
  def testMapAndBatchOutOfRangeError(self, threshold):

    def raising_py_fn(i):
      if i >= threshold:
        raise StopIteration()
      else:
        return i

    iterator = (
        dataset_ops.Dataset.range(100).apply(
            batching.map_and_batch(
                lambda x: script_ops.py_func(raising_py_fn, [x], dtypes.int64),
                batch_size=10)).make_one_shot_iterator())
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(threshold // 10):
        self.assertAllEqual([i * 10 + j for j in range(10)],
                            self.evaluate(get_next))
      if threshold % 10 != 0:
        self.assertAllEqual(
            [threshold // 10 * 10 + j for j in range(threshold % 10)],
            sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Example #17
 def create_unknown_shape_dataset(x):
   return script_ops.py_func(
       lambda _: (  # pylint: disable=g-long-lambda
           np.ones(2, dtype=np.float32),
           np.zeros((3, 4), dtype=np.int32)),
       [x],
       [dtypes.float32, dtypes.int32])
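Because py_func drops static shape information, the tensors returned by this
helper report unknown shapes even though the numpy values are always (2,) and
(3, 4). A hedged sketch (assuming TF1 shape semantics):

  float_t, int_t = create_unknown_shape_dataset(
      constant_op.constant(0, dtypes.int64))
  # float_t.shape and int_t.shape are unknown here.
  float_t.set_shape([2])  # reattach the shape by hand when it is known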
Example #18
  def testResourceType(self):
    """Confirm that subscribe correctly handles tensors with 'resource' type."""
    tensor_array = tensor_array_ops.TensorArray(
        dtype=dtypes.float32,
        tensor_array_name='test',
        size=3,
        infer_shape=False)
    writer = tensor_array.write(0, [[4.0, 5.0]])
    reader = writer.read(0)

    shared = []

    def sub(t):
      shared.append(t)
      return t

    # TensorArray's handle output tensor has a 'resource' type and cannot be
    # subscribed as it's not 'numpy compatible' (see dtypes.py).
    # Expect that the original tensor is returned when subscribing to it.
    tensor_array_sub = subscribe.subscribe(
        tensor_array.handle, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
    self.assertIs(tensor_array_sub, tensor_array.handle)
    self.assertFalse(subscribe._is_subscribed_identity(tensor_array.handle))

    with self.cached_session() as sess:
      self.evaluate([reader])
    self.assertEqual(0, len(shared))
Example #19
    def testNoReturnValueStateless(self):
        def do_nothing(unused_x):
            pass

        f = script_ops.py_func(do_nothing, [constant_op.constant(3, dtypes.int64)], [], stateful=False)
        with self.test_session() as sess:
            self.assertEqual(sess.run(f), [])
Example #20
  def testSideEffect(self):
    a = constant_op.constant(1)
    b = constant_op.constant(1)
    c = math_ops.add(a, b)
    with ops.control_dependencies([c]):
      d = constant_op.constant(42)
    n = math_ops.negative(c)

    shared = []

    def sub(t):
      shared.append(t)
      return t

    c0 = c
    self.assertTrue(c0.op in d.op.control_inputs)
    c = subscribe.subscribe(c,
                            lambda t: script_ops.py_func(sub, [t], [t.dtype]))
    # Verify that control dependencies are correctly moved to the subscription.
    self.assertFalse(c0.op in d.op.control_inputs)
    self.assertTrue(c.op in d.op.control_inputs)

    with self.cached_session() as sess:
      c_out = self.evaluate([c])
      n_out = self.evaluate([n])
      d_out = self.evaluate([d])

    self.assertEqual(n_out, [-2])
    self.assertEqual(c_out, [2])
    self.assertEqual(d_out, [42])
    self.assertEqual(shared, [2, 2, 2])
Example #21
  def testMultipleOutputs(self):
    """Handle subscriptions to multiple outputs from the same op."""
    sparse_tensor_1 = sparse_tensor.SparseTensor(
        indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    sparse_tensor_2 = sparse_tensor.SparseTensor(
        indices=[[0, 0], [1, 2]], values=[2, 3], dense_shape=[3, 4])

    # This op has three outputs.
    sparse_add = sparse_ops.sparse_add(sparse_tensor_1, sparse_tensor_2)

    self.assertEqual(3, len(sparse_add.op.outputs))

    c1 = constant_op.constant(1)

    with ops.control_dependencies(sparse_add.op.outputs):
      # This op depends on all the three outputs.
      neg = -c1

    shared = []
    def sub(t):
      shared.append(t)
      return t

    # Subscribe the three outputs at once.
    subscribe.subscribe(sparse_add.op.outputs,
                        lambda t: script_ops.py_func(sub, [t], [t.dtype]))

    with self.cached_session() as sess:
      self.evaluate([neg])

    # All three ops have been processed.
    self.assertEqual(3, len(shared))
Example #22
 def testArray(self):
   with self.test_session():
     x = constant_op.constant([1.0, 2.0], dtypes.float64)
     y = constant_op.constant([2.0, 3.0], dtypes.float64)
     z = self.evaluate(script_ops.py_func(np_func, [x, y], [dtypes.float64]))
     self.assertAllEqual(z[0],
                         np_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
Example #23
  def testStringsAreConvertedToBytes(self):

    def read_fixed_length_numpy_strings():
      return np.array([" there"])

    def read_and_return_strings(x, y):
      return x + y

    with self.test_session():
      x = constant_op.constant(["hello", "hi"], dtypes.string)
      y = self.evaluate(
          script_ops.py_func(read_fixed_length_numpy_strings, [],
                             dtypes.string))
      z = self.evaluate(
          script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
      self.assertAllEqual(z, [b"hello there", b"hi there"])
Example #24
 def testAlias(self):
   with self.test_session():
     np_array = np.array([1.0, 2.0], dtype=np.float32)
     tf_array = script_ops.py_func(lambda: np_array, [], [dtypes.float32])
     value = tf_array + constant_op.constant([2.0, 3.0], dtype=dtypes.float32)
     value.op.run()
     self.assertAllEqual(np_array, [1.0, 2.0])
Example #25
def _compute_ami_score(labels, predictions):
  ami_score = math_ops.to_float(
      script_ops.py_func(
          metrics.adjusted_mutual_info_score, [labels, predictions],
          [dtypes.float64],
          name='ami'))
  return math_ops.maximum(0.0, ami_score)
Example #26
 def testCleanup(self):
     for _ in range(1000):
         g = ops.Graph()
         with g.as_default():
             c = constant_op.constant([1.0], dtypes.float32)
             _ = script_ops.py_func(lambda x: x + 1, [c], [dtypes.float32])
     self.assertTrue(script_ops._py_funcs.size() < 100)
Example #27
  def testMapAndBatchMapError(self, threshold, numa_aware):

    def raising_py_fn(i):
      if i >= threshold:
        raise StopIteration()
      else:
        return i

    dataset = dataset_ops.Dataset.range(100).apply(
        batching.map_and_batch(
            lambda x: script_ops.py_func(raising_py_fn, [x], dtypes.int64),
            batch_size=10))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)

    get_next = self.getNext(dataset)
    for i in range(threshold // 10):
      self.assertAllEqual([i * 10 + j for j in range(10)],
                          self.evaluate(get_next()))
    if numa_aware:
      if threshold % 10 != 0:
        self.assertAllEqual(
            [threshold // 10 * 10 + j for j in range(threshold % 10)],
            self.evaluate(get_next()))
    else:
      for i in range(threshold // 10, 10):
        with self.assertRaises(errors.InvalidArgumentError):
          self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
Example #28
    def generator_map_fn(iterator_id_t):
      """Generates the next element from iterator with ID `iterator_id_t`.

      We map this function across an infinite repetition of the
      `iterator_id_t`, and raise `StopIteration` to terminate the iteration.

      Args:
        iterator_id_t: A `tf.int64` tensor whose value uniquely identifies
          the iterator in `generator_state` from which to generate an element.

      Returns:
        A nested structure of tensors representing an element from the iterator.
      """

      def generator_py_func(iterator_id):
        """A `py_func` that will be called to invoke the iterator."""
        try:
          values = next(generator_state.get_iterator(iterator_id))
        except StopIteration:
          generator_state.iterator_completed(iterator_id)
          raise StopIteration("Iteration finished.")

        # Use the same _convert function from the py_func() implementation to
        # convert the returned values to arrays early, so that we can inspect
        # their values.
        # pylint: disable=protected-access
        ret_arrays = [
            script_ops.FuncRegistry._convert(ret, dtype=dtype.as_numpy_dtype)
            for ret, dtype in zip(nest.flatten_up_to(output_types, values),
                                  flattened_types)
        ]
        # pylint: enable=protected-access

        # Additional type and shape checking to ensure that the components
        # of the generated element match the `output_types` and `output_shapes`
        # arguments.
        for (ret_array, expected_dtype, expected_shape) in zip(
            ret_arrays, flattened_types, flattened_shapes):
          if ret_array.dtype != expected_dtype.as_numpy_dtype:
            raise TypeError(
                "`generator` yielded an element of type %s where an element "
                "of type %s was expected." % (ret_array.dtype,
                                              expected_dtype.as_numpy_dtype))
          if not expected_shape.is_compatible_with(ret_array.shape):
            raise ValueError(
                "`generator` yielded an element of shape %s where an element "
                "of shape %s was expected." % (ret_array.shape, expected_shape))

        return ret_arrays

      flat_values = script_ops.py_func(
          generator_py_func, [iterator_id_t], flattened_types, stateful=True)

      # The `py_func()` op drops the inferred shapes, so we add them back in
      # here.
      if output_shapes is not None:
        for ret_t, shape in zip(flat_values, flattened_shapes):
          ret_t.set_shape(shape)

      return nest.pack_sequence_as(output_types, flat_values)
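This generator_map_fn appears to be the internal plumbing behind the public
Dataset.from_generator API. A minimal usage sketch of that entry point (the
generator below is an assumption, using the module aliases of the surrounding
examples):

  def gen():
    for i in range(3):
      yield np.array([i, i + 1], dtype=np.int64)

  dataset = dataset_ops.Dataset.from_generator(
      gen, output_types=dtypes.int64, output_shapes=[2])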
Example #29
def _compute_ari_score(labels, predictions):
  ari_score = math_ops.to_float(
      script_ops.py_func(
          metrics.adjusted_rand_score, [labels, predictions], [dtypes.float64],
          name='ari'))
  # ari score can go below 0
  # http://scikit-learn.org/stable/modules/clustering.html#adjusted-rand-score
  return math_ops.maximum(0.0, ari_score)
Example #30
 def testStateful(self):
   # Not using self.test_session(), which disables optimization.
   with session_lib.Session() as sess:
     producer = iter(range(3))
     x, = script_ops.py_func(lambda: next(producer), [], [dtypes.int64])
     self.assertEqual(sess.run(x), 0)
     self.assertEqual(sess.run(x), 1)
     self.assertEqual(sess.run(x), 2)
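The default stateful=True is what makes this work: a stateful py_func is
excluded from constant folding and common-subexpression elimination, so it
runs once per session.run call. As a hedged contrast (an assumption about
optimizer behavior, not part of the test):

  producer = iter(range(3))
  # With stateful=False the runtime may fold or deduplicate the call, so a
  # side-effecting body like next(producer) could run fewer times than
  # expected.
  x, = script_ops.py_func(lambda: next(producer), [], [dtypes.int64],
                          stateful=False)  # don't do this with side effects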
Example #31
 def testNoInput(self):
     with self.cached_session():
         x = self.evaluate(
             script_ops.py_func(lambda: 42.0, [], dtypes.float64))
         self.assertAllClose(x, 42.0)
Example #32
        def scan_fn(state, val):
            def py_fn(_):
                raise StopIteration()

            return state, script_ops.py_func(py_fn, [val], dtypes.int64)
Example #33
 def _map_fn(_):
     tids = []
     for _ in range(10):
         tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
     return tids
Example #34
 def map_function(x):
     # Note: branching on a tensor predicate like this only works when the
     # function runs eagerly (or under autograph); in graph mode use
     # control_flow_ops.cond instead.
     if math_ops.equal(x, 0):
         return script_ops.py_func(sleep, [x], x.dtype)
     else:
         return x
Example #36
def _compute_ami_score(labels, predictions):
    ami_score = math_ops.cast(
        script_ops.py_func(metrics.adjusted_mutual_info_score,
                           [labels, predictions], [dtypes.float64],
                           name='ami'), dtypes.float32)
    return math_ops.maximum(0.0, ami_score)
Example #37
 def testStringPaddingAreConvertedToBytes(self):
     inp = ["this", "is", "a", "test"]
     correct = [b"this", b"is", b"a", b"test"]
     with self.cached_session():
         s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
         self.assertAllEqual(s, correct)
Example #38
 def _map_fn(x):
     return script_ops.py_func(_map_py_func, [x], x.dtype)
Example #40
    def testBasic(self):
        def my_func(x, y):
            return np.sinh(x) + np.cosh(y)

        # single type
        with self.test_session():
            x = constant_op.constant(1.0, dtypes.float32)
            y = constant_op.constant(2.0, dtypes.float32)
            z = script_ops.py_func(my_func, [x, y], dtypes.float32)
            self.assertEqual(z.eval(), my_func(1.0, 2.0).astype(np.float32))

        # scalar
        with self.test_session():
            x = constant_op.constant(1.0, dtypes.float32)
            y = constant_op.constant(2.0, dtypes.float32)
            z = script_ops.py_func(my_func, [x, y], [dtypes.float32])
            self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32))

        # array
        with self.test_session():
            x = constant_op.constant([1.0, 2.0], dtypes.float64)
            y = constant_op.constant([2.0, 3.0], dtypes.float64)
            z = script_ops.py_func(my_func, [x, y], [dtypes.float64])
            self.assertAllEqual(
                z[0].eval(),
                my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))

        # a bit exotic type (complex64)
        with self.test_session():
            x = constant_op.constant(1 + 2j, dtypes.complex64)
            y = constant_op.constant(3 + 4j, dtypes.complex64)
            z, = script_ops.py_func(my_func, [x, y], [dtypes.complex64])
            self.assertAllClose(z.eval(), my_func(1 + 2j, 3 + 4j))

        # a slightly more exotic function (rfft)
        with self.test_session():
            x = constant_op.constant([1., 2., 3., 4.], dtypes.float32)

            def rfft(x):
                return np.fft.rfft(x).astype(np.complex64)

            y, = script_ops.py_func(rfft, [x], [dtypes.complex64])
            self.assertAllClose(y.eval(), np.fft.rfft([1., 2., 3., 4.]))

        # returns a python literal.
        with self.test_session():

            def literal(x):
                return 1.0 if x == 0.0 else 0.0

            x = constant_op.constant(0.0, dtypes.float64)
            y, = script_ops.py_func(literal, [x], [dtypes.float64])
            self.assertAllClose(y.eval(), 1.0)

        # returns a list
        with self.test_session():

            def list_func(x):
                return [x, x + 1]

            x = constant_op.constant(0.0, dtypes.float64)
            y, z = script_ops.py_func(list_func, [x], [dtypes.float64] * 2)
            self.assertAllClose(y.eval(), 0.0)
            self.assertAllClose(z.eval(), 1.0)

        # returns a tuple
        with self.test_session():

            def tuple_func(x):
                return x, x + 1

            x = constant_op.constant(0.0, dtypes.float64)
            y, z = script_ops.py_func(tuple_func, [x], [dtypes.float64] * 2)
            self.assertAllClose(y.eval(), 0.0)
            self.assertAllClose(z.eval(), 1.0)

        # returns a tuple, Tout and inp a tuple
        with self.test_session():
            x = constant_op.constant(0.0, dtypes.float64)
            y, z = script_ops.py_func(tuple_func, (x, ),
                                      (dtypes.float64, dtypes.float64))
            self.assertAllClose(y.eval(), 0.0)
            self.assertAllClose(z.eval(), 1.0)
Example #41
 def map_fn(x):
     return script_ops.py_func(map_py_fn, [x], x.dtype)
Example #42
        def _map_fn(x_tensor):
            def _map_py_func(x):
                return x, np.array(37.0, dtype=np.float64)

            return script_ops.py_func(_map_py_func, [x_tensor],
                                      [dtypes.int64, dtypes.float64])
Example #43
 def testNulTerminatedStrings(self):
     inp = np.array(["this\0", "is\0\0", "a\0", "test\0\0"], dtype=np.str_)
     correct = [b"this", b"is", b"a", b"test"]
     with self.cached_session():
         s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
         self.assertAllEqual(s, correct)
Example #45
 def testComplexType(self):
   with self.cached_session():
     x = constant_op.constant(1 + 2j, dtypes.complex64)
     y = constant_op.constant(3 + 4j, dtypes.complex64)
     z = self.evaluate(script_ops.py_func(np_func, [x, y], dtypes.complex64))
     self.assertAllClose(z, np_func(1 + 2j, 3 + 4j))
Example #46
 def fn(x):
     # Upon exiting this function, the py_func holds the sole reference
     # to this lambda, without which it would be garbage collected.
     return script_ops.py_func(lambda x: x, [x], [dtypes.float32])
Example #47
 def testStringPadding(self):
     correct = [b"this", b"is", b"a", b"test"]
     with self.cached_session():
         s, = script_ops.py_func(lambda: [correct], [], [dtypes.string])
         self.assertAllEqual(s, correct)
Example #48
 def interleave_fn(x):
   dataset = dataset_ops.Dataset.from_tensors(x)
   y = script_ops.py_func(map_py_fn, [x], x.dtype)
   dataset = dataset.repeat(y)
   return dataset
Example #49
 def testSingleType(self):
   with self.cached_session():
     x = constant_op.constant(1.0, dtypes.float32)
     y = constant_op.constant(2.0, dtypes.float32)
     z = self.evaluate(script_ops.py_func(np_func, [x, y], dtypes.float32))
     self.assertEqual(z, np_func(1.0, 2.0).astype(np.float32))
Example #50
 def increment(self, diff):
     return script_ops.py_func(self._increment, [diff], [],
                               stateful=True)
Example #51
 def _remote_fn(h):
     handle = script_ops.py_func(_encode_raw, [h], dtypes.string)
     remote_iterator = iterator_ops.Iterator.from_string_handle(
         handle, dataset_3.output_types, dataset_3.output_shapes)
     return remote_iterator.get_next()
Example #52
 def testCOrder(self):
     with self.cached_session():
         val = [[1, 2], [3, 4]]
         x, = script_ops.py_func(lambda: np.array(val, order="F"), [],
                                 [dtypes.int64])
         self.assertAllEqual(val, self.evaluate(x))
Example #53
  def test_training_save_restore(self):
    opt = de.DynamicEmbeddingOptimizer(adam.AdamOptimizer(0.3))
    id = 0
    if test_util.is_gpu_available():
      dim_list = [1, 2, 4, 8, 10, 16, 32, 64, 100, 256, 500]
    else:
      dim_list = [10]
    for key_dtype, value_dtype, dim, step in itertools.product(
        [dtypes.int64],
        [dtypes.float32],
        dim_list,
        [10],
    ):
      id += 1
      save_dir = os.path.join(self.get_temp_dir(), "save_restore")
      save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")

      ids = script_ops.py_func(_create_dynamic_shape_tensor(),
                               inp=[],
                               Tout=key_dtype,
                               stateful=True)

      params = de.get_variable(
          name="params-test-0915-" + str(id),
          key_dtype=key_dtype,
          value_dtype=value_dtype,
          initializer=init_ops.random_normal_initializer(0.0, 0.01),
          dim=dim,
      )
      _, var0 = de.embedding_lookup(params, ids, return_trainable=True)

      def loss():
        return var0 * var0

      params_keys, params_vals = params.export()
      mini = opt.minimize(loss, var_list=[var0])
      opt_slots = [opt.get_slot(var0, _s) for _s in opt.get_slot_names()]
      _saver = saver.Saver([params] + [_s.params for _s in opt_slots])

      with self.session(config=default_config,
                        use_gpu=test_util.is_gpu_available()) as sess:
        self.evaluate(variables.global_variables_initializer())
        for _i in range(step):
          self.evaluate([mini])
        size_before_saved = self.evaluate(params.size())
        np_params_keys_before_saved = self.evaluate(params_keys)
        np_params_vals_before_saved = self.evaluate(params_vals)
        opt_slots_kv_pairs = [_s.params.export() for _s in opt_slots]
        np_slots_kv_pairs_before_saved = [
            self.evaluate(_kv) for _kv in opt_slots_kv_pairs
        ]
        _saver.save(sess, save_path)

      with self.session(config=default_config,
                        use_gpu=test_util.is_gpu_available()) as sess:
        self.evaluate(variables.global_variables_initializer())
        self.assertAllEqual(0, self.evaluate(params.size()))

        _saver.restore(sess, save_path)
        params_keys_restored, params_vals_restored = params.export()
        size_after_restored = self.evaluate(params.size())
        np_params_keys_after_restored = self.evaluate(params_keys_restored)
        np_params_vals_after_restored = self.evaluate(params_vals_restored)

        opt_slots_kv_pairs_restored = [_s.params.export() for _s in opt_slots]
        np_slots_kv_pairs_after_restored = [
            self.evaluate(_kv) for _kv in opt_slots_kv_pairs_restored
        ]
        self.assertAllEqual(size_before_saved, size_after_restored)
        self.assertAllEqual(
            np.sort(np_params_keys_before_saved),
            np.sort(np_params_keys_after_restored),
        )
        self.assertAllEqual(
            np.sort(np_params_vals_before_saved, axis=0),
            np.sort(np_params_vals_after_restored, axis=0),
        )
        for pairs_before, pairs_after in zip(np_slots_kv_pairs_before_saved,
                                             np_slots_kv_pairs_after_restored):
          self.assertAllEqual(
              np.sort(pairs_before[0], axis=0),
              np.sort(pairs_after[0], axis=0),
          )
          self.assertAllEqual(
              np.sort(pairs_before[1], axis=0),
              np.sort(pairs_after[1], axis=0),
          )
        if test_util.is_gpu_available():
          self.assertTrue("GPU" in params.tables[0].resource_handle.device)
Example #54
def python_input(generator, features, name=None):
    """Easily feed data from a python generator into TensorFlow queues.

  Example usage:

  ```python
  def generator():
    for i in range(3):
      yield {"value": i}

  features = {
    "value": tf.FixedLenFeature(shape=[], dtype=dtypes.int32)
  }

  tensor_dict = tf.contrib.training.python_input(generator, features)
  batched_dict = tf.train.batch(
    tensor_dict, batch_size=2, allow_smaller_final_batch=True)

  s = tf.Session()
  tf.train.start_queue_runners()

  batch1 = s.run(batched_dict)  # returns {"value": np.array([0, 1])}
  batch2 = s.run(batched_dict)  # returns {"value": np.array([2])}
  s.run(batched_dict)  # error: Queue is closed (generator finished at i==3)
  ```

  Args:
    generator: A python generator that takes no arguments, and yields dicts
      containing a single minibatch entry one at a time.
    features: A python `dict` mapping keys expected from the generator to
      instances of `tf.FixedLenFeature`, or `tf.FixedLenSequenceFeature`.
    name: (Optional) A name for the operations.

  Returns:
    A dict mapping keys of the `features` dict to `Tensor` objects.
    These `Tensor` objects are outputs of a queue that is fed by `generator`.

  Raises:
    TypeError: If generator is not callable or features is not a dict.
    TypeError: If any of features' values are not a Feature object.
    NotImplementedError: If any of features' values are instances of
      `SparseFeature` or `VarLenFeature`  (these are not currently supported).
    ValueError: If any FixedLenSequenceFeatures contain a default value
      (this field is not supported).
    ValueError: if any FixedLenSequenceFeatures have allow_missing=False
      (this field is not supported).
  """
    if not callable(generator):
        raise TypeError("generator must be callable, saw: %s" % generator)
    if not isinstance(features, dict):
        raise TypeError("features must be a dict, saw: %s" %
                        type(features).__name__)

    with ops.name_scope(name, "python_input"):
        shapes = {}
        dtypes = {}
        for k, v in features.items():
            if isinstance(v, parsing_ops.FixedLenFeature):
                if v.default_value is not None:
                    value = ops.convert_to_tensor(v.default_value,
                                                  dtype=v.dtype,
                                                  name=k)
                    shapes[k] = value.shape
                    dtypes[k] = value.dtype
                else:
                    tensor_shape.TensorShape(v.shape).assert_is_fully_defined()
                    shapes[k] = tensor_shape.TensorShape(v.shape)
                    dtypes[k] = v.dtype
            elif isinstance(v, parsing_ops.VarLenFeature):
                raise NotImplementedError("VarLenFeature not supported")
            elif isinstance(v, parsing_ops.SparseFeature):
                raise NotImplementedError("SparseFeature not supported")
            elif isinstance(v, parsing_ops.FixedLenSequenceFeature):
                if v.default_value is not None:
                    raise ValueError(
                        "FixedLenSequenceFeature with default value not "
                        "supported")
                if not v.allow_missing:
                    raise ValueError(
                        "FixedLenSequenceFeature with allow_missing=False "
                        "not supported")
                tensor_shape.TensorShape(v.shape).assert_is_fully_defined()
                shapes[k] = tensor_shape.TensorShape([None
                                                      ]).concatenate(v.shape)
                dtypes[k] = v.dtype
            else:
                raise TypeError(
                    "Expected value for features key '%s' to be one of "
                    "FixedLenFeature, VarLenFeature, SparseFeature, or "
                    "FixedLenSequenceFeature.  Got: %s" % (k, v))

        keys = list(shapes.keys())
        dtypes_list = [dtypes[pk] for pk in keys]

        counter = [0]
        lock = threading.Lock()
        iterator = iter(generator())

        def generator_iter():
            """Iterate through generator output and return np.arrays to py_func."""
            with lock:
                try:
                    feature_values = next(iterator)
                    counter[0] += 1
                except StopIteration as e:
                    raise StopIteration(
                        "Iteration finished.  Processed %d entries (%s)" %
                        (counter[0], e))

            processed_dict = _process_yielded_dict(feature_values, keys,
                                                   features, dtypes, shapes)
            return [processed_dict[pk] for pk in keys]

        generator_pyfunc_values = script_ops.py_func(generator_iter,
                                                     inp=[],
                                                     Tout=dtypes_list,
                                                     stateful=True)

        pyfunc_input = {k: v for (k, v) in zip(keys, generator_pyfunc_values)}
        for k, v in shapes.items():
            pyfunc_input[k].set_shape(v)

    return pyfunc_input
Example #55
 def finalize_fn(_):
   def finalize_py_func():
     event.set()
     return 0
   return script_ops.py_func(finalize_py_func, [], [dtypes.int64],
                             stateful=True)
Example #56
 def testNoInput(self):
     with self.test_session():
         x, = script_ops.py_func(lambda: 42.0, [], [dtypes.float64])
         self.assertAllClose(x.eval(), 42.0)