Code example #1
File: ops.py Project: Immexxx/tensorflow (also jhabikal21/tensorflow)
def foldl(fn, labeled_tensor, initial_value, name=None):
  """Left fold on the list of tensors unpacked from labeled_tensor.

  See tf.foldl.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
      Its arguments are (accumulated_value, next_value).
    labeled_tensor: The input tensor.
    initial_value: The initial value of the accumulator.
    name: Optional op name.

  Returns:
    The accumulated value.
  """
  with ops.name_scope(name, 'lt_foldl',
                      [labeled_tensor, initial_value]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    initial_value = core.convert_to_labeled_tensor(initial_value)

    @tc.returns(ops.Tensor)
    @tc.accepts(ops.Tensor, ops.Tensor)
    def tf_fn(accumulator, next_element):
      accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
      next_element_lt = core.LabeledTensor(
          next_element, list(labeled_tensor.axes.values())[1:])
      return fn(accumulator_lt, next_element_lt).tensor

    foldl_op = functional_ops.foldl(
        tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
    foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)

    return core.identity(foldl_lt, name=scope)
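A minimal usage sketch of the foldl op above. It assumes TF 1.x, where tensorflow.contrib.labeled_tensor is importable and exports this function as lt.foldl; the axis names and values below are illustrative only, not from the original file.

import tensorflow as tf
import tensorflow.contrib.labeled_tensor as lt

# A ['batch', 'feature'] LabeledTensor; foldl unpacks along the first axis
# ('batch') and folds fn over the resulting ['feature'] slices.
elements = lt.LabeledTensor(
    tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]), ['batch', 'feature'])
initial = lt.LabeledTensor(tf.zeros([2]), ['feature'])

# Running sum over the 'batch' axis; LabeledTensor overloads elementwise '+'.
total = lt.foldl(lambda acc, x: acc + x, elements, initial)

with tf.Session() as sess:
  print(sess.run(total.tensor))  # [ 9. 12.]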
Code example #2
def map_fn(fn, labeled_tensor, name=None):
  """Map on the list of tensors unpacked from labeled_tensor.

  See tf.map_fn.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type LabeledTensor -> LabeledTensor.
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    A tensor that packs the results of applying fn to the list of tensors
    unpacked from labeled_tensor.
  """
  with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    unpack_lts = unpack(labeled_tensor)

    # TODO(ericmc): Fix this upstream.
    if labeled_tensor.dtype == dtypes.string:
      # We must construct the full graph here, because map_fn_lib.map_fn
      # doesn't work for string-valued tensors.
      # Constructing the full graph may be slow.
      map_lts = [fn(t) for t in unpack_lts]
      return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
    else:
      # Figure out what the axis labels should be, but use tf.map_fn to
      # construct the graph because it's efficient.
      # It may be slow to construct the full graph, so we infer the labels from
      # the first element.
      # TODO(ericmc): This builds a subgraph which then gets thrown away.
      # Find a more elegant solution.
      first_map_lt = fn(unpack_lts[0])
      final_axes = list(labeled_tensor.axes.values())[:1] + list(
          first_map_lt.axes.values())

      @tc.returns(ops.Tensor)
      @tc.accepts(ops.Tensor)
      def tf_fn(tensor):
        original_axes = list(labeled_tensor.axes.values())[1:]
        tensor_lt = core.LabeledTensor(tensor, original_axes)
        return fn(tensor_lt).tensor

      map_op = map_fn_lib.map_fn(
          tf_fn, labeled_tensor.tensor, dtype=first_map_lt.dtype)
      map_lt = core.LabeledTensor(map_op, final_axes)

      return core.identity(map_lt, name=scope)
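A minimal usage sketch of map_fn as defined above, again assuming TF 1.x and that the op is exported as lt.map_fn from tensorflow.contrib.labeled_tensor; the axis names are illustrative.

import tensorflow as tf
import tensorflow.contrib.labeled_tensor as lt

# A ['batch', 'channel'] LabeledTensor; map_fn applies fn to each ['channel']
# slice and re-packs the results along the original 'batch' axis.
rows = lt.LabeledTensor(
    tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), ['batch', 'channel'])

# fn maps a ['channel'] LabeledTensor to a ['channel'] LabeledTensor.
doubled = lt.map_fn(lambda x: x + x, rows)

with tf.Session() as sess:
  print(sess.run(doubled.tensor))  # [[ 2.  4.  6.] [ 8. 10. 12.]]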
Code example #3
File: ops.py Project: Ajaycs99/tensorflow
def map_fn(fn, labeled_tensor, name=None):
  """Map on the list of tensors unpacked from labeled_tensor.

  See tf.map_fn.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type LabeledTensor -> LabeledTensor.
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    A tensor that packs the results of applying fn to the list of tensors
    unpacked from labeled_tensor.
  """
  with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    unpack_lts = unpack(labeled_tensor)

    # TODO(ericmc): Fix this upstream.
    if labeled_tensor.dtype == dtypes.string:
      # We must construct the full graph here, because functional_ops.map_fn
      # doesn't work for string-valued tensors.
      # Constructing the full graph may be slow.
      map_lts = [fn(t) for t in unpack_lts]
      return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
    else:
      # Figure out what the axis labels should be, but use tf.map_fn to
      # construct the graph because it's efficient.
      # It may be slow to construct the full graph, so we infer the labels from
      # the first element.
      # TODO(ericmc): This builds a subgraph which then gets thrown away.
      # Find a more elegant solution.
      first_map_lt = fn(unpack_lts[0])
      final_axes = list(labeled_tensor.axes.values())[:1] + list(
          first_map_lt.axes.values())

      @tc.returns(ops.Tensor)
      @tc.accepts(ops.Tensor)
      def tf_fn(tensor):
        original_axes = list(labeled_tensor.axes.values())[1:]
        tensor_lt = core.LabeledTensor(tensor, original_axes)
        return fn(tensor_lt).tensor

      map_op = functional_ops.map_fn(
          tf_fn, labeled_tensor.tensor, dtype=first_map_lt.dtype)
      map_lt = core.LabeledTensor(map_op, final_axes)

      return core.identity(map_lt, name=scope)
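The tf.string branch above falls back to an explicit unpack/apply/pack. The sketch below spells that fallback out by hand; it assumes TF 1.x and that unpack, pack, and identity are exported from tensorflow.contrib.labeled_tensor, with lt.identity standing in for an arbitrary LabeledTensor -> LabeledTensor fn. Axis names are illustrative.

import tensorflow as tf
import tensorflow.contrib.labeled_tensor as lt

words = lt.LabeledTensor(
    tf.constant([['a', 'b'], ['c', 'd']]), ['row', 'col'])

# String-valued tensors take the slow path: build one subgraph per slice along
# the first ('row') axis, then pack the results back onto that axis.
slices = lt.unpack(words)                  # two ['col'] LabeledTensors
mapped = [lt.identity(s) for s in slices]  # fn applied slice by slice
repacked = lt.pack(mapped, list(words.axes.values())[0])  # ['row', 'col'] again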
Code example #4
File: core_test.py Project: tonydeep/tensorflow
  def test_name(self):
    identity_lt = core.identity(self.original_lt)
    self.assertIn('lt_identity', identity_lt.name)
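A hedged sketch of what this test exercises; the real core_test.py fixture may construct original_lt differently, and the axis names here are made up. core.identity (exported as lt.identity) wraps its input in an 'lt_identity' name scope, which is what the assertion checks.

import tensorflow as tf
import tensorflow.contrib.labeled_tensor as lt

original_lt = lt.LabeledTensor(tf.ones([2, 3]), ['x', 'y'])
identity_lt = lt.identity(original_lt)

# The identity op is created under the 'lt_identity' name scope, so the
# resulting tensor's name contains that substring.
assert 'lt_identity' in identity_lt.name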