Code Example #1
File: ops.py Project: sanketg10/tensorflow
def _batch_helper(default_name,
                  batch_fn,
                  batch_size,
                  enqueue_many,
                  labeled_tensors,
                  allow_smaller_final_batch,
                  name=None):
    with ops.name_scope(name, default_name, labeled_tensors) as scope:
        labeled_tensors = [
            core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
        ]

        batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
        # TODO(shoyer): Remove this when they sanitize the TF API.
        if not isinstance(batch_ops, list):
            assert isinstance(batch_ops, ops.Tensor)
            batch_ops = [batch_ops]

        if allow_smaller_final_batch:
            batch_size = None

        @tc.returns(core.Axes)
        @tc.accepts(core.Axes)
        def output_axes(axes):
            if enqueue_many:
                if 'batch' not in axes or list(
                        axes.keys()).index('batch') != 0:
                    raise ValueError(
                        'When enqueue_many is True, input tensors must have an axis '
                        'called "batch" as their first dimension, '
                        'but axes were %s' % axes)
                culled_axes = axes.remove('batch')
                return core.Axes([('batch', batch_size)] +
                                 list(culled_axes.values()))
            else:
                return core.Axes([('batch', batch_size)] + list(axes.values()))

        output_labeled_tensors = []
        for i, tensor in enumerate(batch_ops):
            axes = output_axes(labeled_tensors[i].axes)
            output_labeled_tensors.append(core.LabeledTensor(tensor, axes))

        return output_labeled_tensors
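
For context, here is a sketch of how a public batching wrapper might drive this helper. The `batch` wrapper below is illustrative (it does not appear in the snippet above), and it assumes `tf.train.batch` semantics for the underlying queue:

import tensorflow as tf

def batch(labeled_tensors, batch_size, num_threads=1, capacity=32,
          enqueue_many=False, allow_smaller_final_batch=False, name=None):
    # Hypothetical wrapper: adapt tf.train.batch to the batch_fn
    # interface that _batch_helper expects (list of tensors + scope).
    def fn(tensors, scope):
        return tf.train.batch(
            tensors,
            batch_size=batch_size,
            num_threads=num_threads,
            capacity=capacity,
            enqueue_many=enqueue_many,
            allow_smaller_final_batch=allow_smaller_final_batch,
            name=scope)

    return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
                         labeled_tensors, allow_smaller_final_batch, name)
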
Code Example #2
File: nn_test.py Project: Puschel2020/tensorflow
 def test_binary_ops(self):
     ops = [
         ('sigmoid_cross_entropy_with_logits',
          nn_impl.sigmoid_cross_entropy_with_logits,
          nn.sigmoid_cross_entropy_with_logits),
         ('softmax_cross_entropy_with_logits',
          nn_ops.softmax_cross_entropy_with_logits,
          nn.softmax_cross_entropy_with_logits),
         ('sparse_softmax_cross_entropy_with_logits',
          nn_ops.sparse_softmax_cross_entropy_with_logits,
          nn.sparse_softmax_cross_entropy_with_logits),
     ]
     for op_name, tf_op, lt_op in ops:
         golden_tensor = tf_op(self.original_lt.tensor,
                               self.other_lt.tensor)
         golden_lt = core.LabeledTensor(golden_tensor, self.axes)
         actual_lt = lt_op(self.original_lt, self.other_lt)
         self.assertIn(op_name, actual_lt.name)
         self.assertLabeledTensorsEqual(golden_lt, actual_lt)
Code Example #3
File: ops.py Project: jhabikal21/tensorflow
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
  """Randomly crops a tensor to a given size.

  See tf.random_crop.

  Args:
    labeled_tensor: The input tensor.
    shape_map: A dictionary mapping axis names to the size of the random crop
      for that dimension.
    seed: An optional random seed.
    name: An optional op name.

  Returns:
    A tensor of the same rank as `labeled_tensor`, cropped randomly in the
    selected dimensions.

  Raises:
    ValueError: If the shape map contains an axis name not in the input tensor.
  """
  with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    for axis_name in shape_map:
      if axis_name not in labeled_tensor.axes:
        raise ValueError('Selection axis %s not in axes %s' %
                         (axis_name, labeled_tensor.axes))

    shape = []
    axes = []
    for axis in labeled_tensor.axes.values():
      if axis.name in shape_map:
        size = shape_map[axis.name]
        shape.append(size)
        # We lose labels for the axes we crop, leaving just the size.
        axes.append((axis.name, size))
      else:
        shape.append(len(axis))
        axes.append(axis)

    crop_op = random_ops.random_crop(
        labeled_tensor.tensor, shape, seed=seed, name=scope)

    return core.LabeledTensor(crop_op, axes)
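
A minimal usage sketch, assuming the public `tf.contrib.labeled_tensor` module (TF 1.x) re-exports this op as `lt.random_crop`; the axis names are illustrative:

import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

# A 4x8 image-like tensor with named axes.
image = lt.LabeledTensor(tf.zeros((4, 8)), ['row', 'col'])

# Crop to 2 rows and 3 columns. The cropped axes keep their names but
# only their sizes, since the crop offsets are chosen at run time.
cropped = lt.random_crop(image, {'row': 2, 'col': 3}, seed=42)
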
Code Example #4
File: ops.py Project: jhabikal21/tensorflow
def verify_tensor_all_finite(labeled_tensor, message, name=None):
  """Asserts a tensor doesn't contain NaNs or Infs.

  See tf.verify_tensor_all_finite.

  Args:
    labeled_tensor: The input tensor.
    message: Message to log on failure.
    name: Optional op name.

  Returns:
    The input tensor.
  """
  with ops.name_scope(name, 'lt_verify_tensor_all_finite',
                      [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    op = numerics.verify_tensor_all_finite(
        labeled_tensor.tensor, msg=message, name=scope)
    return core.LabeledTensor(op, labeled_tensor.axes)
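
A short usage sketch, assuming this op is exposed as `lt.verify_tensor_all_finite` in `tf.contrib.labeled_tensor` (TF 1.x):

import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

x = lt.LabeledTensor(tf.constant([1.0, 2.0, 3.0]), ['x'])

# Passes the values through unchanged, but fails at run time if any
# element is NaN or Inf.
checked = lt.verify_tensor_all_finite(x, 'x contains non-finite values')

with tf.Session() as sess:
    print(sess.run(checked.tensor))  # [1. 2. 3.]
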
Code Example #5
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
    """Pack tensors along a new axis.

  See tf.pack.

  Args:
    labeled_tensors: The input tensors, which must have identical axes.
    new_axis: The name of the new axis, or a tuple containing the name
      and coordinate labels.
    axis_position: Optional integer position at which to insert the new axis.
    name: Optional op name.

  Returns:
    The packed tensors as a single LabeledTensor, with `new_axis` in the given
    `axis_position`.

  Raises:
    ValueError: If fewer than one input tensor is provided, or if the tensors
      don't have identical axes.
  """
    with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
        labeled_tensors = [
            core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
        ]

        if len(labeled_tensors) < 1:
            raise ValueError(
                'pack expects at least 1 tensor, but received %s' %
                labeled_tensors)

        axes_0 = labeled_tensors[0].axes
        for t in labeled_tensors:
            if t.axes != axes_0:
                raise ValueError('Non-identical axes. Expected %s but got %s' %
                                 (axes_0, t.axes))

        pack_op = array_ops.stack([t.tensor for t in labeled_tensors],
                                  axis=axis_position,
                                  name=scope)
        axes = list(axes_0.values())
        axes.insert(axis_position, new_axis)
        return core.LabeledTensor(pack_op, axes)
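
A usage sketch, assuming the op is exposed as `lt.pack` (TF 1.x `tf.contrib.labeled_tensor`); the axis names and labels are illustrative:

import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

a = lt.LabeledTensor(tf.constant([1.0, 2.0]), ['x'])
b = lt.LabeledTensor(tf.constant([3.0, 4.0]), ['x'])

# Stack the two tensors along a new leading 'batch' axis, attaching
# coordinate labels to the new axis via the (name, labels) tuple form.
packed = lt.pack([a, b], ('batch', ['a', 'b']))
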
Code Example #6
def boolean_mask(labeled_tensor, mask, name=None):
    """Apply a boolean mask to a labeled tensor.

  Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
  The mask is applied to the first axis of `labeled_tensor`. Labels on the first
  axis are removed, because the indices at which `mask` is True are generally
  not known statically.

  Args:
    labeled_tensor: The input tensor.
    mask: A 1D boolean LabeledTensor whose axis must equal the first axis of
      `labeled_tensor`.
    name: Optional op name.

  Returns:
    The masked labeled tensor.

  Raises:
    ValueError: If the first axis of the mask does not equal the first axis of
      the input tensor.
    NotImplementedError: If the mask has more than one dimension.
  """
    with ops.name_scope(name, 'lt_boolean_mask',
                        [labeled_tensor, mask]) as scope:
        labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
        mask = core.convert_to_labeled_tensor(mask)

        if len(mask.axes) > 1:
            raise NotImplementedError(
                "LabeledTensor's boolean_mask currently only supports 1D masks"
            )
        mask_axis = list(mask.axes.values())[0]
        lt_axis = list(labeled_tensor.axes.values())[0]
        if mask_axis != lt_axis:
            raise ValueError(
                'the first axis of the labeled tensor and the mask '
                'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
        op = array_ops.boolean_mask(labeled_tensor.tensor,
                                    mask.tensor,
                                    name=scope)
        # TODO(shoyer): attempt to infer labels for the masked values, by calling
        # tf.get_static_value on the mask?
        axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
        return core.LabeledTensor(op, axes)
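
A usage sketch, assuming the op is exposed as `lt.boolean_mask` (TF 1.x `tf.contrib.labeled_tensor`):

import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

values = lt.LabeledTensor(tf.range(5), ['x'])
mask = lt.LabeledTensor(tf.range(5) > 2, ['x'])

# Keeps only the entries where mask is True. The resulting 'x' axis has
# unknown size, since the number of True entries is a run-time quantity.
masked = lt.boolean_mask(values, mask)
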
Code Example #7
    def setUp(self):
        super(FloatBinaryOpsTest, self).setUp()

        self.ops = [
            ('igamma', None, math_ops.igamma, core.igamma),
            ('igammac', None, math_ops.igammac, core.igammac),
            ('zeta', None, math_ops.zeta, core.zeta),
            ('polygamma', None, math_ops.polygamma, core.polygamma),
            ('maximum', None, math_ops.maximum, core.maximum),
            ('minimum', None, math_ops.minimum, core.minimum),
            ('squared_difference', None, math_ops.squared_difference,
             core.squared_difference),
        ]
        total_size = np.prod([v.size for v in self.original_lt.axes.values()])
        test_lt = core.LabeledTensor(
            math_ops.cast(self.original_lt, dtypes.float32) / total_size,
            self.original_lt.axes)
        self.test_lt_1 = test_lt
        self.test_lt_2 = 1.0 - test_lt
        self.test_lt_1_broadcast = self.test_lt_1.tensor
        self.test_lt_2_broadcast = self.test_lt_2.tensor
        self.broadcast_axes = self.test_lt_1.axes
Code Example #8
def foldl(fn, labeled_tensor, initial_value, name=None):
    """Left fold on the list of tensors unpacked from labeled_tensor.

  See tf.foldl.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
      Its arguments are (accumulated_value, next_value).
    labeled_tensor: The input tensor.
    initial_value: The initial value of the accumulator.
    name: Optional op name.

  Returns:
    The accumulated value.
  """
    with ops.name_scope(name, 'lt_foldl',
                        [labeled_tensor, initial_value]) as scope:
        labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
        initial_value = core.convert_to_labeled_tensor(initial_value)

        @tc.returns(ops.Tensor)
        @tc.accepts(ops.Tensor, ops.Tensor)
        def tf_fn(accumulator, next_element):
            accumulator_lt = core.LabeledTensor(accumulator,
                                                initial_value.axes)
            next_element_lt = core.LabeledTensor(
                next_element,
                list(labeled_tensor.axes.values())[1:])
            return fn(accumulator_lt, next_element_lt).tensor

        foldl_op = functional_ops.foldl(tf_fn,
                                        labeled_tensor.tensor,
                                        initializer=initial_value.tensor)
        foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)

        return core.identity(foldl_lt, name=scope)
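
A usage sketch, assuming the op is exposed as `lt.foldl` (TF 1.x `tf.contrib.labeled_tensor`) and that LabeledTensors support `+` via the core binary ops:

import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

batch = lt.LabeledTensor(tf.constant([[1.0, 2.0], [3.0, 4.0]]),
                         ['step', 'x'])
initial = lt.LabeledTensor(tf.constant([0.0, 0.0]), ['x'])

# Sum the 'x' vectors unpacked along the leading 'step' axis.
total = lt.foldl(lambda acc, nxt: acc + nxt, batch, initial)

with tf.Session() as sess:
    print(sess.run(total.tensor))  # [4. 6.]
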
Code Example #9
def constant(value, dtype=None, axes=None, name=None):
    """Creates a constant tensor.

  If `axes` includes any strings, shape is inferred from `value`. Otherwise,
  the sizes of the given `axes` are used to set `shape` for `tf.constant`.

  See tf.constant for more details.

  Args:
    value: The input tensor.
    dtype: The type of the returned tensor.
    axes: Optional Axes, list of strings or list of objects coercible to Axis
      objects. By default, axes are assumed to be an empty list (i.e., `value`
      is treated as a scalar).
    name: Optional op name.

  Returns:
    The constant LabeledTensor.
  """
    with ops.name_scope(name, 'lt_constant', [value]) as scope:

        if axes is None:
            axes = []

        if isinstance(axes, core.Axes):
            axes = axes.values()

        if any(isinstance(ax, string_types) for ax in axes):
            # need to infer shape
            shape = None
        else:
            # axes already indicate shape
            axes = [core.as_axis(a) for a in axes]
            shape = [a.size for a in axes]

        op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
        return core.LabeledTensor(op, axes)
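
A usage sketch, assuming the op is exposed as `lt.constant` (TF 1.x `tf.contrib.labeled_tensor`), showing both branches of the shape logic:

from tensorflow.contrib import labeled_tensor as lt

# String axis: the shape is inferred from the value.
v = lt.constant([1, 2, 3], axes=['x'])

# Sized axes: the scalar value is broadcast to shape (2, 3).
m = lt.constant(0.0, axes=[('row', 2), ('col', 3)])
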
Code Example #10
    def test_invalid(self):
        scalar_lt = core.LabeledTensor(array_ops.ones(()), [])
        x_lt = core.LabeledTensor(array_ops.ones((2, )), ['x'])
        x2_lt = core.LabeledTensor(array_ops.ones((3, )), ['x'])
        y_lt = core.LabeledTensor(array_ops.ones((3, )), ['y'])
        xy_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
        xyz_lt = core.LabeledTensor(array_ops.ones((2, 3, 1)), ['x', 'y', 'z'])

        with self.assertRaisesRegexp(ValueError, 'inputs with at least rank'):
            ops.matmul(x_lt, scalar_lt)

        with self.assertRaises(NotImplementedError):
            ops.matmul(x_lt, xyz_lt)

        with self.assertRaisesRegexp(ValueError, 'exactly one axis in common'):
            ops.matmul(x_lt, y_lt)

        with self.assertRaises(NotImplementedError):
            ops.matmul(xy_lt, xy_lt)

        with self.assertRaisesRegexp(ValueError, 'does not match'):
            ops.matmul(x_lt, x2_lt)
Code Example #11
def concat(labeled_tensors, axis_name, name=None):
    """Concatenate tensors along a dimension.

  See tf.concat.

  Args:
    labeled_tensors: A list of input LabeledTensors.
    axis_name: The name of the axis along which to concatenate.
    name: Optional op name.

  Returns:
    The concatenated tensor.
    The coordinate labels for the concatenation dimension are also concatenated,
    if they are available for every tensor.

  Raises:
    ValueError: If fewer than one input tensor is provided, if the tensors
      have incompatible axes, or if `axis_name` isn't the name of an axis.
  """
    with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
        labeled_tensors = [
            core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
        ]

        if len(labeled_tensors) < 1:
            raise ValueError(
                'concat expects at least 1 tensor, but received %s' %
                labeled_tensors)

        # All tensors must have these axes.
        axes_0 = labeled_tensors[0].axes
        axis_names = list(axes_0.keys())

        if axis_name not in axis_names:
            raise ValueError('%s not in %s' % (axis_name, axis_names))

        shared_axes = axes_0.remove(axis_name)

        tensors = [labeled_tensors[0].tensor]
        concat_axis_list = [axes_0[axis_name]]
        for labeled_tensor in labeled_tensors[1:]:
            current_shared_axes = labeled_tensor.axes.remove(axis_name)
            if current_shared_axes != shared_axes:
                # TODO(shoyer): add more specific checks about what went wrong,
                # including raising AxisOrderError when appropriate
                raise ValueError('Mismatched shared axes: the first tensor '
                                 'had axes %r but this tensor has axes %r.' %
                                 (shared_axes, current_shared_axes))

            # Accumulate the axis labels, if they're available.
            concat_axis_list.append(labeled_tensor.axes[axis_name])
            tensors.append(labeled_tensor.tensor)

        concat_axis = core.concat_axes(concat_axis_list)
        concat_dimension = axis_names.index(axis_name)
        concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
        values = list(axes_0.values())
        concat_axes = (values[:concat_dimension] + [concat_axis] +
                       values[concat_dimension + 1:])

        return core.LabeledTensor(concat_tensor, concat_axes)
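
A usage sketch, assuming the op is exposed as `lt.concat` (TF 1.x `tf.contrib.labeled_tensor`); the axis names and labels are illustrative:

import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

a = lt.LabeledTensor(tf.zeros((2, 3)), ['batch', ('x', ['p', 'q', 'r'])])
b = lt.LabeledTensor(tf.zeros((2, 2)), ['batch', ('x', ['s', 't'])])

# Concatenates along 'x'; since both inputs carry coordinate labels for
# that axis, the result's 'x' labels are ['p', 'q', 'r', 's', 't'].
c = lt.concat([a, b], 'x')
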
Code Example #12
 def test(self):
     actual_lt = ops.reduce_mean(self.original_lt, {'channel'})
     golden_lt = core.LabeledTensor(
         math_ops.reduce_mean(self.original_lt.tensor, 1),
         [self.a0, self.a2, self.a3])
     self.assertLabeledTensorsEqual(actual_lt, golden_lt)
Code Example #13
 def test_mismatched_axes(self):
     condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
     with self.assertRaisesRegexp(ValueError, 'equal axes'):
         ops.where(condition, condition[:3], condition)
     with self.assertRaisesRegexp(ValueError, 'equal axes'):
         ops.where(condition, condition, condition[:3])
Code Example #14
 def test_name(self):
     condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
     where_lt = ops.where(condition, condition, condition)
     self.assertIn('lt_where', where_lt.name)
Code Example #15
 def test(self):
     result_lt = ops.reduce_any(self.bool_lt, {'channel'})
     golden_lt = core.LabeledTensor(
         math_ops.reduce_any(self.bool_tensor, 1),
         [self.a0, self.a2, self.a3])
     self.assertLabeledTensorsEqual(result_lt, golden_lt)
Code Example #16
 def tf_fn(tensor):
     original_axes = list(labeled_tensor.axes.values())[1:]
     tensor_lt = core.LabeledTensor(tensor, original_axes)
     return fn(tensor_lt).tensor
Code Example #17
 def test_name(self):
     mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0])
     masked_lt = ops.boolean_mask(self.original_lt, mask)
     self.assertIn('lt_boolean_mask', masked_lt.name)
Code Example #18
 def test_scalar(self):
     select_lt = ops.select(self.original_lt, {'channel': 'green'})
     golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :],
                                    [self.a0, self.a2, self.a3])
     self.assertLabeledTensorsEqual(select_lt, golden_lt)
Code Example #19
def matmul(a, b, name=None):
    """Matrix multiply two tensors with rank 1 or 2.

  If both tensors have rank 2, a matrix-matrix product is performed.
  If one tensor has rank 1 and the other has rank 2, then a matrix-vector
  product is performed.
  If both tensors have rank 1, then a vector dot-product is performed.
  (This behavior matches that of `numpy.dot`.)

  Both tensors must share exactly one dimension in common, which is the
  dimension the operation is summed along. The inputs will be automatically
  transposed if necessary as part of the matmul op.

  We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number shared dimensions (via an `axis`
  argument), but neither of these features has been implemented yet.

  Args:
    a: First LabeledTensor.
    b: Second LabeledTensor.
    name: Optional op name.

  Returns:
    LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in order of appearance on the
    inputs.

  Raises:
    NotImplementedError: If inputs have rank >2 or share multiple axes.
    ValueError: If the inputs have rank 0 or do not share any axes.
  """
    with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:

        a = core.convert_to_labeled_tensor(a)
        b = core.convert_to_labeled_tensor(b)

        if len(a.axes) > 2 or len(b.axes) > 2:
            # We could pass batched inputs to tf.matmul to make this work, but we
            # would also need to use tf.tile and/or tf.transpose. These are more
            # expensive than doing reshapes, so it's not clear if it's a good idea to
            # do this automatically.
            raise NotImplementedError(
                'matmul currently requires inputs with rank 2 or less, but '
                'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))

        if not a.axes or not b.axes:
            raise ValueError(
                'matmul currently requires inputs with at least rank 1, but '
                'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))

        shared_axes = set(a.axes) & set(b.axes)
        if len(shared_axes) > 1:
            raise NotImplementedError(
                'matmul does not yet support summing over multiple shared axes: %r. '
                'Use transpose and reshape to create a single shared axis to sum '
                'over.' % shared_axes)
        if not shared_axes:
            raise ValueError(
                'inputs to matmul must have exactly one axis in common, '
                'but got: %r, %r' % (a.axes.keys(), b.axes.keys()))
        shared_axis, = shared_axes

        if a.axes[shared_axis] != b.axes[shared_axis]:
            raise ValueError(
                'axis %r does not match on input arguments: %r vs %r' %
                (shared_axis, a.axes[shared_axis].value,
                 b.axes[shared_axis].value))

        result_axes = []
        for axes in [a.axes, b.axes]:
            for axis in axes.values():
                if axis.name != shared_axis:
                    result_axes.append(axis)

        axis_scope_order = core.get_axis_order()
        if axis_scope_order is not None:
            result_axis_names = [axis.name for axis in result_axes]
            new_axis_names = [
                name for name in axis_scope_order if name in result_axis_names
            ]
            if new_axis_names != result_axis_names:
                # switch a and b
                b, a = a, b
                # result_axes is a list of length 1 or 2
                result_axes = result_axes[::-1]

        squeeze_dims = []

        if len(a.axes) == 1:
            a_tensor = array_ops.reshape(a.tensor, (1, -1))
            squeeze_dims.append(0)
            transpose_a = False
        else:
            a_tensor = a.tensor
            transpose_a = list(a.axes.keys()).index(shared_axis) == 0

        if len(b.axes) == 1:
            b_tensor = array_ops.reshape(b.tensor, (-1, 1))
            squeeze_dims.append(1)
            transpose_b = False
        else:
            b_tensor = b.tensor
            transpose_b = list(b.axes.keys()).index(shared_axis) == 1

        result_op = math_ops.matmul(a_tensor,
                                    b_tensor,
                                    transpose_a=transpose_a,
                                    transpose_b=transpose_b)

        if squeeze_dims:
            result_op = array_ops.squeeze(result_op, squeeze_dims)
        result_op = array_ops.identity(result_op, name=scope)

        return core.LabeledTensor(result_op, result_axes)
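
A usage sketch, assuming the op is exposed as `lt.matmul` (TF 1.x `tf.contrib.labeled_tensor`):

import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

m = lt.LabeledTensor(tf.ones((2, 3)), ['row', 'col'])
v = lt.LabeledTensor(tf.ones((3,)), ['col'])

# Matrix-vector product: 'col' is the single shared axis and is summed
# out, leaving a rank-1 result with axis 'row'.
mv = lt.matmul(m, v)

# Vector dot product: both inputs have rank 1, so the result is a scalar.
d = lt.matmul(v, v)
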
Code Example #20
 def test_existing_axes(self):
     golden_lt = core.LabeledTensor(constant_op.constant([1, 2]), ['x'])
     constant_lt = ops.constant([1, 2], axes=golden_lt.axes)
     self.assertLabeledTensorsEqual(constant_lt, golden_lt)
Code Example #21
 def test_specify_shape(self):
     constant_lt = ops.constant(1, axes=[('x', 3)])
     golden_lt = core.LabeledTensor(constant_op.constant(1, shape=(3, )),
                                    ['x'])
     self.assertLabeledTensorsEqual(constant_lt, golden_lt)
Code Example #22
 def test_infer_shape(self):
     constant_lt = ops.constant([1, 2], axes=['x'])
     golden_lt = core.LabeledTensor(constant_op.constant([1, 2]), ['x'])
     self.assertLabeledTensorsEqual(constant_lt, golden_lt)
Code Example #23
 def test_scalar(self):
     constant_lt = ops.constant(1)
     golden_lt = core.LabeledTensor(constant_op.constant(1), [])
     self.assertLabeledTensorsEqual(constant_lt, golden_lt)
Code Example #24
 def test_invalid_rank(self):
     mask = core.LabeledTensor(
         array_ops.ones((7, 3)) > 3, [self.a0, self.a1])
     with self.assertRaises(NotImplementedError):
         ops.boolean_mask(self.original_lt, mask)
Code Example #25
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
    """Reshape specific axes of a LabeledTensor.

  Non-indicated axes remain in their original locations.

  Args:
    labeled_tensor: The input tensor.
    existing_axes: List of axis names found on the input tensor. These must
      appear sequentially in the list of axis names on the input. In other
      words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
    new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
      providing new axes with which to replace `existing_axes` in the reshaped
      result. At most one element of `new_axes` may be a string, indicating an
      axis with unknown size.
    name: Optional op name.

  Returns:
    The reshaped LabeledTensor.

  Raises:
    ValueError: If `existing_axes` are not all axes on the input, or if more
     than one of `new_axes` has unknown size.
    AxisOrderError: If `existing_axes` are not a slice of axis names on the
      input.
  """
    with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
        labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

        original_axis_names = list(labeled_tensor.axes.keys())
        existing_axes = list(existing_axes)
        if not set(existing_axes) <= set(original_axis_names):
            raise ValueError(
                'existing_axes %r are not contained in the set of axis '
                'names %r on the input labeled tensor' %
                (existing_axes, original_axis_names))

        start = original_axis_names.index(existing_axes[0])
        stop = original_axis_names.index(existing_axes[-1]) + 1

        if existing_axes != original_axis_names[start:stop]:
            # We could support existing_axes that aren't a slice by using transpose,
            # but that could lead to unpredictable performance consequences because
            # transposes are not free in TensorFlow. If we did transpose
            # automatically, the user might never realize that their data is being
            # produced with the wrong order. (The latter will occur with some frequency
            # because of how broadcasting automatically chooses axis order.)
            # So for now we've taken the strict approach.
            raise core.AxisOrderError(
                'existing_axes %r are not a slice of axis names %r on the input '
                'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
                'axes on the input explicitly.' %
                (existing_axes, original_axis_names))

        if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
            raise ValueError(
                'at most one axis in new_axes can have unknown size. All other '
                'axes must have an indicated integer size or labels: %r' %
                new_axes)

        original_values = list(labeled_tensor.axes.values())
        axis_size = lambda axis: -1 if axis.size is None else axis.size
        shape = [axis_size(axis) for axis in original_values[:start]]
        for axis_ref in new_axes:
            if isinstance(axis_ref, string_types):
                shape.append(-1)
            else:
                axis = core.as_axis(axis_ref)
                shape.append(axis_size(axis))
        shape.extend(axis_size(axis) for axis in original_values[stop:])

        reshaped_tensor = array_ops.reshape(labeled_tensor.tensor,
                                            shape,
                                            name=scope)
        axes = original_values[:start] + list(
            new_axes) + original_values[stop:]
        return core.LabeledTensor(reshaped_tensor, axes)
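
A usage sketch, assuming the op is exposed as `lt.reshape` (TF 1.x `tf.contrib.labeled_tensor`):

import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

image = lt.LabeledTensor(tf.zeros((2, 3, 4)), ['batch', 'row', 'col'])

# Collapse the contiguous 'row' and 'col' axes into a single 'pixel'
# axis. Passing the new axis as a bare string leaves its size to be
# inferred (at most one new axis may do this).
flat = lt.reshape(image, ['row', 'col'], ['pixel'])
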
Code Example #26
 def test_mismatched_axis(self):
     mask = core.LabeledTensor(math_ops.range(7) > 3, ['foo'])
     with self.assertRaisesRegexp(ValueError, 'not equal'):
         ops.boolean_mask(self.original_lt, mask)
Code Example #27
 def test(self):
     like_lt = ops.ones_like(self.original_lt)
     golden_lt = core.LabeledTensor(
         array_ops.ones_like(self.original_lt.tensor),
         self.original_lt.axes)
     self.assertLabeledTensorsEqual(like_lt, golden_lt)
Code Example #28
 def setUp(self):
     super(BaseReduceBoolean, self).setUp()
     self.bool_tensor = math_ops.cast(self.original_lt.tensor > 5,
                                      dtypes.bool)
     self.bool_lt = core.LabeledTensor(self.bool_tensor,
                                       self.original_lt.axes)
Code Example #29
    def op(labeled_tensor, axes=None, name=None):
        """Computes the given reduction across the given axes of a LabeledTensor.

    See `tf.{op_name}` for full details.

    Args:
      labeled_tensor: The input tensor.
      axes: A set of axes or None.
        If None, all axes will be reduced.
        Each axis must be either a string, in which case that dimension will be
        removed, or a pair of (name, None) or (name, label), in which case that
        dimension will be kept with size 1.
      name: Optional op name.

    Returns:
      The reduced LabeledTensor.

    Raises:
      ValueError: if any of the axes to reduce over are not found on
        `labeled_tensor`.
    """
        with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
            labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

            if axes is None:
                axes = labeled_tensor.axes.keys()

            if isinstance(axes, (string_types, tuple)):
                axes = [axes]

            reduction_axes = {}
            axes_to_squeeze = []
            for a in axes:
                if isinstance(a, string_types):
                    # We squeeze out this axis.
                    reduction_axes[a] = a
                    axes_to_squeeze.append(a)
                else:
                    # We keep this axis, with the user-provided labels.
                    (axis_name, label) = a
                    if label is not None:
                        # The input was a single label, so make it a list so it can be
                        # turned into an Axis.
                        label = [label]
                    reduction_axes[axis_name] = (axis_name, label)

            for axis_name in reduction_axes:
                if axis_name not in labeled_tensor.axes:
                    raise ValueError('Axis %s not in axes %s' %
                                     (axis_name, labeled_tensor.axes))

            intermediate_axes = []
            reduction_dimensions = []
            for i, axis in enumerate(labeled_tensor.axes.values()):
                if axis.name in reduction_axes:
                    intermediate_axes.append(reduction_axes[axis.name])
                    reduction_dimensions.append(i)
                else:
                    intermediate_axes.append(axis)

            reduce_op = reduce_fn(labeled_tensor.tensor,
                                  reduction_dimensions,
                                  keepdims=True)
            reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)

            return squeeze(reduce_lt, axes_to_squeeze, name=scope)
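
A usage sketch for the reductions this closure builds, assuming the enclosing factory exposes them publicly as `lt.reduce_sum` and friends (as `tf.contrib.labeled_tensor` does in TF 1.x):

import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

x = lt.LabeledTensor(tf.ones((2, 3)), ['batch', 'channel'])

# String form: 'channel' is reduced and squeezed out entirely.
total = lt.reduce_sum(x, {'channel'})

# Pair form: 'channel' is reduced but kept as a size-1 labeled axis.
kept = lt.reduce_sum(x, [('channel', 'all')])
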
Code Example #30
 def test(self):
     cast_lt = ops.cast(self.original_lt, dtypes.float16)
     golden_lt = core.LabeledTensor(
         math_ops.cast(self.original_lt.tensor, dtypes.float16),
         self.original_lt.axes)
     self.assertLabeledTensorsEqual(cast_lt, golden_lt)