Example #1
  def testConcat(self):
    tf_val = tf.concat(0, [[16, 37], tf.placeholder(tf.int32, shape=(2,))])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, None], c_val.as_list())

    tf_val = tf.concat(0,
                       [[16, 37], tf.placeholder(tf.int32, shape=(1,)), [48]])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, 48], c_val.as_list())
Example #2
  def testConstant(self):
    np_val = np.random.rand(3).astype(np.int32)
    tf_val = tf.constant(np_val)
    self.assertEqual(tf.TensorShape(np_val),
                     tensor_util.constant_value_as_shape(tf_val))

    tf_val = tf.constant([], dtype=tf.int32)
    self.assertEqual(tf.TensorShape([]),
                     tensor_util.constant_value_as_shape(tf_val))
Example #3
    def testConstant(self):
        np_val = np.random.rand(3).astype(np.int32)
        tf_val = constant_op.constant(np_val)
        self.assertEqual(tensor_shape.TensorShape(np_val),
                         tensor_util.constant_value_as_shape(tf_val))

        tf_val = constant_op.constant([], dtype=dtypes.int32)
        self.assertEqual(tensor_shape.TensorShape([]),
                         tensor_util.constant_value_as_shape(tf_val))
Example #4
  def testConcat(self):
    tf_val = tf.concat(0, [[16, 37], tf.placeholder(tf.int32, shape=(2,))])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, None], c_val.as_list())

    tf_val = tf.concat(0,
                       [[16, 37], tf.placeholder(tf.int32, shape=(1,)), [48]])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, 48], c_val.as_list())
Example #5
  def __init__(self, event_shape_out, event_shape_in,
               validate_args=False, name=None):
    """Creates a `Reshape` bijector.

    Args:
      event_shape_out: An `int`-like vector-shaped `Tensor`
        representing the fully specified (no -1's) event shape of the
        transformed output.
      event_shape_in: An `int`-like vector-shaped `Tensor`
        representing the fully specified (no -1's) event shape of the
        input.
      validate_args: Python `bool` indicating whether arguments should
        be checked for correctness.
      name: Python `str`, name given to ops managed by this object.

    Raises:
      TypeError: if either `event_shape_in` or `event_shape_out` has
       non-vector shape (`rank > 1`), or non-integer `dtype`.
      ValueError: if either `event_shape_in` or `event_shape_out`
       contains non-positive entries, or if their sizes do not match
       (`prod(event_shape_in)` != `prod(event_shape_out)`), or if
       their dimensionality(s) cannot be statically inferred.
    """
    with ops.name_scope(name, "reshape",
                        values=[event_shape_out, event_shape_in]):

      event_shape_out = ops.convert_to_tensor(event_shape_out,
                                              name="event_shape_out",
                                              preferred_dtype=dtypes.int32)
      event_shape_in = ops.convert_to_tensor(event_shape_in,
                                             name="event_shape_in",
                                             preferred_dtype=dtypes.int32)

      # check that input shapes are positive integers
      assertions = []
      assertions += self._maybe_check_valid_shape(
          event_shape_out, "event_shape_out",
          validate_args=validate_args)
      assertions += self._maybe_check_valid_shape(
          event_shape_in, "event_shape_in", validate_args=validate_args)

      # check that prod(event_shape_in) = prod(event_shape_out)
      assertions += self._maybe_check_matching_sizes(
          event_shape_in, event_shape_out, validate_args=validate_args)

      self._assertions = assertions
      self._event_shape_in = event_shape_in
      self._event_shape_out = event_shape_out
      self._event_shape_in_static = tensor_util.constant_value_as_shape(
          event_shape_in)
      self._event_shape_out_static = tensor_util.constant_value_as_shape(
          event_shape_out)

      super(Reshape, self).__init__(is_constant_jacobian=True,
                                    validate_args=validate_args,
                                    name=name or "reshape")
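The constructor above is easiest to see with a concrete bijector. A minimal usage sketch, assuming the current TensorFlow Probability API (tfp.bijectors.Reshape), which exposes the same event_shape_out/event_shape_in arguments:

import tensorflow as tf
import tensorflow_probability as tfp

# Reinterpret a length-6 event vector as a 2x3 event matrix.
reshape = tfp.bijectors.Reshape(event_shape_out=[2, 3], event_shape_in=[6])
x = tf.range(6, dtype=tf.float32)   # shape [6]
y = reshape.forward(x)              # shape [2, 3]
print(y.shape)                      # (2, 3)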
Example #6
    def testConcat(self):
        tf_val = array_ops.concat(
            [[16, 37],
             array_ops.placeholder(dtypes.int32, shape=(2, ))], 0)
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 37, None, None], c_val.as_list())

        tf_val = array_ops.concat(
            [[16, 37],
             array_ops.placeholder(dtypes.int32, shape=(1, )), [48]], 0)
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 37, None, 48], c_val.as_list())
Example #7
  def testConcat(self):
    tf_val = array_ops.concat(
        [[16, 37], array_ops.placeholder(
            dtypes.int32, shape=(2,))], 0)
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, None], c_val.as_list())

    tf_val = array_ops.concat(
        [[16, 37], array_ops.placeholder(
            dtypes.int32, shape=(1,)), [48]], 0)
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, 48], c_val.as_list())
Example #8
  def testConcat(self):
    # This test needs a placeholder which means we need to construct a graph.
    with ops.Graph().as_default():
      tf_val = array_ops.concat(
          [[16, 37], array_ops.placeholder(dtypes.int32, shape=(2,))], 0)
      c_val = tensor_util.constant_value_as_shape(tf_val)
      self.assertEqual([16, 37, None, None], c_val.as_list())

      tf_val = array_ops.concat(
          [[16, 37],
           array_ops.placeholder(dtypes.int32, shape=(1,)), [48]], 0)
      c_val = tensor_util.constant_value_as_shape(tf_val)
      self.assertEqual([16, 37, None, 48], c_val.as_list())
Example #9
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
    """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
    batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
    if batch_shape_static.is_fully_defined():
        return np.int32(batch_shape_static.as_list()), batch_shape_static, []
    with ops.name_scope(name, "calculate_reshape",
                        [original_shape, new_shape]):
        original_size = math_ops.reduce_prod(original_shape)
        implicit_dim = math_ops.equal(new_shape, -1)
        size_implicit_dim = (
            original_size //
            math_ops.maximum(1, -math_ops.reduce_prod(new_shape)))
        new_ndims = array_ops.shape(new_shape)
        expanded_new_shape = array_ops.where_v2(  # Assumes exactly one `-1`.
            implicit_dim, array_ops.fill(new_ndims, size_implicit_dim),
            new_shape)
        validations = [] if not validate else [
            check_ops.assert_rank(
                original_shape, 1, message="Original shape must be a vector."),
            check_ops.assert_rank(
                new_shape, 1, message="New shape must be a vector."),
            check_ops.assert_less_equal(
                math_ops.count_nonzero(implicit_dim, dtype=dtypes.int32),
                1,
                message="At most one dimension can be unknown."),
            check_ops.assert_positive(expanded_new_shape,
                                      message="Shape elements must be >=-1."),
            check_ops.assert_equal(math_ops.reduce_prod(expanded_new_shape),
                                   original_size,
                                   message="Shape sizes do not match."),
        ]
        return expanded_new_shape, batch_shape_static, validations
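The -1 expansion above relies on the fact that reduce_prod(new_shape) is negative when exactly one entry is -1. A standalone NumPy sketch of that arithmetic (a hypothetical helper, not the library function):

import numpy as np

def expand_implicit_dim(original_shape, new_shape):
    # prod(new_shape) is negative when exactly one entry is -1, so negating
    # it gives the product of the known dims, as in the TF code above.
    original_size = np.prod(original_shape)
    size_implicit = int(original_size // max(1, -np.prod(new_shape)))
    return [size_implicit if d == -1 else d for d in new_shape]

print(expand_implicit_dim([4, 3], [3, -1, 2]))  # [3, 2, 2]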
Example #10
  def sample(self, sample_shape=(), seed=None, name="sample"):
    """Generate samples of the specified shape.

    Note that a call to `sample()` without arguments will generate a single
    sample.

    Args:
      sample_shape: Rank 1 `int32` `Tensor`. Shape of the generated samples.
      seed: Python integer seed for RNG
      name: name to give to the op.

    Returns:
      samples: a `Tensor` with prepended dimensions `sample_shape`.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[sample_shape]):
        sample_shape = ops.convert_to_tensor(sample_shape,
                                             dtype=dtypes.int32,
                                             name="sample_shape")
        total = math_ops.reduce_prod(sample_shape)
        samples = self.sample_n(total, seed)
        output_shape = array_ops.concat(0, [sample_shape, array_ops.slice(
            array_ops.shape(samples), [1], [-1])])
        output = array_ops.reshape(samples, output_shape, name=name)
        output.set_shape(tensor_util.constant_value_as_shape(
            sample_shape).concatenate(samples.get_shape()[1:]))
    return output
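The body above flattens sample_shape into one leading dimension, draws that many samples, then restores sample_shape in front of the event dims. A minimal sketch of the same reshaping, assuming eager TF 2.x and hypothetical sample values:

import tensorflow as tf

sample_shape = tf.constant([2, 3], dtype=tf.int32)
total = tf.reduce_prod(sample_shape)                  # 6
flat_samples = tf.random.normal([int(total), 4])      # [6, 4]; 4 is the event dim
output_shape = tf.concat([sample_shape, tf.shape(flat_samples)[1:]], axis=0)
output = tf.reshape(flat_samples, output_shape)       # [2, 3, 4]
print(output.shape)                                   # (2, 3, 4)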
Example #11
  def _merge_batch_beams(self, t, s=None):
    """Merges the tensor from a batch of beams into a batch by beams.

    More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
    reshape this into [batch_size*beam_width, s]

    Args:
      t: Tensor of dimension [batch_size, beam_width, s]
      s: (Possibly known) depth shape.

    Returns:
      A reshaped version of t with dimension [batch_size * beam_width, s].
    """
    if isinstance(s, ops.Tensor):
      s = tensor_util.constant_value_as_shape(s)
    else:
      s = tensor_shape.TensorShape(s)
    t_shape = array_ops.shape(t)
    static_batch_size = tensor_util.constant_value(self._batch_size)
    batch_size_beam_width = (
        None if static_batch_size is None
        else static_batch_size * self._beam_width)
    reshaped_t = array_ops.reshape(
        t, array_ops.concat(
            ([self._batch_size * self._beam_width], t_shape[2:]), 0))
    reshaped_t.set_shape(
        (tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
    return reshaped_t
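A small sketch of the same merge with known sizes, assuming eager TF 2.x: [batch_size, beam_width, depth] collapses to [batch_size * beam_width, depth].

import tensorflow as tf

batch_size, beam_width, depth = 4, 3, 5
t = tf.zeros([batch_size, beam_width, depth])
merged = tf.reshape(
    t, tf.concat([[batch_size * beam_width], tf.shape(t)[2:]], axis=0))
print(merged.shape)  # (12, 5)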
Example #12
    def sample(self, sample_shape=(), seed=None, name="sample"):
        """Generate samples of the specified shape.

    Note that a call to `sample()` without arguments will generate a single
    sample.

    Args:
      sample_shape: int32 `Tensor` or tuple or list. Shape of the generated
        samples.
      seed: Python integer seed for RNG
      name: name to give to the op.

    Returns:
      samples: a `Tensor` with prepended dimensions `sample_shape`.
    """
        with ops.name_scope(self.name):
            with ops.op_scope([sample_shape], name):
                sample_shape = ops.convert_to_tensor(sample_shape,
                                                     dtype=dtypes.int32,
                                                     name="sample_shape")
                total = math_ops.reduce_prod(sample_shape)
                samples = self.sample_n(total, seed)
                output_shape = array_ops.concat(0, [
                    sample_shape,
                    array_ops.slice(array_ops.shape(samples), [1], [-1])
                ])
                output = array_ops.reshape(samples, output_shape, name=name)
                output.set_shape(
                    tensor_util.constant_value_as_shape(
                        sample_shape).concatenate(samples.get_shape()[1:]))
        return output
Example #13
    def get_shape(self):
        """Get the `TensorShape` that represents the shape of the dense tensor.

    Returns:
      A `TensorShape` object.
    """
        return tensor_util.constant_value_as_shape(self._shape)
Example #14
def validate_init_args_statically(distribution, batch_shape):
    """Helper to __init__ which makes or raises assertions."""
    if tensorshape_util.rank(batch_shape.shape) is not None:
        if tensorshape_util.rank(batch_shape.shape) != 1:
            raise ValueError("`batch_shape` must be a vector "
                             "(saw rank: {}).".format(
                                 tensorshape_util.rank(batch_shape.shape)))

    batch_shape_static = tensor_util.constant_value_as_shape(batch_shape)
    batch_size_static = tensorshape_util.num_elements(batch_shape_static)
    dist_batch_size_static = tensorshape_util.num_elements(
        distribution.batch_shape)

    if batch_size_static is not None and dist_batch_size_static is not None:
        if batch_size_static != dist_batch_size_static:
            raise ValueError("`batch_shape` size ({}) must match "
                             "`distribution.batch_shape` size ({}).".format(
                                 batch_size_static, dist_batch_size_static))

    if tensorshape_util.dims(batch_shape_static) is not None:
        if any(
                tf.compat.dimension_value(dim) is not None
                and tf.compat.dimension_value(dim) < 1
                for dim in batch_shape_static):
            raise ValueError("`batch_shape` elements must be >=-1.")
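The size comparison above operates on statically resolved shapes. A small sketch of the static pieces, assuming eager TF 2.x and the internal tensor_util module used throughout these examples:

import tensorflow as tf
from tensorflow.python.framework import tensor_util  # internal module, as above

batch_shape = tf.constant([2, 3], dtype=tf.int32)
batch_shape_static = tensor_util.constant_value_as_shape(batch_shape)
# num_elements() is what gets compared against the distribution's batch size.
print(batch_shape_static.num_elements())  # 6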
Example #15
  def _replace_event_shape_in_tensorshape(
      self, tensorshape_in, event_shape_in, event_shape_out):
    """Replaces the event shape dims of a `TensorShape`.

    Args:
      tensorshape_in: a `TensorShape` instance in which to attempt replacing
        event shape.
      event_shape_in: `Tensor` containing the event shape expected to be present
        in (rightmost dims of) `tensorshape_in`. Must be compatible with
        the rightmost dims of `tensorshape_in`.
      event_shape_out: `Tensor` containing the shape values with which to
        replace `event_shape_in` in `tensorshape_in`.

    Returns:
      tensorshape_out_: A `TensorShape` with the event shape replaced, if doing
        so is possible given the statically known shape data in
        `tensorshape_in` and `event_shape_in`. Else, `tf.TensorShape(None)`.

    Raises:
      ValueError: if we can determine the event shape portion of
        `tensorshape_in` as well as `event_shape_in` both statically, and they
        are not compatible. "Compatible" here means that they are identical on
        any dims that are not -1 in `event_shape_in`.
    """
    # Default to returning unknown shape
    tensorshape_out_ = tf.TensorShape(None)

    event_ndims_in_ = event_shape_in.shape.num_elements()
    if (event_ndims_in_ is not None and
        self._is_event_shape_fully_defined(tensorshape_in, event_ndims_in_)):
      ndims_ = tensorshape_in.ndims
      sample_and_batch_shape = tensorshape_in[:(ndims_ - event_ndims_in_)]
      event_shape_ = np.int32(tensorshape_in[ndims_ - event_ndims_in_:])

      # If both `event_shape_in` and the event shape dims of `tensorshape_in`
      # are statically known, we can statically validate the event shape.
      #
      # If `event_shape_in` is not statically known, we can only add runtime
      # validations to the graph (if enabled).
      event_shape_in_ = tensor_util.constant_value(event_shape_in)
      if event_shape_in_ is not None:
        # Check that `event_shape_` and `event_shape_in` are compatible in
        # the sense that they have equal entries in any position that isn't a
        # `-1` in `event_shape_in`. Note that our validations at construction
        # time ensure there is at most one such entry in `event_shape_in`.
        event_shape_specified_ = event_shape_[event_shape_in_ >= 0]
        event_shape_in_specified_ = event_shape_in_[event_shape_in_ >= 0]
        if not all(event_shape_specified_ == event_shape_in_specified_):
          raise ValueError(
              'Input `event_shape` does not match `event_shape_in`. ' +
              '({} vs {}).'.format(event_shape_, event_shape_in_))
      else:
        with tf.control_dependencies(self._maybe_validate_event_shape(
            event_shape_, event_shape_in)):
          event_shape_out = tf.identity(event_shape_out)

      tensorshape_out_ = sample_and_batch_shape.concatenate(
          tensor_util.constant_value_as_shape(event_shape_out))

    return tensorshape_out_
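The static path above boils down to slicing off the trailing event dims of a TensorShape and concatenating the new event shape. A static-only sketch with hypothetical sizes:

import tensorflow as tf

tensorshape_in = tf.TensorShape([None, 8, 2, 3])      # batch dims + event shape [2, 3]
event_ndims_in = 2
sample_and_batch = tensorshape_in[:-event_ndims_in]   # (None, 8)
event_shape_out = tf.TensorShape([6])                  # replace [2, 3] with [6]
print(sample_and_batch.concatenate(event_shape_out))   # (None, 8, 6)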
Example #16
    def shape(self):
        """Get the `TensorShape` representing the shape of the dense tensor.

    Returns:
      A `TensorShape` object.
    """
        return tensor_util.constant_value_as_shape(self._dense_shape)
Example #17
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
  batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
  if batch_shape_static.is_fully_defined():
    return np.int32(batch_shape_static.as_list()), batch_shape_static, []
  with tf.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
    original_size = tf.reduce_prod(original_shape)
    implicit_dim = tf.equal(new_shape, -1)
    size_implicit_dim = (
        original_size // tf.maximum(1, -tf.reduce_prod(new_shape)))
    new_ndims = tf.shape(new_shape)
    expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
        implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [
        tf.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        tf.assert_rank(new_shape, 1, message="New shape must be a vector."),
        tf.assert_less_equal(
            tf.count_nonzero(implicit_dim, dtype=tf.int32),
            1,
            message="At most one dimension can be unknown."),
        tf.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        tf.assert_equal(
            tf.reduce_prod(expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
Example #18
  def get_shape(self):
    """Get the `TensorShape` that represents the shape of the dense tensor.

    Returns:
      A `TensorShape` object.
    """
    return tensor_util.constant_value_as_shape(self._shape)
Example #19
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
    """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
    batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
    if tensorshape_util.is_fully_defined(batch_shape_static):
        return np.int32(tensorshape_util.as_list(
            batch_shape_static)), batch_shape_static, []
    with tf.name_scope(name or "calculate_reshape"):
        original_size = tf.reduce_prod(input_tensor=original_shape)
        implicit_dim = tf.equal(new_shape, -1)
        size_implicit_dim = (
            original_size //
            tf.maximum(1, -tf.reduce_prod(input_tensor=new_shape)))
        new_ndims = tf.shape(input=new_shape)
        expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
            implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
        validations = [] if not validate else [  # pylint: disable=g-long-ternary
            assert_util.assert_rank(
                original_shape, 1, message="Original shape must be a vector."),
            assert_util.assert_rank(
                new_shape, 1, message="New shape must be a vector."),
            assert_util.assert_less_equal(
                tf.math.count_nonzero(implicit_dim, dtype=tf.int32),
                1,
                message="At most one dimension can be unknown."),
            assert_util.assert_positive(
                expanded_new_shape, message="Shape elements must be >=-1."),
            assert_util.assert_equal(tf.reduce_prod(
                input_tensor=expanded_new_shape),
                                     original_size,
                                     message="Shape sizes do not match."),
        ]
        return expanded_new_shape, batch_shape_static, validations
Example #20
  def _replace_event_shape_in_shape_tensor(
      self, shape_in, event_shape_in, event_shape_out):
    """Replaces the rightmost dims in a `Tensor` representing a shape.

    Args:
      shape_in: a rank-1 `Tensor` of integers
      event_shape_in: the event shape expected to be present in (rightmost dims
        of) `shape_in`.
      event_shape_out: the event shape with which to replace `event_shape_in` in
        `shape_in`

    Returns:
      shape_out: A rank-1 integer `Tensor` with the same contents as `shape_in`
        except for the event dims, which are replaced with `event_shape_out`.
    """
    # If possible, extract statically known `TensorShape` and transform that.
    tensorshape = tensor_util.constant_value_as_shape(shape_in)
    if tensorshape is not None and tensorshape.is_fully_defined():
      shape_out_ = self._replace_event_shape_in_tensorshape(
          tensorshape, event_shape_in, event_shape_out)
      if shape_out_.is_fully_defined():
        shape_out = tf.convert_to_tensor(
            shape_out_.as_list(), preferred_dtype=tf.int32)
        return shape_out

    # If not possible statically, use fully dynamic reshaping.
    rank = _ndims_from_shape(shape_in)
    event_ndims = _ndims_from_shape(event_shape_in)

    event_shape = shape_in[rank - event_ndims:]
    with tf.control_dependencies(self._maybe_validate_event_shape(
        event_shape, event_shape_in)):
      sample_and_batch_shape = shape_in[:(rank - event_ndims)]
      shape_out = tf.concat([sample_and_batch_shape, event_shape_out], axis=0)
      return shape_out
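When the shape is only known at graph-run time, the replacement reduces to a plain concat on the rank-1 shape tensor. A dynamic-shape sketch with hypothetical dims, assuming eager TF 2.x:

import tensorflow as tf

shape_in = tf.constant([7, 8, 2, 3])     # rank-1 shape tensor; event dims are [2, 3]
event_shape_out = tf.constant([6])
event_ndims = 2
shape_out = tf.concat([shape_in[:-event_ndims], event_shape_out], axis=0)
print(shape_out.numpy())                 # [7 8 6]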
Example #21
  def sample(self, sample_shape=(), seed=None, name="sample",
             **condition_kwargs):
    """Generate samples of the specified shape.

    Note that a call to `sample()` without arguments will generate a single
    sample.

    Args:
      sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
      seed: Python integer seed for RNG
      name: name to give to the op.
      **condition_kwargs: Named arguments forwarded to subclass implementation.

    Returns:
      samples: a `Tensor` with prepended dimensions `sample_shape`.
    """
    with self._name_scope(name, values=[sample_shape]):
      sample_shape = ops.convert_to_tensor(
          sample_shape, dtype=dtypes.int32, name="sample_shape")
      if sample_shape.get_shape().ndims == 0:
        return self.sample_n(sample_shape, seed, **condition_kwargs)
      sample_shape, total = self._expand_sample_shape(sample_shape)
      samples = self.sample_n(total, seed, **condition_kwargs)
      output_shape = array_ops.concat(0, [sample_shape, array_ops.slice(
          array_ops.shape(samples), [1], [-1])])
      output = array_ops.reshape(samples, output_shape)
      output.set_shape(tensor_util.constant_value_as_shape(
          sample_shape).concatenate(samples.get_shape()[1:]))
      return output
Example #22
  def _replace_event_shape_in_shape_tensor(
      self, shape_in, event_shape_in, event_shape_out):
    """Replaces the rightmost dims in a `Tensor` representing a shape.

    Args:
      shape_in: a rank-1 `Tensor` of integers
      event_shape_in: the event shape expected to be present in (rightmost dims
        of) `shape_in`.
      event_shape_out: the event shape with which to replace `event_shape_in` in
        `shape_in`

    Returns:
      shape_out: A rank-1 integer `Tensor` with the same contents as `shape_in`
        except for the event dims, which are replaced with `event_shape_out`.
    """
    # If possible, extract statically known `TensorShape` and transform that.
    tensorshape = tensor_util.constant_value_as_shape(shape_in)
    if tensorshape is not None and tensorshape.is_fully_defined():
      shape_out_ = self._replace_event_shape_in_tensorshape(
          tensorshape, event_shape_in, event_shape_out)
      if shape_out_.is_fully_defined():
        shape_out = tf.convert_to_tensor(
            shape_out_.as_list(), preferred_dtype=tf.int32)
        return shape_out

    # If not possible statically, use fully dynamic reshaping.
    rank = _ndims_from_shape(shape_in)
    event_ndims = _ndims_from_shape(event_shape_in)

    event_shape = shape_in[rank - event_ndims:]
    with tf.control_dependencies(self._maybe_validate_event_shape(
        event_shape, event_shape_in)):
      sample_and_batch_shape = shape_in[:(rank - event_ndims)]
      shape_out = tf.concat([sample_and_batch_shape, event_shape_out], axis=0)
      return shape_out
Example #23
 def testPack(self):
     tf_val = array_ops.stack([
         constant_op.constant(16), 37,
         array_ops.placeholder(dtypes.int32)
     ])
     c_val = tensor_util.constant_value_as_shape(tf_val)
     self.assertEqual([16, 37, None], c_val.as_list())
Example #24
  def shape(self):
    """Get the `TensorShape` representing the shape of the dense tensor.

    Returns:
      A `TensorShape` object.
    """
    return tensor_util.constant_value_as_shape(self._dense_shape)
Example #25
    def _merge_batch_beams(self, t, s=None):
        """Merges the tensor from a batch of beams into a batch by beams.

    More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
    reshape this into [batch_size*beam_width, s]

    Args:
      t: Tensor of dimension [batch_size, beam_width, s]
      s: (Possibly known) depth shape.

    Returns:
      A reshaped version of t with dimension [batch_size * beam_width, s].
    """
        if isinstance(s, ops.Tensor):
            s = tensor_util.constant_value_as_shape(s)
        else:
            s = tensor_shape.TensorShape(s)
        t_shape = array_ops.shape(t)
        static_batch_size = tensor_util.constant_value(self._batch_size)
        batch_size_beam_width = (None if static_batch_size is None else
                                 static_batch_size * self._beam_width)
        reshaped_t = array_ops.reshape(
            t,
            array_ops.concat(
                ([self._batch_size * self._beam_width], t_shape[2:]), 0))
        reshaped_t.set_shape(
            (tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
        return reshaped_t
Example #26
    def sample(self, sample_shape=(), seed=None, name="sample"):
        """Generate samples of the specified shape.

    Note that a call to `sample()` without arguments will generate a single
    sample.

    Args:
      sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
      seed: Python integer seed for RNG
      name: name to give to the op.

    Returns:
      samples: a `Tensor` with prepended dimensions `sample_shape`.
    """
        with self._name_scope(name, values=[sample_shape]):
            sample_shape = ops.convert_to_tensor(sample_shape,
                                                 dtype=dtypes.int32,
                                                 name="sample_shape")
            if sample_shape.get_shape().ndims == 0:
                return self.sample_n(sample_shape, seed)
            sample_shape, total = self._expand_sample_shape(sample_shape)
            samples = self.sample_n(total, seed)
            output_shape = array_ops.concat(0, [
                sample_shape,
                array_ops.slice(array_ops.shape(samples), [1], [-1])
            ])
            output = array_ops.reshape(samples, output_shape)
            output.set_shape(
                tensor_util.constant_value_as_shape(sample_shape).concatenate(
                    samples.get_shape()[1:]))
            return output
Example #27
  def _replace_event_shape_in_tensorshape(
      self, tensorshape_in, event_shape_in, event_shape_out):
    """Replaces the event shape dims of a `TensorShape`.

    Args:
      tensorshape_in: a `TensorShape` instance in which to attempt replacing
        event shape.
      event_shape_in: `Tensor` containing the event shape expected to be present
        in (rightmost dims of) `tensorshape_in`. Must be compatible with
        the rightmost dims of `tensorshape_in`.
      event_shape_out: `Tensor` containing the shape values with which to
        replace `event_shape_in` in `tensorshape_in`.

    Returns:
      tensorshape_out_: A `TensorShape` with the event shape replaced, if doing
        so is possible given the statically known shape data in
        `tensorshape_in` and `event_shape_in`. Else, `tf.TensorShape(None)`.

    Raises:
      ValueError: if we can determine the event shape portion of
        `tensorshape_in` as well as `event_shape_in` both statically, and they
        are not compatible. "Compatible" here means that they are identical on
        any dims that are not -1 in `event_shape_in`.
    """
    # Default to returning unknown shape
    tensorshape_out_ = tf.TensorShape(None)

    event_ndims_in_ = event_shape_in.shape.num_elements()
    if (event_ndims_in_ is not None and
        self._is_event_shape_fully_defined(tensorshape_in, event_ndims_in_)):
      ndims_ = tensorshape_in.ndims
      sample_and_batch_shape = tensorshape_in[:(ndims_ - event_ndims_in_)]
      event_shape_ = np.int32(tensorshape_in[ndims_ - event_ndims_in_:])

      # If both `event_shape_in` and the event shape dims of `tensorshape_in`
      # are statically known, we can statically validate the event shape.
      #
      # If `event_shape_in` is not statically known, we can only add runtime
      # validations to the graph (if enabled).
      event_shape_in_ = tf.contrib.util.constant_value(event_shape_in)
      if event_shape_in_ is not None:
        # Check that `event_shape_` and `event_shape_in` are compatible in
        # the sense that they have equal entries in any position that isn't a
        # `-1` in `event_shape_in`. Note that our validations at construction
        # time ensure there is at most one such entry in `event_shape_in`.
        event_shape_specified_ = event_shape_[event_shape_in_ >= 0]
        event_shape_in_specified_ = event_shape_in_[event_shape_in_ >= 0]
        if not all(event_shape_specified_ == event_shape_in_specified_):
          raise ValueError(
              'Input `event_shape` does not match `event_shape_in`. ' +
              '({} vs {}).'.format(event_shape_, event_shape_in_))
      else:
        with tf.control_dependencies(self._maybe_validate_event_shape(
            event_shape_, event_shape_in)):
          event_shape_out = tf.identity(event_shape_out)

      tensorshape_out_ = sample_and_batch_shape.concatenate(
          tensor_util.constant_value_as_shape(event_shape_out))

    return tensorshape_out_
Example #28
 def testPack(self):
   # This test needs a placeholder which means we need to construct a graph.
   with ops.Graph().as_default():
     tf_val = array_ops.stack(
         [constant_op.constant(16), 37,
          array_ops.placeholder(dtypes.int32)])
     c_val = tensor_util.constant_value_as_shape(tf_val)
     self.assertEqual([16, 37, None], c_val.as_list())
Example #29
 def _to_batched_tensor_list(self, value):
   dense_shape = tensor_util.constant_value_as_shape(value.dense_shape)
   if self._shape.merge_with(dense_shape).ndims == 0:
     raise ValueError(
         "Unbatching a sparse tensor is only supported for rank >= 1")
   return [gen_sparse_ops.serialize_many_sparse(
       value.indices, value.values, value.dense_shape,
       out_type=dtypes.variant)]
Example #30
def _maybe_set_static_shape_helper(tensor, shape, postfix_tensor):
    if (not context.executing_eagerly()
            and ops.get_default_graph().building_function
            and not tensor.shape.is_fully_defined()):
        shape = tensor_util.shape_tensor(shape)
        const_shape = tensor_util.constant_value_as_shape(shape)
        postfix_tensor = ops.convert_to_tensor(postfix_tensor)
        tensor.set_shape(const_shape.concatenate(postfix_tensor.shape))
Example #31
 def indexed_slices_repr(x):
     """
 :param tf.IndexedSlices x:
 :rtype: str
 """
     dense_shape = tensor_util.constant_value_as_shape(x.dense_shape)
     return "<tf.IndexedSlices %r dense_shape=%r dtype=%r>" % (
         x.name, dense_shape, x.dtype)
Example #32
def _sparse_reshape_gpu(sp_input, shape, name=None):
    if not hasattr(tfra_math_ops, 'tfra_sparse_reshape'):
        tf_logging.warn('`tfra.dynamic_embedding.sparse_reshape` is not'
                        ' found. Use tf.sparse.reshape instead.')
        return tf.sparse.reshape(sp_input, shape, name=name)

    sp_input = _convert_to_sparse_tensor(sp_input)
    shape = math_ops.cast(shape, dtype=dtypes.int64)
    with ops.name_scope(name, "SparseReshape", [sp_input]):
        # shape = ops.convert_to_tensor(shape, dtype=sp_input.values.dtype)
        reshaped_ind, reshaped_shape = tfra_math_ops.tfra_sparse_reshape(
            sp_input.indices, sp_input.dense_shape, shape, name=name)

        reshaped_shape_const = tensor_util.constant_value_as_shape(shape)
        reshaped_shape_const = (reshaped_shape_const.as_list()
                                if reshaped_shape_const.ndims is not None else
                                None)

        if (reshaped_shape_const is not None
                and sp_input.shape.is_fully_defined()):
            # constant_value_as_shape tends to get more information about the partial
            # shape values, but here we specifically need to know if the *user* passed
            # a shape with 2+ unknown dimensions; and for that constant_value
            # provides either the user's direct value or None if only partial elements
            # are known via the python shape inference code.
            shape_const_by_user = tensor_util.constant_value(shape)
            if shape_const_by_user is not None:
                num_implied_by_user = sum(d == -1 for d in shape_const_by_user)
                if num_implied_by_user > 1:
                    raise ValueError(
                        "At most one dimension can be inferred (-1). Found: %s"
                        % shape_const_by_user)
            original_reshaped_shape = list(reshaped_shape_const)  # A copy
            in_shape_size = np.prod(sp_input.shape.as_list())
            num_implied = sum(dim is None for dim in reshaped_shape_const)
            if num_implied == 1:
                implied_idx = original_reshaped_shape.index(None)
                non_implied_idx = (original_reshaped_shape[:implied_idx] +
                                   original_reshaped_shape[implied_idx + 1:])
                reshaped_shape_const[implied_idx] = int(
                    in_shape_size // np.prod(non_implied_idx))
            if num_implied <= 1:
                reshaped_size = np.prod(reshaped_shape_const)
                if reshaped_size != in_shape_size:
                    raise ValueError(
                        "Cannot reshape a tensor with %d elements to shape %s "
                        "(%d elements)." %
                        (in_shape_size, original_reshaped_shape,
                         reshaped_size))
                reshaped_shape = constant_op.constant(reshaped_shape_const,
                                                      dtype=dtypes.int64)

        return sparse_tensor.SparseTensor(indices=reshaped_ind,
                                          values=array_ops.identity(
                                              sp_input.values),
                                          dense_shape=reshaped_shape)
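The fallback path mentioned at the top of the function is the stock tf.sparse.reshape, which performs the same single -1 inference. A brief sketch, assuming eager TF 2.x and hypothetical values:

import tensorflow as tf

st = tf.sparse.SparseTensor(indices=[[0, 0], [2, 3]], values=[1, 2],
                            dense_shape=[3, 4])
reshaped = tf.sparse.reshape(st, [2, -1])   # 12 elements -> [2, 6]
print(reshaped.dense_shape.numpy())         # [2 6]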
Example #33
 def is_compatible_with(self, value):
   try:
     value = sparse_tensor_lib.SparseTensor.from_value(value)
   except TypeError:
     return False
   return (isinstance(value, (sparse_tensor_lib.SparseTensor,
                              sparse_tensor_lib.SparseTensorValue)) and
           self._dtype.is_compatible_with(value.dtype) and
           self._dense_shape.is_compatible_with(
               tensor_util.constant_value_as_shape(value.dense_shape)))
Example #34
 def _batch_shape(self):
     # If there's a chance that the batch_shape has been overridden, we return
     # what we statically know about the `batch_shape_override`. This works
     # because: `_is_maybe_batch_override` means `static_override` is `None` or a
     # non-empty list, i.e., we don't statically know the `batch_shape` or we do.
     #
     # Notice that this implementation parallels the `_event_shape` except that
     # the `bijector` doesn't get to alter the `batch_shape`. Recall that
     # `batch_shape` is a property of a distribution while `event_shape` is
     # shared between both the `distribution` instance and the `bijector`.
     static_override = tensor_util.constant_value_as_shape(
         self._override_batch_shape)
     return (static_override if self._is_maybe_batch_override else
             self.distribution.batch_shape)
Example #35
 def _event_shape(self):
     # If there's a chance that the event_shape has been overridden, we return
     # what we statically know about the `event_shape_override`. This works
     # because: `_is_maybe_event_override` means `static_override` is `None` or a
     # non-empty list, i.e., we don't statically know the `event_shape` or we do.
     #
     # Since the `bijector` may change the `event_shape`, we then forward what we
     # know to the bijector. This allows the `bijector` to have final say in the
     # `event_shape`.
     static_override = tensor_util.constant_value_as_shape(
         self._override_event_shape)
     return self.bijector.forward_event_shape(
         static_override if self._is_maybe_event_override else self.
         distribution.event_shape)
Example #36
  def _event_shape(self):
   # If there's a chance that the event_shape has been overridden, we return
   # what we statically know about the `event_shape_override`. This works
   # because: `_is_maybe_event_override` means `static_override` is `None` or a
   # non-empty list, i.e., we don't statically know the `event_shape` or we do.
   #
   # Since the `bijector` may change the `event_shape`, we then forward what we
   # know to the bijector. This allows the `bijector` to have final say in the
   # `event_shape`.
   static_override = tensor_util.constant_value_as_shape(
       self._override_event_shape)
   return self.bijector.forward_event_shape(
       static_override
       if self._is_maybe_event_override
       else self.distribution.event_shape)
Example #37
  def _batch_shape(self):
   # If there's a chance that the batch_shape has been overridden, we return
   # what we statically know about the `batch_shape_override`. This works
   # because: `_is_maybe_batch_override` means `static_override` is `None` or a
   # non-empty list, i.e., we don't statically know the `batch_shape` or we do.
   #
   # Notice that this implementation parallels the `_event_shape` except that
   # the `bijector` doesn't get to alter the `batch_shape`. Recall that
   # `batch_shape` is a property of a distribution while `event_shape` is
   # shared between both the `distribution` instance and the `bijector`.
   static_override = tensor_util.constant_value_as_shape(
       self._override_batch_shape)
   return (static_override
           if self._is_maybe_batch_override
           else self.distribution.batch_shape)
Example #38
    def _maybe_check_valid_shape(self, shape, validate_args):
        """Check that a shape Tensor is int-type and otherwise sane."""
        if not shape.dtype.is_integer:
            raise TypeError("{} dtype ({}) should be `int`-like.".format(
                shape, shape.dtype.name))

        assertions = []

        ndims = array_ops.rank(shape)
        ndims_ = tensor_util.constant_value(ndims)
        if ndims_ is not None and ndims_ > 1:
            raise ValueError("`{}` rank ({}) should be <= 1.".format(
                shape, ndims_))
        elif validate_args:
            assertions.append(
                check_ops.assert_less_equal(
                    ndims,
                    1,
                    message="`{}` rank should be <= 1.".format(shape)))

        shape_ = tensor_util.constant_value_as_shape(shape)
        if shape_.is_fully_defined():
            es = np.int32(shape_.as_list())
            if sum(es == -1) > 1:
                raise ValueError(
                    "`{}` must have at most one `-1` (given {})".format(
                        shape, es))
            if np.any(es < -1):
                raise ValueError(
                    "`{}` elements must be either positive integers or `-1`"
                    "(given {}).".format(shape, es))
        elif validate_args:
            assertions.extend([
                check_ops.assert_less_equal(
                    math_ops.reduce_sum(
                        math_ops.cast(math_ops.equal(shape, -1),
                                      dtypes.int32)),
                    1,
                    message="`{}` elements must have at most one `-1`.".format(
                        shape)),
                check_ops.assert_greater_equal(
                    shape,
                    -1,
                    message=
                    "`{}` elements must be either positive integers or `-1`.".
                    format(shape)),
            ])
        return assertions
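The static branch above reduces to two NumPy checks on the candidate shape: at most one -1, and no entry below -1. A tiny sketch with a hypothetical shape:

import numpy as np

es = np.int32([3, -1, 2])
assert np.sum(es == -1) <= 1, "at most one `-1` allowed"
assert not np.any(es < -1), "entries must be positive integers or `-1`"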
Example #39
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
    """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-0 or rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.

  Raises:
    ValueError: If the shape is rank-0 and is not statically known to be -1.
  """
    shape = tf.get_static_value(tensor)
    if shape is not None:
        return tensor_shape.as_shape(
            [None if dim == -1 else dim for dim in shape])
    try:
        # Importing here, conditionally, to avoid a hard dependency on
        # DeferredTensor, because that creates a BUILD dependency cycle.
        # Why is it necessary to mention DeferredTensor at all?
        # Because TF's `constant_value_as_shape` barfs on it: b/142254634.
        # NOTE: In the JAX/NumPy backends, DeferredTensor is not a class/type.
        # pylint: disable=g-import-not-at-top
        from tensorflow_probability.python.util.deferred_tensor import DeferredTensor
        if isinstance(DeferredTensor, type) and isinstance(
                tensor, DeferredTensor):
            # Presumably not constant if deferred
            return tf.TensorShape(None)
    except ImportError:
        # If DeferredTensor doesn't even exist, couldn't have been an instance of
        # it.
        pass
    if tf.executing_eagerly():
        # Working around b/142251799
        if hasattr(ops, 'EagerTensor') and isinstance(tensor, ops.EagerTensor):
            return tensor_shape.as_shape(
                [dim if dim != -1 else None for dim in tensor.numpy()])
        else:
            return tf.TensorShape(None)
    return tensor_util.constant_value_as_shape(tensor)
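A brief usage sketch, assuming TF 2.x eager mode and the internal tensor_util module used throughout these examples; -1 entries come back as unknown dimensions:

import tensorflow as tf
from tensorflow.python.framework import tensor_util  # internal module, as above

shape_t = tf.constant([16, 37, -1], dtype=tf.int32)
static_shape = tensor_util.constant_value_as_shape(shape_t)
print(static_shape.as_list())   # [16, 37, None]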
Example #40
def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):
    """Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims."""
    fft_shape = _tensor_util.constant_value_as_shape(fft_length)

    # Edge case: skip padding empty tensors.
    if (input_tensor.shape.ndims is not None
            and any(dim.value == 0 for dim in input_tensor.shape.dims)):
        return input_tensor

    # If we know the shapes ahead of time, we can either skip or pre-compute the
    # appropriate paddings. Otherwise, fall back to computing paddings in
    # TensorFlow.
    if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:
        # Slice the last FFT-rank dimensions from input_tensor's shape.
        input_fft_shape = input_tensor.shape[-fft_shape.ndims:]

        if input_fft_shape.is_fully_defined():
            # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
            if is_reverse:
                fft_shape = fft_shape[:-1].concatenate(
                    fft_shape.dims[-1].value // 2 + 1)

            paddings = [[0, max(fft_dim.value - input_dim.value,
                                0)] for fft_dim, input_dim in zip(
                                    fft_shape.dims, input_fft_shape.dims)]
            if any(pad > 0 for _, pad in paddings):
                outer_paddings = [[0, 0]] * max(
                    (input_tensor.shape.ndims - fft_shape.ndims), 0)
                return _array_ops.pad(input_tensor, outer_paddings + paddings)
            return input_tensor

    # If we can't determine the paddings ahead of time, then we have to pad. If
    # the paddings end up as zero, tf.pad has a special-case that does no work.
    input_rank = _array_ops.rank(input_tensor)
    input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]
    outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
    outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)
    # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
    if is_reverse:
        fft_length = _array_ops.concat(
            [fft_length[:-1], fft_length[-1:] // 2 + 1], 0)
    fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)
    paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)
    paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings],
                                axis=1)
    return _array_ops.pad(input_tensor, paddings)
Example #41
def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):
  """Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims."""
  fft_shape = _tensor_util.constant_value_as_shape(fft_length)

  # Edge case: skip padding empty tensors.
  if (input_tensor.shape.ndims is not None and
      any(dim.value == 0 for dim in input_tensor.shape.dims)):
    return input_tensor

  # If we know the shapes ahead of time, we can either skip or pre-compute the
  # appropriate paddings. Otherwise, fall back to computing paddings in
  # TensorFlow.
  if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:
    # Slice the last FFT-rank dimensions from input_tensor's shape.
    input_fft_shape = input_tensor.shape[-fft_shape.ndims:]

    if input_fft_shape.is_fully_defined():
      # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
      if is_reverse:
        fft_shape = fft_shape[:-1].concatenate(
            fft_shape.dims[-1].value // 2 + 1)

      paddings = [[0, max(fft_dim.value - input_dim.value, 0)]
                  for fft_dim, input_dim in zip(
                      fft_shape.dims, input_fft_shape.dims)]
      if any(pad > 0 for _, pad in paddings):
        outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims -
                                         fft_shape.ndims), 0)
        return _array_ops.pad(input_tensor, outer_paddings + paddings)
      return input_tensor

  # If we can't determine the paddings ahead of time, then we have to pad. If
  # the paddings end up as zero, tf.pad has a special-case that does no work.
  input_rank = _array_ops.rank(input_tensor)
  input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]
  outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
  outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)
  # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
  if is_reverse:
    fft_length = _array_ops.concat([fft_length[:-1],
                                    fft_length[-1:] // 2 + 1], 0)
  fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)
  paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)
  paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings],
                              axis=1)
  return _array_ops.pad(input_tensor, paddings)
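A sketch of the fully static padding path above with hypothetical sizes, assuming eager TF 2.x: pad the innermost dimension up to fft_length before taking an rfft.

import tensorflow as tf

x = tf.random.normal([2, 5])
fft_length = 8
pad = max(fft_length - x.shape[-1], 0)
x_padded = tf.pad(x, [[0, 0], [0, pad]])              # (2, 8)
spectrum = tf.signal.rfft(x_padded, fft_length=[fft_length])
print(spectrum.shape)                                 # (2, 5) == fft_length // 2 + 1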
Example #42
    def _split_batch_beams(self, t, s=None):
        """Splits the tensor from a batch by beams into a batch of beams.

    More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
    reshape this into [batch_size, beam_width, s]

    Args:
      t: Tensor of dimension [batch_size*beam_width, s].
      s: (Possibly known) depth shape.

    Returns:
      A reshaped version of t with dimension [batch_size, beam_width, s].

    Raises:
      ValueError: If, after reshaping, the new tensor is not shaped
        `[batch_size, beam_width, s]` (assuming batch_size and beam_width
        are known statically).
    """
        if isinstance(s, ops.Tensor):
            s = tensor_util.constant_value_as_shape(s)
        else:
            s = tensor_shape.TensorShape(s)
        t_shape = array_ops.shape(t)
        reshaped_t = array_ops.reshape(
            t,
            array_ops.concat(
                ([self._batch_size, self._beam_width], t_shape[1:]), 0))
        if isinstance(self._batch_size, tf.Tensor):
            static_batch_size = tensor_util.constant_value(self._batch_size)
        else:
            static_batch_size = self._batch_size
        expected_reshaped_shape = tensor_shape.TensorShape(
            [static_batch_size, self._beam_width]).concatenate(s)
        if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
            raise ValueError(
                "Unexpected behavior when reshaping between beam width "
                "and batch size.  The reshaped tensor has shape: %s.  "
                "We expected it to have shape "
                "(batch_size, beam_width, depth) == %s.  Perhaps you "
                "forgot to create a zero_state with "
                "batch_size=encoder_batch_size * beam_width?" %
                (reshaped_t.shape, expected_reshaped_shape))
        reshaped_t.set_shape(expected_reshaped_shape)
        return reshaped_t
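The inverse of the merge shown earlier, with hypothetical sizes and assuming eager TF 2.x: [batch_size * beam_width, depth] back to [batch_size, beam_width, depth].

import tensorflow as tf

batch_size, beam_width = 4, 3
merged = tf.zeros([batch_size * beam_width, 5])
split = tf.reshape(
    merged,
    tf.concat([[batch_size, beam_width], tf.shape(merged)[1:]], axis=0))
print(split.shape)  # (4, 3, 5)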
Example #43
  def _maybe_check_valid_shape(self, shape, validate_args):
    """Check that a shape Tensor is int-type and otherwise sane."""
    if not shape.dtype.is_integer:
      raise TypeError("{} dtype ({}) should be `int`-like.".format(
          shape, shape.dtype.name))

    assertions = []

    ndims = array_ops.rank(shape)
    ndims_ = tensor_util.constant_value(ndims)
    if ndims_ is not None and ndims_ > 1:
      raise ValueError("`{}` rank ({}) should be <= 1.".format(
          shape, ndims_))
    elif validate_args:
      assertions.append(check_ops.assert_less_equal(
          ndims, 1, message="`{}` rank should be <= 1.".format(shape)))

    shape_ = tensor_util.constant_value_as_shape(shape)
    if shape_.is_fully_defined():
      es = np.int32(shape_.as_list())
      if sum(es == -1) > 1:
        raise ValueError(
            "`{}` must have at most one `-1` (given {})"
            .format(shape, es))
      if np.any(es < -1):
        raise ValueError(
            "`{}` elements must be either positive integers or `-1`"
            "(given {})."
            .format(shape, es))
    elif validate_args:
      assertions.extend([
          check_ops.assert_less_equal(
              math_ops.reduce_sum(
                  math_ops.cast(math_ops.equal(shape, -1), dtypes.int32)),
              1,
              message="`{}` elements must have at most one `-1`."
              .format(shape)),
          check_ops.assert_greater_equal(
              shape, -1,
              message="`{}` elements must be either positive integers or `-1`."
              .format(shape)),
      ])
    return assertions
Example #44
    def __init__(self, indices, values, dense_shape):
        """Creates a `SparseTensor`.

    Args:
      indices: A 2-D int64 tensor of shape `[N, ndims]`.
      values: A 1-D tensor of any type and shape `[N]`.
      dense_shape: A 1-D int64 tensor of shape `[ndims]`.

    Raises:
      ValueError: When building an eager SparseTensor if `dense_shape` is
        unknown or contains unknown elements (None or -1).
    """
        with ops.name_scope(None, "SparseTensor",
                            [indices, values, dense_shape]):
            indices = ops.convert_to_tensor(indices,
                                            name="indices",
                                            dtype=dtypes.int64)
            # TODO(touts): Consider adding mutable_values() when 'values'
            # is a VariableOp and updating users of SparseTensor.
            values = ops.convert_to_tensor(values, name="values")

            dense_shape = ops.convert_to_tensor(dense_shape,
                                                name="dense_shape",
                                                dtype=dtypes.int64)
            dense_shape_default = tensor_util.constant_value_as_shape(
                dense_shape)

        self._indices = indices
        self._values = values
        self._dense_shape = dense_shape
        self._dense_shape_default = dense_shape_default

        indices_shape = indices.shape.with_rank(2)
        values_shape = values.shape.with_rank(1)
        dense_shape_shape = dense_shape.shape.with_rank(1)

        # Assert number of rows in indices match the number of elements in values.
        indices_shape.dims[0].assert_is_compatible_with(values_shape.dims[0])
        # Assert number of columns in indices matches the number of elements in
        # dense_shape.
        indices_shape.dims[1].assert_is_compatible_with(
            dense_shape_shape.dims[0])
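A minimal construction sketch matching the Args above, in eager TF 2.x; the static .shape is recovered via constant_value_as_shape applied to dense_shape:

import tensorflow as tf

st = tf.sparse.SparseTensor(
    indices=[[0, 0], [1, 2]],   # 2-D int64, shape [N, ndims]
    values=[1.0, 2.0],          # 1-D, shape [N]
    dense_shape=[3, 4])         # 1-D int64, shape [ndims]
print(st.shape)                 # (3, 4)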
Example #45
def validate_init_args_statically(distribution, batch_shape):
    """Helper to __init__ which makes or raises assertions."""
    if batch_shape.shape.ndims is not None:
        if batch_shape.shape.ndims != 1:
            raise ValueError("`batch_shape` must be a vector "
                             "(saw rank: {}).".format(batch_shape.shape.ndims))

    batch_shape_static = tensor_util.constant_value_as_shape(batch_shape)
    batch_size_static = batch_shape_static.num_elements()
    dist_batch_size_static = distribution.batch_shape.num_elements()

    if batch_size_static is not None and dist_batch_size_static is not None:
        if batch_size_static != dist_batch_size_static:
            raise ValueError("`batch_shape` size ({}) must match "
                             "`distribution.batch_shape` size ({}).".format(
                                 batch_size_static, dist_batch_size_static))

    if batch_shape_static.dims is not None:
        if any(dim.value is not None and dim.value < 1
               for dim in batch_shape_static.dims):
            raise ValueError("`batch_shape` elements must be >=-1.")
Example #46
def validate_init_args_statically(distribution, batch_shape):
  """Helper to __init__ which makes or raises assertions."""
  if batch_shape.shape.ndims is not None:
    if batch_shape.shape.ndims != 1:
      raise ValueError("`batch_shape` must be a vector "
                       "(saw rank: {}).".format(batch_shape.shape.ndims))

  batch_shape_static = tensor_util.constant_value_as_shape(batch_shape)
  batch_size_static = batch_shape_static.num_elements()
  dist_batch_size_static = distribution.batch_shape.num_elements()

  if batch_size_static is not None and dist_batch_size_static is not None:
    if batch_size_static != dist_batch_size_static:
      raise ValueError("`batch_shape` size ({}) must match "
                       "`distribution.batch_shape` size ({}).".format(
                           batch_size_static, dist_batch_size_static))

  if batch_shape_static.dims is not None:
    if any(
        dim.value is not None and dim.value < 1 for dim in batch_shape_static):
      raise ValueError("`batch_shape` elements must be >=-1.")
Example #47
  def _split_batch_beams(self, t, s=None):
    """Splits the tensor from a batch by beams into a batch of beams.

    More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
    reshape this into [batch_size, beam_width, s]

    Args:
      t: Tensor of dimension [batch_size*beam_width, s].
      s: (Possibly known) depth shape.

    Returns:
      A reshaped version of t with dimension [batch_size, beam_width, s].

    Raises:
      ValueError: If, after reshaping, the new tensor is not shaped
        `[batch_size, beam_width, s]` (assuming batch_size and beam_width
        are known statically).
    """
    if isinstance(s, ops.Tensor):
      s = tensor_util.constant_value_as_shape(s)
    else:
      s = tensor_shape.TensorShape(s)
    t_shape = array_ops.shape(t)
    reshaped_t = array_ops.reshape(
        t, array_ops.concat(
            ([self._batch_size, self._beam_width], t_shape[1:]), 0))
    static_batch_size = tensor_util.constant_value(self._batch_size)
    expected_reshaped_shape = tensor_shape.TensorShape(
        [static_batch_size, self._beam_width]).concatenate(s)
    if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
      raise ValueError("Unexpected behavior when reshaping between beam width "
                       "and batch size.  The reshaped tensor has shape: %s.  "
                       "We expected it to have shape "
                       "(batch_size, beam_width, depth) == %s.  Perhaps you "
                       "forgot to create a zero_state with "
                       "batch_size=encoder_batch_size * beam_width?"
                       % (reshaped_t.shape, expected_reshaped_shape))
    reshaped_t.set_shape(expected_reshaped_shape)
    return reshaped_t
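The reshape performed by `_split_batch_beams` can be exercised on its own. A standalone sketch with hypothetical sizes (not part of the original class), assuming TF 1.x graph mode:

import tensorflow as tf

batch_size, beam_width, depth = 4, 3, 7  # hypothetical sizes
t = tf.placeholder(tf.float32, shape=[batch_size * beam_width, depth])

# The dynamic tail of the shape ([depth]) is concatenated onto the static
# [batch_size, beam_width] prefix, mirroring the method above.
t_shape = tf.shape(t)
reshaped_t = tf.reshape(
    t, tf.concat(([batch_size, beam_width], t_shape[1:]), 0))
reshaped_t.set_shape([batch_size, beam_width, depth])  # pin the static shape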
Example No. 48
def _call_cpp_shape_fn_impl(
    op, input_tensors_needed,
    input_tensors_as_shapes_needed,
    debug_python_shape_fn, require_shape_fn):
  """Core implementaton of call_cpp_shape_fn."""
  node_def_str = op.node_def.SerializeToString()

  def tensor_to_inference_result(t):
    r = cpp_shape_inference_pb2.CppShapeInferenceResult()
    r.shape.CopyFrom(t.get_shape().as_proto())
    # pylint: disable=protected-access
    r.handle_shape.CopyFrom(t._handle_shape)
    r.handle_dtype = t._handle_dtype
    # pylint: enable=protected-access
    return r.SerializeToString()
  input_shapes = [tensor_to_inference_result(i) for i in op.inputs]

  input_tensors = [None for i in input_shapes]
  for idx in input_tensors_needed:
    v = tensor_util.constant_value(op.inputs[idx])
    if v is not None:
      input_tensors[idx] = np.asarray(v)

  serialized_unknown_shape = (
      tensor_shape.TensorShape(None).as_proto().SerializeToString())
  arr = [serialized_unknown_shape for i in input_shapes]
  for idx in input_tensors_as_shapes_needed:
    s = tensor_util.constant_value_as_shape(op.inputs[idx])
    if s is not None:
      arr[idx] = s.as_proto().SerializeToString()
  input_tensors_as_shapes = arr

  missing_shape_fn = False
  try:
    with errors.raise_exception_on_not_ok_status() as status:
      output = pywrap_tensorflow.RunCppShapeInference(
          node_def_str, input_shapes, input_tensors, input_tensors_as_shapes,
          status)
  except errors.InvalidArgumentError as err:
    if err.message.startswith("No shape inference function exists for op"):
      missing_shape_fn = True
    else:
      raise ValueError(err.message)

  if missing_shape_fn:
    if require_shape_fn:
      raise RuntimeError(
          "No C++ shape function registered for standard op: %s" % op.type)
    return unknown_shape(op)

  output_shapes = output[:-1]

  # Convert TensorShapeProto values in output_shapes.
  result_protos = [
      cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
      for s in output_shapes
  ]
  result = [r.shape for r in result_protos]
  result_handle_shapes = [r.handle_shape for r in result_protos]
  result_handle_dtypes = [r.handle_dtype for r in result_protos]

  if debug_python_shape_fn:
    try:
      python_result = [tensor_shape.as_shape(s)
                       for s in debug_python_shape_fn(op)]
    except Exception as err:
      raise AssertionError("Python shape function return error but "
                           "C++ shape functon did not: %s" % str(err))
    result_as_shapes = [tensor_shape.as_shape(s) for s in result]
    if str(result_as_shapes) != str(python_result):
      raise ValueError(
          ("Python vs CPP shape mismatch.  "
           "CPP: %s vs python: %s on node %s "
           "with input shapes %s") % (
               str(result_as_shapes), str(python_result), str(op.node_def),
               ",".join([str(i.get_shape()) for i in op.inputs])))

  return {"shapes": result,
          "handle_shapes": result_handle_shapes,
          "handle_dtypes": result_handle_dtypes,
          "inputs_needed": output[-1]}
Example No. 49
  def testPack(self):
    tf_val = tf.pack([tf.constant(16), 37, tf.placeholder(tf.int32)])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None], c_val.as_list())
Example No. 50
def call_cpp_shape_fn(
    op,
    input_tensors_needed=None,
    input_tensors_as_shapes_needed=None,
    debug_python_shape_fn=None,
    require_shape_fn=True,
):
    """A shape function that delegates to the registered C++ shape function.

  Args:
    op: the node in the graph for which to compute output shapes.
    input_tensors_needed: a list of input tensor indices for which to compute
      the input tensor's value and pass to the C++ shape function.
    input_tensors_as_shapes_needed: a list of input tensor indices for which to
      compute the constant_value_as_shape and pass to the C++ shape function.
    debug_python_shape_fn: For testing only during migration to using
      call_cpp_shape_fn. Do not submit calls that set this,
      as the comparison is slow. If non-None, the python shape function;
      this function will be called and its output compared to that of
      the C++ shape function.
    require_shape_fn: If true, and the C++ shape function is not registered
      in the current binary then an exception is raised; otherwise, if the
      C++ shape function is not registered then unknown_shape is used.

  Returns:
    A dictionary with the following keys:
      shapes: A TensorShape list of the output shapes of the op, as computed
        using the C++ shape inference function registered for the op.
      handle_shapes: A TensorShape list of the shapes for handle outputs, if
         any.
      handle_dtypes: A list of DataType enums for the handle outputs, if any.

  Raises:
    ValueError: If the C++ shape function returned an error (e.g. because the
      shapes of the inputs are of the wrong rank or otherwise incompatible
      according to the shape function).
    RuntimeError: If the C++ shape function is not registered and
      <require_shape_fn> is True.
  """
    if op.type == "Const":
        # To avoid serializing large constants, we special-case constant
        # here, even though it has a C++ shape function.  When Python
        # calls the C / C-API directly, we should be able to remove this.
        return {
            "shapes": [tensor_shape.TensorShape(op.get_attr("value").tensor_shape)],
            "handle_shapes": [tensor_shape.TensorShape(None).as_proto()],
            "handle_dtypes": [types_pb2.DT_INVALID],
        }

    node_def_str = op.node_def.SerializeToString()

    def tensor_to_inference_result(t):
        r = cpp_shape_inference_pb2.CppShapeInferenceResult()
        r.shape.CopyFrom(t.get_shape().as_proto())
        # pylint: disable=protected-access
        r.handle_shape.CopyFrom(t._handle_shape)
        r.handle_dtype = t._handle_dtype
        # pylint: enable=protected-access
        return r.SerializeToString()

    input_shapes = [tensor_to_inference_result(i) for i in op.inputs]

    input_tensors = [None for i in input_shapes]
    if input_tensors_needed:
        for idx in input_tensors_needed:
            v = tensor_util.constant_value(op.inputs[idx])
            if v is not None:
                input_tensors[idx] = np.asarray(v)

    serialized_unknown_shape = tensor_shape.TensorShape(None).as_proto().SerializeToString()
    arr = [serialized_unknown_shape for i in input_shapes]
    if input_tensors_as_shapes_needed:
        for idx in input_tensors_as_shapes_needed:
            s = tensor_util.constant_value_as_shape(op.inputs[idx])
            if s is not None:
                arr[idx] = s.as_proto().SerializeToString()
    input_tensors_as_shapes = arr

    missing_shape_fn = False
    try:
        with errors.raise_exception_on_not_ok_status() as status:
            output_shapes = pywrap_tensorflow.RunCppShapeInference(
                node_def_str, input_shapes, input_tensors, input_tensors_as_shapes, status
            )
    except errors.InvalidArgumentError as err:
        if err.message.startswith("No shape inference function exists for op"):
            missing_shape_fn = True
        else:
            raise ValueError(err.message)

    if missing_shape_fn:
        if require_shape_fn:
            raise RuntimeError("No C++ shape function registered for standard op: %s" % op.type)
        return unknown_shape(op)

    # Convert TensorShapeProto values in output_shapes.
    result_protos = [cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s) for s in output_shapes]
    result = [r.shape for r in result_protos]
    result_handle_shapes = [r.handle_shape for r in result_protos]
    result_handle_dtypes = [r.handle_dtype for r in result_protos]

    if debug_python_shape_fn:
        try:
            python_result = [tensor_shape.as_shape(s) for s in debug_python_shape_fn(op)]
        except Exception as err:
            raise AssertionError("Python shape function return error but " "C++ shape functon did not: %s" % str(err))
        result_as_shapes = [tensor_shape.as_shape(s) for s in result]
        if str(result_as_shapes) != str(python_result):
            raise ValueError(
                ("Python vs CPP shape mismatch.  " "CPP: %s vs python: %s on node %s " "with input shapes %s")
                % (
                    str(result_as_shapes),
                    str(python_result),
                    str(op.node_def),
                    ",".join([str(i.get_shape()) for i in op.inputs]),
                )
            )

    return {"shapes": result, "handle_shapes": result_handle_shapes, "handle_dtypes": result_handle_dtypes}
Example No. 51
def _call_cpp_shape_fn_impl(
    op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn):
  """Core implementaton of call_cpp_shape_fn."""
  graph_def_version = op.graph.graph_def_versions.producer
  node_def_str = op.node_def.SerializeToString()

  def tensor_to_inference_result(t):
    r = cpp_shape_inference_pb2.CppShapeInferenceResult()
    r.shape.CopyFrom(t.get_shape().as_proto())
    # pylint: disable=protected-access
    if t._handle_data is not None:
      r.handle_data.CopyFrom(t._handle_data)
    # pylint: enable=protected-access
    return r.SerializeToString()
  input_shapes = [tensor_to_inference_result(i) for i in op.inputs]

  input_tensors = [None for i in input_shapes]
  for idx in input_tensors_needed:
    v = tensor_util.constant_value(op.inputs[idx])
    if v is not None:
      input_tensors[idx] = np.asarray(v)

  serialized_unknown_shape = (
      tensor_shape.TensorShape(None).as_proto().SerializeToString())
  arr = [serialized_unknown_shape for i in input_shapes]
  for idx in input_tensors_as_shapes_needed:
    s = tensor_util.constant_value_as_shape(op.inputs[idx])
    if s is not None:
      arr[idx] = s.as_proto().SerializeToString()
  input_tensors_as_shapes = arr

  missing_shape_fn = False
  try:
    with errors.raise_exception_on_not_ok_status() as status:
      output = pywrap_tensorflow.RunCppShapeInference(
          graph_def_version, node_def_str, input_shapes, input_tensors,
          input_tensors_as_shapes, status)
  except errors.InvalidArgumentError as err:
    if err.message.startswith("No shape inference function exists for op"):
      missing_shape_fn = True
    else:
      raise ValueError(err.message)

  if missing_shape_fn:
    if require_shape_fn:
      raise RuntimeError(
          "No C++ shape function registered for standard op: %s" % op.type)
    return unknown_shape(op)

  output_shapes = output[:-1]

  # Convert TensorShapeProto values in output_shapes.
  result_protos = [
      cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
      for s in output_shapes
  ]
  result = [r.shape for r in result_protos]
  result_handle_data = [
      r.handle_data if r.handle_data.is_set else None for r in result_protos
  ]

  return {
      "shapes": result,
      "handle_data": result_handle_data,
      "inputs_needed": output[-1]
  }
Example No. 52
def call_cpp_shape_fn(op,
                      input_tensors_needed=None,
                      input_tensors_as_shapes_needed=None,
                      debug_python_shape_fn=None):
  """A shape function that delegates to the registered C++ shape function.

  Args:
    op: the node in the graph for which to compute output shapes.
    input_tensors_needed: a list of input tensor indices for which to compute
      the input tensor's value and pass to the C++ shape function.
    input_tensors_as_shapes_needed: a list of input tensor indices for which to
      compute the constant_value_as_shape and pass to the C++ shape function.
    debug_python_shape_fn: For testing only during migration to using
      call_cpp_shape_fn. Do not submit calls that set this,
      as the comparison is slow. If non-None, the python shape function;
      this function will be called and its output compared to that of
      the C++ shape function.

  Returns:
    A dictionary with the following keys:
      shapes: A TensorShape list of the output shapes of the op, as computed
        using the C++ shape inference function registered for the op.
      handle_shapes: A TensorShape list of the shapes for handle outputs, if
         any.
      handle_dtypes: A list of DataType enums for the handle outputs, if any.

  Raises:
    ValueError: If the C++ shape function returned an error (e.g. because the
    shapes of the inputs are of the wrong rank or otherwise incompatible
    according to the shape function).
  """
  node_def_str = op.node_def.SerializeToString()

  def tensor_to_inference_result(t):
    r = cpp_shape_inference_pb2.CppShapeInferenceResult()
    r.shape.CopyFrom(t.get_shape().as_proto())
    # pylint: disable=protected-access
    r.handle_shape.CopyFrom(t._handle_shape)
    r.handle_dtype = t._handle_dtype
    # pylint: enable=protected-access
    return r.SerializeToString()
  input_shapes = [tensor_to_inference_result(i) for i in op.inputs]

  input_tensors = [None for i in input_shapes]
  if input_tensors_needed:
    for idx in input_tensors_needed:
      v = tensor_util.constant_value(op.inputs[idx])
      if v is not None:
        input_tensors[idx] = np.asarray(v)

  serialized_unknown_shape = (
      tensor_shape.TensorShape(None).as_proto().SerializeToString())
  arr = [serialized_unknown_shape for i in input_shapes]
  if input_tensors_as_shapes_needed:
    for idx in input_tensors_as_shapes_needed:
      s = tensor_util.constant_value_as_shape(op.inputs[idx])
      if s is not None:
        arr[idx] = s.as_proto().SerializeToString()
  input_tensors_as_shapes = arr

  try:
    with errors.raise_exception_on_not_ok_status() as status:
      output_shapes = pywrap_tensorflow.RunCppShapeInference(
          node_def_str, input_shapes, input_tensors, input_tensors_as_shapes,
          status)
  except errors.InvalidArgumentError as err:
    raise ValueError(err.message)

  # Convert TensorShapeProto values in output_shapes.
  result_protos = [
      cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
      for s in output_shapes
  ]
  result = [r.shape for r in result_protos]
  result_handle_shapes = [r.handle_shape for r in result_protos]
  result_handle_dtypes = [r.handle_dtype for r in result_protos]

  if debug_python_shape_fn:
    try:
      python_result = [tensor_shape.as_shape(s)
                       for s in debug_python_shape_fn(op)]
    except Exception as err:
      raise AssertionError("Python shape function return error but "
                           "C++ shape functon did not: %s" % str(err))
    if str(result) != str(python_result):
      raise ValueError(
          ("Python vs CPP shape mismatch.  "
           "CPP: %s vs python: %s on node %s "
           "with input shapes %s") % (
               str(result), str(python_result), str(op.node_def),
               ",".join([str(i.get_shape()) for i in op.inputs])))

  return {"shapes": result,
          "handle_shapes": result_handle_shapes,
          "handle_dtypes": result_handle_dtypes}
Example No. 53
  def testPack(self):
    tf_val = array_ops.stack(
        [constant_op.constant(16), 37, array_ops.placeholder(dtypes.int32)])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None], c_val.as_list())
Example No. 54
  def testMinusOneBecomesNone(self):
    tf_val = constant_op.constant([-1, 1, -1], shape=[3])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([None, 1, None], c_val.as_list())
Example No. 55
  def testShape(self):
    tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual(tensor_shape.TensorShape([1, 2, 3]), c_val)
Example No. 56
  def testSlice(self):
    tf_val = array_ops.placeholder(dtypes.int32, shape=(4,))[0:2]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([None, None], c_val.as_list())

    # begin:end
    tf_val = constant_op.constant([10, 20, 30])[1:3]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([20, 30], c_val.as_list())

    # begin:end:stride
    tf_val = array_ops.strided_slice(
        constant_op.constant([10, 20, 30]), [1], [3], strides=[2])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([20], c_val.as_list())

    # [1, 2, 16, 37, None, 48]
    tf_val_orig = array_ops.concat(
        [[1, 2, 16, 37], array_ops.placeholder(
            dtypes.int32, shape=(1,)), [48]], 0)

    # begin: no end
    tf_val = tf_val_orig[2:]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, 48], c_val.as_list())

    # begin::negative slice
    tf_val = tf_val_orig[2::-1]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 2, 1], c_val.as_list())

    # :end:negative slice
    tf_val = tf_val_orig[:1:-2]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([48, 37], c_val.as_list())

    # begin:end:negative slice
    tf_val = tf_val_orig[3:1:-1]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([37, 16], c_val.as_list())

    # begin:negative end:slice
    tf_val = tf_val_orig[1:-3:1]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([2, 16], c_val.as_list())

    # negative begin::slice
    tf_val = tf_val_orig[-3::1]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([37, None, 48], c_val.as_list())

    # negative begin::negative slice
    tf_val = tf_val_orig[-3::-1]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([37, 16, 2, 1], c_val.as_list())

    # negative begin:negative end:negative slice
    tf_val = tf_val_orig[-3:-5:-1]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([37, 16], c_val.as_list())

    # Shape inference is not supported for additional slice arguments (Ellipsis).
    tf_val = constant_op.constant([10, 20, 30])[...]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([None, None, None], c_val.as_list())

    # Shape inference is not supported for tensor-valued slice bounds.
    tf_val = constant_op.constant([10, 20, 30])[
        array_ops.placeholder(dtypes.int32, shape=()):]
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual(tensor_shape.unknown_shape(), c_val)

    # Shape inference is not supported for higher-rank tensors.
    with self.assertRaises(ValueError):
      tf_val = constant_op.constant([[10], [20], [30]])[:, 0:]
      c_val = tensor_util.constant_value_as_shape(tf_val)
Example No. 57
def resize_images(images,
                  size,
                  method=ResizeMethod.BILINEAR,
                  align_corners=False):
  """Resize `images` to `size` using the specified `method`.

  Resized images will be distorted if their original aspect ratio is not
  the same as `size`.  To avoid distortions see
  [`resize_image_with_crop_or_pad`](#resize_image_with_crop_or_pad).

  `method` can be one of:

  *   <b>`ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.](https://en.wikipedia.org/wiki/Bilinear_interpolation)
  *   <b>`ResizeMethod.NEAREST_NEIGHBOR`</b>: [Nearest neighbor interpolation.](https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
  *   <b>`ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.](https://en.wikipedia.org/wiki/Bicubic_interpolation)
  *   <b>`ResizeMethod.AREA`</b>: Area interpolation.

  Args:
    images: 4-D Tensor of shape `[batch, height, width, channels]` or
            3-D Tensor of shape `[height, width, channels]`.
    size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
          new size for the images.
    method: ResizeMethod.  Defaults to `ResizeMethod.BILINEAR`.
    align_corners: bool. If true, exactly align all 4 corners of the input and
                   output. Defaults to `false`.

  Raises:
    ValueError: if the shape of `images` is incompatible with the
      shape arguments to this function
    ValueError: if `size` has invalid shape or type.
    ValueError: if an unsupported resize method is specified.

  Returns:
    If `images` was 4-D, a 4-D float Tensor of shape
    `[batch, new_height, new_width, channels]`.
    If `images` was 3-D, a 3-D float Tensor of shape
    `[new_height, new_width, channels]`.
  """
  images = ops.convert_to_tensor(images, name='images')
  if images.get_shape().ndims is None:
    raise ValueError('\'images\' contains no shape.')
  # TODO(shlens): Migrate this functionality to the underlying Op's.
  is_batch = True
  if images.get_shape().ndims == 3:
    is_batch = False
    images = array_ops.expand_dims(images, 0)
  elif images.get_shape().ndims != 4:
    raise ValueError('\'images\' must have either 3 or 4 dimensions.')

  _, height, width, _ = images.get_shape().as_list()

  try:
    size = ops.convert_to_tensor(size, dtypes.int32, name='size')
  except (TypeError, ValueError):
    raise ValueError('\'size\' must be a 1-D int32 Tensor')
  if not size.get_shape().is_compatible_with([2]):
    raise ValueError('\'size\' must be a 1-D Tensor of 2 elements: '
                     'new_height, new_width')
  size_const_as_shape = tensor_util.constant_value_as_shape(size)
  new_height_const = size_const_as_shape[0].value
  new_width_const = size_const_as_shape[1].value

  # If we can determine that the height and width will be unmodified by this
  # transformation, we avoid performing the resize.
  if all(x is not None
         for x in [new_width_const, width, new_height_const, height]) and (
             width == new_width_const and height == new_height_const):
    if not is_batch:
      images = array_ops.squeeze(images, squeeze_dims=[0])
    return images

  if method == ResizeMethod.BILINEAR:
    images = gen_image_ops.resize_bilinear(images,
                                           size,
                                           align_corners=align_corners)
  elif method == ResizeMethod.NEAREST_NEIGHBOR:
    images = gen_image_ops.resize_nearest_neighbor(images,
                                                   size,
                                                   align_corners=align_corners)
  elif method == ResizeMethod.BICUBIC:
    images = gen_image_ops.resize_bicubic(images,
                                          size,
                                          align_corners=align_corners)
  elif method == ResizeMethod.AREA:
    images = gen_image_ops.resize_area(images,
                                       size,
                                       align_corners=align_corners)
  else:
    raise ValueError('Resize method is not implemented.')

  # NOTE(mrry): The shape functions for the resize ops cannot unpack
  # the packed values in `new_size`, so set the shape here.
  images.set_shape([None, new_height_const, new_width_const, None])

  if not is_batch:
    images = array_ops.squeeze(images, squeeze_dims=[0])
  return images
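A short usage sketch (hypothetical sizes, assuming the TF 1.x `tf.image` API shown above): because `size` is a constant here, `constant_value_as_shape` lets `resize_images` pin the static height and width of the result.

import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
size = tf.constant([64, 64], dtype=tf.int32)  # new_height, new_width

resized = tf.image.resize_images(images, size)
# Height and width are now statically 64 thanks to the set_shape call above.
print(resized.get_shape())  # e.g. (?, 64, 64, 3)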
Example No. 58
  def _as_batch_shape(self, shape_like):
    return tensor_shape.vector(None).concatenate(
        tensor_util.constant_value_as_shape(shape_like))
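A hedged sketch of what this helper produces (assuming TF 1.x, where `tensor_shape.vector` is available): an unknown leading sample dimension is prepended to whatever static shape can be recovered from the shape-like tensor. The input here is hypothetical.

import tensorflow as tf
from tensorflow.python.framework import tensor_shape, tensor_util

shape_like = tf.constant([5, 7], dtype=tf.int32)  # hypothetical batch shape
batch_shape = tensor_shape.vector(None).concatenate(
    tensor_util.constant_value_as_shape(shape_like))
print(batch_shape)  # (?, 5, 7)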