Example #1
def _ParseSingleSequenceExampleShape(op):
    """Shape function for the ParseExample op."""
    op.inputs[0].get_shape().with_rank(0)  # input
    # feature_list_dense_missing_assumed_empty
    op.inputs[1].get_shape().with_rank(1)
    num_context_sparse = op.get_attr("Ncontext_sparse")
    num_context_dense = op.get_attr("Ncontext_dense")
    num_feature_list_dense = op.get_attr("Nfeature_list_dense")
    context_dense_shapes = op.get_attr("context_dense_shapes")
    num_feature_list_sparse = op.get_attr("Nfeature_list_sparse")
    feature_list_dense_shapes = op.get_attr("feature_list_dense_shapes")
    context_sparse_index_shapes = [tensor_shape.matrix(None, 1) for _ in range(num_context_sparse)]
    context_sparse_value_shapes = [tensor_shape.vector(None) for _ in range(num_context_sparse)]
    context_sparse_shape_shapes = [tensor_shape.vector(1) for _ in range(num_context_sparse)]
    context_dense_shapes = [tensor_shape.TensorShape(dense_shape) for dense_shape in context_dense_shapes]
    feature_list_sparse_index_shapes = [tensor_shape.matrix(None, 2) for _ in range(num_feature_list_sparse)]
    feature_list_sparse_value_shapes = [tensor_shape.vector(None) for _ in range(num_feature_list_sparse)]
    feature_list_sparse_shape_shapes = [tensor_shape.vector(2) for _ in range(num_feature_list_sparse)]
    feature_list_dense_shapes = [
        tensor_shape.vector(None).concatenate(dense_shape) for dense_shape in feature_list_dense_shapes
    ]
    assert num_context_dense == len(context_dense_shapes)
    assert num_feature_list_dense == len(feature_list_dense_shapes)
    return (
        context_sparse_index_shapes
        + context_sparse_value_shapes
        + context_sparse_shape_shapes
        + context_dense_shapes
        + feature_list_sparse_index_shapes
        + feature_list_sparse_value_shapes
        + feature_list_sparse_shape_shapes
        + feature_list_dense_shapes
    )
Example #2
  def testShapes(self):
    fdef = self._build_function_def()

    g = function_def_to_graph.function_def_to_graph(fdef)
    self.assertIsNone(g.inputs[0].shape.dims)  # Unknown dims.
    self.assertIsNone(g.inputs[1].shape.dims)  # Unknown dims.
    self.assertIsNone(g.outputs[0].shape.dims)  # Unknown dims.
    self.assertIsNone(g.outputs[1].shape.dims)  # Unknown dims.

    g = function_def_to_graph.function_def_to_graph(
        fdef, input_shapes=[tensor_shape.vector(5),
                            tensor_shape.vector(5)])
    self.assertSequenceEqual(g.inputs[0].shape.dims, [5])
    self.assertSequenceEqual(g.inputs[1].shape.dims, [5])
    self.assertSequenceEqual(g.outputs[0].shape.dims, [5])
    self.assertSequenceEqual(g.outputs[1].shape.dims, [5])

    g = function_def_to_graph.function_def_to_graph(
        fdef, input_shapes=[None, tensor_shape.matrix(5, 7)])
    self.assertIsNone(g.inputs[0].shape.dims)
    self.assertSequenceEqual(g.inputs[1].shape.dims, [5, 7])
    self.assertSequenceEqual(g.outputs[0].shape.dims, [5, 7])
    self.assertSequenceEqual(g.outputs[1].shape.dims, [5, 7])

    # Should raise a ValueError if the length of input_shapes does not match
    # the number of input args in FunctionDef.signature.input_arg.
    with self.assertRaises(ValueError):
      g = function_def_to_graph.function_def_to_graph(
          fdef, input_shapes=[tensor_shape.matrix(5, 7)])
Example #3
  def testBroadcast_one_dimension(self):
    s1 = tensor_shape.vector(5)
    s2 = tensor_shape.vector(7)

    unknown = tensor_shape.unknown_shape()
    scalar = tensor_shape.scalar()
    expanded_scalar = tensor_shape.TensorShape([1])

    # Tensors with same shape should have the same broadcast result.
    for shape in (s1, s2, unknown, scalar, expanded_scalar):
      self._assert_broadcast(expected=shape, shape1=shape, shape2=shape)

    # [] and [1] act like identity.
    self._assert_broadcast(expected=s1, shape1=s1, shape2=scalar)
    self._assert_broadcast(expected=s2, shape1=s2, shape2=scalar)
    self._assert_broadcast(expected=s1, shape1=s1, shape2=expanded_scalar)
    self._assert_broadcast(expected=s2, shape1=s2, shape2=expanded_scalar)

    self._assert_broadcast(expected=unknown, shape1=s1, shape2=unknown)
    self._assert_broadcast(expected=unknown, shape1=s2, shape2=unknown)

    self._assert_broadcast(
        expected=expanded_scalar, shape1=scalar, shape2=expanded_scalar)

    self._assert_incompatible_broadcast(shape1=s1, shape2=s2)
Example #4
def _TensorArraySplitShape(op):
    # handle, value, lengths, flow_in
    op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
    op.inputs[2].get_shape().merge_with(tensor_shape.vector(None))
    op.inputs[3].get_shape().merge_with(tensor_shape.scalar())
    # flow_out
    return [tensor_shape.scalar()]
Example #5
  def testBroadcast_one_dimension(self):
    s1 = tensor_shape.vector(5)
    s2 = tensor_shape.vector(7)

    unknown = tensor_shape.unknown_shape()
    scalar = tensor_shape.scalar()
    expanded_scalar = tensor_shape.TensorShape([1])

    # Tensors with same shape should have the same broadcast result.
    self.assertEqual(s1, common_shapes.broadcast_shape(s1, s1))
    self.assertEqual(s2, common_shapes.broadcast_shape(s2, s2))
    self.assertEqual(unknown, common_shapes.broadcast_shape(unknown, unknown))
    self.assertEqual(scalar, common_shapes.broadcast_shape(scalar, scalar))
    self.assertEqual(expanded_scalar, common_shapes.broadcast_shape(
        expanded_scalar, expanded_scalar))

    # [] acts like an identity.
    self.assertEqual(s1, common_shapes.broadcast_shape(s1, scalar))
    self.assertEqual(s2, common_shapes.broadcast_shape(s2, scalar))

    self.assertEqual(s1, common_shapes.broadcast_shape(s1, expanded_scalar))
    self.assertEqual(s2, common_shapes.broadcast_shape(s2, expanded_scalar))

    self.assertEqual(unknown, common_shapes.broadcast_shape(s1, unknown))
    self.assertEqual(unknown, common_shapes.broadcast_shape(s2, unknown))

    self.assertEqual(expanded_scalar, common_shapes.broadcast_shape(
        scalar, expanded_scalar))

    # Each incompatible ordering gets its own assertRaises block; otherwise the
    # second call would never execute.
    with self.assertRaises(ValueError):
      common_shapes.broadcast_shape(s1, s2)
    with self.assertRaises(ValueError):
      common_shapes.broadcast_shape(s2, s1)
Example #6
def _RangeShape(op):
    start_value = tensor_util.constant_value(op.inputs[0])
    limit_value = tensor_util.constant_value(op.inputs[1])
    delta_value = tensor_util.constant_value(op.inputs[2])
    if start_value is None or limit_value is None or delta_value is None:
        return [tensor_shape.vector(None)]
    else:
        return [tensor_shape.vector((limit_value - start_value + delta_value - 1) // delta_value)]
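The formula in the known-value branch is just ceiling division of (limit - start) by delta. A quick stand-alone arithmetic check (my own, not part of the snippet) against Python's built-in range:

start, limit, delta = 3, 18, 3
print((limit - start + delta - 1) // delta)  # 5
print(len(range(start, limit, delta)))       # 5, the same count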
Example #7
def _SparseSoftmaxCrossEntropyWithLogitsShape(op):
  """Shape function for SparseSoftmaxCrossEntropyWithLogits op."""
  logits_shape = op.inputs[0].get_shape()
  input_shape = logits_shape.with_rank(2)
  batch_size = input_shape[0]
  # labels_shape
  op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size))
  return [tensor_shape.vector(batch_size.value), input_shape]
Example #8
def _CandidateSamplerShape(op):
  true_classes_shape = op.inputs[0].get_shape().with_rank(2)
  batch_size = true_classes_shape[0]
  num_sampled = op.get_attr("num_sampled")
  num_true = op.get_attr("num_true")
  return [tensor_shape.vector(num_sampled),
          tensor_shape.matrix(batch_size, num_true),
          tensor_shape.vector(num_sampled)]
Example #9
def _DeserializeSparseShape(op):  # pylint: disable=invalid-name
  """Shape function for DeserializeManySparse op."""
  serialized_sparse_shape = op.inputs[0].get_shape().with_rank(2)
  serialized_sparse_shape.merge_with(
      tensor_shape.TensorShape([None, 3]))

  return [tensor_shape.matrix(None, None),
          tensor_shape.vector(None),
          tensor_shape.vector(None)]
Example #10
def _SaveSlicesShape(op):
    """Shape function for SaveSlices op."""
    # Validate input shapes.
    unused_filename = op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
    data_count = len(op.inputs) - 3
    unused_tensor_names_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(data_count))
    unused_shapes_and_slices_shape = op.inputs[2].get_shape().merge_with(tensor_shape.vector(data_count))
    # TODO(mrry): Attempt to parse the shapes_and_slices values and use
    # them to constrain the shape of the remaining inputs.
    return []
Example #11
    def take_many(self, num_elements, allow_small_batch=False, timeout=None, name=None):
        """Takes the given number of completed elements from this barrier.

    This operation concatenates completed-element component tensors along
    the 0th dimension to make a single component tensor.

    If the barrier has no completed elements, this operation will block
    until there are `num_elements` elements to take.

    Args:
      num_elements: The number of elements to take.
      allow_small_batch: If the barrier is closed, don't block if there are
        fewer completed elements than requested, but instead return all
        available completed elements.
        TODO(b/25743580): the semantics of `allow_small_batch` are experimental
        and may be extended to other cases in the future.
        TODO(ebrevdo): If a take_many(allow_small_batch=True) is blocking
        already when the barrier is closed, it will block forever. Fix this
        by using asynchronous operations.
      timeout: This specifies the number of milliseconds to block
        before returning with DEADLINE_EXCEEDED. (This option is not
        supported yet.)
      name: A name for the operation (optional).

    Returns:
      A tuple of (index, key, value_list).
      "index" is a int64 tensor of length num_elements containing the
        index of the insert_many call for which the very first component of
        the given element was inserted into the Barrier, starting with
        the value -2**63.  Note, this value is different from the
        index of the insert_many call for which the element was completed.
      "key" is a string tensor of length num_elements containing the keys.
      "value_list" is a tuple of tensors, each one with size num_elements
        in the 0th dimension for each component in the barrier's values.

    """
        if name is None:
            name = "%s_BarrierTakeMany" % self._name
        ret = gen_data_flow_ops._barrier_take_many(
            self._barrier_ref, num_elements, self._types, allow_small_batch, timeout, name=name
        )

        # NOTE(mrry): Not using a shape function because we need access to
        # the Barrier object.
        op = ret[0].op
        if allow_small_batch:
            batch_dim = None
        else:
            batch_dim = tensor_shape.Dimension(tensor_util.constant_value(op.inputs[1]))
        op.outputs[0].set_shape(tensor_shape.vector(batch_dim))  # indices
        op.outputs[1].set_shape(tensor_shape.vector(batch_dim))  # keys
        for output, shape in zip(op.outputs[2:], self._shapes):  # value_list
            output.set_shape(tensor_shape.TensorShape([batch_dim]).concatenate(shape))

        return ret
Example #12
def _CTCGreedyDecoderShape(op):
  """Shape function for the CTCGreedyDecoder op."""
  inputs_shape = op.inputs[0].get_shape().with_rank(3)
  sequence_length_shape = op.inputs[1].get_shape().with_rank(1)
  # merge batch_size
  sequence_length_shape[0].merge_with(inputs_shape[1])
  inputs_shape[1].merge_with(sequence_length_shape[0])
  batch_size = inputs_shape[1]
  # decoded_indices, decoded_values, decoded_shape, log_probability
  return [tensor_shape.matrix(None, 2),
          tensor_shape.vector(None),
          tensor_shape.vector(2),
          tensor_shape.matrix(batch_size, 1)]
Example #13
def _ParseExampleShape(op):
    """Shape function for the ParseExample op."""
    input_shape = op.inputs[0].get_shape().with_rank(1)
    op.inputs[1].get_shape().with_rank(1)  # names
    num_sparse = op.get_attr("Nsparse")
    num_dense = op.get_attr("Ndense")
    dense_shapes = op.get_attr("dense_shapes")
    sparse_index_shapes = [tensor_shape.matrix(None, 2) for _ in range(num_sparse)]
    sparse_value_shapes = [tensor_shape.vector(None) for _ in range(num_sparse)]
    sparse_shape_shapes = [tensor_shape.vector(2) for _ in range(num_sparse)]
    assert num_dense == len(dense_shapes)
    dense_shapes = [input_shape.concatenate(dense_shape) for dense_shape in dense_shapes]
    return sparse_index_shapes + sparse_value_shapes + sparse_shape_shapes + dense_shapes
Example #14
  def testBroadcast_unknown_dims(self):
    unknown = tensor_shape.unknown_shape()
    shape_0 = tensor_shape.scalar()
    shape_1 = tensor_shape.vector(1)
    # pylint: disable=invalid-name
    shape_U = tensor_shape.vector(None)
    shape_1xU = tensor_shape.matrix(1, None)
    shape_Ux1 = tensor_shape.matrix(None, 1)
    shape_4xU = tensor_shape.matrix(4, None)
    shape_Ux4 = tensor_shape.matrix(None, 4)
    # pylint: enable=invalid-name

    # Tensors with same shape should have the same broadcast result.
    for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
      self._assert_broadcast_with_unknown_dims(
          expected=shape, shape1=shape, shape2=shape)

    # [] and [1] act like identity.
    for identity in (shape_0, shape_1):
      for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
        self._assert_broadcast_with_unknown_dims(
            expected=shape, shape1=identity, shape2=shape)

    # Unknown in, unknown out.
    for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
      self._assert_broadcast_with_unknown_dims(
          expected=unknown, shape1=shape, shape2=unknown)

    self._assert_broadcast_with_unknown_dims(
        expected=shape_1xU, shape1=shape_U, shape2=shape_1xU)
    shape_UxU = tensor_shape.matrix(None, None)  # pylint: disable=invalid-name
    self._assert_broadcast_with_unknown_dims(
        expected=shape_UxU, shape1=shape_U, shape2=shape_Ux1)
    self._assert_broadcast_with_unknown_dims(
        expected=shape_4xU, shape1=shape_U, shape2=shape_4xU)
    self._assert_broadcast_with_unknown_dims(
        expected=shape_Ux4, shape1=shape_U, shape2=shape_Ux4)
    self._assert_broadcast_with_unknown_dims(
        expected=shape_UxU, shape1=shape_1xU, shape2=shape_Ux1)
    self._assert_broadcast_with_unknown_dims(
        expected=shape_4xU, shape1=shape_1xU, shape2=shape_4xU)
    self._assert_broadcast_with_unknown_dims(
        expected=shape_Ux4, shape1=shape_1xU, shape2=shape_Ux4)
    self._assert_broadcast_with_unknown_dims(
        expected=shape_4xU, shape1=shape_Ux1, shape2=shape_4xU)
    self._assert_broadcast_with_unknown_dims(
        expected=shape_Ux4, shape1=shape_Ux1, shape2=shape_Ux4)
    shape_4x4 = tensor_shape.matrix(4, 4)
    self._assert_broadcast_with_unknown_dims(
        expected=shape_4x4, shape1=shape_4xU, shape2=shape_Ux4)
Example #15
  def sample_n(self, n, seed=None, name="sample_n"):
    """Sample `n` observations from the Exponential Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by the hyperparameters.
    """
    broadcast_shape = self._lam.get_shape()
    with ops.name_scope(name, "ExponentialSample", [self.lam, n]):
      n = ops.convert_to_tensor(n, name="n")
      shape = array_ops.concat(0, ([n], array_ops.shape(self._lam)))
      # Sample uniformly at random from the open interval (0, 1).
      sampled = random_ops.random_uniform(
          shape, minval=np.nextafter(
              self.dtype.as_numpy_dtype(0.), self.dtype.as_numpy_dtype(1.)),
          maxval=constant_op.constant(1.0, dtype=self.dtype),
          seed=seed,
          dtype=self.dtype)

      n_val = tensor_util.constant_value(n)
      final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
      sampled.set_shape(final_shape)

      return -math_ops.log(sampled) / self._lam
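Two separate tricks are at work above: the inverse-CDF transform (-log(U) / lam turns Uniform(0, 1) draws into Exponential(lam) draws) and the static shape hint built with tensor_shape.vector(n_val).concatenate(broadcast_shape). Below is a small stand-alone sketch of both; the NumPy half only checks the math, and the tensor_shape half assumes the legacy TF 1.x helpers (vector, scalar, matrix) are still importable, which is not true of recent TF releases.

import numpy as np
from tensorflow.python.framework import tensor_shape

# Inverse-CDF check: if U ~ Uniform(0, 1), then -log(U) / lam ~ Exponential(lam).
lam = 2.0
u = np.random.uniform(low=np.nextafter(0., 1.), high=1.0, size=100000)
print((-np.log(u) / lam).mean())  # should be close to 1 / lam == 0.5

# Shape-hint pattern: prefix the known sample count onto the parameter shape.
broadcast_shape = tensor_shape.TensorShape([3, 4])          # hypothetical lam shape
print(tensor_shape.vector(7).concatenate(broadcast_shape))  # (7, 3, 4)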
Example #16
  def sample(self, n, seed=None, name="sample"):
    """Sample `n` observations from the Uniform Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: a `Tensor` of shape `(n,) + self.batch_shape + self.event_shape`
          with values of type `self.dtype`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self.a, self.b, n], name):
        n = ops.convert_to_tensor(n, name="n")
        n_val = tensor_util.constant_value(n)

        shape = array_ops.concat(0, [array_ops.pack([n]), self.batch_shape()])
        samples = random_ops.random_uniform(shape=shape,
                                            dtype=self.dtype,
                                            seed=seed)

        # Provide some hints to shape inference
        inferred_shape = tensor_shape.vector(n_val).concatenate(
            self.get_batch_shape())
        samples.set_shape(inferred_shape)

        return (array_ops.expand_dims(self.a, 0) + array_ops.expand_dims(
            self.range(), 0) * samples)
Example #17
 def testHelpers(self):
   tensor_shape.TensorShape([]).assert_is_compatible_with(
       tensor_shape.scalar())
   tensor_shape.TensorShape([37]).assert_is_compatible_with(
       tensor_shape.vector(37))
   tensor_shape.TensorShape(
       [94, 43]).assert_is_compatible_with(tensor_shape.matrix(94, 43))
Example #18
def _TransposeShape(op):
  """Shape function for the Transpose op.

  This op takes two inputs:

  * input: a rank-N tensor of arbitrary shape.
  * shuffle: a length-N vector.

  Its output is the rank-N tensor computed by permuting the dimensions
  of input according to shuffle.

  Args:
    op: A Transpose op.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the shapes of input and shuffle are incompatible.
    IndexError: If shuffle contains an index that is >= the rank of input.
  """
  input_shape = op.inputs[0].get_shape()
  transpose_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(
      input_shape.ndims))
  transpose_vec = tensor_util.ConstantValue(op.inputs[1])
  if transpose_vec is None:
    return [tensor_shape.unknown_shape(ndims=transpose_shape[0].value)]
  else:
    return [tensor_shape.TensorShape([input_shape[i]
                                      for i in transpose_vec.tolist()])]
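As the docstring explains, the known-permutation branch simply re-orders the input dimensions by the shuffle vector. A tiny stand-alone illustration of that logic (my own; it assumes the legacy tensor_shape module is importable and uses made-up values):

from tensorflow.python.framework import tensor_shape

input_shape = tensor_shape.TensorShape([2, 3, 5])  # hypothetical rank-3 input
transpose_vec = [2, 0, 1]                          # hypothetical shuffle values
output_shape = tensor_shape.TensorShape([input_shape[i] for i in transpose_vec])
print(output_shape)  # (5, 2, 3)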
Example #19
  def sample_n(self, n, seed=None, name="sample_n"):
    """Sample `n` observations from the Beta Distributions.

    Args:
      n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
        observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self.a, self.b, n]):
        a = array_ops.ones_like(self._a_b_sum, dtype=self.dtype) * self.a
        b = array_ops.ones_like(self._a_b_sum, dtype=self.dtype) * self.b
        n = ops.convert_to_tensor(n, name="n")

        gamma1_sample = random_ops.random_gamma(
            [n,], a, dtype=self.dtype, seed=seed)
        gamma2_sample = random_ops.random_gamma(
            [n,], b, dtype=self.dtype, seed=seed)

        beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)

        n_val = tensor_util.constant_value(n)
        final_shape = tensor_shape.vector(n_val).concatenate(
            self._a_b_sum.get_shape())

        beta_sample.set_shape(final_shape)
        return beta_sample
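The construction above relies on the standard identity that if G1 ~ Gamma(a, 1) and G2 ~ Gamma(b, 1) are independent, then G1 / (G1 + G2) ~ Beta(a, b). A quick NumPy spot-check of that identity (my own, with arbitrary parameters, not part of the snippet):

import numpy as np

a, b, n = 2.0, 5.0, 100000
g1 = np.random.gamma(a, size=n)
g2 = np.random.gamma(b, size=n)
print((g1 / (g1 + g2)).mean())  # should be close to a / (a + b) ~= 0.286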
Example #20
  def sample(self, n, seed=None, name="sample"):
    """Sample `n` observations from the Categorical distribution.

    Args:
      n: 0-D.  Number of independent samples to draw for each distribution.
      seed: Random seed (optional).
      name: A name for this operation (optional).

    Returns:
      An `int64` `Tensor` with shape `[n, batch_shape, event_shape]`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self.logits, n], name):
        n = ops.convert_to_tensor(n, name="n")
        logits_2d = array_ops.reshape(
            self.logits, array_ops.pack([-1, self.num_classes]))
        samples = random_ops.multinomial(logits_2d, n, seed=seed)
        samples = math_ops.cast(samples, self._dtype)
        ret = array_ops.reshape(
            array_ops.transpose(samples),
            array_ops.concat(
                0, [array_ops.expand_dims(n, 0), self.batch_shape()]))
        ret.set_shape(tensor_shape.vector(tensor_util.constant_value(n))
                      .concatenate(self.get_batch_shape()))
        return ret
Example #21
def _SerializeSparseShape(op):  # pylint: disable=invalid-name
  """Shape function for SerializeSparse op."""
  op.inputs[0].get_shape().with_rank(2)
  op.inputs[1].get_shape().with_rank(1)
  op.inputs[2].get_shape().with_rank(1)

  return [tensor_shape.vector(3)]
Example #22
  def sample(self, n, seed=None, name="sample"):
    """Sample `n` observations from the Normal Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._mu, self._sigma, n], name):
        broadcast_shape = (self._mu + self._sigma).get_shape()
        n = ops.convert_to_tensor(n)
        shape = array_ops.concat(
            0, [array_ops.pack([n]), array_ops.shape(self.mean())])
        sampled = random_ops.random_normal(
            shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)

        # Provide some hints to shape inference
        n_val = tensor_util.constant_value(n)
        final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
        sampled.set_shape(final_shape)

        return sampled * self._sigma + self._mu
Example #23
  def sample(self, n, seed=None, name="sample"):
    """Generate `n` samples.

    Args:
      n: scalar.  Number of samples to draw from each distribution.
      seed: Python integer seed for RNG.
      name: name to give to the op.

    Returns:
      samples: a `Tensor` of shape `(n,) + self.batch_shape` with values of type
          `self.dtype`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self.p, n], name):
        n = ops.convert_to_tensor(n, name="n")
        p_2d = array_ops.reshape(self.p, array_ops.pack([-1, 1]))
        q_2d = 1. - p_2d
        probs = array_ops.concat(1, [q_2d, p_2d])
        samples = random_ops.multinomial(math_ops.log(probs), n, seed=seed)
        ret = array_ops.reshape(
            array_ops.transpose(samples),
            array_ops.concat(0,
                             [array_ops.expand_dims(n, 0), self.batch_shape()]))
        ret.set_shape(tensor_shape.vector(tensor_util.constant_value(n))
                      .concatenate(self.get_batch_shape()))
        return math_ops.cast(ret, self.dtype)
Example #24
def _TensorArrayReadShape(op):
    # handle, index, flow_in
    op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
    op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
    op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
    # value
    return [tensor_shape.unknown_shape()]
Example #25
  def sample(self, n, seed=None, name=None):
    """Sample `n` observations from the Exponential Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by the hyperparameters.
    """
    broadcast_shape = self._lam.get_shape()
    with ops.op_scope([self.lam, n], name, "ExponentialSample"):
      n = ops.convert_to_tensor(n, name="n")
      shape = array_ops.concat(
          0, [array_ops.pack([n]), array_ops.shape(self._lam)])
      sampled = random_ops.random_uniform(
          shape, maxval=math_ops.cast(1.0, dtype=self.dtype),
          dtype=self.dtype)

      n_val = tensor_util.constant_value(n)
      final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
      sampled.set_shape(final_shape)

      return -math_ops.log(sampled) / self._lam
Example #26
def _TensorArrayWriteShape(op):
    # handle, index, value, flow_in
    op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
    op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
    op.inputs[3].get_shape().merge_with(tensor_shape.scalar())
    # flow_out
    return [tensor_shape.scalar()]
Example #27
def _SparseConcatShape(op):
  """Shape function for SparseConcat op."""
  num_inputs = int(op.get_attr("N"))

  # TF flattens and concatenates all list inputs, so reconstruct the lists here.
  ind_shapes = [ind.get_shape().with_rank(2) for ind in op.inputs[0:num_inputs]]
  val_shapes = [val.get_shape().with_rank(1)
                for val in op.inputs[num_inputs:2 * num_inputs]]
  shape_shapes = [shape.get_shape().with_rank(1)
                  for shape in op.inputs[2 * num_inputs:]]

  output_ind_rows = tensor_shape.Dimension(0)
  output_ind_cols = tensor_shape.Dimension(None)
  output_val_elems = tensor_shape.Dimension(0)
  output_shape_shape = tensor_shape.TensorShape(None)

  for i in xrange(num_inputs):
    num_elems_i = ind_shapes[i][0].merge_with(val_shapes[i][0])
    output_ind_rows += num_elems_i
    output_ind_cols = output_ind_cols.merge_with(ind_shapes[i][1])
    output_val_elems += num_elems_i
    output_shape_shape = output_shape_shape.merge_with(shape_shapes[i])

  output_ind_shape = tensor_shape.matrix(output_ind_rows, output_ind_cols)
  output_val_shape = tensor_shape.vector(output_val_elems)

  return [output_ind_shape, output_val_shape, output_shape_shape]
Example #28
def conv2d_transpose(value, filter, output_shape, strides, padding="SAME",
                     name=None):
  """The transpose of `conv2d`.

  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but it is
  actually the transpose (gradient) of `conv2d` rather than an actual
  deconvolution.

  Args:
    value: A 4-D `Tensor` of type `float` and shape
      `[batch, height, width, in_channels]`.
    filter: A 4-D `Tensor` with the same type as `value` and shape
      `[height, width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: A list of ints. The stride of the sliding window for each
      dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.op_scope([value, filter, output_shape], name,
                    "conv2d_transpose") as name:
    value = ops.convert_to_tensor(value, name="value")
    filter = ops.convert_to_tensor(filter, name="filter")
    if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):
      raise ValueError(
          "input channels does not match filter's input channels, "
          "{} != {}".format(value.get_shape()[3], filter.get_shape()[3]))

    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
      raise ValueError("output_shape must have shape (4,), got {}"
                       .format(output_shape_.get_shape()))

    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be [4] if we have reached this point.
      if not filter.get_shape()[2].is_compatible_with(output_shape[3]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[3], filter.get_shape()[2]))

    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))

    return gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
                                            filter=filter,
                                            out_backprop=value,
                                            strides=strides,
                                            padding=padding,
                                            name=name)
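A hedged usage sketch of the shape contract documented above, written against the current tf.nn.conv2d_transpose rather than the legacy op_scope-era code in the snippet; all tensor values and sizes are placeholders of my choosing. A [1, 8, 8, 16] input with a [3, 3, 32, 16] filter, stride 2 and SAME padding yields a [1, 16, 16, 32] output.

import tensorflow as tf

value = tf.random.normal([1, 8, 8, 16])  # [batch, height, width, in_channels]
filt = tf.random.normal([3, 3, 32, 16])  # [height, width, output_channels, in_channels]
out = tf.nn.conv2d_transpose(value, filt, output_shape=[1, 16, 16, 32],
                             strides=[1, 2, 2, 1], padding="SAME")
print(out.shape)  # (1, 16, 16, 32)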
Example #29
  def sample_n(self, n, seed=None, name="sample_n"):
    """Sample `n` observations from the Laplace Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the parameters.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self._loc, self._scale, n]):
        n = ops.convert_to_tensor(n)
        n_val = tensor_util.constant_value(n)
        shape = array_ops.concat(0, ([n], self.batch_shape()))
        # Sample uniformly at random from the open interval (-1, 1).
        uniform_samples = random_ops.random_uniform(
            shape=shape,
            minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                self.dtype.as_numpy_dtype(0.)),
            maxval=self.dtype.as_numpy_dtype(1.),
            dtype=self.dtype,
            seed=seed)

        # Provide some hints to shape inference
        inferred_shape = tensor_shape.vector(n_val).concatenate(
            self.get_batch_shape())
        uniform_samples.set_shape(inferred_shape)

        return (self._loc - self._scale * math_ops.sign(uniform_samples) *
                math_ops.log(1. - math_ops.abs(uniform_samples)))
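The return expression above is the standard inverse-CDF construction for the Laplace distribution: with U ~ Uniform(-1, 1), loc - scale * sign(U) * log(1 - |U|) is Laplace(loc, scale). A short NumPy sanity check (my own, with arbitrary parameters):

import numpy as np

loc, scale = 0.0, 1.5
u = np.random.uniform(-1.0, 1.0, size=200000)
samples = loc - scale * np.sign(u) * np.log1p(-np.abs(u))  # log1p(-x) == log(1 - x)
print(samples.var())  # Laplace variance is 2 * scale**2 == 4.5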
Example #30
 def _from_tensor_list(self, flat_value):
   if (len(flat_value) != 1 or flat_value[0].dtype != dtypes.variant or
       not flat_value[0].shape.is_compatible_with(tensor_shape.vector(3))):
     raise ValueError("SparseTensorStructure corresponds to a single "
                      "tf.variant vector of length 3.")
   return sparse_ops.deserialize_sparse(
       flat_value[0], dtype=self._dtype, rank=self._dense_shape.ndims)
Example #31
 def __init__(self, input_dataset, batch_size, row_shape):
   """See `Dataset.dense_to_sparse_batch()` for more details."""
   super(_DenseToSparseBatchDataset, self).__init__(input_dataset)
   if not isinstance(input_dataset.output_types, dtypes.DType):
     raise TypeError("DenseToSparseDataset requires an input whose elements "
                     "have a single component, whereas the input has %r." %
                     input_dataset.output_types)
   self._input_dataset = input_dataset
   self._batch_size = batch_size
   self._row_shape = row_shape
   self._structure = structure.SparseTensorStructure(
       input_dataset.output_types,
       tensor_shape.vector(None).concatenate(self._row_shape))
Example #32
def _BatchNormGradShape(op):
    """Shape function for BatchNormWithGlobalNormalizationGrad op."""
    input_shape = op.inputs[0].get_shape().with_rank(4)
    mean_shape = op.inputs[1].get_shape().with_rank(1)
    var_shape = op.inputs[2].get_shape().with_rank(1)
    beta_shape = op.inputs[3].get_shape().with_rank(1)
    out_backprop_shape = op.inputs[4].get_shape().with_rank(4)
    input_shape = input_shape.merge_with(out_backprop_shape)
    vector_dim = input_shape[3]
    vector_dim = vector_dim.merge_with(mean_shape[0])
    vector_dim = vector_dim.merge_with(var_shape[0])
    vector_dim = vector_dim.merge_with(beta_shape[0])
    return [input_shape] + ([tensor_shape.vector(vector_dim)] * 4)
Example #33
  def testStr(self):
    self.assertEqual("<unknown>", str(tensor_shape.unknown_shape()))
    self.assertEqual("(?,)", str(tensor_shape.unknown_shape(ndims=1)))
    self.assertEqual("(?, ?)", str(tensor_shape.unknown_shape(ndims=2)))
    self.assertEqual("(?, ?, ?)", str(tensor_shape.unknown_shape(ndims=3)))

    self.assertEqual("()", str(tensor_shape.scalar()))
    self.assertEqual("(7,)", str(tensor_shape.vector(7)))
    self.assertEqual("(3, 8)", str(tensor_shape.matrix(3, 8)))
    self.assertEqual("(4, 5, 2)", str(tensor_shape.TensorShape([4, 5, 2])))

    self.assertEqual("(32, ?, 1, 9)",
                     str(tensor_shape.TensorShape([32, None, 1, 9])))
Example #34
def _SparseApplyAdadeltaShape(op):
    """Shape function for the SparseApplyAdadelta op."""
    var_shape = op.inputs[0].get_shape()
    accum_grad_shape = op.inputs[1].get_shape().merge_with(var_shape)
    accum_update_shape = op.inputs[2].get_shape().merge_with(accum_grad_shape)
    _AssertInputIsScalar(op, 3)  # lr
    _AssertInputIsScalar(op, 4)  # decay_rate
    _AssertInputIsScalar(op, 5)  # epsilon
    grad_shape = op.inputs[6].get_shape().merge_with(
        tensor_shape.TensorShape([None]).concatenate(accum_update_shape[1:]))
    unused_indices_shape = op.inputs[7].get_shape().merge_with(
        tensor_shape.vector(grad_shape[0]))
    return [accum_update_shape]
Example #35
  def testBroadcast_many_dimensions(self):
    unknown = tensor_shape.unknown_shape()
    shape_0 = tensor_shape.scalar()
    shape_1 = tensor_shape.vector(1)
    shape_4 = tensor_shape.vector(4)
    shape_1x4 = tensor_shape.matrix(1, 4)
    shape_4x1 = tensor_shape.matrix(4, 1)
    shape_3x4 = tensor_shape.matrix(3, 4)
    shape_4x3 = tensor_shape.matrix(4, 3)

    # Tensors with same shape should have the same broadcast result.
    for shape in (
        shape_0, shape_1, shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
      self._assert_broadcast(expected=shape, shape1=shape, shape2=shape)

    # [] and [1] act like identity.
    for identity in (shape_0, shape_1):
      for shape in (shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
        self._assert_broadcast(expected=shape, shape1=identity, shape2=shape)

    # Unknown in, unknown out.
    for shape in (shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
      self._assert_broadcast(expected=unknown, shape1=shape, shape2=unknown)

    self._assert_broadcast(expected=shape_1x4, shape1=shape_4, shape2=shape_1x4)
    shape_4x4 = tensor_shape.matrix(4, 4)
    self._assert_broadcast(expected=shape_4x4, shape1=shape_4, shape2=shape_4x1)
    self._assert_broadcast(expected=shape_3x4, shape1=shape_4, shape2=shape_3x4)
    self._assert_incompatible_broadcast(shape1=shape_4, shape2=shape_4x3)
    self._assert_broadcast(
        expected=shape_4x4, shape1=shape_1x4, shape2=shape_4x1)
    self._assert_broadcast(
        expected=shape_3x4, shape1=shape_1x4, shape2=shape_3x4)
    self._assert_incompatible_broadcast(shape1=shape_1x4, shape2=shape_4x3)
    self._assert_incompatible_broadcast(shape1=shape_4x1, shape2=shape_3x4)
    self._assert_broadcast(
        expected=shape_4x3, shape1=shape_4x1, shape2=shape_4x3)
    self._assert_incompatible_broadcast(shape1=shape_3x4, shape2=shape_4x3)
Example #36
def _SparseApplyFtrlShape(op):
    """Shape function for the SparseApplyFtrl op."""
    var_shape = op.inputs[0].get_shape()
    accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
    linear_shape = op.inputs[2].get_shape().merge_with(accum_shape)
    grad_shape = op.inputs[3].get_shape().merge_with(
        tensor_shape.TensorShape([None]).concatenate(linear_shape[1:]))
    unused_indices_shape = op.inputs[4].get_shape().merge_with(
        tensor_shape.vector(grad_shape[0]))
    _AssertInputIsScalar(op, 5)  # lr
    _AssertInputIsScalar(op, 6)  # l1
    _AssertInputIsScalar(op, 7)  # l2
    _AssertInputIsScalar(op, 8)  # lr_power
    return [linear_shape]
Example #37
def conv2d_transpose(
        value,
        filter,  # pylint: disable=redefined-builtin
        output_shape,
        strides,
        padding="SAME",
        data_format="NHWC",
        name=None):
    with ops.name_scope(name, "conv2d_transpose",
                        [value, filter, output_shape]) as name:
        if data_format not in ("NCHW", "NHWC"):
            raise ValueError("data_format has to be either NCHW or NHWC.")
        value = ops.convert_to_tensor(value, name="value")
        filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
        axis = 3 if data_format == "NHWC" else 1
        if not value.get_shape()[axis].is_compatible_with(
                filter.get_shape()[3]):
            raise ValueError(
                "input channels does not match filter's input channels, "
                "{} != {}".format(value.get_shape()[axis],
                                  filter.get_shape()[3]))

        output_shape_ = ops.convert_to_tensor(output_shape,
                                              name="output_shape")
        if not output_shape_.get_shape().is_compatible_with(
                tensor_shape.vector(4)):
            raise ValueError(
                "output_shape must have shape (4,), got {}".format(
                    output_shape_.get_shape()))

        if isinstance(output_shape, (list, np.ndarray)):
            # output_shape's shape should be [4] if we have reached this point.
            if not filter.get_shape()[2].is_compatible_with(
                    output_shape[axis]):
                raise ValueError(
                    "output_shape does not match filter's output channels, "
                    "{} != {}".format(output_shape[axis],
                                      filter.get_shape()[2]))

        if padding != "VALID" and padding != "SAME":
            raise ValueError("padding must be either VALID or SAME:"
                             " {}".format(padding))

        return gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
                                                filter=filter,
                                                out_backprop=value,
                                                strides=strides,
                                                padding=padding,
                                                data_format=data_format,
                                                name=name)
Example #38
def _CTCLossShape(op):
    """Shape function for the CTCLoss op."""
    # inputs, label_indices, label_values, sequence_length
    inputs_shape = op.inputs[0].get_shape().with_rank(3)
    sequence_length_shape = op.inputs[3].get_shape().with_rank(1)
    # merge batch_size
    sequence_length_shape[0].merge_with(inputs_shape[1])
    inputs_shape[1].merge_with(sequence_length_shape[0])
    batch_size = inputs_shape[1]
    labels_index_shape = op.inputs[1].get_shape().with_rank(2)
    labels_value_shape = op.inputs[2].get_shape().with_rank(1)
    labels_value_shape[0].merge_with(labels_index_shape[0])
    # loss, gradient
    return [tensor_shape.vector(batch_size), inputs_shape]
Example #39
def _SparseApplyRMSPropShape(op):
    """Shape function for the SparseApplyRMSProp op."""
    var_shape = op.inputs[0].get_shape()
    ms_shape = op.inputs[1].get_shape().merge_with(var_shape)
    mom_shape = op.inputs[2].get_shape().merge_with(ms_shape)
    _AssertInputIsScalar(op, 3)  # lr
    _AssertInputIsScalar(op, 4)  # rho
    _AssertInputIsScalar(op, 5)  # momentum
    _AssertInputIsScalar(op, 6)  # epsilon
    grad_shape = op.inputs[7].get_shape().merge_with(
        tensor_shape.TensorShape([None]).concatenate(mom_shape[1:]))
    unused_indices_shape = op.inputs[8].get_shape().merge_with(
        tensor_shape.vector(grad_shape[0]))
    return [mom_shape]
Example #40
def _random_cropShape(op):
    """Shape function for the random_crop op."""
    input_shape = op.inputs[0].get_shape().with_rank(3)
    unused_size_shape = op.inputs[1].get_shape().merge_with(
        tensor_shape.vector(2))
    size = tensor_util.constant_value(op.inputs[1])
    if size is not None:
        height = size[0]
        width = size[1]
    else:
        height = None
        width = None
    channels = input_shape[2]
    return [tensor_shape.TensorShape([height, width, channels])]
Example #41
def _SparseApplyAdagradDAShape(op):
    """Shape function for the SparseApplyAdagradDA op."""
    var_shape = op.inputs[0].get_shape()
    g_accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
    gg_accum_shape = op.inputs[2].get_shape().merge_with(g_accum_shape)
    grad_shape = op.inputs[3].get_shape().merge_with(
        tensor_shape.TensorShape([None]).concatenate(gg_accum_shape[1:]))
    unused_indices_shape = op.inputs[4].get_shape().merge_with(
        tensor_shape.vector(grad_shape[0]))
    _AssertInputIsScalar(op, 5)  # lr
    _AssertInputIsScalar(op, 6)  # l1
    _AssertInputIsScalar(op, 7)  # l2
    _AssertInputIsScalar(op, 8)  # global_step
    return [gg_accum_shape]
Example #42
    def testBroadcast_one_dimension(self):
        s1 = tensor_shape.vector(5)
        s2 = tensor_shape.vector(7)

        unknown = tensor_shape.unknown_shape()
        scalar = tensor_shape.scalar()
        expanded_scalar = tensor_shape.TensorShape([1])

        # Tensors with same shape should have the same broadcast result.
        self.assertEqual(s1, common_shapes.broadcast_shape(s1, s1))
        self.assertEqual(s2, common_shapes.broadcast_shape(s2, s2))
        self.assertEqual(unknown,
                         common_shapes.broadcast_shape(unknown, unknown))
        self.assertEqual(scalar, common_shapes.broadcast_shape(scalar, scalar))
        self.assertEqual(
            expanded_scalar,
            common_shapes.broadcast_shape(expanded_scalar, expanded_scalar))

        # [] acts like an identity.
        self.assertEqual(s1, common_shapes.broadcast_shape(s1, scalar))
        self.assertEqual(s2, common_shapes.broadcast_shape(s2, scalar))

        self.assertEqual(s1,
                         common_shapes.broadcast_shape(s1, expanded_scalar))
        self.assertEqual(s2,
                         common_shapes.broadcast_shape(s2, expanded_scalar))

        self.assertEqual(unknown, common_shapes.broadcast_shape(s1, unknown))
        self.assertEqual(unknown, common_shapes.broadcast_shape(s2, unknown))

        self.assertEqual(
            expanded_scalar,
            common_shapes.broadcast_shape(scalar, expanded_scalar))

        # Check each incompatible ordering in its own assertRaises block so
        # that both calls actually execute.
        with self.assertRaises(ValueError):
            common_shapes.broadcast_shape(s1, s2)
        with self.assertRaises(ValueError):
            common_shapes.broadcast_shape(s2, s1)
Example #43
    def sample_n(self, n, seed=None, name="sample_n"):
        """Sample `n` observations from the Multivariate Normal Distributions.

    Args:
      n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
        observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
        with ops.name_scope(self.name):
            with ops.name_scope(name, values=[self._mu, n] + self._cov.inputs):
                # Recall _check_mu ensures mu and self._cov have same batch shape.
                broadcast_shape = self.mu.get_shape()
                n = ops.convert_to_tensor(n, name="n")

                shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
                white_samples = random_ops.random_normal(shape=shape,
                                                         mean=0,
                                                         stddev=1,
                                                         dtype=self.dtype,
                                                         seed=seed)

                correlated_samples = self._cov.sqrt_matmul(white_samples)

                # Move the last dimension to the front
                perm = array_ops.concat(0, (array_ops.pack([
                    array_ops.rank(correlated_samples) - 1
                ]), math_ops.range(0,
                                   array_ops.rank(correlated_samples) - 1)))

                # TODO(ebrevdo): Once we get a proper tensor contraction op,
                # perform the inner product using that instead of batch_matmul
                # and this slow transpose can go away!
                correlated_samples = array_ops.transpose(
                    correlated_samples, perm)

                samples = correlated_samples + self.mu

                # Provide some hints to shape inference
                n_val = tensor_util.constant_value(n)
                final_shape = tensor_shape.vector(n_val).concatenate(
                    broadcast_shape)
                samples.set_shape(final_shape)

                return samples
Example #44
def _ParseSingleSequenceExampleShape(op):
  """Shape function for the ParseExample op."""
  op.inputs[0].get_shape().with_rank(0)  # input
  op.inputs[-1].get_shape().with_rank(0)  # debug_name
  # feature_list_dense_missing_assumed_empty
  op.inputs[1].get_shape().with_rank(1)
  num_context_sparse = op.get_attr("Ncontext_sparse")
  num_context_dense = op.get_attr("Ncontext_dense")
  num_feature_list_dense = op.get_attr("Nfeature_list_dense")
  context_dense_shapes = op.get_attr("context_dense_shapes")
  num_feature_list_sparse = op.get_attr("Nfeature_list_sparse")
  feature_list_dense_shapes = op.get_attr("feature_list_dense_shapes")
  context_sparse_index_shapes = [
      tensor_shape.matrix(None, 1) for _ in range(num_context_sparse)]
  context_sparse_value_shapes = [
      tensor_shape.vector(None) for _ in range(num_context_sparse)]
  context_sparse_shape_shapes = [
      tensor_shape.vector(1) for _ in range(num_context_sparse)]
  context_dense_shapes = [
      tensor_shape.TensorShape(dense_shape)
      for dense_shape in context_dense_shapes]
  feature_list_sparse_index_shapes = [
      tensor_shape.matrix(None, 2) for _ in range(num_feature_list_sparse)]
  feature_list_sparse_value_shapes = [
      tensor_shape.vector(None) for _ in range(num_feature_list_sparse)]
  feature_list_sparse_shape_shapes = [
      tensor_shape.vector(2) for _ in range(num_feature_list_sparse)]
  feature_list_dense_shapes = [
      tensor_shape.vector(None).concatenate(dense_shape)
      for dense_shape in feature_list_dense_shapes]
  assert num_context_dense == len(context_dense_shapes)
  assert num_feature_list_dense == len(feature_list_dense_shapes)
  return (context_sparse_index_shapes + context_sparse_value_shapes +
          context_sparse_shape_shapes + context_dense_shapes +
          feature_list_sparse_index_shapes + feature_list_sparse_value_shapes +
          feature_list_sparse_shape_shapes + feature_list_dense_shapes)
Example #45
 def testShapes(self):
   fdef = self._build_function_def()
   g, _ = function_def_to_graph.function_def_to_graph_def(
       fdef,
       input_shapes=[tensor_shape.scalar(),
                     tensor_shape.vector(5), None])
   self.assertEqual("shape" in g.node[0].attr, True)
   self.assertSequenceEqual(
       tensor_shape.TensorShape(g.node[0].attr["shape"].shape).as_list(), [])
   self.assertEqual(g.node[0].attr["shape"].shape.unknown_rank, False)
   self.assertEqual("shape" in g.node[1].attr, True)
   self.assertSequenceEqual(
       tensor_shape.TensorShape(g.node[1].attr["shape"].shape).as_list(), [5])
   self.assertEqual(g.node[1].attr["shape"].shape.unknown_rank, False)
   self.assertFalse("shape" in g.node[2].attr)
Example #46
  def sample(self, n, seed=None, name=None):
    """Sample `n` observations from the Multivariate Normal Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
    with ops.op_scope(
        [self._mu, self._sigma_chol, n], name, "MultivariateNormalSample"):
      # TODO(ebrevdo): Is there a better way to get broadcast_shape?
      broadcast_shape = self.mu.get_shape()
      n = ops.convert_to_tensor(n)
      sigma_shape_left = array_ops.slice(
          array_ops.shape(self._sigma_chol),
          [0], array_ops.pack([array_ops.rank(self._sigma_chol) - 2]))

      k_n = array_ops.pack([self._k, n])
      shape = array_ops.concat(0, [sigma_shape_left, k_n])
      white_samples = random_ops.random_normal(
          shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)

      correlated_samples = math_ops.batch_matmul(
          self._sigma_chol, white_samples)

      # Move the last dimension to the front
      perm = array_ops.concat(
          0,
          (array_ops.pack([array_ops.rank(correlated_samples) - 1]),
           math_ops.range(0, array_ops.rank(correlated_samples) - 1)))

      # TODO(ebrevdo): Once we get a proper tensor contraction op,
      # perform the inner product using that instead of batch_matmul
      # and this slow transpose can go away!
      correlated_samples = array_ops.transpose(correlated_samples, perm)

      samples = correlated_samples + self.mu

      # Provide some hints to shape inference
      n_val = tensor_util.constant_value(n)
      final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
      samples.set_shape(final_shape)

      return samples
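The permutation built above, [rank - 1, 0, 1, ..., rank - 2], just moves the trailing sample dimension to the front. A NumPy equivalence check (my own, on a dummy array standing in for correlated_samples):

import numpy as np

x = np.zeros((4, 5, 3))
perm = [x.ndim - 1] + list(range(x.ndim - 1))
print(np.transpose(x, perm).shape)  # (3, 4, 5)
print(np.moveaxis(x, -1, 0).shape)  # (3, 4, 5), the same thing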
Example #47
    def make_merge_pipeline(self, args, record_name, chunks_to_merge, bpp):

        types = [dtypes.int32] + [dtypes.string] * len(self.inter_columns)
        shapes = ([tensor_shape.scalar()] +
                  [tensor_shape.vector(2)] * len(self.inter_columns))
        q = data_flow_ops.FIFOQueue(
            capacity=8,  # big because who cares
            dtypes=types,
            shapes=shapes,
            name="merge_output_queue")

        #bpp = persona_ops.buffer_pair_pool(size=0, bound=False, name="local_read_merge_buffer_list_pool")

        if args.order_by == location_value:
            merge = persona_ops.agd_merge
        else:
            merge = persona_ops.agd_merge_metadata

        merge_op = merge(chunk_size=args.chunk,
                         buffer_pair_pool=bpp,
                         chunk_group_handles=chunks_to_merge,
                         output_buffer_queue_handle=q.queue_ref,
                         name="agd_local_merge")

        tf.train.queue_runner.add_queue_runner(
            tf.train.queue_runner.QueueRunner(q, [merge_op]))

        # num_recs, results, base, qual, meta
        #num_recs, results, base, qual, meta = q.dequeue()
        val = q.dequeue()
        num_recs = val[0]

        record_name_constant = constant_op.constant(record_name)
        first_ordinal = tf.Variable(-1 * args.chunk,
                                    dtype=dtypes.int64,
                                    name="first_ordinal")
        first_ord = first_ordinal.assign_add(math_ops.to_int64(
            args.chunk, name="first_ord_cast_to_64"),
                                             use_locking=True)
        first_ord_str = string_ops.as_string(first_ord,
                                             name="first_ord_string")
        file_name = string_ops.string_join(
            [args.dataset_dir, "/", record_name_constant, first_ord_str],
            name="file_name_string_joiner")

        out_tuple = val[1:] + [record_name, first_ord, num_recs, file_name]

        return out_tuple
Example #48
    def _create_value(self):
        """Create the value Tensor based on the value type, store as self._value."""

        if isinstance(self._value_type, MeanValue):
            value_tensor = self._dist.mean()
        elif isinstance(self._value_type, SampleValue):
            value_tensor = self._dist.sample(self._value_type.n)
        elif isinstance(self._value_type, SampleAndReshapeValue):
            if self._value_type.n == 1:
                value_tensor = array_ops.squeeze(self._dist.sample(1), [0])
            else:
                samples = self._dist.sample(self._value_type.n)
                samples_shape = array_ops.shape(samples)
                samples_static_shape = samples.get_shape()
                new_batch_size = samples_shape[0] * samples_shape[1]
                value_tensor = array_ops.reshape(
                    samples,
                    array_ops.concat(0, ([new_batch_size], samples_shape[2:])))
                if samples_static_shape.ndims is not None:
                    # Update the static shape for shape inference purposes
                    shape_list = samples_static_shape.as_list()
                    new_shape = tensor_shape.vector(
                        shape_list[0] *
                        shape_list[1] if shape_list[0] is not None
                        and shape_list[1] is not None else None)
                    new_shape = new_shape.concatenate(samples_static_shape[2:])
                    value_tensor.set_shape(new_shape)
        else:
            raise TypeError("Unrecognized Distribution Value Type: %s",
                            self._value_type)

        stop_gradient = self._value_type.stop_gradient

        if stop_gradient:
            # stop_gradient is being enforced by the value type
            return array_ops.stop_gradient(value_tensor)

        if isinstance(self._value_type, MeanValue):
            return value_tensor  # Using pathwise-derivative for this one.
        if (isinstance(self._dist, distributions.ContinuousDistribution)
                and self._dist.is_reparameterized):
            return value_tensor  # Using pathwise-derivative for this one.
        else:
            # Will have to perform some variant of score function
            # estimation.  Call stop_gradient on the sampler just in case we
            # may accidentally leak some gradient from it.
            return array_ops.stop_gradient(value_tensor)
Example #49
def _ExtractGlimpseShape(op):
  """Shape function for ExtractGlimpse op."""
  input_shape = op.inputs[0].get_shape().with_rank(4)
  unused_size_shape = op.inputs[1].get_shape().merge_with(
      tensor_shape.vector(2))
  offsets_shape = op.inputs[2].get_shape().merge_with(
      input_shape[:1].concatenate([2]))
  offsets_shape = offsets_shape
  size_value = tensor_util.constant_value(op.inputs[1])
  if size_value is not None:
    height = size_value[0]
    width = size_value[1]
  else:
    height = None
    width = None
  return [tensor_shape.TensorShape(
      [input_shape[0], height, width, input_shape[3]])]
Example #50
 def testStr(self):
     self.assertEqual("<unknown>", str(tensor_shape.unknown_shape()))
     self.assertEqual(
         "(None,)",
         str(tensor_shape.unknown_shape(rank=1)).replace("?", "None"))
     self.assertEqual(
         "(None, None)",
         str(tensor_shape.unknown_shape(rank=2)).replace("?", "None"))
     self.assertEqual(
         "(None, None, None)",
         str(tensor_shape.unknown_shape(rank=3)).replace("?", "None"))
     self.assertEqual(
         "(32, None, 1, 9)",
         str(tensor_shape.TensorShape([32, None, 1,
                                       9])).replace("?", "None"))
     self.assertEqual("()", str(tensor_shape.scalar()))
     self.assertEqual("(7,)", str(tensor_shape.vector(7)))
     self.assertEqual("(3, 8)", str(tensor_shape.matrix(3, 8)))
     self.assertEqual("(4, 5, 2)", str(tensor_shape.TensorShape([4, 5, 2])))
Example #51
    def sample(self, n, seed=None, name="sample"):
        """Sample `n` observations from the Student t Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: a `Tensor` of shape `(n,) + self.batch_shape + self.event_shape`
          with values of type `self.dtype`.
    """
        with ops.op_scope([self._df, self._mu, self._sigma, n], self.name):
            with ops.name_scope(name):
                n = ops.convert_to_tensor(n, name="n")
                n_val = tensor_util.constant_value(n)

                # We use 2 uniform random floats to generate polar random variates.
                # http://dl.acm.org/citation.cfm?id=179631
                # Theorem 2. Let G, H be iid variates, uniformly distributed on [0,1].
                # Let theta = 2*pi*H, let R = sqrt(df*(G^(-2/df) - 1)) for df > 0.
                # Let X = R*cos(theta), and let Y = R*sin(theta).
                # Then X ~ t_df and Y ~ t_df.
                # The variates X and Y are not independent.
                shape = array_ops.concat(
                    0, [array_ops.pack([2, n]),
                        self.batch_shape()])
                uniform = random_ops.random_uniform(shape=shape,
                                                    dtype=self.dtype,
                                                    seed=seed)
                samples_g, samples_h = array_ops.unpack(uniform, num=2)
                theta = (2 * np.pi) * samples_h
                r = math_ops.sqrt(self._df *
                                  (math_ops.pow(samples_g, -2 / self._df) - 1))
                samples = r * math_ops.cos(theta)

                # Provide some hints to shape inference
                inferred_shape = tensor_shape.vector(n_val).concatenate(
                    self.get_batch_shape())
                samples.set_shape(inferred_shape)

                return samples * self._sigma + self._mu
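The polar construction referenced in the comments is easy to check in isolation. A minimal NumPy sketch of the same recipe (not the library's sampler; it keeps one of the two correlated variates and discards the other):

import numpy as np

def polar_student_t(df, size, seed=0):
    rng = np.random.default_rng(seed)
    g = rng.uniform(size=size)
    h = rng.uniform(size=size)
    theta = 2.0 * np.pi * h                     # angle from H
    r = np.sqrt(df * (g ** (-2.0 / df) - 1.0))  # radius from G
    # r * np.sin(theta) would be the second, correlated t_df draw.
    return r * np.cos(theta)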
Example #52
File: beta.py Project: zqkou/tensorflow
    def sample_n(self, n, seed=None, name="sample_n"):
        """Sample `n` observations from the Beta Distributions.

        Args:
          n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
            observations to sample.
          seed: Python integer, the random seed.
          name: The name to give this op.

        Returns:
          samples: `[n, ...]`, a `Tensor` of `n` samples for each
            of the distributions determined by broadcasting the hyperparameters.
        """
        with ops.name_scope(self.name):
            with ops.name_scope(name, values=[self.a, self.b, n]):
                a = array_ops.ones_like(self._a_b_sum,
                                        dtype=self.dtype) * self.a
                b = array_ops.ones_like(self._a_b_sum,
                                        dtype=self.dtype) * self.b
                n = ops.convert_to_tensor(n, name="n")

                gamma1_sample = random_ops.random_gamma([
                    n,
                ],
                                                        a,
                                                        dtype=self.dtype,
                                                        seed=seed)
                gamma2_sample = random_ops.random_gamma([
                    n,
                ],
                                                        b,
                                                        dtype=self.dtype,
                                                        seed=seed)

                beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)

                n_val = tensor_util.constant_value(n)
                final_shape = tensor_shape.vector(n_val).concatenate(
                    self._a_b_sum.get_shape())

                beta_sample.set_shape(final_shape)
                return beta_sample
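The division above uses the standard identity Beta(a, b) = G1 / (G1 + G2) for independent G1 ~ Gamma(a), G2 ~ Gamma(b). A NumPy sketch, purely as an illustration of that identity:

import numpy as np

rng = np.random.default_rng(0)
a, b, n = 2.0, 5.0, 100000
g1 = rng.gamma(shape=a, size=n)
g2 = rng.gamma(shape=b, size=n)
beta_samples = g1 / (g1 + g2)
print(beta_samples.mean())  # close to a / (a + b) ~= 0.286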
Example #53
  def __init__(self, input_dataset, batch_size, row_shape):
    """See `Dataset.dense_to_sparse_batch()` for more details."""
    if not isinstance(input_dataset.output_types, dtypes.DType):
      raise TypeError("DenseToSparseDataset requires an input whose elements "
                      "have a single component, whereas the input has %r." %
                      input_dataset.output_types)
    self._input_dataset = input_dataset
    self._batch_size = batch_size
    self._row_shape = row_shape
    self._structure = structure.SparseTensorStructure(
        input_dataset.output_types,
        tensor_shape.vector(None).concatenate(self._row_shape))

    variant_tensor = ged_ops.experimental_dense_to_sparse_batch_dataset(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._batch_size,
        row_shape=convert.partial_shape_to_tensor(self._row_shape),
        **dataset_ops.flat_structure(self))
    super(_DenseToSparseBatchDataset, self).__init__(input_dataset,
                                                     variant_tensor)
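The element structure built above prepends an unknown batch dimension to row_shape. A short sketch of that shape arithmetic with the public tf.TensorShape (assumed equivalent to the internal helper used in the snippet):

import tensorflow as tf

row_shape = tf.TensorShape([None, 32])
element_shape = tf.TensorShape([None]).concatenate(row_shape)
print(element_shape)  # (None, None, 32): batch size unknown until runtime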
Example #54
  def sample_n(self, n, seed=None, name="sample_n"):
    """Generate `n` samples.

    Args:
      n: scalar.  Number of samples to draw from each distribution.
      seed: Python integer seed for RNG.
      name: name to give to the op.

    Returns:
      samples: a `Tensor` of shape `(n,) + self.batch_shape` with values of type
          `self.dtype`.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self.p, n]):
        n = ops.convert_to_tensor(n, name="n")
        new_shape = array_ops.concat(0, ([n], self.batch_shape()))
        uniform = random_ops.random_uniform(
            new_shape, seed=seed, dtype=dtypes.float32)
        sample = math_ops.less(uniform, self.p)
        sample.set_shape(tensor_shape.vector(tensor_util.constant_value(n))
                         .concatenate(self.get_batch_shape()))
        return math_ops.cast(sample, self.dtype)
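The sampler above is plain inverse-transform sampling for a Bernoulli: draw a uniform and compare it against p. The same idea in a standalone NumPy sketch:

import numpy as np

rng = np.random.default_rng(0)
p = np.array([0.1, 0.5, 0.9])             # one probability per batch member
n = 4
uniform = rng.uniform(size=(n,) + p.shape)
samples = (uniform < p).astype(np.int32)  # shape (4, 3), values in {0, 1}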
Example #55
def _SelectShape(op):
  """Shape function for SelectOp."""
  # The inputs 'then' and 'else' must have the same shape.
  # The input 'cond' must either have the same shape as 'then' and
  # 'else', or be a vector if 'then' and 'else' are at least vectors.
  c_shape = op.inputs[0].get_shape()
  t_shape = op.inputs[1].get_shape()
  e_shape = op.inputs[2].get_shape()
  t_e_shape = t_shape.merge_with(e_shape)
  c_shape_list = c_shape.as_list() if c_shape.ndims is not None else None
  t_e_shape_list = t_e_shape.as_list() if t_e_shape.ndims is not None else None
  if c_shape_list is not None and t_e_shape_list is not None:
    if len(c_shape_list) != 1:
      # If the rank of 'cond' is != 1, the shape must match 'then' and 'else'
      t_e_shape = t_e_shape.merge_with(c_shape)
    if t_e_shape_list:
      # If 'then' and 'else' are not scalars, 'cond' must be at least a
      # vector, and its length must match the first dimension of 'then'/'else'.
      c_shape = c_shape.with_rank_at_least(1)
      if len(c_shape.as_list()) == 1:
        c_shape.merge_with(tensor_shape.vector(t_e_shape_list[0]))
  return [t_e_shape]
Example #56
  def sample_n(self, n, seed=None, name="sample_n"):
    """Sample `n` observations from the distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self.alpha, n]):
        gamma_sample = random_ops.random_gamma(
            [n,], self.alpha, dtype=self.dtype, seed=seed)
        n_val = tensor_util.constant_value(n)
        final_shape = tensor_shape.vector(n_val).concatenate(
            self.alpha.get_shape())

        gamma_sample.set_shape(final_shape)
        return gamma_sample / math_ops.reduce_sum(
            gamma_sample, reduction_indices=[-1], keep_dims=True)
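Normalizing independent Gamma draws along the last axis is the standard Dirichlet construction; the final division in the snippet is exactly that. A NumPy sketch for comparison:

import numpy as np

rng = np.random.default_rng(0)
alpha = np.array([1.0, 2.0, 3.0])
n = 4
gammas = rng.gamma(shape=alpha, size=(n,) + alpha.shape)
dirichlet = gammas / gammas.sum(axis=-1, keepdims=True)
print(dirichlet.sum(axis=-1))  # every row sums to 1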
Example #57
  def sample(self, n, seed=None, name="sample"):
    """Sample `n` observations from the Categorical distribution.

    Args:
      n: 0-D.  Number of independent samples to draw for each distribution.
      seed: Random seed (optional).
      name: A name for this operation (optional).

    Returns:
      An `int64` `Tensor` with shape `[n, batch_shape, event_shape]`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self.logits, n], name):
        n = ops.convert_to_tensor(n, name="n")
        logits_2d = array_ops.reshape(
            self.logits, array_ops.pack([-1, self.num_classes]))
        samples = random_ops.multinomial(logits_2d, n, seed=seed)
        ret = array_ops.reshape(
            array_ops.transpose(samples),
            array_ops.concat(
                0, [array_ops.expand_dims(n, 0), self.batch_shape()]))
        ret.set_shape(tensor_shape.vector(tensor_util.constant_value(n))
                      .concatenate(self.get_batch_shape()))
        return ret
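The snippet flattens the logits to 2-D, samples with the multinomial kernel, and transposes so the sample dimension leads. A rough modern equivalent (an assumption; tf.random.categorical replaced random_ops.multinomial in TF 2.x):

import tensorflow as tf

logits = tf.math.log([[0.1, 0.9], [0.5, 0.5]])          # batch of two distributions
samples = tf.random.categorical(logits, num_samples=5)  # shape (2, 5)
samples = tf.transpose(samples)                         # shape (5, 2), samples first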
Example #58
    def sample(self, n, seed=None, name="sample"):
        """Sample `n` observations from the Laplace Distributions.

        Args:
          n: `Scalar`, type int32, the number of observations to sample.
          seed: Python integer, the random seed.
          name: The name to give this op.

        Returns:
          samples: `[n, ...]`, a `Tensor` of `n` samples for each
            of the distributions determined by broadcasting the parameters.
        """
        with ops.name_scope(self.name):
            with ops.op_scope([self._loc, self._scale, n], name):
                n = ops.convert_to_tensor(n)
                n_val = tensor_util.constant_value(n)
                shape = array_ops.concat(
                    0, [array_ops.pack([n]),
                        self.batch_shape()])
                # Sample uniformly-at-random from the open-interval (-1, 1).
                uniform_samples = random_ops.random_uniform(
                    shape=shape,
                    minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                        self.dtype.as_numpy_dtype(0.)),
                    maxval=self.dtype.as_numpy_dtype(1.),
                    dtype=self.dtype,
                    seed=seed)

                # Provide some hints to shape inference
                inferred_shape = tensor_shape.vector(n_val).concatenate(
                    self.get_batch_shape())
                uniform_samples.set_shape(inferred_shape)

                return (self._loc -
                        self._scale * math_ops.sign(uniform_samples) *
                        math_ops.log(1. - math_ops.abs(uniform_samples)))
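The sign/log expression at the end is the Laplace inverse CDF applied to a uniform draw on (-1, 1). A self-contained NumPy sketch of the same transform:

import numpy as np

rng = np.random.default_rng(0)
loc, scale, n = 0.0, 2.0, 100000
u = rng.uniform(low=-1.0, high=1.0, size=n)          # open interval in spirit
laplace = loc - scale * np.sign(u) * np.log1p(-np.abs(u))
print(np.mean(np.abs(laplace - loc)))                 # mean absolute deviation ~= scale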
Example #59
    def take_many(self,
                  num_elements,
                  allow_small_batch=False,
                  timeout=None,
                  name=None):
        """Takes the given number of completed elements from this barrier.

        This operation concatenates completed-element component tensors along
        the 0th dimension to make a single component tensor.

        If the barrier has no completed elements, this operation will block
        until there are 'num_elements' elements to take.

        Args:
          num_elements: The number of elements to take.
          allow_small_batch: If the barrier is closed, don't block if there are
            fewer completed elements than requested, but instead return all
            available completed elements.
            TODO(b/25743580): the semantics of `allow_small_batch` are
            experimental and may be extended to other cases in the future.
            TODO(ebrevdo): If a take_many(allow_small_batch=True) is blocking
            already when the barrier is closed, it will block forever. Fix this
            by using asynchronous operations.
          timeout: This specifies the number of milliseconds to block
            before returning with DEADLINE_EXCEEDED. (This option is not
            supported yet.)
          name: A name for the operation (optional).

        Returns:
          A tuple of (index, key, value_list).
          "index" is an int64 tensor of length num_elements containing the
            index of the insert_many call for which the very first component of
            the given element was inserted into the Barrier, starting with
            the value -2**63.  Note, this value is different from the
            index of the insert_many call for which the element was completed.
          "key" is a string tensor of length num_elements containing the keys.
          "value_list" is a tuple of tensors, each one with size num_elements
            in the 0th dimension for each component in the barrier's values.

        """
        if name is None:
            name = "%s_BarrierTakeMany" % self._name
        ret = gen_data_flow_ops._barrier_take_many(self._barrier_ref,
                                                   num_elements,
                                                   self._types,
                                                   allow_small_batch,
                                                   timeout,
                                                   name=name)

        # NOTE(mrry): Not using a shape function because we need access to
        # the Barrier object.
        op = ret[0].op
        if allow_small_batch:
            batch_dim = None
        else:
            batch_dim = tensor_shape.Dimension(
                tensor_util.constant_value(op.inputs[1]))
        op.outputs[0].set_shape(tensor_shape.vector(batch_dim))  # indices
        op.outputs[1].set_shape(tensor_shape.vector(batch_dim))  # keys
        for output, shape in zip(op.outputs[2:], self._shapes):  # value_list
            output.set_shape(
                tensor_shape.TensorShape([batch_dim]).concatenate(shape))

        return ret
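Whether the leading dimension of the outputs is known hinges entirely on allow_small_batch. A shape-only sketch with the public tf.TensorShape (assumed equivalent to the internal helpers used above):

import tensorflow as tf

value_shape = tf.TensorShape([64])
full_batch = tf.TensorShape([12]).concatenate(value_shape)     # allow_small_batch=False
small_batch = tf.TensorShape([None]).concatenate(value_shape)  # allow_small_batch=True
print(full_batch, small_batch)  # (12, 64) (None, 64)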
Example #60
def _LookupTableExportShape(op):
    """Shape function for data_flow_ops._lookup_table_export_values."""
    op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
    keys_shape = tensor_shape.vector(None)
    values_shape = tensor_shape.unknown_shape()
    return [keys_shape, values_shape]