Code Example #1
File: random_seed.py Project: aritratony/tensorflow
def get_seed(seed):
  """Returns the local seeds an operation should use given an op-specific seed.

  See `tf.compat.v1.get_seed` for more details. This wrapper adds support for
  the case
  where `seed` may be a tensor.

  Args:
    seed: An integer or a `tf.int64` scalar tensor.

  Returns:
    A tuple of two `tf.int64` scalar tensors that should be used for the local
    seed of the calling dataset.
  """
  seed, seed2 = random_seed.get_seed(seed)
  if seed is None:
    seed = constant_op.constant(0, dtype=dtypes.int64, name="seed")
  else:
    seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name="seed")
  if seed2 is None:
    seed2 = constant_op.constant(0, dtype=dtypes.int64, name="seed2")
  else:
    with ops.name_scope("seed2") as scope:
      seed2 = ops.convert_to_tensor(seed2, dtype=dtypes.int64)
      seed2 = array_ops.where(
          math_ops.logical_and(
              math_ops.equal(seed, 0), math_ops.equal(seed2, 0)),
          constant_op.constant(2**31 - 1, dtype=dtypes.int64),
          seed2,
          name=scope)
  return seed, seed2
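For illustration, here is a plain-Python sketch of the remapping the tensor branch above performs, assuming (as the comment in the test case further below also suggests) that an explicit (0, 0) seed pair is read by the kernel as "pick a nondeterministic seed". This is not the TensorFlow source, just the same rule restated:

```python
def combine_dataset_seeds(seed, seed2):
    # Unset seeds default to 0, matching the constant_op.constant(0, ...) branches.
    seed = 0 if seed is None else int(seed)
    if seed2 is None:
        return seed, 0
    seed2 = int(seed2)
    # An explicit (0, 0) pair would mean "choose a random seed", so seed2 is
    # swapped for 2**31 - 1 to keep the dataset deterministic.
    if seed == 0 and seed2 == 0:
        seed2 = 2**31 - 1
    return seed, seed2
```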
Code Example #2
File: random_ops.py Project: 1000sprites/tensorflow
def random_uniform(shape,
                   minval=0,
                   maxval=None,
                   dtype=dtypes.float32,
                   seed=None,
                   name=None):
  """Outputs random values from a uniform distribution.

  The generated values follow a uniform distribution in the range
  `[minval, maxval)`. The lower bound `minval` is included in the range, while
  the upper bound `maxval` is excluded.

  For floats, the default range is `[0, 1)`.  For ints, at least `maxval` must
  be specified explicitly.

  In the integer case, the random integers are slightly biased unless
  `maxval - minval` is an exact power of two.  The bias is small for values of
  `maxval - minval` significantly smaller than the range of the output (either
  `2**32` or `2**64`).

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
      range of random values to generate.  Defaults to 0.
    maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
      the range of random values to generate.  Defaults to 1 if `dtype` is
      floating point.
    dtype: The type of the output: `float16`, `float32`, `float64`, `int32`,
      or `int64`.
    seed: A Python integer. Used to create a random seed for the distribution.
      See @{tf.set_random_seed}
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random uniform values.

  Raises:
    ValueError: If `dtype` is integral and `maxval` is not specified.
  """
  dtype = dtypes.as_dtype(dtype)
  if dtype not in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
                   dtypes.int64):
    raise ValueError("Invalid dtype %r" % dtype)
  if maxval is None:
    if dtype.is_integer:
      raise ValueError("Must specify maxval for integer dtype %r" % dtype)
    maxval = 1
  with ops.name_scope(name, "random_uniform", [shape, minval, maxval]) as name:
    shape = _ShapeTensor(shape)
    minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
    maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
    seed1, seed2 = random_seed.get_seed(seed)
    if dtype.is_integer:
      return gen_random_ops._random_uniform_int(
          shape, minval, maxval, seed=seed1, seed2=seed2, name=name)
    else:
      rnd = gen_random_ops._random_uniform(
          shape, dtype, seed=seed1, seed2=seed2)
      return math_ops.add(rnd * (maxval - minval), minval, name=name)
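A small usage sketch (assuming TensorFlow 2.x eager execution, where `tf.random.set_seed` supplies the graph-level seed and the `seed` argument supplies the op-level seed described above):

```python
import tensorflow as tf

tf.random.set_seed(42)
a = tf.random.uniform([3], minval=0, maxval=10, dtype=tf.int32, seed=1)
tf.random.set_seed(42)
b = tf.random.uniform([3], minval=0, maxval=10, dtype=tf.int32, seed=1)
# Same global seed + same op seed => the two draws are identical.
assert (a.numpy() == b.numpy()).all()
```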
Code Example #3
File: random_ops.py Project: 1000sprites/tensorflow
def random_shuffle(value, seed=None, name=None):
  """Randomly shuffles a tensor along its first dimension.

  The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
  to one and only one `output[i]`. For example, a mapping that might occur for a
  3x2 tensor is:

  ```python
  [[1, 2],       [[5, 6],
   [3, 4],  ==>   [1, 2],
   [5, 6]]        [3, 4]]
  ```

  Args:
    value: A Tensor to be shuffled.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      @{tf.set_random_seed}
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of same shape and type as `value`, shuffled along its first
    dimension.
  """
  seed1, seed2 = random_seed.get_seed(seed)
  return gen_random_ops._random_shuffle(
      value, seed=seed1, seed2=seed2, name=name)
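A quick usage sketch of the public wrapper (assuming TF 2.x eager execution); only the first dimension is permuted, rows stay intact:

```python
import tensorflow as tf

x = tf.constant([[1, 2], [3, 4], [5, 6]])
shuffled = tf.random.shuffle(x, seed=3)
print(shuffled.numpy())  # some permutation of the three rows, e.g. [[5, 6], [1, 2], [3, 4]]
```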
Code Example #4
File: random_ops.py Project: 0ruben/tensorflow
def multinomial(logits, num_samples, seed=None, name=None):
  """Draws samples from a multinomial distribution.

  Example:

    samples = tf.multinomial(tf.log([[0.5, 0.5]]), 10)
    # samples has shape [1, 10], where each value is either 0 or 1.

    samples = tf.multinomial([[1, -1, -1]], 10)
    # samples is equivalent to tf.zeros([1, 10], dtype=tf.int64).

  Args:
    logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice
      `[i, :]` represents the unnormalized log probabilities for all classes.
    num_samples: 0-D.  Number of independent samples to draw for each row slice.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    name: Optional name for the operation.

  Returns:
    The drawn samples of shape `[batch_size, num_samples]`.
  """
  with ops.op_scope([logits], name, "multinomial"):
    logits = ops.convert_to_tensor(logits, name="logits")
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_random_ops.multinomial(logits, num_samples, seed=seed1,
                                      seed2=seed2)
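In current TensorFlow the same op is exposed as `tf.random.categorical`; a brief usage sketch (assuming TF 2.x eager execution):

```python
import tensorflow as tf

logits = tf.math.log([[0.5, 0.5]])            # unnormalized log-probabilities
samples = tf.random.categorical(logits, num_samples=10, seed=7)
print(samples.shape)                           # (1, 10); each value is 0 or 1
```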
Code Example #5
 def testRandomSeed(self):
   test_cases = [
       # Each test case is a tuple with input to get_seed:
       # (input_graph_seed, input_op_seed)
       # and output from get_seed:
       # (output_graph_seed, output_op_seed)
       ((None, None), (None, None)),
       ((None, 1), (random_seed.DEFAULT_GRAPH_SEED, 1)),
       ((1, 1), (1, 1)),
       ((0, 0), (0, 2**31 - 1)),  # Avoid nondeterministic (0, 0) output
       ((2**31 - 1, 0), (0, 2**31 - 1)),  # Don't wrap to (0, 0) either
       ((0, 2**31 - 1), (0, 2**31 - 1)),  # Wrapping for the other argument
   ]
   if context.executing_eagerly():
     # operation seed is random number generated based on global seed.
     # it's not tested due to possibility of platform or version difference.
     pass
   else:
     # 0 will be the default_graph._lastid.
     test_cases.append(((1, None), (1, 0)))
   for tc in test_cases:
     tinput, toutput = tc[0], tc[1]
     random_seed.set_random_seed(tinput[0])
     g_seed, op_seed = random_seed.get_seed(tinput[1])
     msg = 'test_case = {0}, got {1}, want {2}'.format(tinput,
                                                       (g_seed, op_seed),
                                                       toutput)
     self.assertEqual((g_seed, op_seed), toutput, msg=msg)
     random_seed.set_random_seed(None)
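The combination rule those test cases encode can be restated in plain Python; this is a simplified sketch (the per-op counter and eager-mode details are glossed over), not the framework code itself:

```python
DEFAULT_GRAPH_SEED = 87654321   # TensorFlow's default graph-level seed
MAXINT32 = 2**31 - 1

def sketch_get_seed(graph_seed, op_seed, next_op_id=0):
    if graph_seed is None and op_seed is None:
        return None, None                        # fully nondeterministic
    if graph_seed is None:
        return DEFAULT_GRAPH_SEED, op_seed       # only the op seed was given
    if op_seed is None:
        op_seed = next_op_id                     # graph seed only: per-op counter
    graph_seed %= MAXINT32
    op_seed %= MAXINT32
    if (graph_seed, op_seed) == (0, 0):
        return 0, MAXINT32                       # avoid the reserved (0, 0) pair
    return graph_seed, op_seed
```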
Code Example #6
def all_candidate_sampler(true_classes, num_true, num_sampled, unique,
                          seed=None, name=None):
  """Generate the set of all classes.

  Deterministically generates and returns the set of all possible classes.
  For testing purposes.  There is no need to use this, since you might as
  well use full softmax or full logistic regression.

  Args:
    true_classes: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes.
    num_true: An `int`.  The number of target classes per training example.
    num_sampled: An `int`.  The number of possible classes.
    unique: A `bool`. Ignored.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
      This operation deterministically returns the entire range
      `[0, num_sampled]`.
    true_expected_count: A tensor of type `float`.  Same shape as
      `true_classes`. The expected counts under the sampling distribution
      of each of `true_classes`. All returned values are 1.0.
    sampled_expected_count: A tensor of type `float`. Same shape as
      `sampled_candidates`. The expected counts under the sampling distribution
      of each of `sampled_candidates`. All returned values are 1.0.
  """
  seed1, seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._all_candidate_sampler(
      true_classes, num_true, num_sampled, unique, seed=seed1, seed2=seed2,
      name=name)
Code Example #7
  def __init__(self,
               input_dataset,
               buffer_size,
               count=None,
               seed=None):
    """See `Dataset.map()` for details."""
    super(_ShuffleAndRepeatDataset, self).__init__()
    self._input_dataset = input_dataset
    self._buffer_size = ops.convert_to_tensor(
        buffer_size, dtype=dtypes.int64, name="buffer_size")
    if count is None:
      self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
    else:
      self._count = ops.convert_to_tensor(
          count, dtype=dtypes.int64, name="count")

    seed, seed2 = random_seed.get_seed(seed)
    if seed is None:
      self._seed = constant_op.constant(0, dtype=dtypes.int64, name="seed")
    else:
      self._seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name="seed")
    if seed2 is None:
      self._seed2 = constant_op.constant(0, dtype=dtypes.int64, name="seed2")
    else:
      self._seed2 = ops.convert_to_tensor(
          seed2, dtype=dtypes.int64, name="seed2")
Code Example #8
File: random_ops.py Project: ygoverdhan/tensorflow
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None, name=None):
    """Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with specified mean and
  standard deviation, except that values whose magnitude is more than 2 standard
  deviations from the mean are dropped and re-picked.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
      truncated normal distribution.
    stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
      of the truncated normal distribution.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random truncated normal values.
  """
    with ops.name_scope(name, "truncated_normal", [shape, mean, stddev]) as name:
        shape_tensor = _ShapeTensor(shape)
        mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
        stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
        seed1, seed2 = random_seed.get_seed(seed)
        rnd = gen_random_ops._truncated_normal(shape_tensor, dtype, seed=seed1, seed2=seed2)
        mul = rnd * stddev_tensor
        value = math_ops.add(mul, mean_tensor, name=name)
        return value
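A brief check of the truncation property via the public API (assuming TF 2.x eager execution):

```python
import tensorflow as tf

t = tf.random.truncated_normal([1000], mean=0.0, stddev=1.0, seed=4)
# Re-drawn samples never land more than two standard deviations from the mean.
assert float(tf.reduce_max(tf.abs(t))) <= 2.0
```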
Code Example #9
File: random_ops.py Project: 1000sprites/tensorflow
def random_normal(shape,
                  mean=0.0,
                  stddev=1.0,
                  dtype=dtypes.float32,
                  seed=None,
                  name=None):
  """Outputs random values from a normal distribution.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
      distribution.
    stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
      of the normal distribution.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      @{tf.set_random_seed}
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random normal values.
  """
  with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name:
    shape_tensor = _ShapeTensor(shape)
    mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
    stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
    seed1, seed2 = random_seed.get_seed(seed)
    rnd = gen_random_ops._random_standard_normal(
        shape_tensor, dtype, seed=seed1, seed2=seed2)
    mul = rnd * stddev_tensor
    value = math_ops.add(mul, mean_tensor, name=name)
    return value
Code Example #10
File: random_ops.py Project: 1000sprites/tensorflow
def multinomial(logits, num_samples, seed=None, name=None):
  """Draws samples from a multinomial distribution.

  Example:

  ```python
  # samples has shape [1, 5], where each value is either 0 or 1 with equal
  # probability.
  samples = tf.multinomial(tf.log([[10., 10.]]), 5)
  ```

  Args:
    logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice
      `[i, :]` represents the unnormalized log-probabilities for all classes.
    num_samples: 0-D.  Number of independent samples to draw for each row slice.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      @{tf.set_random_seed}
      for behavior.
    name: Optional name for the operation.

  Returns:
    The drawn samples of shape `[batch_size, num_samples]`.
  """
  with ops.name_scope(name, "multinomial", [logits]):
    logits = ops.convert_to_tensor(logits, name="logits")
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_random_ops.multinomial(
        logits, num_samples, seed=seed1, seed2=seed2)
Code Example #11
File: data_flow_ops.py Project: JamesFysh/tensorflow
  def __init__(self, capacity, min_after_dequeue, dtypes, shapes=None,
               names=None, seed=None, shared_name=None,
               name="random_shuffle_queue"):
    """Create a queue that dequeues elements in a random order.

    A `RandomShuffleQueue` has bounded capacity; supports multiple
    concurrent producers and consumers; and provides exactly-once
    delivery.

    A `RandomShuffleQueue` holds a list of up to `capacity`
    elements. Each element is a fixed-length tuple of tensors whose
    dtypes are described by `dtypes`, and whose shapes are optionally
    described by the `shapes` argument.

    If the `shapes` argument is specified, each component of a queue
    element must have the respective fixed shape. If it is
    unspecified, different queue elements may have different shapes,
    but the use of `dequeue_many` is disallowed.

    The `min_after_dequeue` argument allows the caller to specify a
    minimum number of elements that will remain in the queue after a
    `dequeue` or `dequeue_many` operation completes, to ensure a
    minimum level of mixing of elements. This invariant is maintained
    by blocking those operations until sufficient elements have been
    enqueued. The `min_after_dequeue` argument is ignored after the
    queue has been closed.

    Args:
      capacity: An integer. The upper bound on the number of elements
        that may be stored in this queue.
      min_after_dequeue: An integer (described above).
      dtypes:  A list of `DType` objects. The length of `dtypes` must equal
        the number of tensors in each queue element.
      shapes: (Optional.) A list of fully-defined `TensorShape` objects
        with the same length as `dtypes`, or `None`.
      names: (Optional.) A list of string naming the components in the queue
        with the same length as `dtypes`, or `None`.  If specified the dequeue
        methods return a dictionary with the names as keys.
      seed: A Python integer. Used to create a random seed. See
        [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
        for behavior.
      shared_name: (Optional.) If non-empty, this queue will be shared under
        the given name across multiple sessions.
      name: Optional name for the queue operation.
    """
    dtypes = _as_type_list(dtypes)
    shapes = _as_shape_list(shapes, dtypes)
    names = _as_name_list(names, dtypes)
    # If shared_name is provided and an op seed was not provided, we must ensure
    # that we use the same seed for all queues with the same shared_name.
    if shared_name is not None and seed is None:
      seed = hash(shared_name)
    seed1, seed2 = random_seed.get_seed(seed)
    queue_ref = gen_data_flow_ops._random_shuffle_queue(
        component_types=dtypes, shapes=shapes, capacity=capacity,
        min_after_dequeue=min_after_dequeue, seed=seed1, seed2=seed2,
        shared_name=shared_name, name=name)

    super(RandomShuffleQueue, self).__init__(dtypes, shapes, names, queue_ref)
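A minimal usage sketch, assuming a TF 2.x build where the queue is still exposed as `tf.queue.RandomShuffleQueue` and works eagerly:

```python
import tensorflow as tf

q = tf.queue.RandomShuffleQueue(
    capacity=10, min_after_dequeue=2, dtypes=[tf.int32], seed=11)
q.enqueue_many([tf.range(10)])
# Elements come back in a shuffled order; min_after_dequeue keeps a pool of
# at least two elements in the queue to guarantee some mixing.
print([int(q.dequeue()) for _ in range(5)])
```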
Code Example #12
File: random_ops.py Project: AriaAsuka/tensorflow
def random_gamma(shape,
                 alpha,
                 beta=None,
                 dtype=dtypes.float32,
                 seed=None,
                 name=None):
  """Draws `shape` samples from each of the given Gamma distribution(s).

  `alpha` is the shape parameter describing the distribution(s), and `beta` is
  the inverse scale parameter(s).

  Example:

    samples = tf.random_gamma([10], [0.5, 1.5])
    # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
    # the samples drawn from each distribution

    samples = tf.random_gamma([7, 5], [0.5, 1.5])
    # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
    # represents the 7x5 samples drawn from each of the two distributions

    samples = tf.random_gamma([30], [[1.],[3.],[5.]], beta=[[3., 4.]])
    # samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output samples
      to be drawn per alpha/beta-parameterized distribution.
    alpha: A Tensor or Python value or N-D array of type `dtype`. `alpha`
      provides the shape parameter(s) describing the gamma distribution(s) to
      sample. Must be broadcastable with `beta`.
    beta: A Tensor or Python value or N-D array of type `dtype`. Defaults to 1.
      `beta` provides the inverse scale parameter(s) of the gamma
      distribution(s) to sample. Must be broadcastable with `alpha`.
    dtype: The type of alpha, beta, and the output: `float16`, `float32`, or
      `float64`.
    seed: A Python integer. Used to create a random seed for the distributions.
      See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    name: Optional name for the operation.

  Returns:
    samples: a `Tensor` of shape `tf.concat(shape, tf.shape(alpha + beta))` with
      values of type `dtype`.
  """
  with ops.name_scope(name, "random_gamma", [shape, alpha, beta]):
    shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
    alpha = ops.convert_to_tensor(alpha, name="alpha", dtype=dtype)
    beta = ops.convert_to_tensor(beta if beta is not None else 1,
                                 name="beta",
                                 dtype=dtype)
    alpha_broadcast = alpha + array_ops.zeros_like(beta)
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_random_ops._random_gamma(shape,
                                        alpha_broadcast,
                                        seed=seed1,
                                        seed2=seed2) / beta
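A short usage sketch with the public wrapper (assuming TF 2.x eager execution):

```python
import tensorflow as tf

s = tf.random.gamma([10], alpha=[0.5, 1.5], seed=9)
# One Gamma distribution per alpha value; the sample shape is prepended: (10, 2).
print(s.shape)
```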
Code Example #13
def log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
                                  range_max, seed=None, name=None):
  """Samples a set of classes using a log-uniform (Zipfian) base distribution.

  This operation randomly samples a tensor of sampled classes
  (`sampled_candidates`) from the range of integers `[0, range_max)`.

  The elements of `sampled_candidates` are drawn without replacement
  (if `unique=True`) or with replacement (if `unique=False`) from
  the base distribution.

  The base distribution for this operation is an approximately log-uniform
  or Zipfian distribution:

  `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`

  This sampler is useful when the target classes approximately follow such
  a distribution - for example, if the classes represent words in a lexicon
  sorted in decreasing order of frequency. If your classes are not ordered by
  decreasing frequency, do not use this op.

  In addition, this operation returns tensors `true_expected_count`
  and `sampled_expected_count` representing the number of times each
  of the target classes (`true_classes`) and the sampled
  classes (`sampled_candidates`) is expected to occur in an average
  tensor of sampled classes.  These values correspond to `Q(y|x)`
  defined in [this
  document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
  If `unique=True`, then these are post-rejection probabilities and we
  compute them approximately.

  Args:
    true_classes: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes.
    num_true: An `int`.  The number of target classes per training example.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    unique: A `bool`. Determines whether all sampled classes in a batch are
      unique.
    range_max: An `int`. The number of possible classes.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
      The sampled classes.
    true_expected_count: A tensor of type `float`.  Same shape as
      `true_classes`. The expected counts under the sampling distribution
      of each of `true_classes`.
    sampled_expected_count: A tensor of type `float`. Same shape as
      `sampled_candidates`. The expected counts under the sampling distribution
      of each of `sampled_candidates`.
  """
  seed1, seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._log_uniform_candidate_sampler(
      true_classes, num_true, num_sampled, unique, range_max, seed=seed1,
      seed2=seed2, name=name)
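A usage sketch (assuming TF 2.x, where the op is exported as `tf.random.log_uniform_candidate_sampler`):

```python
import tensorflow as tf

true_classes = tf.constant([[0, 3]], dtype=tf.int64)   # ids sorted by decreasing frequency
sampled, true_exp, sampled_exp = tf.random.log_uniform_candidate_sampler(
    true_classes, num_true=2, num_sampled=4, unique=True, range_max=50, seed=2)
print(sampled.numpy())        # 4 candidate ids, skewed toward small (frequent) ids
```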
Code Example #14
def learned_unigram_candidate_sampler(true_classes, num_true, num_sampled,
                                      unique, range_max, seed=None, name=None):
  """Samples a set of classes from a distribution learned during training.

  This operation randomly samples a tensor of sampled classes
  (`sampled_candidates`) from the range of integers `[0, range_max)`.

  The elements of `sampled_candidates` are drawn without replacement
  (if `unique=True`) or with replacement (if `unique=False`) from
  the base distribution.

  The base distribution for this operation is constructed on the fly
  during training.  It is a unigram distribution over the target
  classes seen so far during training.  Every integer in `[0, range_max)`
  begins with a weight of 1, and is incremented by 1 each time it is
  seen as a target class.  The base distribution is not saved to checkpoints,
  so it is reset when the model is reloaded.

  In addition, this operation returns tensors `true_expected_count`
  and `sampled_expected_count` representing the number of times each
  of the target classes (`true_classes`) and the sampled
  classes (`sampled_candidates`) is expected to occur in an average
  tensor of sampled classes.  These values correspond to `Q(y|x)`
  defined in [this
  document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
  If `unique=True`, then these are post-rejection probabilities and we
  compute them approximately.

  Args:
    true_classes: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes.
    num_true: An `int`.  The number of target classes per training example.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    unique: A `bool`. Determines whether all sampled classes in a batch are
      unique.
    range_max: An `int`. The number of possible classes.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
      The sampled classes.
    true_expected_count: A tensor of type `float`.  Same shape as
      `true_classes`. The expected counts under the sampling distribution
      of each of `true_classes`.
    sampled_expected_count: A tensor of type `float`. Same shape as
      `sampled_candidates`. The expected counts under the sampling distribution
      of each of `sampled_candidates`.

  """
  seed1, seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._learned_unigram_candidate_sampler(
      true_classes, num_true, num_sampled, unique, range_max, seed=seed1,
      seed2=seed2, name=name)
Code Example #15
File: input_data.py Project: lingyuanCN/mnist
    def __init__(self,
                 images,
                 labels,
                 fake_data=False,
                 one_hot=False,
                 dtype=dtypes.float32,
                 reshape=True,
                 seed=None):
        """Construct a _DataSet.
    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.  Seed arg provides for convenient deterministic testing.
    Args:
      images: The images
      labels: The labels
      fake_data: Ignore images and labels, use fake data.
      one_hot: Bool, return the labels as one hot vectors (if True) or ints (if
        False).
      dtype: Output image dtype. One of [uint8, float32]. `uint8` output has
        range [0,255]. float32 output has range [0,1].
      reshape: Bool. If True returned images are returned flattened to vectors.
      seed: The random seed to use.
    """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError(
                'Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], (
                'images.shape: %s labels.shape: %s' %
                (images.shape, labels.shape))
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0],
                                        images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
Code Example #16
def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
                              range_max, seed=None, name=None):
  """Samples a set of classes using a uniform base distribution.

  This operation randomly samples a tensor of sampled classes
  (`sampled_candidates`) from the range of integers `[0, range_max)`.

  The elements of `sampled_candidates` are drawn without replacement
  (if `unique=True`) or with replacement (if `unique=False`) from
  the base distribution.

  The base distribution for this operation is the uniform distribution
  over the range of integers `[0, range_max)`.

  In addition, this operation returns tensors `true_expected_count`
  and `sampled_expected_count` representing the number of times each
  of the target classes (`true_classes`) and the sampled
  classes (`sampled_candidates`) is expected to occur in an average
  tensor of sampled classes.  These values correspond to `Q(y|x)`
  defined in [this
  document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
  If `unique=True`, then these are post-rejection probabilities and we
  compute them approximately.

  Args:
    true_classes: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes.
    num_true: An `int`.  The number of target classes per training example.
    num_sampled: An `int`.  The number of classes to randomly sample. The
      `sampled_candidates` return value will have shape `[num_sampled]`. If
      `unique=True`, `num_sampled` must be less than or equal to `range_max`.
    unique: A `bool`. Determines whether all sampled classes in a batch are
      unique.
    range_max: An `int`. The number of possible classes.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.  The
      sampled classes, either with possible duplicates (`unique=False`) or all
      unique (`unique=True`). In either case, `sampled_candidates` is
      independent of the true classes.
    true_expected_count: A tensor of type `float`.  Same shape as
      `true_classes`. The expected counts under the sampling distribution
      of each of `true_classes`.
    sampled_expected_count: A tensor of type `float`. Same shape as
      `sampled_candidates`. The expected counts under the sampling distribution
      of each of `sampled_candidates`.
  """
  seed1, seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._uniform_candidate_sampler(
      true_classes, num_true, num_sampled, unique, range_max, seed=seed1,
      seed2=seed2, name=name)
Code Example #17
 def __init__(self, directory, mask_range=(-1, 1), seed=None):
     """
     Construct a DataSet
     """
     seed1, seed2 = random_seed.get_seed(seed)
     # If op level seed is not set, use whatever graph level seed is returned
     np.random.seed(seed1 if seed is None else seed2)
     self._epochs_completed = 0
     self._index_in_epoch = 0
     self.directory = directory
     self.mask_range = mask_range
     self.infect_dir = directory + 'infect/'
Code Example #18
File: data_reader.py Project: ufukhurriyetoglu/han
    def __init__(self, images, labels, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)

        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
Code Example #19
def compute_accidental_hits(true_classes,
                            sampled_candidates,
                            num_true,
                            seed=None,
                            name=None):
    """Compute the position ids in `sampled_candidates` matching `true_classes`.

  In Candidate Sampling, this operation facilitates virtually removing
  sampled classes which happen to match target classes.  This is done
  in Sampled Softmax and Sampled Logistic.

  See our [Candidate Sampling Algorithms
  Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf).

  We presuppose that the `sampled_candidates` are unique.

  We call it an 'accidental hit' when one of the target classes
  matches one of the sampled classes.  This operation reports
  accidental hits as triples `(index, id, weight)`, where `index`
  represents the row number in `true_classes`, `id` represents the
  position in `sampled_candidates`, and weight is `-FLOAT_MAX`.

  The result of this op should be passed through a `sparse_to_dense`
  operation, then added to the logits of the sampled classes. This
  removes the contradictory effect of accidentally sampling the true
  target classes as noise classes for the same example.

  Args:
    true_classes: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes.
    sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
      The sampled_candidates output of CandidateSampler.
    num_true: An `int`.  The number of target classes per training example.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    indices: A `Tensor` of type `int32` and shape `[num_accidental_hits]`.
      Values indicate rows in `true_classes`.
    ids: A `Tensor` of type `int64` and shape `[num_accidental_hits]`.
      Values indicate positions in `sampled_candidates`.
    weights: A `Tensor` of type `float` and shape `[num_accidental_hits]`.
      Each value is `-FLOAT_MAX`.

  """
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_candidate_sampling_ops._compute_accidental_hits(
        true_classes,
        sampled_candidates,
        num_true,
        seed=seed1,
        seed2=seed2,
        name=name)
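A small usage sketch (assuming TF 2.x, where this op is exported as `tf.nn.compute_accidental_hits`):

```python
import tensorflow as tf

true_classes = tf.constant([[1, 5]], dtype=tf.int64)
sampled = tf.constant([5, 7, 9], dtype=tf.int64)
indices, ids, weights = tf.nn.compute_accidental_hits(true_classes, sampled, num_true=2)
# Row 0 accidentally hit candidate position 0 (class 5); its weight is -FLOAT_MAX.
print(indices.numpy(), ids.numpy(), weights.numpy())
```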
Code Example #20
def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
                              range_max, seed=None, name=None):
  """Samples a set of classes using a uniform base distribution.

  This operation randomly samples a tensor of sampled classes
  (`sampled_candidates`) from the range of integers `[0, range_max)`.

  The elements of `sampled_candidates` are drawn without replacement
  (if `unique=True`) or with replacement (if `unique=False`) from
  the base distribution.

  The base distribution for this operation is the uniform distribution
  over the range of integers `[0, range_max)`.

  In addition, this operation returns tensors `true_expected_count`
  and `sampled_expected_count` representing the number of times each
  of the target classes (`true_classes`) and the sampled
  classes (`sampled_candidates`) is expected to occur in an average
  tensor of sampled classes.  These values correspond to `Q(y|x)`
  defined in [this
  document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
  If `unique=True`, then these are post-rejection probabilities and we
  compute them approximately.

  Args:
    true_classes: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes.
    num_true: An `int`.  The number of target classes per training example.
    num_sampled: An `int`.  The number of classes to randomly sample. The
      `sampled_candidates` return value will have shape `[num_sampled]`. If
      `unique=True`, `num_sampled` must be less than or equal to `range_max`.
    unique: A `bool`. Determines whether all sampled classes in a batch are
      unique.
    range_max: An `int`. The number of possible classes.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.  The
      sampled classes, either with possible duplicates (`unique=False`) or all
      unique (`unique=True`). In either case, `sampled_candidates` is
      independent of the true classes.
    true_expected_count: A tensor of type `float`.  Same shape as
      `true_classes`. The expected counts under the sampling distribution
      of each of `true_classes`.
    sampled_expected_count: A tensor of type `float`. Same shape as
      `sampled_candidates`. The expected counts under the sampling distribution
      of each of `sampled_candidates`.
  """
  seed1, seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops.uniform_candidate_sampler(
      true_classes, num_true, num_sampled, unique, range_max, seed=seed1,
      seed2=seed2, name=name)
Code Example #21
def random_normal(shape,
                  mean=0.0,
                  stddev=1.0,
                  dtype=dtypes.float32,
                  seed=None,
                  name=None):
  """Outputs random values from a normal distribution.

  Example that generates a new set of random values every time:

  >>> tf.random.set_seed(5);
  >>> tf.random.normal([4], 0, 1, tf.float32)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=..., dtype=float32)>

  Example that outputs a reproducible result:

  >>> tf.random.set_seed(5);
  >>> tf.random.normal([2,2], 0, 1, tf.float32, seed=1)
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[-1.3768897 , -0.01258316],
        [-0.169515   ,  1.0824056 ]], dtype=float32)>

  In this case, we are setting both the global and operation-level seed to
  ensure this result is reproducible.  See `tf.random.set_seed` for more
  information.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean: A Tensor or Python value of type `dtype`, broadcastable with `stddev`.
      The mean of the normal distribution.
    stddev: A Tensor or Python value of type `dtype`, broadcastable with `mean`.
      The standard deviation of the normal distribution.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      `tf.random.set_seed`
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random normal values.
  """
  with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name:
    shape_tensor = tensor_util.shape_tensor(shape)
    mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
    stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
    seed1, seed2 = random_seed.get_seed(seed)
    rnd = gen_random_ops.random_standard_normal(
        shape_tensor, dtype, seed=seed1, seed2=seed2)
    mul = rnd * stddev_tensor
    value = math_ops.add(mul, mean_tensor, name=name)
    tensor_util.maybe_set_static_shape(value, shape)
    return value
Code Example #22
def parameterized_truncated_normal(shape,
                                   means=0.0,
                                   stddevs=1.0,
                                   minvals=-2.0,
                                   maxvals=2.0,
                                   dtype=dtypes.float32,
                                   seed=None,
                                   name=None):
  """Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with specified mean and
  standard deviation, except that values whose magnitude is more than 2 standard
  deviations from the mean are dropped and re-picked.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    means: A 0-D Tensor or Python value of type `dtype`. The mean of the
      truncated normal distribution.
    stddevs: A 0-D Tensor or Python value of type `dtype`. The standard
      deviation of the truncated normal distribution.
    minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of
      the truncated normal distribution.
    maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of
      the truncated normal distribution.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      `tf.random.set_seed`
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random truncated normal values.
  """
  with ops.name_scope(name, "parameterized_truncated_normal",
                      [shape, means, stddevs, minvals, maxvals]) as name:
    shape_tensor = tensor_util.shape_tensor(shape)
    means_tensor = ops.convert_to_tensor(means, dtype=dtype, name="means")
    stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name="stddevs")
    minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name="minvals")
    maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name="maxvals")
    seed1, seed2 = random_seed.get_seed(seed)
    rnd = gen_random_ops.parameterized_truncated_normal(
        shape_tensor,
        means_tensor,
        stddevs_tensor,
        minvals_tensor,
        maxvals_tensor,
        seed=seed1,
        seed2=seed2)
    tensor_util.maybe_set_static_shape(rnd, shape)
    return rnd
Code Example #23
    def __init__(self,
                 rnn_mode,
                 num_layers,
                 num_units,
                 input_size,
                 input_mode=CUDNN_INPUT_LINEAR_MODE,
                 direction=CUDNN_RNN_UNIDIRECTION,
                 dtype=dtypes.float32,
                 dropout=0.,
                 seed=0):
        """Creates a CudnnRNN model from model spec.

    Args:
      rnn_mode: a string specifies the mode, under which this RNN model runs.
          Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
      num_layers: the number of layers for the RNN model.
      num_units: the number of units within the RNN model.
      input_size: the size of the input, it could be different from the
          num_units.
      input_mode: indicate whether there is a linear projection between the
          input and the actual computation before the first layer. It could be
          'linear_input', 'skip_input' or 'auto_select'.
          'linear_input' (default) always applies a linear projection of input
          onto RNN hidden state. (standard RNN behavior).
          'skip_input' is only allowed when input_size == num_units;
          'auto_select' implies 'skip_input' when input_size == num_units;
          otherwise, it implies 'linear_input'.
      direction: the direction model that the model operates. Could be either
          'unidirectional' or 'bidirectional'
      dtype: dtype of params, tf.float32 or tf.float64.
      dropout: whether to enable dropout. When it is 0, dropout is disabled.
      seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
          for behavior.
    Raises:
      ValueError: if direction is invalid.
    """
        if direction not in (CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION):
            raise ValueError("Invalid direction: %s, expect %s or %s",
                             direction, CUDNN_RNN_UNIDIRECTION,
                             CUDNN_RNN_BIDIRECTION)
        self._num_layers = num_layers
        self._num_units = num_units
        self._input_size = input_size
        self._rnn_mode = rnn_mode
        self._input_mode = input_mode
        self._direction = direction
        self._dtype = dtype
        self._dropout = dropout
        # get graph and op seed.
        self._seed, self._seed2 = random_seed.get_seed(seed)
        if self._seed is None and self._seed2 is None:
            self._seed, self._seed2 = 0, 0
Code Example #24
 def __init__(self, seed=None):
   """A `Dataset` of pseudorandom values."""
   super(RandomDataset, self).__init__()
   seed, seed2 = random_seed.get_seed(seed)
   if seed is None:
     self._seed = constant_op.constant(0, dtype=dtypes.int64, name="seed")
   else:
     self._seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name="seed")
   if seed2 is None:
     self._seed2 = constant_op.constant(0, dtype=dtypes.int64, name="seed2")
   else:
     self._seed2 = ops.convert_to_tensor(
         seed2, dtype=dtypes.int64, name="seed2")
Code Example #25
  def __init__(self,
               images,
               labels,
               dtype=dtypes.float32,
               seed=None):

    self.check_data(images, labels)
    seed1, seed2 = random_seed.get_seed(seed)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
    self._total_batches = images.shape[0]
Code Example #26
    def __init__(self, data, indices, n_steps, fea_size, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        np.random.seed(seed1 if seed is None else seed2)
        self._num_examples = int(len(indices) / n_steps)
        self._indices = indices
        #     self._labels = labels
        self._n_steps = n_steps
        self._data = data

        self._epochs_completed = 0
        self._index_in_epoch = 0
        self._fea_size = fea_size
Code Example #27
def truncated_normal(shape,
                     mean=0.0,
                     stddev=1.0,
                     dtype=dtypes.float32,
                     seed=None,
                     name=None):
    """Outputs random values from a truncated normal distribution.

  The values are drawn from a normal distribution with specified mean and
  standard deviation, discarding and re-drawing any samples that are more than
  two standard deviations from the mean.

  Examples:

  >>> tf.random.truncated_normal(shape=[2])
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([..., ...], dtype=float32)>

  >>> tf.random.truncated_normal(shape=[2], mean=3, stddev=1, dtype=tf.float32)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([..., ...], dtype=float32)>

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
      truncated normal distribution.
    stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
      of the normal distribution, before truncation.
    dtype: The type of the output. Restricted to floating-point types:
      `tf.half`, `tf.float`, `tf.double`, etc.
    seed: A Python integer. Used to create a random seed for the distribution.
      See `tf.random.set_seed` for more information.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random truncated normal values.
  """
    with ops.name_scope(name, "truncated_normal",
                        [shape, mean, stddev]) as name:
        shape_tensor = tensor_util.shape_tensor(shape)
        mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
        stddev_tensor = ops.convert_to_tensor(stddev,
                                              dtype=dtype,
                                              name="stddev")
        seed1, seed2 = random_seed.get_seed(seed)
        rnd = gen_random_ops.truncated_normal(shape_tensor,
                                              dtype,
                                              seed=seed1,
                                              seed2=seed2)
        mul = rnd * stddev_tensor
        value = math_ops.add(mul, mean_tensor, name=name)
        tensor_util.maybe_set_static_shape(value, shape)
        return value
Code Example #28
File: cudnn_rnn_ops.py Project: Dr4KK/tensorflow
  def __init__(self,
               rnn_mode,
               num_layers,
               num_units,
               input_size,
               input_mode="linear_input",
               direction=CUDNN_RNN_UNIDIRECTION,
               dtype=dtypes.float32,
               dropout=0.,
               seed=0):
    """Creates a CudnnRNN model from model spec.

    Args:
      rnn_mode: a string specifies the mode, under which this RNN model runs.
          Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
      num_layers: the number of layers for the RNN model.
      num_units: the number of units within the RNN model.
      input_size: the size of the input, it could be different from the
          num_units.
      input_mode: indicate whether there is a linear projection between the
          input and the actual computation before the first layer. It could be
          'linear_input', 'skip_input' or 'auto_select'.
          'linear_input' (default) always applies a linear projection of input
          onto RNN hidden state. (standard RNN behavior).
          'skip_input' is only allowed when input_size == num_units;
          'auto_select' implies 'skip_input' when input_size == num_units;
          otherwise, it implies 'linear_input'.
      direction: the direction model that the model operates. Could be either
          'unidirectional' or 'bidirectional'
      dtype: dtype of params, tf.float32 or tf.float64.
      dropout: whether to enable dropout. When it is 0, dropout is disabled.
      seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
          for behavior.
    Raises:
      ValueError: if direction is invalid.
    """
    if direction not in (CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION):
      raise ValueError("Invalid direction: %s, expect %s or %s",
                       direction, CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION)
    self._num_layers = num_layers
    self._num_units = num_units
    self._input_size = input_size
    self._rnn_mode = rnn_mode
    self._input_mode = input_mode
    self._direction = direction
    self._dtype = dtype
    self._dropout = dropout
    # get graph and op seed.
    self._seed, self._seed2 = random_seed.get_seed(seed)
    if self._seed is None and self._seed2 is None:
      self._seed, self._seed2 = 0, 0
Code Example #29
File: random_ops.py Project: 1000sprites/tensorflow
def parameterized_truncated_normal(shape,
                                   means=0.0,
                                   stddevs=1.0,
                                   minvals=-2.0,
                                   maxvals=2.0,
                                   dtype=dtypes.float32,
                                   seed=None,
                                   name=None):
  """Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with specified mean and
  standard deviation, except that values whose magnitude is more than 2 standard
  deviations from the mean are dropped and re-picked.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    means: A 0-D Tensor or Python value of type `dtype`. The mean of the
      truncated normal distribution.
    stddevs: A 0-D Tensor or Python value of type `dtype`. The standard
      deviation of the truncated normal distribution.
    minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of
      the truncated normal distribution.
    maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of
      the truncated normal distribution.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      @{tf.set_random_seed}
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random truncated normal values.
  """
  with ops.name_scope(name, "parameterized_truncated_normal",
                      [shape, means, stddevs, minvals, maxvals]) as name:
    shape_tensor = _ShapeTensor(shape)
    means_tensor = ops.convert_to_tensor(means, dtype=dtype, name="means")
    stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name="stddevs")
    minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name="minvals")
    maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name="maxvals")
    seed1, seed2 = random_seed.get_seed(seed)
    rnd = gen_random_ops._parameterized_truncated_normal(
        shape_tensor,
        means_tensor,
        stddevs_tensor,
        minvals_tensor,
        maxvals_tensor,
        seed=seed1,
        seed2=seed2)
    return rnd
Code Example #30
    def __init__(self,
                 images,
                 labels,
                 fake_data=False,
                 one_hot=False,
                 dtype=dtypes.float32,
                 reshape=True,
                 seed=None):
        """Construct a DataSet.
    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.  Seed arg provides for convenient deterministic testing.
    """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32, dtypes.float64):
            raise TypeError(
                'Invalid image dtype %r, expected uint8, float32 or float64' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], (
                'images.shape: %s labels.shape: %s' %
                (images.shape, labels.shape))
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0],
                                        images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
            elif dtype == dtypes.float64:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float64)
                images = numpy.multiply(images, 1.0 / 255.0)

        self._images, self._labels = filter_data(images, labels)
        self._num_examples = self._images.shape[0]

        #    self._images = images
        #    self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
Code Example #31
    def __init__(self,
               xtrain,
               ytrain,
               dtype=float_type,
               seed=121):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        self._num_examples = xtrain.shape[0]

        self._xtrain = xtrain
        self._ytrain = ytrain
        self._epochs_completed = 0
        self._index_in_epoch = 0
Code Example #32
  def __init__(self, capacity, min_after_dequeue, dtypes, shapes=None,
               seed=None, shared_name=None, name="random_shuffle_queue"):
    """Create a queue that dequeues elements in a random order.

    A `RandomShuffleQueue` has bounded capacity; supports multiple
    concurrent producers and consumers; and provides exactly-once
    delivery.

    A `RandomShuffleQueue` holds a list of up to `capacity`
    elements. Each element is a fixed-length tuple of tensors whose
    dtypes are described by `dtypes`, and whose shapes are optionally
    described by the `shapes` argument.

    If the `shapes` argument is specified, each component of a queue
    element must have the respective fixed shape. If it is
    unspecified, different queue elements may have different shapes,
    but the use of `dequeue_many` is disallowed.

    The `min_after_dequeue` argument allows the caller to specify a
    minimum number of elements that will remain in the queue after a
    `dequeue` or `dequeue_many` operation completes, to ensure a
    minimum level of mixing of elements. This invariant is maintained
    by blocking those operations until sufficient elements have been
    enqueued. The `min_after_dequeue` argument is ignored after the
    queue has been closed.

    Args:
      capacity: An integer. The upper bound on the number of elements
        that may be stored in this queue.
      min_after_dequeue: An integer (described above).
      dtypes:  A list of `DType` objects. The length of `dtypes` must equal
        the number of tensors in each queue element.
      shapes: (Optional.) A list of fully-defined `TensorShape` objects,
        with the same length as `dtypes` or `None`.
      seed: A Python integer. Used to create a random seed. See
        [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
        for behavior.
      shared_name: (Optional.) If non-empty, this queue will be shared under
        the given name across multiple sessions.
      name: Optional name for the queue operation.
    """
    dtypes = _as_type_list(dtypes)
    shapes = _as_shape_list(shapes, dtypes)
    seed1, seed2 = random_seed.get_seed(seed)
    queue_ref = gen_data_flow_ops._random_shuffle_queue(
        component_types=dtypes, shapes=shapes, capacity=capacity,
        min_after_dequeue=min_after_dequeue, seed=seed1, seed2=seed2,
        shared_name=shared_name, name=name)

    super(RandomShuffleQueue, self).__init__(dtypes, shapes, queue_ref)
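
For context, a minimal usage sketch of this queue through the public TF1-style API (capacity, values, and seed below are illustrative only):

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# A single-component queue of int32 scalars that dequeues in a seeded,
# shuffled order.
q = tf.RandomShuffleQueue(
    capacity=100, min_after_dequeue=10, dtypes=[tf.int32], seed=42)
enqueue_op = q.enqueue_many([list(range(100))])
sample = q.dequeue()

with tf.Session() as sess:
    sess.run(enqueue_op)
    print(sess.run(sample))  # one element drawn at random
```
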
Code example #33
    def __init__(self,
                 full_data,
                 resize=False,
                 resize_shape=None,
                 return_type=0,
                 dtype=dtypes.float32,
                 inference=False,
                 seed=None):

        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError(
                'Invalid image dtype %r, expected uint8 or float32' % dtype)

        if inference:
            assert (full_data['sen1'].shape[0] == full_data['sen2'].shape[0])
        else:
            assert (full_data['sen1'].shape[0] == full_data['label'].shape[0]
                    and full_data['sen2'].shape[0] == full_data['label'].shape[0]), (
                        'sen1.shape: %s  sen2.shape: %s label.shape: %s' %
                        (full_data['sen1'].shape, full_data['sen2'].shape,
                         full_data['label'].shape))

        self._num_examples = full_data['sen1'].shape[0]

        if not inference:
            self._labels = full_data['label']
        else:
            self._labels = None

        self._data_s1 = full_data['sen1']
        self._data_s2 = full_data['sen2']

        self._inference = inference

        self._epochs_completed = 0
        self._index_in_epoch = 0

        self._return_type = return_type

        # use resize to resize the image
        self._resize = resize
        if resize:
            self._shape = resize_shape
        else:
            self._shape = []
            self._shape.append(self._data_s1.shape[1])
            self._shape.append(self._data_s1.shape[2])
Code example #34
    def __init__(self,
                 images,
                 labels,
                 fake_data=False,
                 one_hot=False,
                 dtype=dtypes.float32,
                 reshape=True,
                 seed=None):
        """Construct a DataSet.
    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.  Seed arg provides for convenient deterministic testing.
    """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(
            dtype).base_dtype  # Converts the given `type_value` to a `DType`
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError(
                'Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], (
                'images.shape: %s labels.shape: %s' %
                (images.shape, labels.shape))
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0],
                                        images.shape[1] * images.shape[2])
            # The reshape above merges each image's rows, producing one row
            # per image in the flattened array.
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)  # copy, cast to float32
                images = numpy.multiply(images, 1.0 / 255.0)  # now in [0.0, 1.0]
        self._images = images
        self._labels = labels
        self._epochs_completed = 0  # number of completed passes over the data
        self._index_in_epoch = 0  # cursor within the current epoch
Code example #35
def multinomial_categorical_impl(logits, num_samples, dtype, seed):
    """Implementation for random.categorical (v1) and random.categorical (v2)."""
    logits = ops.convert_to_tensor(logits, name="logits")
    dtype = dtypes.as_dtype(dtype) if dtype else dtypes.int64
    accepted_dtypes = (dtypes.int32, dtypes.int64)
    if dtype not in accepted_dtypes:
        raise ValueError(
            f"Argument `dtype` got invalid value {dtype}. Accepted dtypes are "
            f"{accepted_dtypes}.")
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_random_ops.multinomial(logits,
                                      num_samples,
                                      seed=seed1,
                                      seed2=seed2,
                                      output_dtype=dtype)
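
In recent TensorFlow releases this implementation sits behind the public wrapper `tf.random.categorical` (and the older `tf.compat.v1.multinomial`); a brief illustrative call:

```python
import tensorflow as tf

# Draw 5 class indices per row of `logits`; each row holds unnormalized
# log-probabilities over 2 classes.
logits = tf.math.log([[0.1, 0.9], [0.5, 0.5]])
samples = tf.random.categorical(logits, num_samples=5, seed=7)
print(samples.shape, samples.dtype)  # (2, 5) <dtype: 'int64'>
```
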
Code example #36
  def __init__(self,
               images,
               labels,
               cut_images,
               cut_test_images,
               images_new_part_cut,
               images_rest_part_cut,
               images_new_part_cut_test,
               images_rest_part_cut_test,
               fake_data=False,
               one_hot=False,
               dtype=dtypes.float32,
               reshape=False,
               seed=None):
    
    seed1, seed2 = random_seed.get_seed(seed)
    numpy.random.seed(seed1 if seed is None else seed2)
    dtype = dtypes.as_dtype(dtype).base_dtype
    
    if dtype not in (dtypes.uint8, dtypes.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot 
    else:  
      assert images.shape[0] == labels.shape[0], ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
      self._num_examples = images.shape[0]

      if reshape:
        assert images.shape[3] == 3
        images = images.reshape(images.shape[0], images.shape[1], images.shape[2], images.shape[3])
      
      if dtype == dtypes.float32:
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
    
    self._cut_images = cut_images
    self._cut_test_images = cut_test_images
    self._images_new_part_cut = images_new_part_cut
    self._images_rest_part_cut = images_rest_part_cut
    self._images_new_part_cut_test = images_new_part_cut_test
    self._images_rest_part_cut_test = images_rest_part_cut_test   
Code example #37
    def __init__(self, images, labels, reshape=True, dtype=dtypes.float32, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        np.random.seed(seed1 if seed is None else seed2)
        if reshape:
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])

        if dtype == dtypes.float32:
            images = images.astype(np.float32)
            images = np.multiply(images, 1.0 / 255.0)

        self._num_examples = images.shape[0]
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
Code example #38
    def __init__(self,
                 images,
                 labels,
                 fake_data=False,
                 one_hot=False,
                 dtype=dtypes.float32,
                 reshape=True,
                 seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError(
                'Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], (
                'images.shape: %s labels.shape: %s' %
                (images.shape, labels.shape))
            self._num_examples = images.shape[0]

            if reshape:
                # assert images.shape[3] == 1
                images = images.reshape(images.shape[0],
                                        images.shape[1] * images.shape[2],
                                        images.shape[3])
            if dtype == dtypes.float32:
                num = images.shape[0]
                images.setflags(write=1)
                chunk = 1000
                images = images.astype(numpy.float32)
                # Rescale [0, 255] -> [0.0, 1.0] in chunks of `chunk` examples
                # to limit peak memory use; slicing handles the final partial
                # chunk automatically.
                for i in range(0, num, chunk):
                    images[i:i + chunk] = numpy.multiply(
                        images[i:i + chunk], 1.0 / 255.0)

        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
Code example #39
def rtt_random_uniform(shape,
                       minval=0,
                       maxval=None,
                       dtype=dtypes.float32,
                       seed=None,
                       name=None):
    """Outputs random values from a uniform distribution."""

    dtype = dtypes.as_dtype(dtype)
    if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,
                     dtypes.float64, dtypes.int32, dtypes.int64,
                     dtypes.string):
        raise ValueError("Invalid dtype %r" % dtype)

    bk_dtype = dtype
    if (dtype == dtypes.string):
        dtype = dtypes.float32

    if maxval is None:
        if dtype.is_integer:
            raise ValueError("Must specify maxval for integer dtype %r" %
                             dtype)
        maxval = 1
    with ops.name_scope(name, "random_uniform",
                        [shape, minval, maxval]) as name:
        shape = random_ops._ShapeTensor(shape)
        minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
        maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
        seed1, seed2 = random_seed.get_seed(seed)
        if dtype.is_integer:
            rv = gen_random_ops.random_uniform_int(shape,
                                                   minval,
                                                   maxval,
                                                   seed=seed1,
                                                   seed2=seed2,
                                                   name=name)
        else:
            rnd = gen_random_ops.random_uniform(shape,
                                                dtypes.float32,
                                                seed=seed1,
                                                seed2=seed2)
            rv = math_ops.add(rnd * (maxval - minval), minval, name=name)

        if (bk_dtype == dtypes.string):
            return tf.as_string(rv)
        else:
            return rv
Code example #40
File: VQA.py Project: momih/vqa_tensorflow
 def __init__(self, data_dir='./data/', split="val", top_answers=3000,
              max_ques_len=15, seed=None):
     self.data_dir = data_dir
     self.split = split
     self.img_dir = self.data_dir + "{}2014/".format(self.split)
     self.top_answers = top_answers
     self.max_ques_len = max_ques_len
     self._data = self.preprocess_json(self.split)
     self.question_to_index = self.map_to_index(top=None, answer=False)
     self.vocab_size = len(self.question_to_index)
     self.answer_to_index = self.map_to_index(top=self.top_answers)
     self._num_examples = len(self._data)
     self._epochs_completed = 0
     self._index_in_epoch = 0
     self.number_of_questions = len(self._data)
     seed1, seed2 = random_seed.get_seed(seed)
     np.random.seed(seed1 if seed is None else seed2)
Code example #41
File: dataset_ops.py Project: vveitch/relational-ERM
    def __init__(self, sample_size, neighbours, lengths, offsets, seed=0):
        """ Initializes a new instance of the uniform edge sampler.

        Parameters
        ----------
        sample_size: The number of edges in each sample.
        neighbours: The array of neighbours.
        lengths: The array of lengths in the neighbours.
        offsets: The array of offsets into the neighbours.
        seed: The random seed to use.
        """
        self.sample_size = sample_size
        self.seed, self.seed2 = random_seed.get_seed(seed)
        self.neighbours = neighbours
        self.lengths = lengths
        self.offsets = offsets
        super(UniformEdgeDataset, self).__init__()
Code example #42
def compute_accidental_hits(true_classes, sampled_candidates, num_true,
                            seed=None, name=None):
  """Compute the position ids in `sampled_candidates` matching `true_classes`.

  In Candidate Sampling, this operation facilitates virtually removing
  sampled classes which happen to match target classes.  This is done
  in Sampled Softmax and Sampled Logistic.

  See our [Candidate Sampling Algorithms
  Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf).

  We presuppose that the `sampled_candidates` are unique.

  We call it an 'accidental hit' when one of the target classes
  matches one of the sampled classes.  This operation reports
  accidental hits as triples `(index, id, weight)`, where `index`
  represents the row number in `true_classes`, `id` represents the
  position in `sampled_candidates`, and weight is `-FLOAT_MAX`.

  The result of this op should be passed through a `sparse_to_dense`
  operation, then added to the logits of the sampled classes. This
  removes the contradictory effect of accidentally sampling the true
  target classes as noise classes for the same example.

  Args:
    true_classes: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes.
    sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
      The sampled_candidates output of CandidateSampler.
    num_true: An `int`.  The number of target classes per training example.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    indices: A `Tensor` of type `int32` and shape `[num_accidental_hits]`.
      Values indicate rows in `true_classes`.
    ids: A `Tensor` of type `int64` and shape `[num_accidental_hits]`.
      Values indicate positions in `sampled_candidates`.
    weights: A `Tensor` of type `float` and shape `[num_accidental_hits]`.
      Each value is `-FLOAT_MAX`.

  """
  seed1, seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._compute_accidental_hits(
      true_classes, sampled_candidates, num_true, seed=seed1, seed2=seed2,
      name=name)
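
A rough usage sketch through the public wrapper `tf.nn.compute_accidental_hits`, paired with a candidate sampler (batch size, ranges, and seeds below are illustrative):

```python
import tensorflow as tf

true_classes = tf.constant([[1], [7]], dtype=tf.int64)  # [batch_size, num_true]
sampled, _, _ = tf.random.log_uniform_candidate_sampler(
    true_classes=true_classes, num_true=1, num_sampled=10,
    unique=True, range_max=1000, seed=3)
# `weights` holds -FLOAT_MAX for each accidental hit; scattering it into the
# sampled logits masks out true classes that were drawn as noise.
indices, ids, weights = tf.nn.compute_accidental_hits(
    true_classes, sampled, num_true=1)
```
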
Code example #43
File: dataset_ops.py Project: vveitch/relational-ERM
    def __init__(self, walk_length, neighbours, lengths, offsets, seed=0):
        """ Initialize a new random walk dataset.

        Parameters
        ----------
        walk_length: a scalar tensor representing the length of the walk.
        neighbours: a 1-dimensional tensor representing the packed adjacency list.
        lengths: a 1-dimensional tensor giving the length of each subarray in neighbours.
        offsets: a 1-dimensional tensor giving the offset of each subarray in neighbours.
        seed: the seed to use.
        """
        self.walk_length = walk_length
        self.seed, self.seed2 = random_seed.get_seed(seed)
        self.neighbours = neighbours
        self.lengths = lengths
        self.offsets = offsets
        super(RandomWalkDataset, self).__init__()
Code example #44
File: random_ops.py Project: zk8085454/tensorflow
def random_poisson_v2(shape, lam, dtype=dtypes.float32, seed=None, name=None):
    """Draws `shape` samples from each of the given Poisson distribution(s).

  `lam` is the rate parameter describing the distribution(s).

  Example:

  ```python
  samples = tf.random.poisson([10], [0.5, 1.5])
  # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
  # the samples drawn from each distribution

  samples = tf.random.poisson([7, 5], [12.2, 3.3])
  # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
  # represents the 7x5 samples drawn from each of the two distributions
  ```

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output samples
      to be drawn per "rate"-parameterized distribution.
    lam: A Tensor or Python value or N-D array of type `dtype`.
      `lam` provides the rate parameter(s) describing the poisson
      distribution(s) to sample.
    dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or
      `int64`.
    seed: A Python integer. Used to create a random seed for the distributions.
      See
      `tf.random.set_seed`
      for behavior.
    name: Optional name for the operation.

  Returns:
    samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)`
      with values of type `dtype`.
  """
    with ops.name_scope(name, "random_poisson", [lam, shape]):
        shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
        seed1, seed2 = random_seed.get_seed(seed)
        result = gen_random_ops.random_poisson_v2(shape,
                                                  lam,
                                                  dtype=dtype,
                                                  seed=seed1,
                                                  seed2=seed2)
        _maybe_set_static_shape_helper(result, shape, lam)
        return result
Code example #45
 def testRandomSeed(self):
     test_cases = [
         # Each test case is a tuple with input to get_seed:
         # (input_graph_seed, input_op_seed)
         # and output from get_seed:
         # (output_graph_seed, output_op_seed)
         ((None, None), (None, None)),
         ((None, 1), (random_seed.DEFAULT_GRAPH_SEED, 1)),
         ((1, None), (1, 0)),  # 0 will be the default_graph._lastid.
         ((1, 1), (1, 1)),
     ]
     for tc in test_cases:
         tinput, toutput = tc[0], tc[1]
         random_seed.set_random_seed(tinput[0])
         g_seed, op_seed = random_seed.get_seed(tinput[1])
         msg = "test_case = {0}, got {1}, want {2}".format(tinput, (g_seed, op_seed), toutput)
         self.assertEqual((g_seed, op_seed), toutput, msg=msg)
         random_seed.set_random_seed(None)
Code example #46
    def __init__(self, images, labels, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        np.random.seed(seed1 if seed is None else seed2)
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns]
        if reshape:
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])

        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
Code example #47
    def __init__(self, data, num_steps, seed=None):
        """Construct a DataSet.
    Seed arg provides for convenient deterministic testing.
    """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        np.random.seed(seed1 if seed is None else seed2)

        inps, outs = slide_window(data, num_steps)

        assert inps.shape[0] == outs.shape[0], (
            'inps.shape: %s outs.shape: %s' % (inps.shape, outs.shape))

        self._num_examples = inps.shape[0]
        self._inps = inps
        self._outs = outs
        self._epochs_completed = 0
        self._index_in_epoch = 0
Code example #48
File: random_ops.py Project: zk8085454/tensorflow
def truncated_normal(shape,
                     mean=0.0,
                     stddev=1.0,
                     dtype=dtypes.float32,
                     seed=None,
                     name=None):
    """Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with specified mean and
  standard deviation, except that values whose magnitude is more than 2 standard
  deviations from the mean are dropped and re-picked.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
      truncated normal distribution.
    stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
      of the normal distribution, before truncation.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      `tf.random.set_seed`
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random truncated normal values.
  """
    with ops.name_scope(name, "truncated_normal",
                        [shape, mean, stddev]) as name:
        shape_tensor = tensor_util.shape_tensor(shape)
        mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
        stddev_tensor = ops.convert_to_tensor(stddev,
                                              dtype=dtype,
                                              name="stddev")
        seed1, seed2 = random_seed.get_seed(seed)
        rnd = gen_random_ops.truncated_normal(shape_tensor,
                                              dtype,
                                              seed=seed1,
                                              seed2=seed2)
        mul = rnd * stddev_tensor
        value = math_ops.add(mul, mean_tensor, name=name)
        tensor_util.maybe_set_static_shape(value, shape)
        return value
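
A short usage sketch with the public API (shape, stddev, and seed are arbitrary):

```python
import tensorflow as tf

# Samples are redrawn until they fall within two standard deviations of the
# mean, which keeps initial weights away from extreme values.
w = tf.random.truncated_normal([3, 4], mean=0.0, stddev=0.1, seed=1)
print(w.shape)  # (3, 4)
```
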
Code example #49
def cont_bow(source, window, seed=None, name=None):
    """Generates `Continuous bag-of-words` target and context pairs from batched list of tokens.

    Args:
        source: `2-D` string `Tensor` or `RaggedTensor`, batched lists of tokens [sentences, tokens].
        window: `int`, size of context before and after target token, must be > 0.
        seed: `int`, used to create a random seed (optional).
            See @{tf.random.set_seed} for behavior.
        name: `string`, a name for the operation (optional).

    Returns:
        `1-D` string `Tensor`: target tokens.
        `2-D` string `RaggedTensor`: context tokens.
        `2-D` int32 `RaggedTensor`: context positions.
    """
    with tf.name_scope(name or 'cont_bow'):
        source = ragged_tensor.convert_to_tensor_or_ragged_tensor(
            source, name='source')

        if source.shape.rank != 2:
            raise ValueError('Rank of `source` must equal 2')

        if not ragged_tensor.is_ragged(source):
            source = ragged_tensor.RaggedTensor.from_tensor(source,
                                                            ragged_rank=1)

        if source.ragged_rank != 1:
            raise ValueError('Ragged rank of `source` must equal 1')

        seed1, seed2 = random_seed.get_seed(seed)

        target, context_values, context_splits, context_positions = tfmiss_ops.miss_cont_bow(
            source_values=source.values,
            source_splits=source.row_splits,
            window=window,
            seed=seed1,
            seed2=seed2)

        context = tf.RaggedTensor.from_row_splits(context_values,
                                                  context_splits)
        position = tf.RaggedTensor.from_row_splits(context_positions,
                                                   context_splits)

        return target, context, position
Code example #50
 def testRandomSeed(self):
     test_cases = [
         # Each test case is a tuple with input to get_seed:
         # (input_graph_seed, input_op_seed)
         # and output from get_seed:
         # (output_graph_seed, output_op_seed)
         ((None, None), (None, None)),
         ((None, 1), (random_seed.DEFAULT_GRAPH_SEED, 1)),
         ((1, None), (1, 0)),  # 0 will be the default_graph._lastid.
         ((1, 1), (1, 1)),
     ]
     for tc in test_cases:
         tinput, toutput = tc[0], tc[1]
         random_seed.set_random_seed(tinput[0])
         g_seed, op_seed = random_seed.get_seed(tinput[1])
         msg = 'test_case = {0}, got {1}, want {2}'.format(
             tinput, (g_seed, op_seed), toutput)
         self.assertEqual((g_seed, op_seed), toutput, msg=msg)
         random_seed.set_random_seed(None)
Code example #51
File: image_ops.py Project: sarvex/tensorflow
def random_crop(image, size, seed=None, name=None):
    """Randomly crops `image` to size `[target_height, target_width]`.

  The offset of the output within `image` is uniformly random. `image` always
  fully contains the result.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`
    size: 1-D tensor with two elements, specifying target `[height, width]`
    seed: A Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    name: A name for this operation (optional).

  Returns:
    A cropped 3-D tensor of shape `[target_height, target_width, channels]`.
  """
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_image_ops.random_crop(image, size, seed=seed1, seed2=seed2, name=name)
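
The helper above appears to be an older internal variant; a hedged sketch with the current public API, whose `size` argument also includes the channel dimension:

```python
import tensorflow as tf

# tf.image.random_crop takes the full crop shape, channels included, unlike
# the 2-element [height, width] size used by the helper above.
image = tf.zeros([128, 128, 3])
patch = tf.image.random_crop(image, size=[64, 64, 3], seed=11)
print(patch.shape)  # (64, 64, 3)
```
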
Code example #52
    def __init__(self,
                 rnn_mode,
                 num_layers,
                 num_units,
                 input_size,
                 input_mode="linear_input",
                 direction="unidirectional",
                 dropout=0.,
                 seed=0):
        """Creates a MkldnnRNN model from model spec.

    Args:
      rnn_mode: a string specifies the mode, under which this RNN model runs.
          Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
      num_layers: the number of layers for the RNN model.
      num_units: the number of units within the RNN model.
      input_size: the size of the input, it could be different from the
          num_units.
      input_mode: indicate whether there is a linear projection between the
          input and the actual computation before the first layer. It could be
          'linear_input', 'skip_input' or 'auto_select'.
          'linear_input' (default) always applies a linear projection of input
          onto RNN hidden state. (standard RNN behavior).
          'skip_input' is only allowed when input_size == num_units;
          'auto_select' implies 'skip_input' when input_size == num_units;
          otherwise, it implies 'linear_input'.
      direction: the direction in which the model operates. Could be either
          'unidirectional' or 'bidirectional'.
      dropout: the dropout probability. When it is 0, dropout is disabled.
      seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
          for behavior.
    """
        self._num_layers = num_layers
        self._num_units = num_units
        self._input_size = input_size
        self._rnn_mode = rnn_mode
        self._input_mode = input_mode
        self._direction = direction
        self._dropout = dropout
        # get graph and op seed.
        self._seed, self._seed2 = random_seed.get_seed(seed)
        if self._seed is None and self._seed2 is None:
            self._seed, self._seed2 = 0, 0
Code example #53
File: mnist.py Project: chdinh/tensorflow
  def __init__(self,
               images,
               labels,
               fake_data=False,
               one_hot=False,
               dtype=dtypes.float32,
               reshape=True,
               seed=None):
    """Construct a DataSet.
    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.  Seed arg provides for convenient deterministic testing.
    """
    seed1, seed2 = random_seed.get_seed(seed)
    # If op level seed is not set, use whatever graph level seed is returned
    numpy.random.seed(seed1 if seed is None else seed2)
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      if reshape:
        assert images.shape[3] == 1
        images = images.reshape(images.shape[0],
                                images.shape[1] * images.shape[2])
      if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
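
The preprocessing in this constructor can be exercised on its own; a small self-contained sketch of the reshape and rescale steps (array sizes are arbitrary):

```python
import numpy

images = numpy.random.randint(0, 256, size=(100, 28, 28, 1), dtype=numpy.uint8)

# Flatten [num_examples, rows, columns, 1] -> [num_examples, rows*columns].
flat = images.reshape(images.shape[0], images.shape[1] * images.shape[2])

# Rescale [0, 255] -> [0.0, 1.0], as done for dtype == float32.
flat = numpy.multiply(flat.astype(numpy.float32), 1.0 / 255.0)
print(flat.shape)  # (100, 784), values now in [0.0, 1.0]
```
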
Code example #54
  def __init__(self,
               rnn_mode,
               num_layers,
               num_units,
               input_size,
               input_mode="auto_select",
               direction="unidirectional",
               dropout=0.,
               seed=0):
    """Creates a CudnnRNN model from model spec.

    Args:
      rnn_mode: a string specifies the mode, under which this RNN model runs.
          Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
      num_layers: the number of layers for the RNN model.
      num_units: the number of units within the RNN model.
      input_size: the size of the input, it could be different from the
          num_units.
      input_mode: indicate whether there is a linear projection between the
          input and the actual computation before the first layer. It could be
          'skip_input', 'linear_input' or 'auto_select'.
          'skip_input' is only allowed when input_size == num_units;
          'auto_select' implies 'skip_input' when input_size == num_units;
          otherwise, it implies 'linear_input'.
      direction: the direction in which the model operates. Could be either
          'unidirectional' or 'bidirectional'.
      dropout: the dropout probability. When it is 0, dropout is disabled.
      seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
          for behavior.
    """
    self._num_layers = num_layers
    self._num_units = num_units
    self._input_size = input_size
    self._rnn_mode = rnn_mode
    self._input_mode = input_mode
    self._direction = direction
    self._dropout = dropout
    # get graph and op seed.
    self._seed, self._seed2 = random_seed.get_seed(seed)
    if self._seed is None and self._seed2 is None:
      self._seed, self._seed2 = 0, 0
Code example #55
File: random_ops.py Project: bunbutter/tensorflow
def random_poisson_v2(shape, lam, dtype=dtypes.float32, seed=None, name=None):
  """Draws `shape` samples from each of the given Poisson distribution(s).

  `lam` is the rate parameter describing the distribution(s).

  Example:

  ```python
  samples = tf.random_poisson([10], [0.5, 1.5])
  # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
  # the samples drawn from each distribution

  samples = tf.random_poisson([7, 5], [12.2, 3.3])
  # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
  # represents the 7x5 samples drawn from each of the two distributions
  ```

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output samples
      to be drawn per "rate"-parameterized distribution.
    lam: A Tensor or Python value or N-D array of type `dtype`.
      `lam` provides the rate parameter(s) describing the poisson
      distribution(s) to sample.
    dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or
      `int64`.
    seed: A Python integer. Used to create a random seed for the distributions.
      See
      `tf.set_random_seed`
      for behavior.
    name: Optional name for the operation.

  Returns:
    samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)`
      with values of type `dtype`.
  """
  with ops.name_scope(name, "random_poisson", [lam, shape]):
    shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_random_ops.random_poisson_v2(
        shape, lam, dtype=dtype, seed=seed1, seed2=seed2)
Code example #56
  def __init__(self,
               images,
               labels,
               reshape=True,
               dtype=dtypes.float32,
               seed=None):
    seed1, seed2 = random_seed.get_seed(seed)
    np.random.seed(seed1 if seed is None else seed2)
    if reshape:
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1]*images.shape[2])

    if dtype == dtypes.float32:
      images = images.astype(np.float32)
      images = np.multiply(images, 1.0 / 255.0)

    self._num_examples = images.shape[0]
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
Code example #57
 def testRandomSeed(self):
   test_cases = [
       # Each test case is a tuple with input to get_seed:
       # (input_graph_seed, input_op_seed)
       # and output from get_seed:
       # (output_graph_seed, output_op_seed)
       ((None, None), (None, None)),
       ((None, 1), (random_seed.DEFAULT_GRAPH_SEED, 1)),
       ((1, None), (1, 0)),  # 0 will be the default_graph._lastid.
       ((1, 1), (1, 1)),
       ((0, 0), (0, 2**31 - 1)),  # Avoid nondeterministic (0, 0) output
       ((2**31 - 1, 0), (0, 2**31 - 1)),  # Don't wrap to (0, 0) either
       ((0, 2**31 - 1), (0, 2**31 - 1)),  # Wrapping for the other argument
   ]
   for tc in test_cases:
     tinput, toutput = tc[0], tc[1]
     random_seed.set_random_seed(tinput[0])
     g_seed, op_seed = random_seed.get_seed(tinput[1])
     msg = 'test_case = {0}, got {1}, want {2}'.format(tinput,
                                                       (g_seed, op_seed),
                                                       toutput)
     self.assertEqual((g_seed, op_seed), toutput, msg=msg)
     random_seed.set_random_seed(None)
Code example #58
File: random_ops.py Project: ray2020/tensorflow
def random_uniform(shape, minval=0.0, maxval=1.0,
                   dtype=types.float32, seed=None,
                   name=None):
  """Outputs random values from a uniform distribution.

  The generated values follow a uniform distribution in the range
  `[minval, maxval)`. The lower bound `minval` is included in the range, while
  the upper bound `maxval` is excluded.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
      range of random values to generate.
    maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
      the range of random values to generate.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random uniform values.
  """
  with ops.op_scope([shape, minval, maxval], name, "random_uniform") as name:
    shape_tensor = _ShapeTensor(shape)
    min_tensor = ops.convert_to_tensor(minval, dtype=dtype, name="min")
    range_tensor = ops.convert_to_tensor(
        maxval - minval, dtype=dtype, name="range")
    seed1, seed2 = random_seed.get_seed(seed)
    rnd = gen_random_ops._random_uniform(shape_tensor, dtype,
                                         seed=seed1,
                                         seed2=seed2)
    mul = rnd * range_tensor
    value = math_ops.add(mul, min_tensor, name=name)
    return value
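
The body above scales a raw `[0, 1)` sample into `[minval, maxval)`; the same construction with today's public API (bounds chosen arbitrarily):

```python
import tensorflow as tf

minval, maxval = 2.0, 5.0
raw = tf.random.uniform([4], seed=0)        # values in [0, 1)
value = raw * (maxval - minval) + minval    # values in [2.0, 5.0)
```
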
Code example #59
def fixed_unigram_candidate_sampler(true_classes,
                                    num_true,
                                    num_sampled,
                                    unique,
                                    range_max,
                                    vocab_file='',
                                    distortion=1.0,
                                    num_reserved_ids=0,
                                    num_shards=1,
                                    shard=0,
                                    unigrams=(),
                                    seed=None,
                                    name=None):
  """Samples a set of classes using the provided (fixed) base distribution.

  This operation randomly samples a tensor of sampled classes
  (`sampled_candidates`) from the range of integers `[0, range_max)`.

  The elements of `sampled_candidates` are drawn without replacement
  (if `unique=True`) or with replacement (if `unique=False`) from
  the base distribution.

  The base distribution is read from a file or passed in as an
  in-memory array. There is also an option to skew the distribution by
  applying a distortion power to the weights.

  In addition, this operation returns tensors `true_expected_count`
  and `sampled_expected_count` representing the number of times each
  of the target classes (`true_classes`) and the sampled
  classes (`sampled_candidates`) is expected to occur in an average
  tensor of sampled classes.  These values correspond to `Q(y|x)`
  defined in [this
  document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
  If `unique=True`, then these are post-rejection probabilities and we
  compute them approximately.

  Args:
    true_classes: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes.
    num_true: An `int`.  The number of target classes per training example.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    unique: A `bool`. Determines whether all sampled classes in a batch are
      unique.
    range_max: An `int`. The number of possible classes.
    vocab_file: Each valid line in this file (which should have a CSV-like
      format) corresponds to a valid word ID. IDs are in sequential order,
      starting from num_reserved_ids. The last entry in each line is expected
      to be a value corresponding to the count or relative probability. Exactly
      one of `vocab_file` and `unigrams` needs to be passed to this operation.
    distortion: The distortion is used to skew the unigram probability
      distribution.  Each weight is first raised to the distortion's power
      before adding to the internal unigram distribution. As a result,
      `distortion = 1.0` gives regular unigram sampling (as defined by the vocab
      file), and `distortion = 0.0` gives a uniform distribution.
    num_reserved_ids: Optionally some reserved IDs can be added in the range
      `[0, num_reserved_ids]` by the users. One use case is that a special
      unknown word token is used as ID 0. These IDs will have a sampling
      probability of 0.
    num_shards: A sampler can be used to sample from a subset of the original
      range in order to speed up the whole computation through parallelism. This
      parameter (together with `shard`) indicates the number of partitions that
      are being used in the overall computation.
    shard: A sampler can be used to sample from a subset of the original range
      in order to speed up the whole computation through parallelism. This
      parameter (together with `num_shards`) indicates the particular partition
      number of the operation, when partitioning is being used.
    unigrams: A list of unigram counts or probabilities, one per ID in
      sequential order. Exactly one of `vocab_file` and `unigrams` should be
      passed to this operation.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
      The sampled classes.
    true_expected_count: A tensor of type `float`.  Same shape as
      `true_classes`. The expected counts under the sampling distribution
      of each of `true_classes`.
    sampled_expected_count: A tensor of type `float`. Same shape as
      `sampled_candidates`. The expected counts under the sampling distribution
      of each of `sampled_candidates`.

  """
  seed1, seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._fixed_unigram_candidate_sampler(
      true_classes, num_true, num_sampled, unique, range_max,
      vocab_file=vocab_file, distortion=distortion,
      num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard,
      unigrams=unigrams, seed=seed1, seed2=seed2, name=name)
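
An illustrative call through the public wrapper `tf.random.fixed_unigram_candidate_sampler` with an in-memory `unigrams` distribution (the counts below are made up for the example):

```python
import tensorflow as tf

true_classes = tf.constant([[0], [3]], dtype=tf.int64)
sampled, true_expected, sampled_expected = (
    tf.random.fixed_unigram_candidate_sampler(
        true_classes=true_classes, num_true=1, num_sampled=4, unique=True,
        range_max=5, unigrams=[10, 5, 2, 2, 1], seed=42))
print(sampled)  # 4 class ids drawn from the skewed unigram distribution
```
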