Example #1
def f():
    stateless.stateless_random_uniform(
        shape=shape,
        seed=[1, 2],
        minval=array_ops.zeros(shape, 'int32'),
        maxval=100,
        dtype='int32')
Example #2
def f2():
    stateless.stateless_random_uniform(
        shape=shape,
        seed=[1, 2],
        minval=0,
        maxval=array_ops.ones(shape, 'int32') * 100,
        dtype='int32')
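Both fragments above come from TensorFlow's internal test suite, so they lean on internal modules (`stateless`, `array_ops`) and an enclosing `shape` variable. As a minimal self-contained sketch of the same call through the public API (assuming TF 2.x, where it is exposed as `tf.random.stateless_uniform`), note that the same seed always reproduces the same values:

import tensorflow as tf

# Same seed in, same values out: the stateless contract.
a = tf.random.stateless_uniform(
    shape=[4], seed=[1, 2], minval=0, maxval=100, dtype=tf.int32)
b = tf.random.stateless_uniform(
    shape=[4], seed=[1, 2], minval=0, maxval=100, dtype=tf.int32)
assert bool(tf.reduce_all(a == b))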
Example #3
def stateless_split(seed, num=2):
    """Splits an RNG seed into `num` new seeds by adding a leading axis.

  Example:

  >>> seed = [1, 2]
  >>> new_seeds = tf.random.experimental.stateless_split(seed, num=3)
  >>> print(new_seeds)
  tf.Tensor(
  [[1105988140 1738052849]
   [-335576002  370444179]
   [  10670227 -246211131]], shape=(3, 2), dtype=int32)
  >>> tf.random.stateless_normal(shape=[3], seed=new_seeds[0, :])
  <tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.59835213, -0.9578608 ,
  0.9002807 ], dtype=float32)>

  Args:
    seed: an RNG seed (a tensor with shape [2] and dtype `int32` or
      `int64`). (When using XLA, only `int32` is allowed.)
    num: optional, a positive integer or scalar tensor indicating the number of
      seeds to produce (default 2).

  Returns:
    A tensor with shape [num, 2] representing `num` new seeds. It will have the
    same dtype as `seed` (if `seed` doesn't have an explicit dtype, the dtype
    will be determined by `tf.convert_to_tensor`).
  """
    seed = ops.convert_to_tensor(seed)
    return stateless_random_ops.stateless_random_uniform(shape=[num, 2],
                                                         seed=seed,
                                                         dtype=seed.dtype,
                                                         minval=None,
                                                         maxval=None)
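Beyond the doctest in the docstring, here is a hedged usage sketch (assuming TF 2.x, where this function is exposed as `tf.random.experimental.stateless_split`): split one root seed so that each downstream op draws from its own independent, deterministic stream.

import tensorflow as tf

root_seed = tf.constant([1, 2], dtype=tf.int32)
# stateless_split returns shape [2, 2]; unpack one [2]-shaped seed per op.
seed_noise, seed_init = tf.random.experimental.stateless_split(root_seed, num=2)
noise = tf.random.stateless_normal(shape=[4], seed=seed_noise)
init = tf.random.stateless_uniform(shape=[4], seed=seed_init)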
Example #4
def builder_fn():
  shape = (10, 1000, 1000)
  seed_var = variables.Variable(
      (312, 456), dtype=dtypes.int32, name='input')
  random_t = stateless.stateless_random_uniform(
      shape, seed=seed_var, dtype=dtype)
  return '%s.shape%s' % (name, shape), [random_t]
Example #5
def _random_uniform(self, *args, **kwargs):
    if self._use_stateless:
        c_seed = self._stateless_seed_offset + kwargs['seed']
        kwargs['seed'] = math_ops.cast(
            array_ops.stack([c_seed, self._global_step]), dtypes.int32)
        return stateless_random_ops.stateless_random_uniform(
            *args, **kwargs)
    else:
        return random_ops.random_uniform(*args, **kwargs)
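The wrapper above builds the two-word seed from a fixed per-op offset plus the global step, so each op's draws vary over training yet remain exactly reproducible. A self-contained sketch of the same idea against the public API (`step_uniform` and `op_offset` are illustrative names, not from the original):

import tensorflow as tf

def step_uniform(shape, op_offset, step):
    # Seed word 0 identifies the op; word 1 tracks the training step.
    seed = tf.cast(tf.stack([op_offset, step]), tf.int32)
    return tf.random.stateless_uniform(shape, seed=seed)

print(step_uniform([2], op_offset=7, step=0))  # identical on every run
print(step_uniform([2], op_offset=7, step=1))  # differs from step 0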
Example #6
def testRandomUniformIsInRange(self):
  with self.cached_session() as sess, self.test_scope():
    for dtype in self._random_types():
      seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
      x = stateless.stateless_random_uniform(
          shape=[1000], seed=seed_t, dtype=dtype)
      y = sess.run(x, {seed_t: [0x12345678, 0xabcdef12]})
      self.assertTrue(np.all(y >= 0))
      self.assertTrue(np.all(y < 1))
Example #7
def testRandomUniformIsInRange(self):
  with self.session() as sess, self.test_scope():
    for dtype in self._random_types(include_int=True):
      maxval = 1
      if dtype.is_integer:
        maxval = 100
      seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
      x = stateless.stateless_random_uniform(
          shape=[1000], seed=seed_t, maxval=maxval, dtype=dtype)
      y = sess.run(x, {seed_t: [0x12345678, 0xabcdef1]})
      self.assertTrue(np.all(y >= 0))
      self.assertTrue(np.all(y < maxval))
Example #8
  def testNDimensional(self):
    with self.test_scope():
      shape = [77, 10, 3, 5, 7]
      superdiag = random.stateless_random_uniform(
          shape=shape[:-1], dtype=dtypes.float32, seed=[5, 10])
      maindiag = random.stateless_random_uniform(
          shape=shape[:-1], dtype=dtypes.float32, seed=[5, 11])
      subdiag = random.stateless_random_uniform(
          shape=shape[:-1], dtype=dtypes.float32, seed=[5, 12])
      rhs = random.stateless_random_uniform(
          shape=shape, dtype=dtypes.float32, seed=[5, 13])

      expected = self._tridiagonal_matmul((superdiag, maindiag, subdiag),
                                          rhs,
                                          diagonals_format='sequence')

      real = self._jit_tridiagonal_matmul((superdiag, maindiag, subdiag),
                                          rhs,
                                          diagonals_format='sequence')

      self.assertAllClose(expected, real)
Example #9
def main(unused_argv):
    # Build data pipelines.
    print('Loading data.')
    x_train, y_train, x_test, y_test = \
        datasets.get_dataset('mnist', FLAGS.train_size, FLAGS.test_size)

    # Build the network
    init_fn, apply_fn, _ = stax.serial(stax.Dense(512, 1., 0.05), stax.Erf(),
                                       stax.Dense(10, 1., 0.05))

    key = random.stateless_random_uniform(shape=[2],
                                          seed=[0, 0],
                                          minval=None,
                                          maxval=None,
                                          dtype=np.int32)
    _, params = init_fn(key, (1, 784))

    # Create and initialize an optimizer.
    opt_init, opt_apply, get_params = optimizers.sgd(FLAGS.learning_rate)
    state = opt_init(params)

    # Create an mse loss function and a gradient function.
    loss = lambda fx, y_hat: 0.5 * np.mean((fx - y_hat)**2)
    grad_loss = jit(grad(lambda params, x, y: loss(apply_fn(params, x), y)))

    # Create an MSE predictor to solve the NTK equation in function space.
    ntk = nt.batch(nt.empirical_ntk_fn(apply_fn), batch_size=4, device_count=0)
    g_dd = ntk(x_train, None, params)
    g_td = ntk(x_test, x_train, params)
    predictor = nt.predict.gradient_descent_mse(g_dd, y_train)

    # Get initial values of the network in function space.
    fx_train = apply_fn(params, x_train)
    fx_test = apply_fn(params, x_test)

    # Train the network.
    train_steps = int(FLAGS.train_time // FLAGS.learning_rate)
    print('Training for {} steps'.format(train_steps))

    for i in range(train_steps):
        params = get_params(state)
        state = opt_apply(i, grad_loss(params, x_train, y_train), state)

    # Get predictions from analytic computation.
    print('Computing analytic prediction.')
    fx_train, fx_test = predictor(FLAGS.train_time, fx_train, fx_test, g_td)

    # Print out summary data comparing the linear / nonlinear model.
    util.print_summary('train', y_train, apply_fn(params, x_train), fx_train,
                       loss)
    util.print_summary('test', y_test, apply_fn(params, x_test), fx_test, loss)
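One detail worth flagging in the snippet above: passing `minval=None` and `maxval=None` with an integer dtype makes the stateless op return full-range random bits rather than values in a half-open interval, which is how the script mints a deterministic two-word key for stax's `init_fn`. A minimal sketch, assuming TF 2.x:

import tensorflow as tf

# minval=maxval=None with an integer dtype yields full-range bits.
key = tf.random.stateless_uniform(
    shape=[2], seed=[0, 0], minval=None, maxval=None, dtype=tf.int32)
print(key)  # the same two int32 words every run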
Example #10
def testDistributionOfStatelessRandomUniform(self):
  """Use Pearson's Chi-squared test to test for uniformity."""
  with self.cached_session() as sess, self.test_scope():
    for dtype in self._random_types():
      seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
      n = 1000
      x = stateless.stateless_random_uniform(
          shape=[n], seed=seed_t, dtype=dtype)
      y = sess.run(x, {seed_t: [565656, 121212]})
      # Tests that the values are distributed amongst 10 bins with equal
      # probability. 16.92 is the Chi^2 value for 9 degrees of freedom with
      # p=0.05. This test is probabilistic and would be flaky if the random
      # seed were not fixed.
      self.assertTrue(self._chi_squared(y, 10) < 16.92)
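The `self._chi_squared` helper is not shown in the snippet; here is a hedged numpy sketch of an equivalent Pearson statistic over `bins` equal-width bins on [0, 1) (an assumed implementation, not the original):

import numpy as np

def chi_squared(x, bins):
    # Pearson statistic: sum over bins of (observed - expected)^2 / expected.
    counts, _ = np.histogram(x, bins=bins, range=(0.0, 1.0))
    expected = len(x) / float(bins)
    return np.sum((counts - expected) ** 2 / expected)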
Example #11
def stateless_random_crop(value, size, seed, name=None):
  """Randomly crops a tensor to a given size in a deterministic manner.

  Slices a shape `size` portion out of `value` at a uniformly chosen offset.
  Requires `value.shape >= size`.

  If a dimension should not be cropped, pass the full size of that dimension.
  For example, RGB images can be cropped with
  `size = [crop_height, crop_width, 3]`.

  Guarantees the same results given the same `seed` independent of how many
  times the function is called, and independent of global seed settings (e.g.
  `tf.random.set_seed`).

  Usage Example:

  >>> image = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_crop(value=image, size=(1, 2, 3), seed=seed)
  <tf.Tensor: shape=(1, 2, 3), dtype=int32, numpy=
  array([[[1, 2, 3],
          [4, 5, 6]]], dtype=int32)>

  Args:
    value: Input tensor to crop.
    size: 1-D tensor with size the rank of `value`.
    seed: A shape [2] Tensor, the seed to the random number generator. Must have
      dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
    name: A name for this operation (optional).

  Returns:
    A cropped tensor of the same rank as `value` and shape `size`.
  """
  with ops.name_scope(name, "random_crop", [value, size]) as name:
    value = ops.convert_to_tensor(value, name="value")
    size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
    shape = array_ops.shape(value)
    check = control_flow_ops.Assert(
        math_ops.reduce_all(shape >= size),
        ["Need value.shape >= size, got ", shape, size],
        summarize=1000)
    shape = control_flow_ops.with_dependencies([check], shape)
    limit = shape - size + 1
    offset = stateless_random_ops.stateless_random_uniform(
        array_ops.shape(shape),
        dtype=size.dtype,
        maxval=size.dtype.max,
        seed=seed) % limit
    return array_ops.slice(value, offset, size, name=name)
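To make the docstring's determinism guarantee concrete, a small sketch (assuming TF 2.3+, where this function is exposed as `tf.image.stateless_random_crop`):

import tensorflow as tf

image = tf.reshape(tf.range(24), [4, 6, 1])
a = tf.image.stateless_random_crop(image, size=[2, 3, 1], seed=(7, 9))
b = tf.image.stateless_random_crop(image, size=[2, 3, 1], seed=(7, 9))
assert bool(tf.reduce_all(a == b))  # same seed, same crop, every call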
Example #12
def random_cropped_inputs():
  """Cropped inputs with stateless random ops."""
  input_shape = array_ops.shape(inputs)
  crop_size = array_ops.stack(
      [input_shape[0], self.height, self.width, input_shape[3]])
  check = control_flow_ops.Assert(
      math_ops.reduce_all(input_shape >= crop_size),
      [self.height, self.width])
  input_shape = control_flow_ops.with_dependencies([check], input_shape)
  limit = input_shape - crop_size + 1
  offset = stateless_random_ops.stateless_random_uniform(
      array_ops.shape(input_shape),
      dtype=crop_size.dtype,
      maxval=crop_size.dtype.max,
      seed=self._rng.make_seeds()[:, 0]) % limit
  return array_ops.slice(inputs, offset, crop_size)
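The `self._rng.make_seeds()[:, 0]` expression above mints a fresh `[2]`-shaped seed from a `tf.random.Generator`; a hedged standalone sketch of that pattern:

import tensorflow as tf

rng = tf.random.Generator.from_seed(1234)
seed = rng.make_seeds(1)[:, 0]  # make_seeds returns shape [2, count]
sample = tf.random.stateless_uniform([3], seed=seed)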
Example #13
def testDistributionOfStatelessRandomUniform(self):
  """Use Pearson's Chi-squared test to test for uniformity."""
  with self.session() as sess, self.test_scope():
    for dtype in self._random_types(include_int=True):
      seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
      n = 1000
      maxval = 1
      if dtype.is_integer:
        maxval = 100
      x = stateless.stateless_random_uniform(
          shape=[n], seed=seed_t, maxval=maxval, dtype=dtype)
      y = sess.run(x, {seed_t: [565656, 121212]})
      # Convert y to float and normalize its value to range [0, 1) when
      # maxval != 1.
      y = y.astype(float) / maxval
      # Tests that the values are distributed amongst 10 bins with equal
      # probability. 16.92 is the Chi^2 value for 9 degrees of freedom with
      # p=0.05. This test is probabilistic and would be flaky if the random
      # seed were not fixed.
      self.assertLess(random_test_util.chi_squared(y, 10), 16.92)
Example #14
def testDistributionOfStatelessRandomUniform(self):
  """Use Pearson's Chi-squared test to test for uniformity."""
  with self.cached_session() as sess, self.test_scope():
    for dtype in self._random_types(include_int=True):
      seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
      n = 1000
      maxval = 1
      if dtype.is_integer:
        maxval = 100
      x = stateless.stateless_random_uniform(
          shape=[n], seed=seed_t, maxval=maxval, dtype=dtype)
      y = sess.run(x, {seed_t: [565656, 121212]})
      if maxval > 1:
        # Normalize y to range [0, 1).
        y = y.astype(float) / maxval
      # Tests that the values are distributed amongst 10 bins with equal
      # probability. 16.92 is the Chi^2 value for 9 degrees of freedom with
      # p=0.05. This test is probabilistic and would be flaky if the random
      # seed were not fixed.
      self.assertLess(random_test_util.chi_squared(y, 10), 16.92)
Example #15
def main(unused_argv):
    # Build data pipelines.
    print('Loading data.')
    x_train, y_train, x_test, y_test = datasets.get_dataset('mnist',
                                                            permute_train=True)

    # Build the network
    init_fn, f, _ = stax.serial(stax.Dense(512, 1., 0.05), stax.Erf(),
                                stax.Dense(10, 1., 0.05))

    key = random.stateless_random_uniform(shape=[2],
                                          seed=[0, 0],
                                          minval=None,
                                          maxval=None,
                                          dtype=np.int32)
    _, params = init_fn(key, (1, 784))

    # Linearize the network about its initial parameters.
    f_lin = nt.linearize(f, params)

    # Create and initialize an optimizer for both f and f_lin.
    opt_init, opt_apply, get_params = optimizers.momentum(
        FLAGS.learning_rate, 0.9)
    opt_apply = jit(opt_apply)

    state = opt_init(params)
    state_lin = opt_init(params)

    # Create a cross-entropy loss function.
    loss = lambda fx, y_hat: -np.mean(logsoftmax(fx) * y_hat)

    # Specialize the loss function to compute gradients for both linearized and
    # full networks.
    grad_loss = jit(grad(lambda params, x, y: loss(f(params, x), y)))
    grad_loss_lin = jit(grad(lambda params, x, y: loss(f_lin(params, x), y)))

    # Train the network.
    print('Training.')
    print('Epoch\tLoss\tLinearized Loss')
    print('------------------------------------------')

    epoch = 0
    steps_per_epoch = 50000 // FLAGS.batch_size

    for i, (x, y) in enumerate(
            datasets.minibatch(x_train, y_train, FLAGS.batch_size,
                               FLAGS.train_epochs)):

        params = get_params(state)
        state = opt_apply(i, grad_loss(params, x, y), state)

        params_lin = get_params(state_lin)
        state_lin = opt_apply(i, grad_loss_lin(params_lin, x, y), state_lin)

        if i % steps_per_epoch == 0:
            print('{}\t{}\t{}'.format(epoch, loss(f(params, x), y),
                                      loss(f_lin(params_lin, x), y)))
            epoch += 1

    # Print out summary data comparing the linear / nonlinear model.
    x, y = x_train[:10000], y_train[:10000]
    util.print_summary('train', y, f(params, x), f_lin(params_lin, x), loss)
    util.print_summary('test', y_test, f(params, x_test),
                       f_lin(params_lin, x_test), loss)