def randn_sampler_switchover(shape, num_iters, use_gpu=False):
  # Benchmark by constructing samplers with bounds just above and just below
  # the threshold at which the randn rejection sampler takes over, to check
  # that this threshold is set correctly. The uniform and randn samplers
  # should have roughly the same performance at this point.

  stddev_inside_bounds_before_using_randn = (
      _get_stddev_inside_bounds_before_using_randn(use_gpu))

  epsilon = 0.001

  np.random.seed(1618)  # Make it reproducible.

  # No CSE/CF.
  optimizer_options = config_pb2.OptimizerOptions(
      opt_level=config_pb2.OptimizerOptions.L0)
  config = config_pb2.ConfigProto(
      graph_options=config_pb2.GraphOptions(
          optimizer_options=optimizer_options))

  with session.Session(config=config) as sess:
    with ops.device("/cpu:0" if not use_gpu else "/gpu:0"):
      uniform_sampler_op = control_flow_ops.group(
          random_ops.parameterized_truncated_normal(
              shape,
              means=0.,
              stddevs=1.0,
              minvals=-stddev_inside_bounds_before_using_randn + epsilon,
              maxvals=0.01))
      randn_sampler_op = control_flow_ops.group(
          random_ops.parameterized_truncated_normal(
              shape,
              means=0.,
              stddevs=1.0,
              minvals=-stddev_inside_bounds_before_using_randn - epsilon,
              maxvals=0.01))

    # Burn-in to avoid session setup costs in the timing.
    sess.run(uniform_sampler_op)
    sess.run(uniform_sampler_op)
    uniform_dt = timeit.timeit(
        lambda: sess.run(uniform_sampler_op), number=num_iters)

    sess.run(randn_sampler_op)
    sess.run(randn_sampler_op)
    randn_dt = timeit.timeit(
        lambda: sess.run(randn_sampler_op), number=num_iters)

    return randn_dt, uniform_dt
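A hypothetical driver for the benchmark above, assuming randn_sampler_switchover and its _get_stddev_inside_bounds_before_using_randn helper are importable from this file; the shapes and iteration count are illustrative only.

def report_randn_switchover(num_iters=50):
  # Near the switchover threshold the two timings should be of the same
  # order of magnitude; a large ratio suggests the threshold is mistuned.
  for shape in ([int(1e5)], [int(1e6)]):
    randn_dt, uniform_dt = randn_sampler_switchover(shape, num_iters)
    print("shape=%s randn=%.4fs uniform=%.4fs ratio=%.2f" %
          (shape, randn_dt, uniform_dt, randn_dt / uniform_dt))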
 def testParameterizedTruncatedNormalBatched(self):
     # TODO(b/112289993): Make this test work with dtype np.float64.
     for dtype in self._random_types() & {np.float32}:
         with self.session():
             with self.test_scope():
                 count = 10000000
                 a = -100.
                 b = 100.
                 mu0 = 0.
                 mu1 = 1.
                 sigma = .1
                 x = random_ops.parameterized_truncated_normal(
                     shape=[2, count],
                     dtype=dtype,
                     means=[mu0, mu1],
                     stddevs=sigma,
                     minvals=[a],
                     maxvals=[b])
             self._checkTruncatedNormalIsInRange(x[0],
                                                 a=a,
                                                 b=b,
                                                 mu=mu0,
                                                 sigma=sigma,
                                                 count=count,
                                                 stat_test=True)
             self._checkTruncatedNormalIsInRange(x[1],
                                                 a=a,
                                                 b=b,
                                                 mu=mu1,
                                                 sigma=sigma,
                                                 count=count,
                                                 stat_test=True)
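For a framework-independent reference, the same batched draw can be approximated with scipy.stats.truncnorm. Note that scipy takes truncation bounds in standardized units, whereas the op's minvals/maxvals are in data units; the sketch below (illustrative sizes, assuming scipy is installed) makes that conversion explicit.

import scipy.stats

count, a, b, sigma = 10000, -100., 100., 0.1
for mu in (0., 1.):
  # scipy expects (bound - mean) / stddev, i.e. standardized bounds.
  ref = scipy.stats.truncnorm.rvs(
      (a - mu) / sigma, (b - mu) / sigma, loc=mu, scale=sigma, size=count)
  assert ((ref >= a) & (ref <= b)).all()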
  def validateKolmogorovSmirnov(self,
                                shape,
                                mean,
                                stddev,
                                minval,
                                maxval,
                                seed=1618):
    try:
      import scipy.stats  # pylint: disable=g-import-not-at-top
      random_seed.set_random_seed(seed)
      with self.test_session(use_gpu=True):
        samples = random_ops.parameterized_truncated_normal(shape, mean, stddev,
                                                            minval,
                                                            maxval).eval()
      assert (~np.isnan(samples)).all()
      minval = max(mean - stddev * 10, minval)
      maxval = min(mean + stddev * 10, maxval)
      dist = scipy.stats.norm(loc=mean, scale=stddev)
      cdf_min = dist.cdf(minval)
      cdf_max = dist.cdf(maxval)

      def truncated_cdf(x):
        return np.clip((dist.cdf(x) - cdf_min) / (cdf_max - cdf_min), 0.0, 1.0)

      pvalue = scipy.stats.kstest(samples, truncated_cdf)[1]
      self.assertGreater(pvalue, 1e-10)
    except ImportError as e:
      tf_logging.warn("Cannot test truncated normal op: %s" % str(e))
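The truncated_cdf defined above is the same clipped-and-renormalized CDF that scipy.stats.truncnorm encodes directly, so the KS check can be cross-validated without TensorFlow. A minimal sketch with illustrative parameters, assuming scipy is available:

import scipy.stats

mean, stddev, minval, maxval = 0.0, 1.0, -1.5, 2.0
# truncnorm takes standardized bounds: (bound - mean) / stddev.
a, b = (minval - mean) / stddev, (maxval - mean) / stddev
dist = scipy.stats.truncnorm(a, b, loc=mean, scale=stddev)
samples = dist.rvs(size=100000, random_state=1618)
# dist.cdf plays the role of truncated_cdf in the test above.
pvalue = scipy.stats.kstest(samples, dist.cdf)[1]
assert pvalue > 1e-10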
    def testSamplingWithSmallStdDevFarFromBound(self):
        sample_op = random_ops.parameterized_truncated_normal(
            shape=(int(1e5), ),
            means=0.8,
            stddevs=0.05,
            minvals=-1.,
            maxvals=1.)
        new_seed = random_ops.random_uniform([2],
                                             seed=1234,
                                             minval=0,
                                             maxval=(2**31 - 1),
                                             dtype=np.int32)
        sample_op_stateless = stateless.stateless_parameterized_truncated_normal(
            shape=(int(1e5), ),
            seed=new_seed,
            means=0.8,
            stddevs=0.05,
            minvals=-1.,
            maxvals=1.)

        with self.session() as sess:
            samples, samples_stateless = sess.run(
                [sample_op, sample_op_stateless])
            # 0. is more than 16 standard deviations from the mean, and
            # should have a likelihood < 1e-57.
            assert (~np.isnan(samples)).all()
            assert (~np.isnan(samples_stateless)).all()
            self.assertAllGreater(samples, 0.)
            self.assertAllGreater(samples_stateless, 0.)
    def testShapeTypes(self):
        for shape_dtype in [np.int32, np.int64]:
            shape = np.array([1000], dtype=shape_dtype)
            sample_op = random_ops.parameterized_truncated_normal(shape=shape,
                                                                  means=0.0,
                                                                  stddevs=0.1,
                                                                  minvals=-1.,
                                                                  maxvals=1.)
            new_seed = random_ops.random_uniform([2],
                                                 seed=1234,
                                                 minval=0,
                                                 maxval=(2**31 - 1),
                                                 dtype=np.int32)
            sample_op_stateless = stateless.stateless_parameterized_truncated_normal(
                shape=shape,
                seed=new_seed,
                means=0.0,
                stddevs=0.1,
                minvals=-1.,
                maxvals=1.)

            samples = self.evaluate(sample_op)
            stateless_samples = self.evaluate(sample_op_stateless)
            self.assertAllEqual(samples.shape, shape)
            self.assertAllEqual(stateless_samples.shape, shape)
Example #7
  def validateKolmogorovSmirnov(self,
                                shape,
                                mean,
                                stddev,
                                minval,
                                maxval,
                                seed=1618):
    try:
      import scipy.stats  # pylint: disable=g-import-not-at-top
      tf.set_random_seed(seed)
      with self.test_session(use_gpu=self.use_gpu):
        samples = random_ops.parameterized_truncated_normal(
            shape, mean, stddev, minval, maxval).eval()
      minval = max(mean - stddev * 10, minval)
      maxval = min(mean + stddev * 10, maxval)
      dist = scipy.stats.norm(loc=mean, scale=stddev)
      cdf_min = dist.cdf(minval)
      cdf_max = dist.cdf(maxval)

      def truncated_cdf(x):
        return np.clip((dist.cdf(x) - cdf_min) / (cdf_max - cdf_min), 0.0, 1.0)

      pvalue = scipy.stats.kstest(samples, truncated_cdf)[1]
      self.assertGreater(pvalue, 1e-10)
    except ImportError as e:
      tf.logging.warn("Cannot test truncated normal op: %s" % str(e))
Example #8
  def _sample_n(self, n, seed=None):
    sample_and_batch_shape = tf.concat([[n], self.batch_shape_tensor()], 0)
    flat_batch_and_sample_shape = tf.stack([
        tf.reduce_prod(self.batch_shape_tensor()), n])

    # To keep sampling reparameterizable, we draw from a standard truncated
    # normal (zero mean, unit variance, but with the standardized truncation
    # bounds) and then scale and shift the result.

    std_samples = random_ops.parameterized_truncated_normal(
        shape=flat_batch_and_sample_shape,
        means=0.0,
        stddevs=1.0,
        minvals=tf.reshape(self._standardized_low, [-1]),
        maxvals=tf.reshape(self._standardized_high, [-1]),
        dtype=self.dtype,
        seed=seed)

    # The returned shape is [flat_batch x n]
    std_samples = tf.transpose(std_samples, [1, 0])

    std_samples = tf.reshape(std_samples, sample_and_batch_shape)
    samples = (std_samples * tf.expand_dims(self._scale, axis=0) +
               tf.expand_dims(self._loc, axis=0))
    return samples
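The transpose/reshape bookkeeping at the end is easiest to follow with concrete numbers. A plain NumPy sketch (shapes are illustrative, not from the source):

import numpy as np

batch_shape, n = (2, 3), 4
# The op returns [flat_batch, n]; the sample dimension is last.
flat = np.zeros((np.prod(batch_shape), n))               # shape (6, 4)
per_sample_first = np.transpose(flat, (1, 0))            # shape (4, 6)
samples = per_sample_first.reshape((n,) + batch_shape)   # shape (4, 2, 3)
assert samples.shape == (n,) + batch_shape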
Example #9
    def _sample_n(self, n, seed=None):
        sample_and_batch_shape = tf.concat([[n], self.batch_shape_tensor()], 0)
        flat_batch_and_sample_shape = tf.stack(
            [tf.reduce_prod(self.batch_shape_tensor()), n])

        # To keep sampling reparameterizable, we draw from a standard truncated
        # normal (zero mean, unit variance, but with the standardized
        # truncation bounds) and then scale and shift the result.

        std_samples = random_ops.parameterized_truncated_normal(
            shape=flat_batch_and_sample_shape,
            means=0.0,
            stddevs=1.0,
            minvals=tf.reshape(self._standardized_low, [-1]),
            maxvals=tf.reshape(self._standardized_high, [-1]),
            dtype=self.dtype,
            seed=seed)

        # The returned shape is [flat_batch x n]
        std_samples = tf.transpose(std_samples, [1, 0])

        std_samples = tf.reshape(std_samples, sample_and_batch_shape)
        samples = (std_samples * tf.expand_dims(self._scale, axis=0) +
                   tf.expand_dims(self._loc, axis=0))
        return samples
Example #10
        def _std_samples_with_gradients(lower, upper):
            """Standard truncated Normal with gradient support for low, high."""
            # Note: Unlike the convention in tf_probability,
            # parameterized_truncated_normal returns a tensor with the final dimension
            # being the sample dimension.
            std_samples = random_ops.parameterized_truncated_normal(
                shape=flat_batch_and_sample_shape,
                means=0.0,
                stddevs=1.0,
                minvals=lower,
                maxvals=upper,
                dtype=self.dtype,
                seed=seed)

            def grad(dy):
                """Computes a derivative for the min and max parameters.

        This function implements the derivative wrt the truncation bounds, which
        get blocked by the sampler. We use a custom expression for numerical
        stability instead of automatic differentiation on CDF for implicit
        gradients.

        Args:
          dy: output gradients

        Returns:
           The standard normal samples and the gradients wrt the upper
           bound and lower bound.
        """
                # std_samples has an extra dimension (the sample dimension), expand
                # lower and upper so they broadcast along this dimension.
                # See note above regarding parameterized_truncated_normal, the sample
                # dimension is the final dimension.
                lower_broadcast = lower[..., tf.newaxis]
                upper_broadcast = upper[..., tf.newaxis]

                cdf_samples = ((special_math.ndtr(std_samples) -
                                special_math.ndtr(lower_broadcast)) /
                               (special_math.ndtr(upper_broadcast) -
                                special_math.ndtr(lower_broadcast)))

                # tiny, eps are tolerance parameters to ensure we stay away from giving
                # a zero arg to the log CDF expression.

                tiny = np.finfo(self.dtype.as_numpy_dtype).tiny
                eps = np.finfo(self.dtype.as_numpy_dtype).eps
                cdf_samples = tf.clip_by_value(cdf_samples, tiny, 1 - eps)

                du = tf.exp(0.5 * (std_samples**2 - upper_broadcast**2) +
                            tf.log(cdf_samples))
                dl = tf.exp(0.5 * (std_samples**2 - lower_broadcast**2) +
                            tf.log1p(-cdf_samples))

                # Reduce the gradient across the samples
                grad_u = tf.reduce_sum(dy * du, axis=-1)
                grad_l = tf.reduce_sum(dy * dl, axis=-1)
                return [grad_l, grad_u]

            return std_samples, grad
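The du/dl expressions above follow from differentiating the inverse-CDF reparameterization x = Phi^{-1}(Phi(lower) + p * (Phi(upper) - Phi(lower))) at fixed uniform p, where p equals cdf_samples. A small NumPy/scipy finite-difference sanity check of that identity (values are illustrative):

import numpy as np
from scipy.stats import norm

lower, upper, p, h = -0.3, 1.2, 0.4, 1e-6

def x_of(l, u):
  # Inverse-CDF map for a fixed uniform draw p.
  return norm.ppf(norm.cdf(l) + p * (norm.cdf(u) - norm.cdf(l)))

x0 = x_of(lower, upper)
# dx/du = p * phi(u) / phi(x) = p * exp((x^2 - u^2) / 2)             -> `du` above
# dx/dl = (1 - p) * phi(l) / phi(x) = (1 - p) * exp((x^2 - l^2) / 2) -> `dl` above
du_analytic = p * np.exp(0.5 * (x0**2 - upper**2))
dl_analytic = (1.0 - p) * np.exp(0.5 * (x0**2 - lower**2))
np.testing.assert_allclose(
    (x_of(lower, upper + h) - x_of(lower, upper - h)) / (2 * h),
    du_analytic, rtol=1e-4)
np.testing.assert_allclose(
    (x_of(lower + h, upper) - x_of(lower - h, upper)) / (2 * h),
    dl_analytic, rtol=1e-4)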
 def _sample_n(self, n, seed=None):
     shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
     samples = parameterized_truncated_normal(shape,
                                              means=self.loc,
                                              stddevs=self.scale,
                                              minvals=self.minval,
                                              maxvals=self.maxval,
                                              dtype=self.loc.dtype,
                                              seed=seed)
     return samples
  def testSamplingWithSmallStdDevFarFromBound(self):
    sample_op = random_ops.parameterized_truncated_normal(
        shape=(int(1e5),), means=0.8, stddevs=0.05, minvals=-1., maxvals=1.)

    with self.session(use_gpu=True) as sess:
      samples = sess.run(sample_op)
      # 0. is more than 16 standard deviations from the mean, and
      # should have a likelihood < 1e-57.
      assert (~np.isnan(samples)).all()
      no_neg_samples = np.sum(samples < 0.)
      self.assertEqual(no_neg_samples, 0.)
def parameterized_vs_naive(shape, num_iters):
    np.random.seed(1618)  # Make it reproducible.

    # No CSE/CF.
    optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
    config = tf.ConfigProto(graph_options=tf.GraphOptions(optimizer_options=optimizer_options))

    with tf.Session(config=config) as sess:
        param_op = tf.group(random_ops.parameterized_truncated_normal(shape))
        naive_op = tf.group(random_ops.truncated_normal(shape))

        param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
        naive_dt = timeit.timeit(lambda: sess.run(naive_op), number=num_iters)
        return param_dt, naive_dt
Example #15
def parameterized_vs_naive(shape, num_iters):
  np.random.seed(1618)  # Make it reproducible.

  # No CSE/CF.
  optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
  config = tf.ConfigProto(
      graph_options=tf.GraphOptions(optimizer_options=optimizer_options))

  with tf.Session(config=config) as sess:
    param_op = tf.group(random_ops.parameterized_truncated_normal(shape))
    naive_op = tf.group(random_ops.truncated_normal(shape))

    param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
    naive_dt = timeit.timeit(lambda: sess.run(naive_op), number=num_iters)
    return param_dt, naive_dt
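A hypothetical driver for the comparison above; the shapes and iteration count are illustrative, and parameterized_vs_naive is the function defined in this snippet.

def main():
  num_iters = 50
  for shape in ([1000], [100, 1000], [1000, 1000]):
    param_dt, naive_dt = parameterized_vs_naive(shape, num_iters)
    print("shape=%s parameterized=%.4fs naive=%.4fs" %
          (shape, param_dt, naive_dt))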
Example #16
 def _implParameterizedTruncatedNormalIsInRange(self, a, b, mu, sigma, count,
                                                stat_test):
   # TODO(b/34339814): make this test work with 16 bit float types.
   for dtype in self._random_types() & {np.float32, np.float64}:
     with self.session():
       with self.test_scope():
         x = random_ops.parameterized_truncated_normal(
             shape=[count],
             dtype=dtype,
             means=mu,
             stddevs=sigma,
             minvals=a,
             maxvals=b)
       self._checkTruncatedNormalIsInRange(
           x, a=a, b=b, mu=mu, sigma=sigma, count=count, stat_test=stat_test)
    def validateMoments(self, shape, mean, stddev, minval, maxval, seed=1618):
        try:
            # TruncatedNormalMoments requires scipy.stats.
            # Give up early if we are unable to import it.
            import scipy.stats  # pylint: disable=g-import-not-at-top,unused-variable

            tf.set_random_seed(seed)
            with self.test_session(use_gpu=self.use_gpu):
                samples = random_ops.parameterized_truncated_normal(shape, mean, stddev, minval, maxval).eval()
            moments = calculate_moments(samples, self.max_moment)
            expected_moments = TruncatedNormalMoments(mean, stddev, minval, maxval)
            num_samples = functools.reduce(lambda x, y: x * y, shape, 1)
            for i in range(1, len(moments)):
                self.assertLess(z_test(moments, expected_moments, i, num_samples), self.z_limit)
        except ImportError as e:
            tf.logging.warn("Cannot test truncated normal op: %s" % str(e))
    def testSamplingWithSmallStdDevFarFromBound(self):
        sample_op = random_ops.parameterized_truncated_normal(
            shape=(int(1e5), ),
            means=0.8,
            stddevs=0.05,
            minvals=-1.,
            maxvals=1.)

        with self.test_session(use_gpu=True) as sess:
            samples = sess.run(sample_op)
            # 0. is more than 16 standard deviations from the mean, and
            # should have a likelihood < 1e-57.
            # TODO(jjhunt): The sampler is still numerically unstable in this
            # case; numbers less than 0 should never be observed.
            no_neg_samples = np.sum(samples < 0.)
            self.assertLess(no_neg_samples, 2.)
Example #19
 def validateMoments(self, shape, mean, stddev, minval, maxval, seed=1618):
   try:
     # TruncatedNormalMoments requires scipy.stats.
     # Give up early if we are unable to import it.
     import scipy.stats  # pylint: disable=g-import-not-at-top,unused-variable
     tf.set_random_seed(seed)
     with self.test_session(use_gpu=self.use_gpu):
       samples = random_ops.parameterized_truncated_normal(
           shape, mean, stddev, minval, maxval).eval()
     moments = calculate_moments(samples, self.max_moment)
     expected_moments = TruncatedNormalMoments(mean, stddev, minval, maxval)
     num_samples = functools.reduce(lambda x, y: x * y, shape, 1)
     for i in range(1, len(moments)):
       self.assertLess(
           z_test(moments, expected_moments, i, num_samples), self.z_limit)
   except ImportError as e:
     tf.logging.warn("Cannot test truncated normal op: %s" % str(e))
    def _std_samples_with_gradients(lower, upper):
      """Standard truncated Normal with gradient support for low, high."""
      std_samples = random_ops.parameterized_truncated_normal(
          shape=flat_batch_and_sample_shape,
          means=0.0,
          stddevs=1.0,
          minvals=lower,
          maxvals=upper,
          dtype=self.dtype,
          seed=seed)

      def grad(dy):
        """Computes a derivative for the min and max parameters.

        This function implements the derivative wrt the truncation bounds, which
        get blocked by the sampler. We use a custom expression for numerical
        stability instead of automatic differentiation on CDF for implicit
        gradients.

        Args:
          dy: output gradients

        Returns:
           The standard normal samples and the gradients wrt the upper
           bound and lower bound.
        """
        cdf_samples = ((special_math.ndtr(std_samples) -
                        special_math.ndtr(lower)) /
                       (special_math.ndtr(upper) - special_math.ndtr(lower)))

        # tiny, eps are tolerance parameters to ensure we stay away from giving
        # a zero arg to the log CDF expression.

        tiny = np.finfo(self.dtype.as_numpy_dtype).tiny
        eps = np.finfo(self.dtype.as_numpy_dtype).eps
        cdf_samples = tf.clip_by_value(cdf_samples, tiny, 1 - eps)

        du = tf.exp(0.5 * (std_samples**2 - upper**2) + tf.log(cdf_samples))
        dl = tf.exp(0.5 * (std_samples**2 - lower**2) + tf.log(1 - cdf_samples))

        # Reduce the gradient across the samples
        grad_u = tf.reduce_sum(dy * du, axis=-1)
        grad_l = tf.reduce_sum(dy * dl, axis=-1)
        return [grad_l, grad_u]

      return std_samples, grad
Example #21
 def testParameterizedTruncatedNormalBroadcasting(self):
   for dtype in self._random_types() & {np.float32, np.float64}:
     with self.session():
       with self.test_scope():
         a = -1.
         b = 1.
         mu = 0.
         sigma = 1.
         count = 10000000
         x = random_ops.parameterized_truncated_normal(
             shape=[1, count],
             dtype=dtype,
             means=mu,
             stddevs=sigma,
             minvals=[a],
             maxvals=[b])
       self._checkTruncatedNormalIsInRange(
           x, a=a, b=b, mu=mu, sigma=sigma, count=count, stat_test=True)
def parameterized_vs_naive(shape, num_iters, use_gpu=False):
    np.random.seed(1618)  # Make it reproducible.

    # No CSE/CF.
    optimizer_options = config_pb2.OptimizerOptions(opt_level=config_pb2.OptimizerOptions.L0)
    config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(optimizer_options=optimizer_options))

    with session.Session(config=config) as sess:
        with ops.device("/cpu:0" if not use_gpu else None):
            param_op = control_flow_ops.group(random_ops.parameterized_truncated_normal(shape))
            naive_op = control_flow_ops.group(random_ops.truncated_normal(shape))

        # Burn-in to avoid session setup costs in the timing.
        sess.run(param_op)
        sess.run(param_op)
        param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
        sess.run(naive_op)
        sess.run(naive_op)
        naive_dt = timeit.timeit(lambda: sess.run(naive_op), number=num_iters)
        return param_dt, naive_dt
    def validateKolmogorovSmirnov(self,
                                  shape,
                                  mean,
                                  stddev,
                                  minval,
                                  maxval,
                                  use_stateless=False,
                                  seed=1618):
        try:
            import scipy.stats  # pylint: disable=g-import-not-at-top
            random_seed.set_random_seed(seed)
            with self.cached_session():
                if use_stateless:
                    new_seed = random_ops.random_uniform([2],
                                                         seed=seed,
                                                         minval=0,
                                                         maxval=(2**31 - 1),
                                                         dtype=np.int32)
                    samples = stateless.stateless_parameterized_truncated_normal(
                        shape, new_seed, mean, stddev, minval, maxval).eval()
                else:
                    samples = random_ops.parameterized_truncated_normal(
                        shape, mean, stddev, minval, maxval).eval()

            assert (~np.isnan(samples)).all()
            minval = max(mean - stddev * 10, minval)
            maxval = min(mean + stddev * 10, maxval)
            dist = scipy.stats.norm(loc=mean, scale=stddev)
            cdf_min = dist.cdf(minval)
            cdf_max = dist.cdf(maxval)

            def truncated_cdf(x):
                return np.clip((dist.cdf(x) - cdf_min) / (cdf_max - cdf_min),
                               0.0, 1.0)

            pvalue = scipy.stats.kstest(samples, truncated_cdf)[1]
            self.assertGreater(pvalue, 1e-10)
        except ImportError as e:
            tf_logging.warn("Cannot test truncated normal op: %s" % str(e))
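One property of the stateless path exercised above is worth checking explicitly: with a fixed [2]-element seed the op is deterministic, so repeated calls return identical samples. A minimal sketch meant to live in a test class like the ones here (the method name is hypothetical), using the same stateless alias:

    def testStatelessDeterminism(self):
        seed = [123, 456]
        a = stateless.stateless_parameterized_truncated_normal(
            [1000], seed, 0.0, 1.0, -1.0, 1.0)
        b = stateless.stateless_parameterized_truncated_normal(
            [1000], seed, 0.0, 1.0, -1.0, 1.0)
        # Same seed and same parameters -> identical draws.
        self.assertAllEqual(self.evaluate(a), self.evaluate(b))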
def parameterized_vs_naive(shape, num_iters, use_gpu=False):
    np.random.seed(1618)  # Make it reproducible.

    # No CSE/CF.
    optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
    config = tf.ConfigProto(graph_options=tf.GraphOptions(
        optimizer_options=optimizer_options))

    with tf.Session(config=config) as sess:
        with tf.device("/cpu:0" if not use_gpu else None):
            param_op = tf.group(
                random_ops.parameterized_truncated_normal(shape))
            naive_op = tf.group(random_ops.truncated_normal(shape))

        # Burn-in to avoid session setup costs in the timing.
        sess.run(param_op)
        sess.run(param_op)
        param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
        sess.run(naive_op)
        sess.run(naive_op)
        naive_dt = timeit.timeit(lambda: sess.run(naive_op), number=num_iters)
        return param_dt, naive_dt
 def validateMoments(self,
                     shape,
                     mean,
                     stddev,
                     minval,
                     maxval,
                     use_stateless=False,
                     seed=1618):
     try:
         # TruncatedNormalMoments requires scipy.stats.
         # Give up early if we are unable to import it.
         random_seed.set_random_seed(seed)
         with self.cached_session():
             if use_stateless:
                 # Generate a seed that stateless ops can use.
                 new_seed = random_ops.random_uniform([2],
                                                      seed=seed,
                                                      minval=0,
                                                      maxval=(2**31 - 1),
                                                      dtype=np.int32)
                 samples = stateless.stateless_parameterized_truncated_normal(
                     shape, new_seed, mean, stddev, minval, maxval).eval()
             else:
                 samples = random_ops.parameterized_truncated_normal(
                     shape, mean, stddev, minval, maxval).eval()
             assert (~np.isnan(samples)).all()
         moments = calculate_moments(samples, self.max_moment)
         expected_moments = TruncatedNormalMoments(mean, stddev, minval,
                                                   maxval)
         num_samples = functools.reduce(lambda x, y: x * y, shape, 1)
         for i in range(1, len(moments)):
             self.assertLess(
                 z_test(moments, expected_moments, i, num_samples),
                 self.z_limit)
     except ImportError as e:
         tf_logging.warn("Cannot test truncated normal op: %s" % str(e))
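The helpers calculate_moments, TruncatedNormalMoments, and z_test are not shown in this snippet. As a self-contained stand-in, the first two moments can be sanity-checked against scipy.stats.truncnorm; a sketch with illustrative parameters, assuming scipy is installed:

import numpy as np
import scipy.stats

mean, stddev, minval, maxval = 0.0, 1.0, -0.5, 2.0
# truncnorm takes standardized bounds: (bound - mean) / stddev.
a, b = (minval - mean) / stddev, (maxval - mean) / stddev
dist = scipy.stats.truncnorm(a, b, loc=mean, scale=stddev)
expected_mean, expected_var = dist.stats(moments="mv")

samples = dist.rvs(size=200000, random_state=1618)
np.testing.assert_allclose(samples.mean(), expected_mean, atol=1e-2)
np.testing.assert_allclose(samples.var(), expected_var, atol=1e-2)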