# Presumed imports for these TensorFlow Probability hypothesis-driven
# property tests; the exact module paths are assumptions based on TFP's
# internal test layout. `stochastic_processes`, `tensor_to_f64`, and the
# module-level constants are defined elsewhere in the original files.
import hypothesis as hp
import tensorflow.compat.v2 as tf

from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import experimental as tfe
from tensorflow_probability.python.distributions import hypothesis_testlib as dhps
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math.psd_kernels import hypothesis_testlib as kernel_hps


  def testDistribution(self, dist_name, data):
    if dist_name in WORKING_PRECISION_TEST_BLOCK_LIST:
      self.skipTest('{} is blocked'.format(dist_name))

    def eligibility_filter(name):
      return name not in WORKING_PRECISION_TEST_BLOCK_LIST

    dist = data.draw(
        dhps.distributions(dist_name=dist_name,
                           eligibility_filter=eligibility_filter,
                           enable_vars=False,
                           validate_args=False))
    hp.note('Trying distribution {}'.format(
        self.evaluate_dict(dist.parameters)))
    seed = test_util.test_seed()
    # In float32, sampling and log_prob should stay in float32 (or int32 for
    # integer-valued samples).
    with tfp_hps.no_tf_rank_errors(), kernel_hps.no_pd_errors():
      samples = dist.sample(5, seed=seed)
      self.assertIn(samples.dtype, [tf.float32, tf.int32])
      self.assertEqual(dist.log_prob(samples).dtype, tf.float32)

    def log_prob_function(dist, x):
      return dist.log_prob(x)

    # Cast all floating-point Tensors inside the distribution to float64 and
    # check that log_prob then computes in float64 as well.
    dist64 = tf.nest.map_structure(tensor_to_f64,
                                   tfe.as_composite(dist),
                                   expand_composites=True)
    with tfp_hps.no_tf_rank_errors(), kernel_hps.no_pd_errors():
      result64 = log_prob_function(dist64, tensor_to_f64(samples))
    self.assertEqual(result64.dtype, tf.float64)
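  # `WORKING_PRECISION_TEST_BLOCK_LIST` and `tensor_to_f64` come from the
  # surrounding test module. A minimal sketch of the casting helper, assuming
  # it only needs to touch floating-point Tensors (the block-list contents
  # are elided):
  #
  #   WORKING_PRECISION_TEST_BLOCK_LIST = frozenset([...])
  #
  #   def tensor_to_f64(x):
  #     if tf.is_tensor(x) and x.dtype.is_floating:
  #       return tf.cast(x, dtype=tf.float64)
  #     return x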
  def testSampleAndLogProb(self, dist_name, data):
    dist = data.draw(dhps.distributions(dist_name=dist_name, enable_vars=False,
                                        validate_args=False))
    seed = test_util.test_seed(sampler_type='stateless')
    sample_shape = [2, 1]
    with tfp_hps.no_tf_rank_errors(), kernel_hps.no_pd_errors():
      s1, lp1 = dist.experimental_sample_and_log_prob(sample_shape, seed=seed)
      s2 = dist.sample(sample_shape, seed=seed)
      self.assertAllClose(s1, s2, atol=1e-4)

      # Sanity-check the log prob. The actual values may differ arbitrarily (if
      # the `sample_and_log_prob` implementation is more stable) or be NaN, but
      # they should at least have the same shape.
      lp2 = dist.log_prob(s1)
      self.assertAllEqual(lp1.shape, lp2.shape)
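  # A concrete, self-contained illustration of the property checked above.
  # The helper name below is ours; `Normal` stands in for the drawn
  # distribution. With the same stateless seed, `sample` and
  # `experimental_sample_and_log_prob` should produce identical draws.
  def _sampleAndLogProbSketch(self):
    dist = tfd.Normal(loc=0., scale=1.)
    seed = test_util.test_seed(sampler_type='stateless')
    s1, lp1 = dist.experimental_sample_and_log_prob([2, 1], seed=seed)
    s2 = dist.sample([2, 1], seed=seed)
    self.assertAllClose(s1, s2)
    self.assertAllEqual(lp1.shape, dist.log_prob(s1).shape)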
  def testGradientsThroughSample(self, process_name, data):
    tfp_hps.guitar_skip_if_matches('VariationalGaussianProcess',
                                   process_name, 'b/147770193')
    process = data.draw(
        stochastic_processes(process_name=process_name, enable_vars=True))
    self.evaluate([var.initializer for var in process.variables])

    # TODO(b/147770193): Avoid non-PSD matrices in
    # `GaussianProcessRegressionModel`.
    with kernel_hps.no_pd_errors():
      with tf.GradientTape() as tape:
        sample = process.sample()
    if process.reparameterization_type == tfd.FULLY_REPARAMETERIZED:
      grads = tape.gradient(sample, process.variables)
      for grad, var in zip(grads, process.variables):
        self.assertIsNotNone(
            grad, 'Grad of sample was `None` for var: {}.'.format(var))
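  # A self-contained sketch of the property above for a single
  # FULLY_REPARAMETERIZED distribution (the helper name is ours): gradients
  # of a sample should flow back to variable parameters.
  def _sampleGradientSketch(self):
    loc = tf.Variable(0.5)
    self.evaluate(loc.initializer)
    dist = tfd.Normal(loc=loc, scale=1.)
    with tf.GradientTape() as tape:
      sample = dist.sample(seed=test_util.test_seed())
    self.assertIsNotNone(tape.gradient(sample, loc))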
  def testGradientsThroughLogProb(self, process_name, data):
    tfp_hps.guitar_skip_if_matches('VariationalGaussianProcess',
                                   process_name, 'b/147770193')
    process = data.draw(
        stochastic_processes(process_name=process_name, enable_vars=True))
    self.evaluate([var.initializer for var in process.variables])

    # Test that log_prob produces non-None gradients.
    # TODO(b/147770193): Avoid non-PSD matrices in
    # `GaussianProcessRegressionModel`.
    with kernel_hps.no_pd_errors():
      sample = process.sample()
    with tf.GradientTape() as tape:
      lp = process.log_prob(sample)
    grads = tape.gradient(lp, process.variables)
    for grad, var in zip(grads, process.variables):
      self.assertIsNotNone(
          grad, 'Grad of log_prob was `None` for var: {}.'.format(var))
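  # The log_prob analogue (helper name ours): differentiating the density at
  # a fixed point should reach the variable parameters, here `scale`.
  def _logProbGradientSketch(self):
    scale = tf.Variable(1.)
    self.evaluate(scale.initializer)
    dist = tfd.Normal(loc=0., scale=scale)
    with tf.GradientTape() as tape:
      lp = dist.log_prob(tf.constant([0.3, -1.2]))
    self.assertIsNotNone(tape.gradient(lp, scale))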
  def testExcessiveConcretizationInZeroArgPublicMethods(
      self, process_name, data):
    tfp_hps.guitar_skip_if_matches(
        'VariationalGaussianProcess', process_name, 'b/147770193')
    # Check that standard statistics (and `sample`) do not concretize
    # variables/deferred tensors more than the allowed number of times.
    process = data.draw(stochastic_processes(process_name, enable_vars=True))
    self.evaluate([var.initializer for var in process.variables])

    for stat in ['mean', 'covariance', 'stddev', 'variance', 'sample']:
      hp.note('Testing excessive concretization in {}.{}'.format(process_name,
                                                                 stat))
      try:
        with tfp_hps.assert_no_excessive_var_usage(
            'method `{}` of `{}`'.format(stat, process),
            max_permissible=MAX_CONVERSIONS_BY_CLASS.get(process_name, 1)
            ), kernel_hps.no_pd_errors():
          getattr(process, stat)()

      except NotImplementedError:
        pass
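  # `MAX_CONVERSIONS_BY_CLASS`, used above and in the next test, is a
  # module-level table mapping process names to how many variable reads
  # their methods may trigger; unlisted classes are allowed one. A sketch
  # with purely illustrative entries:
  #
  #   MAX_CONVERSIONS_BY_CLASS = {
  #       'GaussianProcessRegressionModel': 2,
  #   }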
  def testExcessiveConcretizationInLogProb(self, process_name, data):
    # Check that log_prob computations avoid reading process parameters
    # more than once.
    tfp_hps.guitar_skip_if_matches('VariationalGaussianProcess',
                                   process_name, 'b/147770193')
    process = data.draw(
        stochastic_processes(process_name=process_name, enable_vars=True))
    self.evaluate([var.initializer for var in process.variables])

    hp.note('Testing excessive var usage in {}.log_prob'.format(process_name))
    # TODO(b/147770193): Avoid non-PSD matrices in
    # `GaussianProcessRegressionModel`.
    with kernel_hps.no_pd_errors():
      sample = process.sample()
    try:
      with tfp_hps.assert_no_excessive_var_usage(
          'method `log_prob` of `{}`'.format(process),
          max_permissible=MAX_CONVERSIONS_BY_CLASS.get(process_name, 1)):
        process.log_prob(sample)
    except NotImplementedError:
      pass