Example #1
  def testNanFromGradsDontPropagate(self):
    """Test that update with NaN gradients does not cause NaN in results."""
    if tf1.control_flow_v2_enabled():
      self.skipTest('b/138796859')
    if tf.executing_eagerly(): return
    def _nan_log_prob_with_nan_gradient(x):
      return np.nan * tf.reduce_sum(x)

    initial_x = tf.linspace(0.01, 5, 10)
    hmc = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=_nan_log_prob_with_nan_gradient,
        step_size=2.,
        num_leapfrog_steps=5)
    updated_x, kernel_results = hmc.one_step(
        current_state=initial_x,
        previous_kernel_results=hmc.bootstrap_results(initial_x),
        seed=test_util.test_seed())
    initial_x_, updated_x_, log_accept_ratio_ = self.evaluate(
        [initial_x, updated_x, kernel_results.log_accept_ratio])
    acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.))

    logging.vlog(1, 'initial_x = {}'.format(initial_x_))
    logging.vlog(1, 'updated_x = {}'.format(updated_x_))
    logging.vlog(1, 'log_accept_ratio = {}'.format(log_accept_ratio_))

    self.assertAllEqual(initial_x_, updated_x_)
    self.assertEqual(acceptance_probs, 0.)

    self.assertAllEqual([True], [
        g is None for g in tf.gradients(
            ys=kernel_results.proposed_results.grads_target_log_prob,
            xs=initial_x)
    ])
    self.assertAllFinite(
        self.evaluate(tf.gradients(ys=updated_x, xs=initial_x)[0]))
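The test above relies on the fact that a NaN-scaled log-prob produces all-NaN gradients, which the HMC kernel must reject rather than propagate into the chain state. A minimal standalone sketch of that first fact, using `tf.GradientTape` instead of the test harness:

import numpy as np
import tensorflow as tf

x = tf.linspace(0.01, 5., 10)
with tf.GradientTape() as tape:
  tape.watch(x)
  # Same pathological target as in the test: NaN times a reduction.
  log_prob = np.nan * tf.reduce_sum(x)
grad = tape.gradient(log_prob, x)
print(grad)  # All-NaN gradient; a correct kernel must reject such a proposal.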
Example #2
  def testCholeskyUpdateXLA(self):
    self.skip_if_no_xla()
    if not (tf1.control_flow_v2_enabled() or self.use_static_shape):
      self.skipTest('TF1 broken')

    cholesky_update_fun = tf.function(tfp.math.cholesky_update,
                                      jit_compile=True)
    self._testCholeskyUpdate(cholesky_update_fun)
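For context, `tfp.math.cholesky_update(chol, update_vector)` returns the Cholesky factor of `chol @ chol^T + update_vector @ update_vector^T`. A minimal sketch of the same XLA-compiled wrapper in use (the matrix values here are illustrative, not from the test):

import tensorflow as tf
import tensorflow_probability as tfp

cholesky_update_fn = tf.function(tfp.math.cholesky_update, jit_compile=True)

a = tf.constant([[4., 2.], [2., 3.]])  # a positive-definite matrix
u = tf.constant([1., 0.5])             # rank-1 update vector
chol = tf.linalg.cholesky(a)
chol_updated = cholesky_update_fn(chol, u)
# Should match a direct factorization of the updated matrix:
direct = tf.linalg.cholesky(a + u[:, None] * u[None, :])
print(tf.reduce_max(tf.abs(chol_updated - direct)))  # ~0 up to float error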
Example #3
  def testNotReparameterized(self):
    if tf1.control_flow_v2_enabled():
      self.skipTest('b/138796859')
    total_count = tf.constant(5.0)
    concentration = tf.constant([0.1, 0.1, 0.1])
    _, [grad_total_count, grad_concentration] = tfp.math.value_and_gradient(
        lambda n, c: tfd.DirichletMultinomial(n, c).sample(100),
        [total_count, concentration])
    self.assertIsNone(grad_total_count)
    self.assertIsNone(grad_concentration)
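The `testNotReparameterized` tests here and in Examples 4, 5, 8, and 9 all check the same contract: sampling from a NOT_REPARAMETERIZED distribution provides no gradient path back to its parameters, so `tfp.math.value_and_gradient` returns `None` for them. For contrast, a minimal sketch with a fully reparameterized distribution, where the gradient does exist:

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

loc = tf.constant(0.)
# Normal sampling is FULLY_REPARAMETERIZED, so a gradient flows back to `loc`.
_, grad_loc = tfp.math.value_and_gradient(
    lambda m: tfd.Normal(loc=m, scale=1.).sample(100, seed=42), loc)
assert grad_loc is not None  # unlike the count distributions in these tests
print(tfd.Normal(0., 1.).reparameterization_type)  # FULLY_REPARAMETERIZED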
Example #4
  def testNotReparameterized(self):
    if tf1.control_flow_v2_enabled():
      self.skipTest('b/138796859')
    total_count = tf.constant(5.0)
    probs = tf.constant([0.2, 0.6])
    _, [grad_total_count, grad_probs] = tfp.math.value_and_gradient(
        lambda n, p: tfd.Multinomial(total_count=n, probs=p).sample(100),
        [total_count, probs])
    self.assertIsNone(grad_total_count)
    self.assertIsNone(grad_probs)
Example #5
  def testNotReparameterized(self):
    if tf1.control_flow_v2_enabled():
      self.skipTest('b/138796859')
    total_count = tf.constant(5.0)
    concentration = tf.constant([0.1, 0.1, 0.1])
    _, [grad_total_count, grad_concentration] = tfp.math.value_and_gradient(
        lambda n, c: tfd.DirichletMultinomial(n, c, validate_args=True).sample(  # pylint: disable=g-long-lambda
            100, seed=test_util.test_seed()), [total_count, concentration])
    self.assertIsNone(grad_total_count)
    self.assertIsNone(grad_concentration)
Example #6
  def _testWhileLoop(self):
    """Shows misuse of `dtc._TensorCoercible(distribution)` in `tf.while_loop`.

    Since `dtc._TensorCoercible(distribution)` only creates the `Tensor` on an
    as-needed basis, care must be taken that the Tensor is created outside the
    body of a `tf.while_loop` if the result is going to be used outside of the
    `tf.while_loop`. Although this is the case for any use of `tf.while_loop`,
    we write this unit test as a reminder of the behavior.
    """
    mean_ = 0.5
    stddev_ = 2.
    num_iter_ = 4

    x = dtc._TensorCoercible(tfd.Normal(mean_, 0.75), tfd.Distribution.mean)

    # Note: in graph mode we can keep the assertion below from raising by
    # creating the Tensor outside the loop, i.e., by calling
    # `tf.convert_to_tensor(x)` first.

    def _body(iter_, d):
      y = dtc._TensorCoercible(tfd.Normal(0, stddev_), tfd.Distribution.stddev)
      return [iter_ + 1, d + y + x]

    _, mean_plus_numiter_times_stddev = tf.while_loop(
        cond=lambda iter_, *args: iter_ < num_iter_,
        body=_body,
        loop_vars=[0, mean_])

    if not tf.executing_eagerly():
      # In graph mode we cannot access the cached value of x outside the
      # tf.while_loop. To avoid the exception, simply call
      # `tf.convert_to_tensor(x)` prior to the `tf.while_loop`; doing so causes
      # the value of `x` to exist both outside and inside the `tf.while_loop`.
      if tf1.control_flow_v2_enabled():
        error_regex = r'Tensor.*must be from the same graph as Tensor.*'
      else:
        error_regex = 'Cannot use.*in a while loop'
      with self.assertRaisesRegex(ValueError, error_regex):
        _ = x + tf.constant(3.)
      return

    # Things work in eager mode because it has regular Python semantics,
    # i.e., lexical scope rules.
    self.assertAllEqual(
        [mean_ + 3., mean_ + num_iter_ * (mean_ + stddev_)],
        self.evaluate([x + tf.constant(3.), mean_plus_numiter_times_stddev]))
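As the comments note, the graph-mode failure has a one-line fix: materialize the Tensor before entering the loop. A minimal sketch of that fix, assuming the same `dtc`/`tfd` setup as the test above:

x = dtc._TensorCoercible(tfd.Normal(0.5, 0.75), tfd.Distribution.mean)
x_tensor = tf.convert_to_tensor(x)  # create the Tensor outside the loop body

_, total = tf.while_loop(
    cond=lambda i, _: i < 4,
    body=lambda i, acc: [i + 1, acc + x_tensor],
    loop_vars=[0, 0.])
y = x_tensor + tf.constant(3.)  # safe: the cached value lives in the outer graph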
Example #7
  def var_to_tensors(var):
    if resource_variable_ops.is_resource_variable(var):
      if tf.control_flow_v2_enabled() and hasattr(
          layer_collection.graph, 'captures'):
        # TODO(b/143690035): Note that the "captures" property relies on an
        # API which might change.
        captures = layer_collection.graph.captures
        return [h for vh, h in captures if vh is var.handle]
      else:
        return [var.handle]
    if utils.is_reference_variable(var):
      return [tf_ops.internal_convert_to_tensor(var, as_ref=True)]
    raise ValueError('%s is not a recognized variable type.' % str(var))
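The captures-matching branch above can be reproduced with a plain `tf.function`. A minimal sketch (note, per the TODO, that `FuncGraph.captures` is not a stable API):

import tensorflow as tf

v = tf.Variable(1.)

@tf.function
def f():
  return v + 1.

cf = f.get_concrete_function()
# `captures` yields (external, internal) tensor pairs; match on the variable's
# resource handle, mirroring `var_to_tensors` above.
handles = [internal for external, internal in cf.graph.captures
           if external is v.handle]
print(handles)  # the in-graph placeholder standing in for v's handle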
Example #8
  def testNotReparameterized(self):
    if tf1.control_flow_v2_enabled():
      self.skipTest('b/138796859')
    total_count = tf.constant(5.0)
    probs = tf.constant([0.4, 0.6])
    _, [grad_total_count, grad_probs] = tfp.math.value_and_gradient(
        lambda n, p: tfd.Multinomial(  # pylint: disable=g-long-lambda
            total_count=n,
            probs=p,
            validate_args=True).sample(100, seed=test_util.test_seed()),
        [total_count, probs])
    self.assertIsNone(grad_total_count)
    self.assertIsNone(grad_probs)
Example #9
  def testNotReparameterized(self):
    if tf1.control_flow_v2_enabled():
      self.skipTest('b/138796859')
    n = tf.constant(5.0)
    c1 = tf.constant([0.1, 0.1, 0.1])
    c0 = tf.constant([0.3, 0.3, 0.3])

    def f(n, c1, c0):
      dist = tfd.BetaBinomial(n, c1, c0, validate_args=True)
      return dist.sample(100, seed=test_util.test_seed())

    _, [grad_n, grad_c1, grad_c0] = tfp.math.value_and_gradient(f, [n, c1, c0])
    self.assertIsNone(grad_n)
    self.assertIsNone(grad_c1)
    self.assertIsNone(grad_c0)
Example #10
  def variable_uses(self, var):
    """Computes number of times a variable is used.

    Args:
      var: Variable or ResourceVariable instance.

    Returns:
      Number of times a variable is used within this subgraph.

    Raises:
      ValueError: If 'var' is not a variable type.
    """
    def _add_tensor_consumers_to_set(tensor, consumers_set):
      """Finds consumers of a tensor and add them to the current consumers set.
      """
      for consumer in set(tensor.consumers()):
        # These are the types of ops that relay a tensor to other ops without
        # changing the tensor's value, so recursively find the actual
        # consumers.
        if consumer.type in [
            "Identity", "ReadVariableOp", "Enter", "ResourceGather"]:
          for output in consumer.outputs:
            _add_tensor_consumers_to_set(output, consumers_set)
        else:
          consumers_set.add(consumer)

    consumers = set()
    if resource_variable_ops.is_resource_variable(var):
      if tf.control_flow_v2_enabled() and hasattr(self._graph, "captures"):
        # TODO(b/143690035): Note that the "captures" property relies on an API
        # which might change.
        captures = self._graph.captures
        for handle in [h for vh, h in captures if vh is var.handle]:
          _add_tensor_consumers_to_set(handle, consumers)
      else:
        _add_tensor_consumers_to_set(var.handle, consumers)
    elif is_reference_variable(var):
      _add_tensor_consumers_to_set(var.value(), consumers)
    else:
      raise ValueError("%s does not appear to be a variable." % str(var))

    return len(self._members.intersection(consumers))
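The recursion through "relay" ops matters because a variable's handle is rarely consumed directly; reads and identities sit in between. A minimal standalone sketch of the traversal idea in a TF1-style graph (the op choices here are illustrative):

import tensorflow.compat.v1 as tf1

g = tf1.Graph()
with g.as_default():
  x = tf1.placeholder(tf1.float32, name='x')
  relayed = tf1.identity(x)  # Identity relays the value without changing it
  y = relayed * 2.           # the consumer we actually care about

  print([c.type for c in x.consumers()])        # ['Identity'] -- a relay op
  print([c.type for c in relayed.consumers()])  # ['Mul'] -- the real consumer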
Example #11
  def test_forecast_from_hmc(self):
    if not tf1.control_flow_v2_enabled():
      self.skipTest('test_forecast_from_hmc does not currently work with TF1')

    # Test that we can directly plug in the output of an HMC chain as the
    # input to `forecast`, as done in the example, with no `sess.run` call.
    num_results = 5
    num_timesteps = 4
    num_steps_forecast = 3
    batch_shape = [1, 2]
    observed_time_series = self._build_tensor(
        np.random.randn(*(batch_shape + [num_timesteps])))
    model = self._build_model(observed_time_series)
    samples, _ = tfp.sts.fit_with_hmc(model,
                                      observed_time_series,
                                      num_results=num_results,
                                      num_warmup_steps=2,
                                      num_variational_steps=2)

    forecast_dist = tfp.sts.forecast(model,
                                     observed_time_series,
                                     parameter_samples=samples,
                                     num_steps_forecast=num_steps_forecast)

    forecast_mean = forecast_dist.mean()[..., 0]
    forecast_scale = forecast_dist.stddev()[..., 0]

    sample_shape = [10]
    forecast_samples = forecast_dist.sample(
        sample_shape, seed=test_util.test_seed())[..., 0]

    self.evaluate(tf1.global_variables_initializer())
    forecast_mean_, forecast_scale_, forecast_samples_ = self.evaluate(
        (forecast_mean, forecast_scale, forecast_samples))
    self.assertAllEqual(forecast_mean_.shape,
                        batch_shape + [num_steps_forecast])
    self.assertAllEqual(forecast_scale_.shape,
                        batch_shape + [num_steps_forecast])
    self.assertAllEqual(forecast_samples_.shape,
                        sample_shape + batch_shape + [num_steps_forecast])