def testMeanCovarianceNoBatch(self):
    with self.test_session() as sess:
        dims = 3
        vdm = vector_diffeomixture_lib.VectorDiffeomixture(
            mix_loc=[[0.], [4.]],
            mix_scale=[10.],
            distribution=normal_lib.Normal(0., 1.),
            loc=[
                np.float32([-2.]),
                None,
            ],
            scale=[
                linop_identity_lib.LinearOperatorScaledIdentity(
                    num_rows=dims,
                    multiplier=np.float32(1.5),
                    is_positive_definite=True),
                linop_diag_lib.LinearOperatorDiag(
                    diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
                    is_positive_definite=True),
            ],
            validate_args=True)
        self.run_test_sample_consistent_mean_covariance(
            sess.run, vdm, rtol=0.02, cov_rtol=0.06)
Example 2
def _cholesky_diag(diag_operator):
    return linear_operator_diag.LinearOperatorDiag(
        math_ops.sqrt(diag_operator.diag),
        is_non_singular=True,
        is_self_adjoint=True,
        is_positive_definite=True,
        is_square=True)
def _inverse_diag(diag_operator):
    return linear_operator_diag.LinearOperatorDiag(
        1. / diag_operator.diag,
        is_non_singular=diag_operator.is_non_singular,
        is_self_adjoint=diag_operator.is_self_adjoint,
        is_positive_definite=diag_operator.is_positive_definite,
        is_square=True)
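
Both helpers above rely on elementary identities for diagonal matrices: the Cholesky factor of diag(d) is diag(sqrt(d)) when d > 0, and the inverse is diag(1/d) when no entry is zero. A minimal numpy check (not from the source):

import numpy as np

d = np.array([1., 4., 9.])
# chol(diag(d)) == diag(sqrt(d)), valid since d > 0.
np.testing.assert_allclose(np.linalg.cholesky(np.diag(d)), np.diag(np.sqrt(d)))
# inv(diag(d)) == diag(1 / d), valid since no entry of d is zero.
np.testing.assert_allclose(np.linalg.inv(np.diag(d)), np.diag(1. / d))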
def linop_scale(w, op):
    # We assume w > 0. (This assumption only relates to the is_* attributes.)
    with ops.name_scope("linop_scale", values=[w]):
        # TODO(b/35301104): LinearOperatorComposition doesn't combine operators, so
        # special case combinations here. Once it does, this function can be
        # replaced by:
        #     return linop_composition_lib.LinearOperatorComposition([
        #         scaled_identity(w), op])
        def scaled_identity(w):
            return linop_identity_lib.LinearOperatorScaledIdentity(
                num_rows=op.range_dimension_tensor(),
                multiplier=w,
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)

        if isinstance(op, linop_identity_lib.LinearOperatorIdentity):
            return scaled_identity(w)
        if isinstance(op, linop_identity_lib.LinearOperatorScaledIdentity):
            return scaled_identity(w * op.multiplier)
        if isinstance(op, linop_diag_lib.LinearOperatorDiag):
            return linop_diag_lib.LinearOperatorDiag(
                diag=w[..., array_ops.newaxis] * op.diag_part(),
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)
        if isinstance(op, linop_tril_lib.LinearOperatorLowerTriangular):
            return linop_tril_lib.LinearOperatorLowerTriangular(
                tril=w[..., array_ops.newaxis, array_ops.newaxis] *
                op.to_dense(),
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)
        raise NotImplementedError("Unsupported Linop type ({})".format(
            type(op).__name__))
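
A hypothetical usage sketch, assuming the linop_* aliases and constant_op/dtypes used elsewhere in these snippets are in scope; it is not from the source:

# Scaling a diagonal operator by w should agree with scaling its dense form.
op = linop_diag_lib.LinearOperatorDiag(
    diag=np.float32([2., 4.]), is_positive_definite=True)
scaled = linop_scale(constant_op.constant(0.5, dtype=dtypes.float32), op)
# scaled.to_dense() evaluates to 0.5 * op.to_dense(), i.e. diag([1., 2.]),
# and the is_* hints carry over because w > 0.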
def _add(self, op1, op2, operator_name, hints):
    return linear_operator_diag.LinearOperatorDiag(
        diag=op1.diag_part() + op2.diag_part(),
        is_non_singular=hints.is_non_singular,
        is_self_adjoint=hints.is_self_adjoint,
        is_positive_definite=hints.is_positive_definite,
        name=operator_name)
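
The addition rule used here is just diag(a) + diag(b) == diag(a + b); a quick numpy check (not from the source):

import numpy as np

a, b = np.array([1., 2.]), np.array([3., 4.])
np.testing.assert_allclose(np.diag(a) + np.diag(b), np.diag(a + b))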
Example 6
def test_none_loc_static_scale(self):
    loc = None
    scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
    batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
        loc, scale)

    self.assertEqual(tensor_shape.TensorShape([5, 1]), batch_shape)
    self.assertEqual(tensor_shape.TensorShape([3]), event_shape)
Example 7
def _set_diag_operators(self, diag_update, is_diag_update_positive):
    """Set attributes self._diag_update and self._diag_operator."""
    if diag_update is not None:
        self._diag_operator = linear_operator_diag.LinearOperatorDiag(
            self._diag_update,
            is_positive_definite=is_diag_update_positive)
        self._diag_inv_operator = linear_operator_diag.LinearOperatorDiag(
            1. / self._diag_update,
            is_positive_definite=is_diag_update_positive)
    else:
        if self.u.get_shape()[-1].value is not None:
            r = self.u.get_shape()[-1].value
        else:
            r = array_ops.shape(self.u)[-1]
        self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
            num_rows=r, dtype=self.dtype)
        self._diag_inv_operator = self._diag_operator
Example 8
def _matmul_linear_operator_diag(linop_a, linop_b):
    return linear_operator_diag.LinearOperatorDiag(
        diag=linop_a.diag * linop_b.diag,
        is_non_singular=_combined_non_singular_hint(linop_a, linop_b),
        is_self_adjoint=_combined_self_adjoint_hint(linop_a, linop_b),
        is_positive_definite=_combined_positive_definite_hint(
            linop_a, linop_b),
        is_square=True)
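
The registration is valid because the product of two diagonal matrices is the diagonal matrix of the elementwise product of their diagonals; a numpy check (not from the source):

import numpy as np

a, b = np.array([1., 2.]), np.array([3., 4.])
np.testing.assert_allclose(np.diag(a) @ np.diag(b), np.diag(a * b))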
Example 9
def test_none_loc_dynamic_scale(self):
    loc = None
    diag = array_ops.placeholder(dtypes.float64)
    scale = linear_operator_diag.LinearOperatorDiag(diag)
    with self.test_session() as sess:
        batch_shape, event_shape = sess.run(
            distribution_util.shapes_from_loc_and_scale(loc, scale),
            feed_dict={diag: np.ones((5, 1, 3))})
        self.assertAllEqual([5, 1], batch_shape)
        self.assertAllEqual([3], event_shape)
def _solve_linear_operator_diag(linop_a, linop_b):
    return linear_operator_diag.LinearOperatorDiag(
        diag=linop_b.diag / linop_a.diag,
        is_non_singular=registrations_util.combined_non_singular_hint(
            linop_a, linop_b),
        is_self_adjoint=(
            registrations_util.combined_commuting_self_adjoint_hint(
                linop_a, linop_b)),
        is_positive_definite=(
            registrations_util.combined_commuting_positive_definite_hint(
                linop_a, linop_b)),
        is_square=True)
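
Here solve(A, B) = A^{-1} B reduces, for diagonal operands, to an elementwise quotient of diagonals; a numpy check (not from the source):

import numpy as np

a, b = np.array([2., 4.]), np.array([6., 8.])
np.testing.assert_allclose(
    np.linalg.solve(np.diag(a), np.diag(b)), np.diag(b / a))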
Example 11
def _adjoint_diag(diag_operator):
    diag = diag_operator.diag
    if diag.dtype.is_complex:
        diag = math_ops.conj(diag)

    return linear_operator_diag.LinearOperatorDiag(
        diag=diag,
        is_non_singular=diag_operator.is_non_singular,
        is_self_adjoint=diag_operator.is_self_adjoint,
        is_positive_definite=diag_operator.is_positive_definite,
        is_square=True)
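
The adjoint of a diagonal matrix is the diagonal matrix of conjugated entries, since transposition leaves a diagonal matrix unchanged; a numpy check (not from the source):

import numpy as np

d = np.array([1. + 2.j, 3. - 1.j])
np.testing.assert_allclose(np.conj(np.diag(d)).T, np.diag(np.conj(d)))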
Example 12
def _matmul_linear_operator_diag_scaled_identity_left(linop_scaled_identity,
                                                      linop_diag):
    return linear_operator_diag.LinearOperatorDiag(
        diag=linop_diag.diag * linop_scaled_identity.multiplier,
        is_non_singular=_combined_non_singular_hint(linop_diag,
                                                    linop_scaled_identity),
        is_self_adjoint=_combined_self_adjoint_hint(linop_diag,
                                                    linop_scaled_identity),
        is_positive_definite=_combined_positive_definite_hint(
            linop_diag, linop_scaled_identity),
        is_square=True)
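
A scaled identity commutes with any diagonal matrix, so the same diagonal result holds for both multiplication orders (this is why the "left" and "right" registrations below share one formula); a numpy check (not from the source):

import numpy as np

d, m = np.array([1., 2., 3.]), 2.5
expected = np.diag(d * m)
np.testing.assert_allclose((m * np.eye(3)) @ np.diag(d), expected)
np.testing.assert_allclose(np.diag(d) @ (m * np.eye(3)), expected)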
def _set_diag_operators(self, diag_update, is_diag_update_positive):
  """Set attributes self._diag_update and self._diag_operator."""
  if diag_update is not None:
    self._diag_operator = linear_operator_diag.LinearOperatorDiag(
        self._diag_update, is_positive_definite=is_diag_update_positive)
  else:
    if tensor_shape.dimension_value(self.u.shape[-1]) is not None:
      r = tensor_shape.dimension_value(self.u.shape[-1])
    else:
      r = array_ops.shape(self.u)[-1]
    self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
        num_rows=r, dtype=self.dtype)
Example 14
def _matmul_linear_operator_diag_scaled_identity_right(
    linop_diag, linop_scaled_identity):
  return linear_operator_diag.LinearOperatorDiag(
      diag=linop_diag.diag * linop_scaled_identity.multiplier,
      is_non_singular=registrations_util.combined_non_singular_hint(
          linop_diag, linop_scaled_identity),
      is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
          linop_diag, linop_scaled_identity),
      is_positive_definite=(
          registrations_util.combined_commuting_positive_definite_hint(
              linop_diag, linop_scaled_identity)),
      is_square=True)
Example 15
def test_dynamic_loc_static_scale(self):
    loc = array_ops.placeholder(dtypes.float64)
    diag = constant_op.constant(np.ones((5, 2, 3)))
    scale = linear_operator_diag.LinearOperatorDiag(diag)
    with self.test_session():
        batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
            loc, scale)
        # batch_shape depends on both args, and so is dynamic. Since loc did
        # not have static shape, we inferred event shape entirely from scale,
        # and this is available statically.
        self.assertAllEqual(
            [5, 2], batch_shape.eval(feed_dict={loc: np.zeros((2, 3))}))
        self.assertAllEqual([3], event_shape)
Example 16
def testGlobalDispatcherLinearOperators(self):
    original_global_dispatchers = dispatch._GLOBAL_DISPATCHERS
    try:
        TensorTracerOpDispatcher().register()

        x = TensorTracer("x")

        # To grab the eigenvalues the diag operator just calls convert_to_tensor
        # (twice) in this case.
        trace = linear_operator_diag.LinearOperatorDiag(x).eigvals()
        self.assertEqual(
            str(trace),
            "convert_to_tensor(convert_to_tensor(x, dtype=None, dtype_hint=None, "
            "name=diag))")

        # The diagonal tensor addition gets traced even though the linear_operator
        # API only uses dispatchable ops instead of directly exposing dispatching.
        trace = linear_operator_diag.LinearOperatorDiag(x).add_to_tensor(x)
        self.assertIn(
            "linalg.set_diag(convert_to_tensor(x, name=x), __operators__.add("
            "convert_to_tensor(x, dtype=None, dtype_hint=None, name=diag), "
            "linalg.diag_part(convert_to_tensor(x, name=x)), "
            "name=", str(trace))

        # The dispatch-supporting ops the non-singular check calls out to
        # get traced.
        trace = linear_operator_diag.LinearOperatorDiag(x).assert_non_singular()
        self.assertIn("debugging.assert_less", str(trace))
        self.assertIn(
            "message=Singular operator:  Diagonal contained zero values.",
            str(trace))

    finally:
        # Clean up.
        dispatch._GLOBAL_DISPATCHERS = original_global_dispatchers
def testSampleProbConsistentDynamicQuadrature(self):
    with self.test_session() as sess:
        qgrid = array_ops.placeholder(dtype=dtypes.float32)
        qprobs = array_ops.placeholder(dtype=dtypes.float32)
        g, p = np.polynomial.hermite.hermgauss(deg=8)
        dims = 4
        vdm = vector_diffeomixture_lib.VectorDiffeomixture(
            mix_loc=[[0.], [1.]],
            mix_scale=[1.],
            distribution=normal_lib.Normal(0., 1.),
            loc=[
                None,
                np.float32([2.] * dims),
            ],
            scale=[
                linop_identity_lib.LinearOperatorScaledIdentity(
                    num_rows=dims,
                    multiplier=np.float32(1.1),
                    is_positive_definite=True),
                linop_diag_lib.LinearOperatorDiag(
                    diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
                    is_positive_definite=True),
            ],
            # Pass the placeholders so the quadrature grid is truly dynamic;
            # passing the numpy arrays g, p directly would make the feed_dict
            # below a no-op.
            quadrature_grid_and_probs=(qgrid, qprobs),
            validate_args=True)
        sess_run_fn = lambda x: sess.run(x, feed_dict={qgrid: g, qprobs: p})
        # Ball centered at component0's mean.
        self.run_test_sample_consistent_log_prob(
            sess_run_fn, vdm, radius=2., center=0., rtol=0.005)
        # Larger ball centered at component1's mean.
        self.run_test_sample_consistent_log_prob(
            sess_run_fn, vdm, radius=4., center=2., rtol=0.005)
def testSampleProbConsistentBroadcastMixBatch(self):
    with self.test_session() as sess:
        dims = 4
        vdm = vdm_lib.VectorDiffeomixture(
            mix_loc=[[0.], [1.]],
            temperature=[1.],
            distribution=normal_lib.Normal(0., 1.),
            loc=[
                None,
                np.float32([2.] * dims),
            ],
            scale=[
                linop_identity_lib.LinearOperatorScaledIdentity(
                    num_rows=dims,
                    multiplier=[np.float32(1.1)],
                    is_positive_definite=True),
                linop_diag_lib.LinearOperatorDiag(
                    diag=np.stack([
                        np.linspace(2.5, 3.5, dims, dtype=np.float32),
                        np.linspace(2.75, 3.25, dims, dtype=np.float32),
                    ]),
                    is_positive_definite=True),
            ],
            quadrature_size=8,
            validate_args=True)
        # Ball centered at component0's mean.
        self.run_test_sample_consistent_log_prob(
            sess.run, vdm, radius=2., center=0., rtol=0.01)
        # Larger ball centered at component1's mean.
        self.run_test_sample_consistent_log_prob(
            sess.run, vdm, radius=4., center=2., rtol=0.01)
def testMeanCovarianceNoBatchUncenteredNonStandardBase(self):
  with self.cached_session() as sess:
    dims = 3
    vdm = vdm_lib.VectorDiffeomixture(
        mix_loc=[[0.], [4.]],
        temperature=[0.1],
        distribution=normal_lib.Normal(-1., 1.5),
        loc=[
            np.float32([-2.]),
            np.float32([0.]),
        ],
        scale=[
            linop_identity_lib.LinearOperatorScaledIdentity(
                num_rows=dims,
                multiplier=np.float32(1.5),
                is_positive_definite=True),
            linop_diag_lib.LinearOperatorDiag(
                diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
                is_positive_definite=True),
        ],
        quadrature_size=8,
        validate_args=True)
    self.run_test_sample_consistent_mean_covariance(
        sess.run, vdm, num_samples=int(1e6), rtol=0.01, cov_atol=0.025)
Example 20
def _mean_of_covariance_given_quadrature_component(self, diag_only):
    p = self.mixture_distribution.probs

    # To compute E[Cov(Z|V)], we'll add matrices within three categories:
    # scaled-identity, diagonal, and full. Then we'll combine these at the end.
    scale_identity_multiplier = None
    diag = None
    full = None

    for k, aff in enumerate(self.interpolated_affine):
        s = aff.scale  # Just in case aff.scale has side-effects, we'll call once.
        if (s is None or
                isinstance(s, linop_identity_lib.LinearOperatorIdentity)):
            scale_identity_multiplier = add(scale_identity_multiplier,
                                            p[..., k, array_ops.newaxis])
        elif isinstance(s, linop_identity_lib.LinearOperatorScaledIdentity):
            scale_identity_multiplier = add(
                scale_identity_multiplier,
                p[..., k, array_ops.newaxis] * math_ops.square(s.multiplier))
        elif isinstance(s, linop_diag_lib.LinearOperatorDiag):
            diag = add(diag, (p[..., k, array_ops.newaxis] *
                              math_ops.square(s.diag_part())))
        else:
            x = (p[..., k, array_ops.newaxis, array_ops.newaxis] *
                 s.matmul(s.to_dense(), adjoint_arg=True))
            if diag_only:
                x = array_ops.matrix_diag_part(x)
            full = add(full, x)

    # We must now account for the fact that the base distribution might have a
    # non-unity variance. Recall that, since X ~ iid Law(X_0),
    #   `Cov(SX+m) = S Cov(X) S.T = S S.T Diag(Var(X_0))`.
    # We can scale by `Var(X)` (vs `Cov(X)`) since X corresponds to `d` iid
    # samples from a scalar-event distribution.
    v = self.distribution.variance()
    if scale_identity_multiplier is not None:
        scale_identity_multiplier *= v
    if diag is not None:
        diag *= v[..., array_ops.newaxis]
    if full is not None:
        full *= v[..., array_ops.newaxis]

    if diag_only:
        # Apparently we don't need the full matrix, just the diagonal.
        r = add(diag, full)
        if r is None and scale_identity_multiplier is not None:
            ones = array_ops.ones(self.event_shape_tensor(), dtype=self.dtype)
            return scale_identity_multiplier[..., array_ops.newaxis] * ones
        return add(r, scale_identity_multiplier)

    # `None` indicates we don't know if the result is positive-definite.
    is_positive_definite = (True if all(
        aff.scale.is_positive_definite
        for aff in self.endpoint_affine) else None)

    to_add = []
    if diag is not None:
        to_add.append(
            linop_diag_lib.LinearOperatorDiag(
                diag=diag, is_positive_definite=is_positive_definite))
    if full is not None:
        to_add.append(
            linop_full_lib.LinearOperatorFullMatrix(
                matrix=full, is_positive_definite=is_positive_definite))
    if scale_identity_multiplier is not None:
        to_add.append(
            linop_identity_lib.LinearOperatorScaledIdentity(
                num_rows=self.event_shape_tensor()[0],
                multiplier=scale_identity_multiplier,
                is_positive_definite=is_positive_definite))

    return (linop_add_lib.add_operators(to_add)[0].to_dense()
            if to_add else None)
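
The variance rescaling step above uses `Cov(SX+m) = S S.T Var(X_0)` for iid components; a small Monte Carlo numpy sketch (not from the source, with hypothetical numbers) illustrating it for a diagonal S:

import numpy as np

rng = np.random.RandomState(0)
s, v = np.array([1.5, 2.5]), 4.0       # diagonal scale, per-component variance
x = np.sqrt(v) * rng.randn(200000, 2)  # X with iid components, Var(X_0) = v
y = x @ np.diag(s)                     # Y = S X (the shift m drops out of Cov)
# Empirical covariance matches S S.T * v = diag(s**2 * v) up to sampling noise.
np.testing.assert_allclose(
    np.cov(y, rowvar=False), np.diag(np.square(s) * v), rtol=0.05, atol=0.2)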
Example 21
def test_static_loc_static_scale_non_matching_event_size_raises(self):
    loc = constant_op.constant(np.zeros((2, 4)))
    scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
    with self.assertRaisesRegexp(ValueError, "could not be broadcast"):
        distribution_util.shapes_from_loc_and_scale(loc, scale)
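
The failure is ordinary broadcasting: loc's trailing dimension (its event size, 4) cannot broadcast against the operator's event size (3). A numpy analogue of the check (not from the source; `np.broadcast_shapes` needs numpy >= 1.20):

import numpy as np

# Raises ValueError: trailing dims 4 and 3 cannot be broadcast together,
# mirroring the "could not be broadcast" error asserted above.
np.broadcast_shapes((2, 4), (5, 1, 3))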