def testMeanCovarianceBatch(self):
    """Checks sampled mean/covariance against analytic values, batched case."""
    with self.test_session() as sess:
        dims = 3
        # Component 0: batched scaled-identity scale (batch via multiplier).
        scale0 = linop_identity_lib.LinearOperatorScaledIdentity(
            num_rows=dims,
            multiplier=[np.float32(1.5)],
            is_positive_definite=True)
        # Component 1: batched diagonal scale (batch of two diag vectors).
        scale1 = linop_diag_lib.LinearOperatorDiag(
            diag=np.stack([
                np.linspace(2.5, 3.5, dims, dtype=np.float32),
                np.linspace(0.5, 1.5, dims, dtype=np.float32),
            ]),
            is_positive_definite=True)
        vdm = vector_diffeomixture_lib.VectorDiffeomixture(
            mix_loc=[[0.], [4.]],
            mix_scale=[10.],
            distribution=normal_lib.Normal(0., 1.),
            loc=[np.float32([[-2.]]), None],
            scale=[scale0, scale1],
            validate_args=True)
        self.run_test_sample_consistent_mean_covariance(
            sess, vdm, rtol=0.02, cov_rtol=0.06)
 def testSampleProbConsistentBroadcastMixNonStandardBase(self):
     """Verifies sample/log_prob consistency with a non-standard base Normal."""
     with self.test_session() as sess:
         dims = 4
         # Component 0: scaled identity; component 1: diagonal with a +2 shift.
         scale0 = linop_identity_lib.LinearOperatorScaledIdentity(
             num_rows=dims,
             multiplier=np.float32(1.1),
             is_positive_definite=True)
         scale1 = linop_diag_lib.LinearOperatorDiag(
             diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
             is_positive_definite=True)
         vdm = vector_diffeomixture_lib.VectorDiffeomixture(
             mix_loc=[[0.], [1.]],
             mix_scale=[1.],
             distribution=normal_lib.Normal(1., 1.5),
             loc=[None, np.float32([2.] * dims)],
             scale=[scale0, scale1],
             validate_args=True)
         # Ball centered at component0's mean.
         self.run_test_sample_consistent_log_prob(
             sess, vdm, radius=2., center=1., rtol=0.006)
         # Larger ball centered at component1's mean.
         self.run_test_sample_consistent_log_prob(
             sess, vdm, radius=4., center=3., rtol=0.009)
# Example #3
 def scaled_identity(w):
     """Wraps multiplier `w` as a scaled-identity operator with `op`'s hints."""
     # Propagate the enclosing operator's structural hints unchanged.
     hints = dict(
         is_non_singular=op.is_non_singular,
         is_self_adjoint=op.is_self_adjoint,
         is_positive_definite=op.is_positive_definite)
     return linop_identity_lib.LinearOperatorScaledIdentity(
         num_rows=op.range_dimension_tensor(), multiplier=w, **hints)
  def _add(self, op1, op2, operator_name, hints):
    """Returns `op1 + op2` as a single `LinearOperatorScaledIdentity`.

    Both operands are expected to be (scaled-)identity-like, so the sum is
    itself a scaled identity whose multiplier is the sum of the operands'
    multipliers.

    Args:
      op1: First operator; scaled-identity or plain identity.
      op2: Second operator; scaled-identity or plain identity.
      operator_name: Python `str` name for the resulting operator.
      hints: Object carrying `is_non_singular`, `is_self_adjoint`,
        `is_positive_definite` hints for the result.

    Returns:
      A `LinearOperatorScaledIdentity` representing the sum.
    """
    # Will build a LinearOperatorScaledIdentity.

    def _multiplier_of(op):
      # A scaled identity carries an explicit multiplier; any other operator
      # reaching this adder is treated as a plain identity, i.e. multiplier 1
      # (broadcast to the operator's batch shape).
      if _type(op) == _SCALED_IDENTITY:
        return op.multiplier
      return array_ops.ones(op.batch_shape_tensor(), dtype=op.dtype)

    return linear_operator_identity.LinearOperatorScaledIdentity(
        num_rows=op1.range_dimension_tensor(),
        multiplier=_multiplier_of(op1) + _multiplier_of(op2),
        is_non_singular=hints.is_non_singular,
        is_self_adjoint=hints.is_self_adjoint,
        is_positive_definite=hints.is_positive_definite,
        name=operator_name)
# Example #5
 def linop(self, num_rows=None, multiplier=None, diag=None):
     """Helper to create non-singular, symmetric, positive definite matrices.

     Exactly one operator kind is selected from the supplied args:
       * `num_rows` + `multiplier` -> `LinearOperatorScaledIdentity`
       * `num_rows` alone          -> `LinearOperatorIdentity`
       * `diag` alone              -> `LinearOperatorDiag`

     Args:
       num_rows: Number of rows for (scaled-)identity operators.
       multiplier: Scalar multiplier for the scaled-identity operator.
       diag: Diagonal entries for the diagonal operator.

     Returns:
       A positive-definite `LinearOperator` of the selected kind.

     Raises:
       ValueError: If args for more than one kind are given, or none at all.
     """
     if num_rows is not None and multiplier is not None:
         if diag is not None:
             raise ValueError("Found extra args for scaled identity.")
         return linop_identity_lib.LinearOperatorScaledIdentity(
             num_rows=num_rows,
             multiplier=multiplier,
             is_positive_definite=True)
     if num_rows is not None:
         if multiplier is not None or diag is not None:
             raise ValueError("Found extra args for identity.")
         return linop_identity_lib.LinearOperatorIdentity(
             num_rows=num_rows, is_positive_definite=True)
     if diag is not None:
         if num_rows is not None or multiplier is not None:
             raise ValueError("Found extra args for diag.")
         return linop_diag_lib.LinearOperatorDiag(
             diag=diag, is_positive_definite=True)
     raise ValueError("Must specify at least one arg.")
 def testMeanCovarianceNoBatchUncenteredNonStandardBase(self):
     """Checks sampled mean/covariance with an uncentered base distribution."""
     with self.test_session() as sess:
         dims = 3
         # Component 0: scaled identity; component 1: diagonal scale.
         scale0 = linop_identity_lib.LinearOperatorScaledIdentity(
             num_rows=dims,
             multiplier=np.float32(1.5),
             is_positive_definite=True)
         scale1 = linop_diag_lib.LinearOperatorDiag(
             diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
             is_positive_definite=True)
         vdm = vector_diffeomixture_lib.VectorDiffeomixture(
             mix_loc=[[0.], [4.]],
             mix_scale=[10.],
             distribution=normal_lib.Normal(-1., 1.5),
             loc=[np.float32([-2.]), np.float32([0.])],
             scale=[scale0, scale1],
             validate_args=True)
         self.run_test_sample_consistent_mean_covariance(
             sess, vdm, num_samples=int(1e6), rtol=0.01, cov_atol=0.025)
# Example #7
    def _mean_of_covariance_given_quadrature_component(self, diag_only):
        """Computes the mixture-probability-weighted sum of component covariances.

        Each interpolated affine component contributes `p[k] * S_k S_k^T` where
        `S_k` is its scale operator; contributions are bucketed by operator
        structure (scaled-identity / diagonal / full) so cheap structures are
        accumulated as scalars or vectors rather than dense matrices, and only
        combined into one result at the end. The total is then scaled by the
        base distribution's variance.

        NOTE(review): relies on a module-level `add` helper that is presumably
        None-tolerant (returns the non-None arg when one side is None) — it is
        not visible in this chunk; confirm against the enclosing module.

        Args:
          diag_only: Python `bool`; if `True`, only the diagonal of the
            covariance is computed and returned (shape `[..., d]` rather than
            `[..., d, d]` — presumably; TODO confirm against callers).

        Returns:
          A `Tensor` with the accumulated covariance (or its diagonal), or
          `None` if no component contributed anything.
        """
        # Since we created logits to already be scaled, we can use exp which is
        # slightly cheaper than `self.mixture_distribution.probs`.
        p = math_ops.exp(self.mixture_distribution.logits)

        # To compute E[Cov(Z|V)], we'll add matrices within three categories:
        # scaled-identity, diagonal, and full. Then we'll combine these at the end.
        scaled_identity = None
        diag = None
        full = None

        for k, aff in enumerate(self.interpolated_affine):
            s = aff.scale  # Just in case aff.scale has side-effects, we'll call once.
            if (s is None or isinstance(
                    s, linop_identity_lib.LinearOperatorIdentity)):
                # Identity (or absent) scale: S S^T = I, so only p[k] accrues.
                scaled_identity = add(scaled_identity, p[..., k,
                                                         array_ops.newaxis])
            elif isinstance(s,
                            linop_identity_lib.LinearOperatorScaledIdentity):
                # Scaled identity: S S^T = multiplier^2 * I.
                scaled_identity = add(scaled_identity,
                                      (p[..., k, array_ops.newaxis] *
                                       math_ops.square(s.multiplier)))
            elif isinstance(s, linop_diag_lib.LinearOperatorDiag):
                # Diagonal: S S^T has diagonal diag^2; track just the vector.
                diag = add(diag, (p[..., k, array_ops.newaxis] *
                                  math_ops.square(s.diag_part())))
            else:
                # General operator: materialize the dense p[k] * S S^T.
                x = (p[..., k, array_ops.newaxis, array_ops.newaxis] *
                     s.matmul(s.to_dense(), adjoint_arg=True))
                if diag_only:
                    x = array_ops.matrix_diag_part(x)
                full = add(full, x)

        # We must now account for the fact that the base distribution might have a
        # non-unity variance. Recall that `Cov(SX+m) = S.T Cov(X) S = S.T S Var(X)`.
        # We can scale by `Var(X)` (vs `Cov(X)`) since X corresponds to `d` iid
        # samples from a scalar-event distribution.
        v = self.distribution.variance()
        if scaled_identity is not None:
            scaled_identity *= v
        if diag is not None:
            diag *= v[..., array_ops.newaxis]
        if full is not None:
            full *= v[..., array_ops.newaxis]

        if diag_only:
            # Apparently we don't need the full matrix, just the diagonal.
            r = add(diag, full)
            if r is None and scaled_identity is not None:
                # Broadcast the scalar scaled-identity term to a full diagonal.
                ones = array_ops.ones(self.event_shape_tensor(),
                                      dtype=self.dtype)
                return scaled_identity * ones
            return add(r, scaled_identity)

        # `None` indicates we don't know if the result is positive-definite.
        is_positive_definite = (True if all(
            aff.scale.is_positive_definite
            for aff in self.endpoint_affine) else None)

        # Wrap each non-empty bucket in a matching LinearOperator so
        # add_operators can combine them structurally.
        to_add = []
        if diag is not None:
            to_add.append(
                linop_diag_lib.LinearOperatorDiag(
                    diag=diag, is_positive_definite=is_positive_definite))
        if full is not None:
            to_add.append(
                linop_full_lib.LinearOperatorFullMatrix(
                    matrix=full, is_positive_definite=is_positive_definite))
        if scaled_identity is not None:
            to_add.append(
                linop_identity_lib.LinearOperatorScaledIdentity(
                    num_rows=self.event_shape_tensor()[0],
                    multiplier=scaled_identity,
                    is_positive_definite=is_positive_definite))

        return (linop_add_lib.add_operators(to_add)[0].to_dense()
                if to_add else None)