def _set_diag_operators(self, diag, is_diag_positive):
     """Set attributes self._diag and self._diag_operator."""
     # NOTE(review): `diag` is only used here as a presence flag; the operator
     # is built from `self._diag`, which is presumably assigned by the caller
     # (e.g. __init__) before this runs -- confirm against the caller.
     if diag is not None:
         self._diag_operator = linear_operator_diag.LinearOperatorDiag(
             self._diag, is_positive_definite=is_diag_positive)
         # A diagonal operator's inverse is the diagonal of reciprocals.
         # NOTE(review): assumes self._diag has no zero entries -- confirm.
         self._diag_inv_operator = linear_operator_diag.LinearOperatorDiag(
             1. / self._diag, is_positive_definite=is_diag_positive)
     else:
         # No diagonal given: behave as the identity.  Prefer the statically
         # known last dimension of `u`; fall back to the dynamic shape.
         if self.u.get_shape()[-1].value is not None:
             r = self.u.get_shape()[-1].value
         else:
             r = array_ops.shape(self.u)[-1]
         self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
             num_rows=r, dtype=self.dtype)
         # Identity is its own inverse, so the same operator is reused.
         self._diag_inv_operator = self._diag_operator
 def _add(self, op1, op2, operator_name, hints):
     """Return a diagonal operator whose diagonal sums both operands'.

     The non-singularity / self-adjointness / positive-definiteness flags
     are taken from `hints` and forwarded onto the result.
     """
     summed_diag = op1.diag_part() + op2.diag_part()
     return linear_operator_diag.LinearOperatorDiag(
         diag=summed_diag,
         is_non_singular=hints.is_non_singular,
         is_self_adjoint=hints.is_self_adjoint,
         is_positive_definite=hints.is_positive_definite,
         name=operator_name)
 def testSampleProbConsistentBroadcastMixNonStandardBase(self):
     """Sample/log_prob consistency with broadcast mix and Normal(1, 1.5) base."""
     with self.test_session() as sess:
         dims = 4
         component_scales = [
             linop_identity_lib.LinearOperatorScaledIdentity(
                 num_rows=dims,
                 multiplier=np.float32(1.1),
                 is_positive_definite=True),
             linop_diag_lib.LinearOperatorDiag(
                 diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
                 is_positive_definite=True),
         ]
         vdm = vector_diffeomixture_lib.VectorDiffeomixture(
             mix_loc=[[0.], [1.]],
             mix_scale=[1.],
             distribution=normal_lib.Normal(1., 1.5),
             loc=[None, np.float32([2.] * dims)],
             scale=component_scales,
             validate_args=True)
         # Ball centered at component0's mean.
         self.run_test_sample_consistent_log_prob(
             sess, vdm, radius=2., center=1., rtol=0.006)
         # Larger ball centered at component1's mean.
         self.run_test_sample_consistent_log_prob(
             sess, vdm, radius=4., center=3., rtol=0.009)
 def testMeanCovarianceBatch(self):
     """Mean/covariance consistency with a batch of two diag components."""
     with self.test_session() as sess:
         dims = 3
         batched_diag = np.stack([
             np.linspace(2.5, 3.5, dims, dtype=np.float32),
             np.linspace(0.5, 1.5, dims, dtype=np.float32),
         ])
         component_scales = [
             linop_identity_lib.LinearOperatorScaledIdentity(
                 num_rows=dims,
                 multiplier=[np.float32(1.5)],
                 is_positive_definite=True),
             linop_diag_lib.LinearOperatorDiag(
                 diag=batched_diag,
                 is_positive_definite=True),
         ]
         vdm = vector_diffeomixture_lib.VectorDiffeomixture(
             mix_loc=[[0.], [4.]],
             mix_scale=[10.],
             distribution=normal_lib.Normal(0., 1.),
             loc=[np.float32([[-2.]]), None],
             scale=component_scales,
             validate_args=True)
         self.run_test_sample_consistent_mean_covariance(
             sess, vdm, rtol=0.02, cov_rtol=0.06)
# Beispiel #5  (example-listing marker left over from the snippet aggregator)
# 0
def linop_scale(w, op):
    """Return a linear operator equal to `w * op`.

    We assume w > 0. (This assumption only relates to the is_* attributes.)
    """
    with ops.name_scope("linop_scale", values=[w]):
        # TODO(b/35301104): LinearOperatorComposition doesn't combine operators,
        # so special-case the known combinations here. Once it does, this whole
        # function could be replaced by:
        #     return linop_composition_lib.LinearOperatorComposition([
        #         scaled_identity(w), op])
        def make_scaled_identity(multiplier):
            return linop_identity_lib.LinearOperatorScaledIdentity(
                num_rows=op.range_dimension_tensor(),
                multiplier=multiplier,
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)

        if isinstance(op, linop_identity_lib.LinearOperatorIdentity):
            return make_scaled_identity(w)
        elif isinstance(op, linop_identity_lib.LinearOperatorScaledIdentity):
            # Fold the new weight into the existing multiplier.
            return make_scaled_identity(w * op.multiplier)
        elif isinstance(op, linop_diag_lib.LinearOperatorDiag):
            return linop_diag_lib.LinearOperatorDiag(
                diag=w[..., array_ops.newaxis] * op.diag_part(),
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)
        elif isinstance(op, linop_tril_lib.LinearOperatorTriL):
            scaled_tril = (w[..., array_ops.newaxis, array_ops.newaxis] *
                           op.to_dense())
            return linop_tril_lib.LinearOperatorTriL(
                tril=scaled_tril,
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)
        raise NotImplementedError("Unsupported Linop type ({})".format(
            type(op).__name__))
def linop_scale(w, op):
    """Scale linear operator `op` by `w`.

    We assume w > 0. (This assumption only relates to the is_* attributes.)
    """
    with ops.name_scope("linop_scale", values=[w]):

        def identity_scaled_by(multiplier):
            return linop_identity_lib.LinearOperatorScaledIdentity(
                num_rows=op.range_dimension_tensor(),
                multiplier=multiplier,
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)

        if isinstance(op, linop_identity_lib.LinearOperatorIdentity):
            return identity_scaled_by(w)
        if isinstance(op, linop_identity_lib.LinearOperatorScaledIdentity):
            # Fold the new weight into the existing multiplier.
            return identity_scaled_by(w * op.multiplier)
        if isinstance(op, linop_diag_lib.LinearOperatorDiag):
            return linop_diag_lib.LinearOperatorDiag(
                diag=w[..., array_ops.newaxis] * op.diag_part(),
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)
        # Generic fallback: compose `w * Identity` with `op`.
        return linop_composition_lib.LinearOperatorComposition(
            [identity_scaled_by(w), op])
    def test_none_loc_static_scale(self):
        """With loc=None, both shapes must come entirely from the scale op."""
        scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
        batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
            None, scale)

        self.assertEqual(tensor_shape.TensorShape([5, 1]), batch_shape)
        self.assertEqual(tensor_shape.TensorShape([3]), event_shape)
 def test_none_loc_dynamic_scale(self):
     """Dynamic scale diag: shapes are tensors and must be evaluated."""
     diag = array_ops.placeholder(dtypes.float64)
     scale = linear_operator_diag.LinearOperatorDiag(diag)
     with self.test_session() as sess:
         shapes = distribution_util.shapes_from_loc_and_scale(None, scale)
         batch_shape, event_shape = sess.run(
             shapes, feed_dict={diag: np.ones((5, 1, 3))})
         self.assertAllEqual([5, 1], batch_shape)
         self.assertAllEqual([3], event_shape)
 def test_dynamic_loc_static_scale(self):
     """Static scale + dynamic loc: event shape is static, batch shape is not."""
     loc = array_ops.placeholder(dtypes.float64)
     scale = linear_operator_diag.LinearOperatorDiag(
         constant_op.constant(np.ones((5, 2, 3))))
     with self.test_session():
         batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
             loc, scale)
         # batch_shape depends on both args, and so is dynamic.  Since loc did
         # not have static shape, we inferred event shape entirely from scale,
         # and this is available statically.
         self.assertAllEqual(
             [5, 2], batch_shape.eval(feed_dict={loc: np.zeros((2, 3))}))
         self.assertAllEqual([3], event_shape)
# Beispiel #10  (example-listing marker left over from the snippet aggregator)
# 0
 def linop(self, num_rows=None, multiplier=None, diag=None):
     """Helper to create non-singular, symmetric, positive definite matrices.

     Exactly one of three argument combinations is accepted:
       * num_rows + multiplier -> scaled identity,
       * num_rows alone        -> identity,
       * diag alone            -> diagonal operator.
     Any other combination raises ValueError.
     """
     if num_rows is not None and multiplier is not None:
         if diag is not None:
             raise ValueError("Found extra args for scaled identity.")
         return linop_identity_lib.LinearOperatorScaledIdentity(
             num_rows=num_rows,
             multiplier=multiplier,
             is_positive_definite=True)
     if num_rows is not None:
         # multiplier is None here; only diag could be a stray extra.
         if multiplier is not None or diag is not None:
             raise ValueError("Found extra args for identity.")
         return linop_identity_lib.LinearOperatorIdentity(
             num_rows=num_rows, is_positive_definite=True)
     if diag is not None:
         # num_rows is None here; only multiplier could be a stray extra.
         if num_rows is not None or multiplier is not None:
             raise ValueError("Found extra args for diag.")
         return linop_diag_lib.LinearOperatorDiag(
             diag=diag, is_positive_definite=True)
     raise ValueError("Must specify at least one arg.")
 def testMeanCovarianceNoBatchUncenteredNonStandardBase(self):
     """Mean/covariance consistency, no batch dims, Normal(-1, 1.5) base."""
     with self.test_session() as sess:
         dims = 3
         scale_identity = linop_identity_lib.LinearOperatorScaledIdentity(
             num_rows=dims,
             multiplier=np.float32(1.5),
             is_positive_definite=True)
         scale_diag = linop_diag_lib.LinearOperatorDiag(
             diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
             is_positive_definite=True)
         vdm = vector_diffeomixture_lib.VectorDiffeomixture(
             mix_loc=[[0.], [4.]],
             mix_scale=[10.],
             distribution=normal_lib.Normal(-1., 1.5),
             loc=[np.float32([-2.]), np.float32([0.])],
             scale=[scale_identity, scale_diag],
             validate_args=True)
         self.run_test_sample_consistent_mean_covariance(
             sess, vdm, num_samples=int(1e6), rtol=0.01, cov_atol=0.025)
 def test_static_loc_static_scale_non_matching_event_size_raises(self):
     """loc event size 4 vs scale event size 3: broadcasting must fail."""
     scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
     bad_loc = constant_op.constant(np.zeros((2, 4)))
     with self.assertRaisesRegexp(ValueError, "could not be broadcast"):
         distribution_util.shapes_from_loc_and_scale(bad_loc, scale)
# Beispiel #13  (example-listing marker left over from the snippet aggregator)
# 0
    def _mean_of_covariance_given_quadrature_component(self, diag_only):
        """Compute the probability-weighted sum of per-component covariances.

        Accumulates each component's covariance contribution in one of three
        buckets (scaled-identity / diagonal / full) so that cheap structures
        are not densified unnecessarily, then combines the buckets at the end.

        Args:
          diag_only: Python `bool`; if True, only the diagonal of the result
            is computed and returned.

        Returns:
          A `Tensor` (dense matrix, or diagonal vector when `diag_only`), or
          `None` when there was nothing to accumulate.
        """
        # NOTE(review): `add` below is a None-aware accumulator defined
        # elsewhere in this file (presumably add(None, x) == x) -- confirm.
        # Since we created logits to already be scaled, we can use exp which is
        # slightly cheaper than `self.mixture_distribution.probs`.
        p = math_ops.exp(self.mixture_distribution.logits)

        # To compute E[Cov(Z|V)], we'll add matrices within three categories:
        # scaled-identity, diagonal, and full. Then we'll combine these at the end.
        scaled_identity = None
        diag = None
        full = None

        for k, aff in enumerate(self.interpolated_affine):
            s = aff.scale  # Just in case aff.scale has side-effects, we'll call once.
            if (s is None or isinstance(
                    s, linop_identity_lib.LinearOperatorIdentity)):
                # Identity scale: contribution is just the mixture weight.
                scaled_identity = add(scaled_identity, p[..., k,
                                                         array_ops.newaxis])
            elif isinstance(s,
                            linop_identity_lib.LinearOperatorScaledIdentity):
                # Covariance of c*I is c^2*I, hence the squared multiplier.
                scaled_identity = add(scaled_identity,
                                      (p[..., k, array_ops.newaxis] *
                                       math_ops.square(s.multiplier)))
            elif isinstance(s, linop_diag_lib.LinearOperatorDiag):
                # Covariance of diag(d) is diag(d^2).
                diag = add(diag, (p[..., k, array_ops.newaxis] *
                                  math_ops.square(s.diag_part())))
            else:
                # Generic operator: materialize S S^T (densified).
                x = (p[..., k, array_ops.newaxis, array_ops.newaxis] *
                     s.matmul(s.to_dense(), adjoint_arg=True))
                if diag_only:
                    x = array_ops.matrix_diag_part(x)
                full = add(full, x)

        # We must now account for the fact that the base distribution might have a
        # non-unity variance. Recall that `Cov(SX+m) = S.T Cov(X) S = S.T S Var(X)`.
        # We can scale by `Var(X)` (vs `Cov(X)`) since X corresponds to `d` iid
        # samples from a scalar-event distribution.
        v = self.distribution.variance()
        if scaled_identity is not None:
            scaled_identity *= v
        if diag is not None:
            diag *= v[..., array_ops.newaxis]
        if full is not None:
            full *= v[..., array_ops.newaxis]

        if diag_only:
            # Apparently we don't need the full matrix, just the diagonal.
            r = add(diag, full)
            if r is None and scaled_identity is not None:
                # Only scaled-identity terms: broadcast the scalar over the
                # event dimension.
                ones = array_ops.ones(self.event_shape_tensor(),
                                      dtype=self.dtype)
                return scaled_identity * ones
            return add(r, scaled_identity)

        # `None` indicates we don't know if the result is positive-definite.
        is_positive_definite = (True if all(
            aff.scale.is_positive_definite
            for aff in self.endpoint_affine) else None)

        # Wrap each non-empty bucket in an operator and let add_operators
        # combine them into a single dense matrix.
        to_add = []
        if diag is not None:
            to_add.append(
                linop_diag_lib.LinearOperatorDiag(
                    diag=diag, is_positive_definite=is_positive_definite))
        if full is not None:
            to_add.append(
                linop_full_lib.LinearOperatorFullMatrix(
                    matrix=full, is_positive_definite=is_positive_definite))
        if scaled_identity is not None:
            to_add.append(
                linop_identity_lib.LinearOperatorScaledIdentity(
                    num_rows=self.event_shape_tensor()[0],
                    multiplier=scaled_identity,
                    is_positive_definite=is_positive_definite))

        return (linop_add_lib.add_operators(to_add)[0].to_dense()
                if to_add else None)