Example #1
    def construct_model_ilmm_equivalent(vs):
        kernels = [
            vs.pos(1, name=f"{i}/var") *
            Matern12().stretch(vs.pos(0.1, name=f"{i}/scale"))
            for i in range(m)
        ]
        noise = vs.pos(1e-2, name="noise")
        latent_noises = vs.pos(1e-2 * B.ones(m), name="latent_noises")
        u = vs.orth(shape=(p, m), name="u")
        s_sqrt = vs.pos(shape=(m, ), name="s_sqrt")
        h = Dense(u * s_sqrt[None, :])

        return ILMMPP(kernels, h, noise, latent_noises)
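
The mixing matrix here is built by scaling the columns of the orthogonal factor: `u * s_sqrt[None, :]` is the broadcasting form of right-multiplying by a diagonal matrix. A minimal NumPy sketch of that equivalence, with hypothetical dimensions:

import numpy as np

p, m = 5, 3  # hypothetical output and latent dimensions
u = np.linalg.qr(np.random.randn(p, m))[0]  # orthonormal columns
s_sqrt = np.abs(np.random.randn(m)) + 0.1   # positive scales

# Column-wise broadcasting equals right-multiplication by a diagonal matrix.
assert np.allclose(u * s_sqrt[None, :], u @ np.diag(s_sqrt))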
Example #2
def compute_K_u(model):
    """Covariance matrix of inducing variables :math:`u` associated with
    :math:`h`.

    Args:
        model (:class:`.gprv.GPRV`): Model.

    Returns:
        tensor: :math:`K_u`.
    """
    return Dense(
        (model.gamma_t**2 / (2 * model.gamma))
        * B.exp(-model.gamma * B.abs(model.t_u[:, None] - model.t_u[None, :]))
    )
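
The matrix built here is an exponential (Matern-1/2) covariance over the inducing inputs, scaled by gamma_t^2 / (2 gamma). A plain NumPy sketch of the same computation, assuming scalar `gamma` and `gamma_t` and a vector `t_u` (the helper name `k_u_numpy` is hypothetical):

import numpy as np

def k_u_numpy(t_u, gamma, gamma_t):
    # (gamma_t**2 / (2 * gamma)) * exp(-gamma * |t_i - t_j|), as in compute_K_u.
    dists = np.abs(t_u[:, None] - t_u[None, :])
    return (gamma_t**2 / (2 * gamma)) * np.exp(-gamma * dists)

k_u = k_u_numpy(np.linspace(0, 1, 4), gamma=1.0, gamma_t=0.5)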
Example #3
    def construct_model(vs):
        if args.separable:
            # Copy same kernel `m` times.
            kernel = [
                Mat52().stretch(vs.bnd(6 * 30, lower=60, name="k_scale"))
            ]
            kernels = kernel * m
        else:
            # Parametrise different kernels.
            kernels = [
                Mat52().stretch(vs.bnd(6 * 30, lower=60, name=f"{i}/k_scale"))
                for i in range(m)
            ]
        noise = vs.bnd(1e-2, name="noise")
        latent_noises = vs.bnd(1e-2 * B.ones(m), name="latent_noises")

        # Construct component of the mixing matrix over simulators.
        u = vs.orth(init=u_s_init, shape=(p_s, m_s), name="sims/u")
        s_sqrt = vs.bnd(init=s_sqrt_s_init, shape=(m_s, ), name="sims/s_sqrt")

        u_s = Dense(u)
        s_sqrt_s = Diagonal(s_sqrt)

        # Construct components of the mixing matrix over space from a
        # covariance.
        scales = vs.bnd(init=scales_init, name="space/scales")
        k = Mat52().stretch(scales)

        u, s, _ = B.svd(B.dense(k(loc)))
        u_r = Dense(u[:, :m_r])
        s_sqrt_r = Diagonal(B.sqrt(s[:m_r]))

        # Compose.
        s_sqrt = Kronecker(s_sqrt_s, s_sqrt_r)
        u = Kronecker(u_s, u_r)

        return OILMM(kernels, u, s_sqrt, noise, latent_noises)
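
The final composition forms the mixing matrix as a Kronecker product of the per-simulator and spatial factors. A small NumPy sketch of the dense equivalent, with hypothetical dimensions:

import numpy as np

p_s, m_s, p_r, m_r = 3, 2, 4, 2  # hypothetical dimensions
u_s = np.linalg.qr(np.random.randn(p_s, m_s))[0]
u_r = np.linalg.qr(np.random.randn(p_r, m_r))[0]
s_sqrt_s = np.diag(np.random.rand(m_s) + 0.1)
s_sqrt_r = np.diag(np.random.rand(m_r) + 0.1)

# Dense analogues of Kronecker(u_s, u_r) and Kronecker(s_sqrt_s, s_sqrt_r).
u = np.kron(u_s, u_r)
s_sqrt = np.kron(s_sqrt_s, s_sqrt_r)
h = u @ s_sqrt  # full mixing matrix of shape (p_s * p_r, m_s * m_r)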
Example #4
def compute_K_u(model):
    """Covariance matrix of inducing variables :math:`u` associated with
    :math:`h`.

    Args:
        model (:class:`.gpcm.GPCM`): Model.

    Returns:
        tensor: :math:`K_u`.
    """
    return Dense(
        model.k_h(var("t1"), var("t2")).eval(
            t1=model.t_u[:, None], t2=model.t_u[None, :]
        )
    )
Example #5
    def _project_pattern(self, x, y, pattern):
        # Check whether all data is available.
        no_missing = all(pattern)

        if no_missing:
            # All data is available. Nothing to be done.
            u = self.u
        else:
            # Data is missing. Pick the available entries.
            y = B.take(y, pattern, axis=1)
            # Ensure that `u` remains a structured matrix.
            u = Dense(B.take(self.u, pattern))

        # Get number of data points and outputs in this part of the data.
        n = B.shape(x)[0]
        p = sum(pattern)

        # Perform projection.
        proj_y_partial = B.matmul(y, B.pinv(u), tr_b=True)
        proj_y = B.matmul(proj_y_partial, B.inv(self.s_sqrt), tr_b=True)

        # Compute projected noise.
        u_square = B.matmul(u, u, tr_a=True)
        proj_noise = (
            self.noise_obs / B.diag(self.s_sqrt) ** 2 * B.diag(B.pd_inv(u_square))
        )

        # Convert projected noise to weights.
        noises = self.model.noises
        weights = noises / (noises + proj_noise)
        proj_w = B.ones(B.dtype(weights), n, self.m) * weights[None, :]

        # Compute Frobenius norm.
        frob = B.sum(y ** 2)
        frob = frob - B.sum(proj_y_partial * B.matmul(proj_y_partial, u_square))

        # Compute regularising term.
        reg = 0.5 * (
            n * (p - self.m) * B.log(2 * B.pi * self.noise_obs)
            + frob / self.noise_obs
            + n * B.logdet(B.matmul(u, u, tr_a=True))
            + n * 2 * B.logdet(self.s_sqrt)
        )

        return x, proj_y, proj_w, reg
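
The projection maps the observations through the pseudo-inverse of the (possibly row-subsetted) mixing matrix. A hedged NumPy sketch of just that step, leaving out the noise and regularisation bookkeeping and using hypothetical data:

import numpy as np

n, p, m = 10, 5, 3  # hypothetical sizes
u = np.linalg.qr(np.random.randn(p, m))[0]
s_sqrt = np.diag(np.random.rand(m) + 0.1)
y = np.random.randn(n, p)

# proj_y = y @ pinv(u)^T @ inv(s_sqrt)^T, as in the projection above.
proj_y_partial = y @ np.linalg.pinv(u).T
proj_y = proj_y_partial @ np.linalg.inv(s_sqrt).T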
Example #6
def test_normal_arithmetic(normal1, normal2):
    a = Dense(B.randn(3, 3))
    b = 5.0

    # Test matrix multiplication.
    approx(normal1.lmatmul(a).mean, a @ normal1.mean)
    approx(normal1.lmatmul(a).var, a @ normal1.var @ a.T)
    approx(normal1.rmatmul(a).mean, a.T @ normal1.mean)
    approx(normal1.rmatmul(a).var, a.T @ normal1.var @ a)

    # Test multiplication.
    approx((normal1 * b).mean, normal1.mean * b)
    approx((normal1 * b).var, normal1.var * b**2)
    approx((b * normal1).mean, normal1.mean * b)
    approx((b * normal1).var, normal1.var * b**2)
    with pytest.raises(NotFoundLookupError):
        normal1.__mul__(normal1)
    with pytest.raises(NotFoundLookupError):
        normal1.__rmul__(normal1)

    # Test addition.
    approx((normal1 + normal2).mean, normal1.mean + normal2.mean)
    approx((normal1 + normal2).var, normal1.var + normal2.var)
    approx(normal1.__radd__(b).mean, normal1.mean + b)
    approx(normal1.__radd__(b).var, normal1.var)
    with pytest.raises(NotFoundLookupError):
        normal1.__add__(RandomVector())
    with pytest.raises(NotFoundLookupError):
        normal1.__radd__(RandomVector())

    # Test negation.
    approx((-normal1).mean, -normal1.mean)
    approx((-normal1).var, normal1.var)

    # Test subtraction.
    approx((normal1 - normal2).mean, normal1.mean - normal2.mean)
    approx((normal1 - normal2).var, normal1.var + normal2.var)
    approx(normal1.__rsub__(normal2).mean, normal2.mean - normal1.mean)
    approx(normal1.__rsub__(normal2).var, normal1.var + normal2.var)

    # Test division.
    approx(normal1.__div__(b).mean, normal1.mean / b)
    approx(normal1.__div__(b).var, normal1.var / b**2)
    approx(normal1.__truediv__(b).mean, normal1.mean / b)
    approx(normal1.__truediv__(b).var, normal1.var / b**2)
Example #7
    def construct_model(vs, m):
        kernels = [
            vs.pos(0.5, name=f"{i}/k_var") *
            Matern52().stretch(vs.bnd(2 * 30, name=f"{i}/k_scale")) +
            vs.pos(0.5, name=f"{i}/k_per_var") * (Matern52().stretch(
                vs.bnd(1.0, name=f"{i}/k_per_scale")).periodic(365))
            for i in range(m)
        ]
        noise = vs.pos(1e-2, name="noise")
        latent_noises = vs.pos(1e-2 * B.ones(m), name="latent_noises")

        # Construct orthogonal matrix and time it.
        time_h_start = time.time()
        u = Dense(vs.orth(shape=(p, m), name="u"))
        s_sqrt = Diagonal(vs.pos(shape=(m, ), name="s_sqrt"))
        dur_h = time.time() - time_h_start

        return OILMM(kernels, u, s_sqrt, noise, latent_noises), dur_h
Example #8
    def construct_model(vs):
        kernels = [
            vs.pos(0.5, name=f"{i}/k_var") *
            Matern52().stretch(vs.bnd(2 * 30, name=f"{i}/k_scale")) +
            vs.pos(0.5, name=f"{i}/k_per_var") * (Matern52().stretch(
                vs.bnd(1.0, name=f"{i}/k_per_scale")).periodic(365))
            for i in range(m)
        ]
        latent_noises = vs.pos(1e-2 * B.ones(m), name="latent_noises")
        noise = vs.pos(1e-2, name="noise")

        # Construct components of mixing matrix from a covariance over
        # outputs.
        variance = vs.pos(1, name="h/variance")
        scales = vs.pos(init=scales_init, name="h/scales")
        k = variance * Matern52().stretch(scales)
        u, s, _ = B.svd(B.dense(B.reg(k(loc))))
        u = Dense(u[:, :m])
        s_sqrt = Diagonal(B.sqrt(s[:m]))

        return OILMM(kernels, u, s_sqrt, noise, latent_noises)
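
Taking the leading singular vectors and values of a covariance over outputs, as done above, amounts to a truncated eigendecomposition of that covariance. A minimal NumPy analogue, with a hypothetical covariance matrix standing in for `B.dense(B.reg(k(loc)))`:

import numpy as np

p, m = 6, 3  # hypothetical numbers of outputs and latent processes
a = np.random.randn(p, p)
k_loc = a @ a.T + 1e-6 * np.eye(p)  # hypothetical covariance over outputs

u_full, s_full, _ = np.linalg.svd(k_loc)
u = u_full[:, :m]                      # analogue of Dense(u[:, :m])
s_sqrt = np.diag(np.sqrt(s_full[:m]))  # analogue of Diagonal(B.sqrt(s[:m]))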
Example #9
def test_compare_ilmm():
    # Setup models.
    kernels = [EQ(), 2 * EQ().stretch(1.5)]
    noise_obs = 0.1
    noises_latent = np.array([0.1, 0.2])
    u, s_sqrt = B.svd(B.randn(3, 2))[:2]
    u = Dense(u)
    s_sqrt = Diagonal(s_sqrt)

    # Construct models.
    ilmm = ILMMPP(kernels, u @ s_sqrt, noise_obs, noises_latent)
    oilmm = OILMM(kernels, u, s_sqrt, noise_obs, noises_latent)

    # Construct data.
    x = B.linspace(0, 3, 5)
    y = ilmm.sample(x, latent=False)
    x2 = B.linspace(4, 7, 5)
    y2 = ilmm.sample(x2, latent=False)

    # Check LML before conditioning.
    approx(ilmm.logpdf(x, y), oilmm.logpdf(x, y))
    approx(ilmm.logpdf(x2, y2), oilmm.logpdf(x2, y2))

    ilmm = ilmm.condition(x, y)
    oilmm = oilmm.condition(x, y)

    # Check LML after conditioning.
    approx(ilmm.logpdf(x, y), oilmm.logpdf(x, y))
    approx(ilmm.logpdf(x2, y2), oilmm.logpdf(x2, y2))

    # Predict.
    means_pp, lowers_pp, uppers_pp = ilmm.predict(x2)
    means, lowers, uppers = oilmm.predict(x2)

    # Check predictions.
    approx(means_pp, means)
    approx(lowers_pp, lowers)
    approx(uppers_pp, uppers)
Example #10
    def __call__(self, x, y):
        return Dense(self._compute(B.pw_dists(x, y)))
Example #11
    def __call__(self, x, y):
        return Dense(B.exp(-B.pw_dists(x, y)))
Example #12
    def __call__(self, x, y):
        if x is y:
            return B.fill_diag(B.one(x), B.shape(uprank(x))[0])
        else:
            return Dense(self._compute(B.pw_dists2(uprank(x), uprank(y))))
Example #13
    def objective(vs):
        x_ind = vs.unbounded(x_ind_init, name="x_ind")
        return -construct_model(vs).logpdf(x_data, y_data, x_ind=x_ind)

    minimise_l_bfgs_b(objective, vs, trace=True, iters=args.i)

    # Print variables.
    vs.print()

    def cov_to_corr(k):
        std = B.sqrt(B.diag(k))
        return k / std[:, None] / std[None, :]

    # Compute correlations between simulators.
    u = Dense(vs["sims/u"])
    s_sqrt = Diagonal(vs["sims/s_sqrt"])
    k = u @ s_sqrt @ s_sqrt @ u.T
    std = B.sqrt(B.diag(k))
    corr_learned = cov_to_corr(k)

    # Compute empirical correlations.
    all_obs = np.concatenate(
        [sim.to_numpy()[:args.n].reshape(-1, 1) for sim in sims.values()],
        axis=1)
    corr_empirical = cov_to_corr(np.cov(all_obs.T))

    # Compute predictions for latent processes.
    model = construct_model(vs)
    model = model.condition(x_data, y_data, x_ind=vs["x_ind"])
    x_proj, y_proj, _, _ = model.project(x_data, y_data)
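
The `cov_to_corr` helper normalises a covariance by its marginal standard deviations to obtain a correlation matrix. The same operation in plain NumPy, applied to a hypothetical covariance:

import numpy as np

def cov_to_corr_numpy(k):
    std = np.sqrt(np.diag(k))
    return k / std[:, None] / std[None, :]

a = np.random.randn(4, 4)
corr = cov_to_corr_numpy(a @ a.T + np.eye(4))  # unit diagonal by construction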
Example #14
    def __call__(self, x, y):
        pw_sums_raised = B.power(B.pw_sums(B.add(x, self.beta), y), self.alpha)
        return Dense(B.divide(self._compute_beta_raised(), pw_sums_raised))
Example #15
def test_cholesky_square_assertion():
    with pytest.raises(AssertionError):
        B.cholesky(Dense(B.randn(3, 4)))
Example #16
def test_structured():
    assert structured(Diagonal(B.ones(3)))
    assert not structured(Dense(B.ones(3, 3)))
    assert not structured(B.ones(3, 3))
Example #17
def test_conversion_to_dense(diag1):
    approx(Dense(diag1), diag1)
    assert isinstance(Dense(diag1).mat, B.Numeric)
Example #18
def test_dense_attributes():
    mat = B.ones(3, 3)
    dense = Dense(mat)
    assert dense.mat is mat
Example #19
    def __call__(self, x, y):
        if x is y:
            return B.fill_diag(B.one(x), num_elements(x))
        else:
            return Dense(self._compute(B.pw_dists2(x, y)))
Example #20
def test_root_square_assertion():
    with pytest.raises(AssertionError):
        B.root(Dense(B.randn(3, 4)))
Example #21
def generate(code):
    """Generate a random tensor of a particular type, specified with a code.

    Args:
        code (str): Code of the matrix.

    Returns:
        tensor: Random tensor.
    """
    mat_code, shape_code = code.split(":")

    # Parse shape.
    if shape_code == "":
        shape = ()
    else:
        shape = tuple(int(d) for d in shape_code.split(","))

    if mat_code == "randn":
        return B.randn(*shape)
    elif mat_code == "randn_pd":
        mat = B.randn(*shape)

        # If it is a scalar or vector, just pointwise square it.
        if len(shape) in {0, 1}:
            return mat**2 + 1
        else:
            return B.matmul(mat, mat, tr_b=True) + B.eye(shape[0])

    elif mat_code == "zero":
        return Zero(B.default_dtype, *shape)

    elif mat_code == "const":
        return Constant(B.randn(), *shape)
    elif mat_code == "const_pd":
        return Constant(B.randn()**2 + 1, *shape)

    elif mat_code == "lt":
        mat = B.vec_to_tril(B.randn(int(0.5 * shape[0] * (shape[0] + 1))))
        return LowerTriangular(mat)
    elif mat_code == "lt_pd":
        mat = generate(f"randn_pd:{shape[0]},{shape[0]}")
        return LowerTriangular(B.cholesky(B.reg(mat)))

    elif mat_code == "ut":
        mat = B.vec_to_tril(B.randn(int(0.5 * shape[0] * (shape[0] + 1))))
        return UpperTriangular(B.transpose(mat))
    elif mat_code == "ut_pd":
        mat = generate(f"randn_pd:{shape[0]},{shape[0]}")
        return UpperTriangular(B.transpose(B.cholesky(B.reg(mat))))

    elif mat_code == "dense":
        return Dense(generate(f"randn:{shape_code}"))
    elif mat_code == "dense_pd":
        return Dense(generate(f"randn_pd:{shape_code}"))

    elif mat_code == "diag":
        return Diagonal(generate(f"randn:{shape_code}"))
    elif mat_code == "diag_pd":
        return Diagonal(generate(f"randn_pd:{shape_code}"))

    else:
        raise RuntimeError(f'Cannot parse generation code "{code}".')
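
A hedged usage sketch of `generate`, based only on the codes parsed above (the structured-matrix classes are assumed to be imported as in the example):

# Codes follow the "<matrix type>:<comma-separated shape>" convention.
dense = generate("dense:3,3")        # unstructured random dense matrix
dense_pd = generate("dense_pd:4,4")  # positive-definite dense matrix
diag_pd = generate("diag_pd:5")      # positive-definite diagonal matrix
scalar = generate("randn:")          # empty shape code yields a scalar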
Example #22
    def __call__(self, x, y):
        dists = B.maximum(B.pw_dists(x, y), 1e-10)
        return Dense(B.divide(B.log(dists + 1), dists))
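
A plain NumPy rendering of the same kernel evaluation, assuming one-dimensional inputs so that the pairwise distances are simple absolute differences:

import numpy as np

def log_kernel_numpy(x, y):
    # log(d + 1) / d with d clipped away from zero, as in __call__ above.
    dists = np.maximum(np.abs(x[:, None] - y[None, :]), 1e-10)
    return np.log(dists + 1) / dists

k = log_kernel_numpy(np.linspace(0, 1, 4), np.linspace(0, 2, 5))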