Example 1
def eig(a, compute_eigvecs=True):
    if compute_eigvecs:
        vals, vecs = B.eig(a, compute_eigvecs=True)
        vals = B.flatten(vals)
        if B.rank(vecs) == 3:
            # Collapse a batch of eigenvector matrices into a single matrix
            # of column vectors.
            vecs = B.transpose(vecs, perm=(1, 0, 2))
            vecs = B.reshape(vecs, 3, -1)
        # `compute_order` is defined elsewhere in the project and returns
        # sorting indices for the eigenvalues.
        order = compute_order(vals)
        return B.take(vals, order), B.abs(B.take(vecs, order, axis=1))
    else:
        vals = B.flatten(B.eig(a, compute_eigvecs=False))
        return B.take(vals, compute_order(vals))
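
A minimal usage sketch, assuming `B` is the `lab` generic-API module (conventionally `import lab as B`) with its NumPy backend available, and substituting a hypothetical stand-in for the project's `compute_order` helper:

    import lab as B
    import numpy as np

    # Hypothetical stand-in for `compute_order`: sort eigenvalues ascending.
    def compute_order(vals):
        return np.argsort(vals)

    a = np.array([[2.0, 1.0], [1.0, 2.0]])
    print(eig(a, compute_eigvecs=False))  # -> [1. 3.], the sorted eigenvalues.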
Example 2
File: util.py Project: wesselb/varz
def pack(*objs: B.Numeric):
    """Pack objects.

    Args:
        *objs (tensor): Objects to pack.

    Returns:
        tensor: Vector representation of the objects.
    """
    return B.concat(*[B.flatten(obj) for obj in objs], axis=0)
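
A minimal usage sketch with NumPy inputs (assuming `B` is `import lab as B`, as above). Presumably a matching `unpack` restores the original shapes, but that counterpart is not shown here:

    import numpy as np

    a = np.ones((2, 2))
    b = np.zeros(3)
    v = pack(a, b)
    print(v.shape)  # (7,): every input flattened, then concatenated.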
Example 3
    def sample(self, num=1, trace=False):
        """Generate samples from the target distribution.

        Args:
            num (int, optional): Number of samples. Defaults to one.
            trace (bool, optional): Show progress. Defaults to `False`.

        Returns:
            list[tensor]: Samples.
        """
        samples = []

        if trace:
            with wbml.out.Progress(name="Sampling (ESS)",
                                   total=num,
                                   filter={"Attempts": None}) as progress:
                for i in range(num):
                    attempts, ms_per_attempt = self._sample()
                    samples.append(self.x)

                    # Estimate the effective sample size, treating the
                    # first `m` samples as burn-in.
                    m = 20
                    if len(samples) > m:
                        ess = []
                        chain = B.stack(
                            *[B.flatten(x)[0] for x in samples[m:]], axis=0)
                        corrs = autocorr(chain, window=True, lags=5)
                        ess.append(len(samples) / (1 + 2 * np.sum(corrs)))
                        ess = np.mean(ess)
                    else:
                        ess = np.nan

                    progress({
                        "Pseudo-log-likelihood": self.log_lik_x,
                        "Attempts": attempts,
                        "Milliseconds per attempt": ms_per_attempt,
                        "Effective sample size": ess,
                    })

        else:
            for i in range(num):
                self._sample()
                samples.append(self.x)

        return samples
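
The loop above uses the standard autocorrelation-based estimate ESS = N / (1 + 2 * sum of lag autocorrelations). A self-contained NumPy sketch of that estimate, with a plain empirical autocorrelation standing in for the `autocorr` helper (which presumably also applies windowing):

    import numpy as np

    def effective_sample_size(chain, lags=5):
        """Estimate ESS = N / (1 + 2 * sum of the first `lags` autocorrelations)."""
        chain = np.asarray(chain, dtype=float)
        centred = chain - np.mean(chain)
        var = np.mean(centred ** 2)
        corrs = [
            np.mean(centred[:-k] * centred[k:]) / var for k in range(1, lags + 1)
        ]
        return len(chain) / (1 + 2 * np.sum(corrs))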
Example 4
def test_fdd_properties():
    p = GP(1, EQ())

    # Sample observations.
    x = B.linspace(0, 5, 5)
    y = p(x, 0.1).sample()

    # Compute posterior.
    p = p | (p(x, 0.1), y)

    fdd = p(B.linspace(0, 5, 10), 0.2)
    mean, var = fdd.mean, fdd.var

    # Check `var_diag`.
    fdd = p(B.linspace(0, 5, 10), 0.2)
    approx(fdd.var_diag, B.diag(var))

    # Check `mean_var`.
    fdd = p(B.linspace(0, 5, 10), 0.2)
    approx(fdd.mean_var, (mean, var))

    # Check `marginals()`.
    fdd = p(B.linspace(0, 5, 10), 0.2)
    approx(fdd.marginals(), (B.flatten(mean), B.diag(var)))
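
The test relies on an `approx` helper that is not shown. A plausible stand-in (not necessarily the project's own implementation) compares tuples elementwise and everything else numerically:

    import numpy as np

    def approx(x, y, **kw_args):
        # Recurse into tuples; compare tensors with NumPy's tolerance check.
        if isinstance(x, tuple):
            for xi, yi in zip(x, y):
                approx(xi, yi, **kw_args)
        else:
            np.testing.assert_allclose(np.asarray(x), np.asarray(y), **kw_args)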
Example 5
        lambda scheme: RGPCM(
            scheme=scheme,
            window=window,
            scale=scale,
            noise=noise,
            n_u=n_u,
            m_max=n_z // 2,
            t=t,
        ),
    ),
]:
    # Sample data.
    gp_f = GP(kernel)
    gp_y = gp_f + GP(noise * Delta(), measure=gp_f.measure)
    f, y = gp_f.measure.sample(gp_f(t), gp_y(t))
    f, y = B.flatten(f), B.flatten(y)
    wd.save(
        {
            "t": t,
            "f": f,
            "k": B.flatten(kernel(t_k, 0)),
            "y": y,
            "true_logpdf": gp_y(t).logpdf(y),
        },
        slugify(str(kernel)),
        "data.pickle",
    )

    for scheme in ["mean-field", "structured"]:
        model = model_constructor(scheme)
        prefix = (slugify(str(kernel)), scheme, slugify(model.name))
Example 6
tex()
wd = WorkingDirectory("_experiments", "compare_inference")

# Setup experiment.
noise = 0.5
t = B.linspace(0, 20, 500)

# Setup GPCM models.
window = 2
scale = 1
n_u = 40
n_z = 40

# Sample data.
kernel = EQ()
y = B.flatten(GP(kernel)(t, noise).sample())
gp_logpdf = GP(kernel)(t, noise).logpdf(y)


# Run the original mean-field scheme.

model = GPCM(
    scheme="mean-field",
    window=window,
    scale=scale,
    noise=noise,
    n_u=n_u,
    n_z=n_z,
    t=t,
)
# Save initialisation and apply to next models for fair comparison.
Example 7
File: smk.py Project: wesselb/gpcm
# Setup experiment.
noise = 0.1
t = B.linspace(0, 40, 200)
t_k = B.linspace(0, 4, 200)

# Setup GPCM models.
window = 2
scale = 0.25
n_u = 80
n_z = 80

# Sample data.
kernel = (lambda x: B.sin(B.pi * x)) * EQ() + (
    lambda x: B.cos(B.pi * x)) * EQ()
y = B.flatten(GP(kernel)(t, noise).sample())
k = B.flatten(kernel(t_k, 0))


def extract(pred):
    """Extract important statistics from a prediction."""
    return pred.x, pred.mean, pred.var, pred.err_95_lower, pred.err_95_upper
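
A minimal usage sketch; `Pred` below is a hypothetical stand-in for whatever prediction object the model produces, carrying the five attributes accessed above:

    from collections import namedtuple

    # Hypothetical stand-in for a model prediction object.
    Pred = namedtuple("Pred", "x mean var err_95_lower err_95_upper")
    pred = Pred(x=0.0, mean=1.0, var=0.1, err_95_lower=0.4, err_95_upper=1.6)

    x, mean, var, lo, hi = extract(pred)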


if args.train:
    # Train mean-field approximation.
    model = GPCM(
        scheme="mean-field",
        window=window,
        scale=scale,
        noise=noise,