Example no. 1
        noise = vs.pos(1e-2, name="noise")
        latent_noises = vs.pos(1e-2 * B.ones(m), name="latent_noises")
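        # Unconstrained (p x m) mixing matrix for the ILMM.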
        h = Dense(vs.get(shape=(p, m), name="h"))

        return ILMMPP(kernels, h, noise, latent_noises)

    def objective(vs):
        return -construct_model(vs).logpdf(torch.tensor(x),
                                           torch.tensor(y_norm))

    minimise_l_bfgs_b(objective, vs, trace=True, iters=1000)

    # Predict.
    model = construct_model(vs)
    model = model.condition(torch.tensor(x), torch.tensor(y_norm))
    means, lowers, uppers = B.to_numpy(model.predict(torch.tensor(x)))

    # Undo normalisation.
    means, lowers, uppers = normaliser.unnormalise(means, lowers, uppers)

    # For the purpose of comparison, standardise using the mean of the
    # *training* data. This is not how the SMSE is usually defined!
    pred = pd.DataFrame(means, index=train.index, columns=train.columns)
    smse = ((pred - test)**2).mean(axis=0) / (
        (train.mean(axis=0) - test)**2).mean(axis=0)
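
    # For reference, the SMSE as it is more commonly defined normalises by the
    # variance of the *test* data (equivalently, the MSE of predicting the
    # test mean). A minimal sketch using the `pred` and `test` frames above:
    smse_standard = ((pred - test)**2).mean(axis=0) / test.var(axis=0, ddof=0)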

    # Report average SMSE.
    wbml.out.kv("SMSEs", smse.dropna())
    wbml.out.kv("Average SMSE", smse.mean())

    # Compute PPLP.
Example no. 2
m = 10  # Number of latent processes

# Learn.
vs = Vars(torch.float64)
minimise_l_bfgs_b(lambda vs_: objective(vs_, m, x_data, y_data_norm, locs),
                  vs=vs,
                  trace=True,
                  iters=200)
wbml.out.kv('Learned spatial scales', vs['scales'])

# Predict.
lat_preds, obs_preds = predict(vs, m, x_data, y_data_norm, locs, x_pred)

# Convert to NumPy and undo normalisation.
obs_preds = [
    tuple(x * data_scale[0, i] + data_mean[0, i] for x in B.to_numpy(tup))
    for i, tup in enumerate(obs_preds)
]

# Plot first four latent processes.
plt.figure(figsize=(15, 5))
y_proj, _, S, _ = B.to_numpy(project(vs, m, y_data_norm, locs))
xs, _, _ = model(vs, m)
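# Each panel title reports the latent process's share of the total of S as a
# percentage, i.e. a proxy for the fraction of variance it explains.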
for i in range(4):
    plt.subplot(2, 2, i + 1)
    mean, lower, upper = lat_preds[i]
    plt.title(f'Latent Process {i + 1} (${100 * S[i] / np.sum(S):.1f}\\%$) \n'
              f'{xs[i].display(wbml.out.format)}')
    plt.plot(x_data, y_proj[i], c='tab:blue')
    plt.plot(x_pred, mean, c='tab:green')
    plt.plot(x_pred, lower, c='tab:green', ls='--')
Example no. 3
        [sim.to_numpy()[:args.n].reshape(-1, 1) for sim in sims.values()],
        axis=1)
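    # Empirical correlation matrix estimated directly from the simulations.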
    corr_empirical = cov_to_corr(np.cov(all_obs.T))

    # Compute predictions for latent processes.
    model = construct_model(vs)
    model = model.condition(x_data, y_data, x_ind=vs["x_ind"])
    x_proj, y_proj, _, _ = model.project(x_data, y_data)
    means, lowers, uppers = model.model.predict(x_proj)

    # Save for processing.
    wd.save(
        B.to_numpy({
            "n": args.n,
            "m": m,
            "p": p,
            "m_r": m_r,
            "m_s": m_s,
            "x_proj": x_proj,
            "y_proj": y_proj,
            "means": means,
            "lowers": lowers,
            "uppers": uppers,
            "learned_parameters": {name: vs[name]
                                   for name in vs.names},
            "corr_learned": corr_learned,
            "corr_empirical": corr_empirical,
        }),
        f"results_mr{m_r}_ms{m_s}{suffix}.pickle",
    )
Example no. 4
        u = vs.orth(shape=(p, m), name="u")
        s_sqrt = vs.pos(shape=(m, ), name="s_sqrt")
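        # Mixing matrix H = U * S^(1/2): the columns of U are orthonormal and
        # S^(1/2) = diag(s_sqrt) has positive entries.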
        h = Dense(u * s_sqrt[None, :])

        return ILMMPP(kernels, h, noise, latent_noises)

    def objective(vs):
        return -construct_model(vs).logpdf(torch.tensor(x),
                                           torch.tensor(y_norm))

    minimise_l_bfgs_b(objective, vs, trace=True, iters=1000)

    # Predict.
    model = construct_model(vs)
    model = model.condition(torch.tensor(x), torch.tensor(y_norm))
    means, lowers, uppers = B.to_numpy(model.predict(torch.tensor(x)))

    # Undo normalisation.
    means, lowers, uppers = normaliser.unnormalise(means, lowers, uppers)

    # Report SMSE.
    pred = pd.DataFrame(means, index=train.index, columns=train.columns)
    smse = wbml.metric.smse(pred, test)
    wbml.out.kv("SMSEs", smse.dropna())
    wbml.out.kv("Average SMSEs", smse.mean())

    # Compute PPLP.
    for name, model in [
        ("OILMM", construct_model(vs)),
        ("ILMM", construct_model_ilmm_equivalent(vs)),
    ]: