Example #1
    def _obs(self, x, x_ind, y, w, f, noise):
        # Filter available data points.
        available = ~B.isnan(y[:, 0])
        x = x[available]
        y = y[available]
        w = w[available]

        if self.sparse:
            return PseudoObs(f(x_ind), f(x, noise / w), y)
        else:
            return Obs(f(x, noise / w), y)
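
The pattern above drops rows whose first output is missing and then scales the observation noise by the data-point weights. A minimal NumPy-only sketch of that pattern, with made-up arrays purely for illustration:

import numpy as np

# Hypothetical toy data: five inputs, one missing observation (NaN).
x = np.linspace(0, 1, 5)[:, None]
y = np.array([[0.1], [np.nan], [0.3], [0.4], [0.5]])
w = np.array([1.0, 1.0, 2.0, 1.0, 0.5])
noise = 0.05

# Keep only rows whose first output is observed, as in `_obs` above.
available = ~np.isnan(y[:, 0])
x, y, w = x[available], y[available], w[available]

# Per-point noise variances: dividing by the weights means heavily weighted
# points are treated as less noisy, mirroring `noise / w` above.
per_point_noise = noise / w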
Example #2
    def condition(self, x, y):
        """Condition on data.

        Args:
            x (tensor): Input locations.
            y (tensor): Observed values.

        Returns:
            ILMMPP: Model conditioned on the data.
        """
        obs = Obs(*[(self.ys[i](x), y) for x, i, y in _per_output(x, y)])
        post = self.measure | obs
        return ILMMPP(
            post, [post(x) for x in self.xs], self.h, self.noise_obs, self.noises_latent
        )
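
Examples 2 and 3 rely on a helper `_per_output(x, y)` that is not shown on this page. The sketch below is a hypothetical stand-in that matches how its result is unpacked above (inputs, output index, observations per output); the real helper may differ.

import numpy as np

def _per_output(x, y):
    # Hypothetical sketch only: for each output column, yield the inputs, the
    # output index, and that output's observations with missing (NaN) rows dropped.
    for i in range(y.shape[1]):
        yi = y[:, i]
        available = ~np.isnan(yi)
        yield x[available], i, yi[available]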
Example #3
    def logpdf(self, x, y):
        """Compute the logpdf of data.

        Args:
            x (tensor): Input locations.
            y (tensor): Observed values.

        Returns:
            tensor: Logpdf of data.
        """
        obs = Obs(*[(self.ys[i](x), y) for x, i, y in _per_output(x, y)])
        return self.measure.logpdf(obs)
Example #4
    def _obs(self, x, x_ind, y, w, f, e):
        # Filter available data points.
        available = ~B.isnan(y[:, 0])
        x = x[available]
        y = y[available]
        w = w[available]

        # Perform weighting.
        x = WeightedUnique(x, w=w)

        if self.sparse:
            return SparseObs(f(x_ind), e, f(x), y)
        else:
            return Obs((f + e)(x), y)
Example #5
    def condition(self, x, y, w=None, x_ind=None):
        """Condition the model.

        Args:
            x (matrix): Locations of training data.
            y (matrix): Observations of training data.
            w (matrix, optional): Weights of training data.
            x_ind (matrix, optional): Locations of inducing points.

        Returns:
            IGP: Model conditioned on the data.
        """
        w = _init_weights(w, y)

        fs_post = []
        for f, ni, (xi, yi, wi) in zip(self.fs, self.noises, _per_output(x, y, w)):
            if x_ind is None:
                obs = Obs(f(xi, ni / wi), yi)
            else:
                obs = PseudoObs(f(x_ind), f(xi, ni / wi), yi)
            fs_post.append(f | obs)

        return IGP(fs_post, self.noises)
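
The branch above switches between exact conditioning (`Obs`) and pseudo-point conditioning (`PseudoObs`). A standalone sketch of that choice with made-up data, assuming the Stheno API used in Examples 1 and 5:

import numpy as np
from stheno import EQ, GP, Obs, PseudoObs

# Hypothetical toy data.
x = np.linspace(0, 10, 50)
y = np.sin(x) + 0.1 * np.random.randn(50)
x_ind = np.linspace(0, 10, 10)  # inducing-point locations
noise = 0.1 ** 2

f = GP(EQ())

# Exact conditioning, as in the `x_ind is None` branch.
f_post = f | Obs(f(x, noise), y)

# Pseudo-point (sparse) conditioning, as in the other branch.
f_post_sparse = f | PseudoObs(f(x_ind), f(x, noise), y)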
Example #6
def test_update_inputs():
    prior = Measure()
    f = GP(EQ(), measure=prior)

    x = np.array([[1], [2], [3]])
    y = np.array([[4], [5], [6]], dtype=float)
    res = B.concat(x, y, axis=1)
    x_ind = np.array([[6], [7]])
    res_ind = np.array([[6, 0], [7, 0]])

    # Check vanilla case.
    gpar = GPAR(x_ind=x_ind)
    approx(gpar._update_inputs(x, x_ind, y, f, None), (res, res_ind))

    # Check imputation with prior.
    gpar = GPAR(impute=True, x_ind=x_ind)
    this_y = y.copy()
    this_y[1] = np.nan
    this_res = res.copy()
    this_res[1, 1] = 0
    approx(gpar._update_inputs(x, x_ind, this_y, f, None), (this_res, res_ind))

    # Check replacing with prior.
    gpar = GPAR(replace=True, x_ind=x_ind)
    this_y = y.copy()
    this_y[1] = np.nan
    this_res = res.copy()
    this_res[0, 1] = 0
    this_res[1, 1] = np.nan
    this_res[2, 1] = 0
    approx(gpar._update_inputs(x, x_ind, this_y, f, None), (this_res, res_ind))

    # Check imputation and replacing with prior.
    gpar = GPAR(impute=True, replace=True, x_ind=x_ind)
    this_res = res.copy()
    this_res[:, 1] = 0
    approx(gpar._update_inputs(x, x_ind, y, f, None), (this_res, res_ind))

    # Construct observations and update result for inducing points.
    obs = Obs(f(np.array([1, 2, 3, 6, 7])), np.array([9, 10, 11, 12, 13]))
    res_ind = np.array([[6, 12], [7, 13]])

    # Check imputation with posterior.
    gpar = GPAR(impute=True, x_ind=x_ind)
    this_y = y.copy()
    this_y[1] = np.nan
    this_res = res.copy()
    this_res[1, 1] = 10
    approx(gpar._update_inputs(x, x_ind, this_y, f, obs), (this_res, res_ind))

    # Check replacing with posterior.
    gpar = GPAR(replace=True, x_ind=x_ind)
    this_y = y.copy()
    this_y[1] = np.nan
    this_res = res.copy()
    this_res[0, 1] = 9
    this_res[1, 1] = np.nan
    this_res[2, 1] = 11
    approx(gpar._update_inputs(x, x_ind, this_y, f, obs), (this_res, res_ind))

    # Check imputation and replacing with posterior.
    gpar = GPAR(impute=True, replace=True, x_ind=x_ind)
    this_res = res.copy()
    this_res[0, 1] = 9
    this_res[1, 1] = 10
    this_res[2, 1] = 11
    approx(gpar._update_inputs(x, x_ind, y, f, obs), (this_res, res_ind))
Example #7
    def _obs(self, x, x_ind, y, f, e):
        # Filter available data points.
        available = ~B.isnan(y[:, 0])
        if self.sparse:
            return SparseObs(f(x_ind), e, f(x[available]), y[available])
        else:
            return Obs((f + e)(x[available]), y[available])
Example #8
# Define points to predict at.
x = np.linspace(0, 10, 100)
x_obs = np.linspace(0, 10, 20)

# Construct a prior.
w = lambda x: np.exp(-x**2 / 0.5)  # Window
b = [(GP(EQ()) * w).shift(xi) for xi in x_obs]  # Weighted basis functions
f = sum(b)  # Latent function
e = GP(Delta())  # Noise
y = f + 0.2 * e  # Observation model

# Sample a true, underlying function and observations.
f_true, y_obs = model.sample(f(x), y(x_obs))

# Condition on the observations to make predictions.
obs = Obs(y(x_obs), y_obs)
f, b = (f | obs, b | obs)

# Plot result.
for i, bi in enumerate(b):
    mean, lower, upper = bi(x).marginals()
    kw_args = {'label': 'Basis functions'} if i == 0 else {}
    plt.plot(x, mean, c='tab:orange', **kw_args)
plt.plot(x, f_true, label='True', c='tab:blue')
plt.scatter(x_obs, y_obs, label='Observations', c='tab:red')
mean, lower, upper = f(x).marginals()
plt.plot(x, mean, label='Prediction', c='tab:green')
plt.plot(x, lower, ls='--', c='tab:green')
plt.plot(x, upper, ls='--', c='tab:green')
wbml.plot.tweak()
Example #9
# Observation model.
y = f + 0.5 * e

# Sample a true, underlying function and observations.
f_true_smooth, f_true_wiggly, f_true_periodic, f_true_linear, f_true, y_obs = \
    model.sample(f_smooth(x),
                 f_wiggly(x),
                 f_periodic(x),
                 f_linear(x),
                 f(x),
                 y(x_obs))

# Now condition on the observations and make predictions for the latent
# function and its various components.
f_smooth, f_wiggly, f_periodic, f_linear, f = \
    (f_smooth, f_wiggly, f_periodic, f_linear, f) | Obs(y(x_obs), y_obs)

pred_smooth = f_smooth(x).marginals()
pred_wiggly = f_wiggly(x).marginals()
pred_periodic = f_periodic(x).marginals()
pred_linear = f_linear(x).marginals()
pred_f = f(x).marginals()


# Plot results.
def plot_prediction(x, f, pred, x_obs=None, y_obs=None):
    plt.plot(x, f, label='True', c='tab:blue')
    if x_obs is not None:
        plt.scatter(x_obs, y_obs, label='Observations', c='tab:red')
    mean, lower, upper = pred
    plt.plot(x, mean, label='Prediction', c='tab:green')
Example #10
def test_update_inputs():
    graph = Graph()
    f = GP(EQ(), graph=graph)

    x = array([[1], [2], [3]])
    y = array([[4], [5], [6]])
    res = B.concat([x, y], axis=1)
    x_ind = array([[6], [7]])
    res_ind = array([[6, 0], [7, 0]])

    # Check vanilla case.
    gpar = GPAR(x_ind=x_ind)
    yield allclose, gpar._update_inputs(x, x_ind, y, f, None), (res, res_ind)

    # Check imputation with prior.
    gpar = GPAR(impute=True, x_ind=x_ind)
    this_y = y.clone()
    this_y[1] = np.nan
    this_res = res.clone()
    this_res[1, 1] = 0
    yield allclose, \
          gpar._update_inputs(x, x_ind, this_y, f, None), \
          (this_res, res_ind)

    # Check replacing with prior.
    gpar = GPAR(replace=True, x_ind=x_ind)
    this_y = y.clone()
    this_y[1] = np.nan
    this_res = res.clone()
    this_res[0, 1] = 0
    this_res[1, 1] = np.nan
    this_res[2, 1] = 0
    yield allclose, \
          gpar._update_inputs(x, x_ind, this_y, f, None), \
          (this_res, res_ind)

    # Check imputation and replacing with prior.
    gpar = GPAR(impute=True, replace=True, x_ind=x_ind)
    this_res = res.clone()
    this_res[:, 1] = 0
    yield allclose, \
          gpar._update_inputs(x, x_ind, y, f, None), \
          (this_res, res_ind)

    # Construct observations and update result for inducing points.
    obs = Obs(f(array([1, 2, 3, 6, 7])), array([9, 10, 11, 12, 13]))
    res_ind = array([[6, 12], [7, 13]])

    # Check imputation with posterior.
    gpar = GPAR(impute=True, x_ind=x_ind)
    this_y = y.clone()
    this_y[1] = np.nan
    this_res = res.clone()
    this_res[1, 1] = 10
    yield allclose, \
          gpar._update_inputs(x, x_ind, this_y, f, obs), \
          (this_res, res_ind)

    # Check replacing with posterior.
    gpar = GPAR(replace=True, x_ind=x_ind)
    this_y = y.clone()
    this_y[1] = np.nan
    this_res = res.clone()
    this_res[0, 1] = 9
    this_res[1, 1] = np.nan
    this_res[2, 1] = 11
    yield allclose, \
          gpar._update_inputs(x, x_ind, this_y, f, obs), \
          (this_res, res_ind)

    # Check imputation and replacing with posterior.
    gpar = GPAR(impute=True, replace=True, x_ind=x_ind)
    this_res = res.clone()
    this_res[0, 1] = 9
    this_res[1, 1] = 10
    this_res[2, 1] = 11
    yield allclose, \
          gpar._update_inputs(x, x_ind, y, f, obs), \
          (this_res, res_ind)
Example #11
    def obs(self, x, ys):
        return Obs(*((p(x), y) for p, y in zip(self.ps, ys)))
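
This helper packs one `(process(x), y)` pair per process into a single `Obs`, so all processes are conditioned jointly. A small usage sketch with hypothetical data, assuming the Stheno API seen in the other examples:

import numpy as np
from stheno import EQ, GP, Measure, Obs

# Two processes under one measure, observed at the same inputs (made-up data).
prior = Measure()
p1 = GP(EQ(), measure=prior)
p2 = GP(EQ(), measure=prior)

x = np.linspace(0, 5, 20)
y1 = np.sin(x)
y2 = np.cos(x)

# Joint observations for both processes, mirroring `obs` above.
obs = Obs((p1(x), y1), (p2(x), y2))
post = prior | obs  # posterior measure conditioned on both processes at once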
Example #12
# Construct the model.
slope = GP(1)
intercept = GP(5)
f = slope * (lambda x: x) + intercept

e = 0.2 * GP(Delta())  # Noise model

y = f + e  # Observation model

# Sample a slope, intercept, underlying function, and observations.
true_slope, true_intercept, f_true, y_obs = \
    model.sample(slope(0), intercept(0), f(x), y(x_obs))

# Condition on the observations to make predictions.
slope, intercept, f = (slope, intercept, f) | Obs(y(x_obs), y_obs)
mean, lower, upper = f(x).marginals()

print('true slope', true_slope)
print('predicted slope', slope(0).mean)
print('true intercept', true_intercept)
print('predicted intercept', intercept(0).mean)

# Plot result.
plt.plot(x, f_true, label='True', c='tab:blue')
plt.scatter(x_obs, y_obs, label='Observations', c='tab:red')
plt.plot(x, mean, label='Prediction', c='tab:green')
plt.plot(x, lower, ls='--', c='tab:green')
plt.plot(x, upper, ls='--', c='tab:green')
plt.legend()
plt.show()
Example #13
# Define points to predict at.
x = np.linspace(0, 10, 200)
x_obs = np.linspace(0, 10, 10)

# Construct the model.
f = 0.7 * GP(EQ()).stretch(1.5)
e = 0.2 * GP(Delta())

# Construct derivatives via finite differences.
df = f.diff_approx(1)
ddf = f.diff_approx(2)
dddf = f.diff_approx(3) + e

# Fix the integration constants.
f, df, ddf, dddf = (f, df, ddf, dddf) | Obs((f(0), 1), (df(0), 0),
                                            (ddf(0), -1))

# Sample observations.
y_obs = np.sin(x_obs) + 0.2 * np.random.randn(*x_obs.shape)

# Condition on the observations to make predictions.
f, df, ddf, dddf = (f, df, ddf, dddf) | Obs(dddf(x_obs), y_obs)

# And make predictions.
pred_iiif = f(x).marginals()
pred_iif = df(x).marginals()
pred_if = ddf(x).marginals()
pred_f = dddf(x).marginals()


# Plot result.
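The snippet is cut off at the plotting step. One hypothetical way to finish it (not the original code), reusing the `marginals()` unpacking and assuming matplotlib is imported as `plt` as in the other examples:

# Hypothetical plotting code; the original snippet is truncated here.
for name, pred in [('f', pred_iiif), ("f'", pred_iif),
                   ("f''", pred_if), ("f'''", pred_f)]:
    mean, lower, upper = pred
    plt.plot(x, mean, label=name)
    plt.plot(x, lower, ls='--', c='tab:gray')
    plt.plot(x, upper, ls='--', c='tab:gray')
plt.scatter(x_obs, y_obs, label='Observations', c='tab:red')
plt.legend()
plt.show()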