# Imports reconstructed from usage below. The comparison helpers `eq`, `ge`,
# and `approx` are assumed to come from the package's test utilities.
import numpy as np
import torch
from lab.torch import B
from numpy import array
from stheno import EQ, Delta, GP, Graph, Linear

from gpar.model import GPAR


def test_sample():
    graph = Graph()
    x = array([1, 2, 3])[:, None]

    # Test that it produces random samples. Not sure how to test for
    # correctness.
    f1, e1 = GP(EQ(), graph=graph), GP(1e-1 * Delta(), graph=graph)
    f2, e2 = GP(EQ(), graph=graph), GP(1e-1 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))
    yield ge, B.sum(B.abs(gpar.sample(x) - gpar.sample(x))), 1e-3
    yield ge, \
        B.sum(B.abs(gpar.sample(x, latent=True) -
                    gpar.sample(x, latent=True))), \
        1e-3

    # Test that posterior latent samples are around the data that is
    # conditioned on.
    graph = Graph()
    f1, e1 = GP(EQ(), graph=graph), GP(1e-8 * Delta(), graph=graph)
    f2, e2 = GP(EQ(), graph=graph), GP(1e-8 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))
    y = gpar.sample(x, latent=True)
    gpar = gpar | (x, y)
    yield approx, gpar.sample(x), y, 3
    yield approx, gpar.sample(x, latent=True), y, 3
def test_logpdf():
    graph = Graph()
    f1, e1 = GP(EQ(), graph=graph), GP(2e-1 * Delta(), graph=graph)
    f2, e2 = GP(Linear(), graph=graph), GP(1e-1 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))

    # Sample some data from GPAR.
    x = B.linspace(0, 2, 10, dtype=torch.float64)[:, None]
    y = gpar.sample(x, latent=True)

    # Compute logpdf. GPAR factorises the joint density over outputs,
    # log p(y1, y2 | x) = log p(y1 | x) + log p(y2 | x, y1), where the second
    # layer receives the first output as an extra input column.
    logpdf1 = (f1 + e1)(x).logpdf(y[:, 0])
    logpdf2 = (f2 + e2)(B.concat([x, y[:, 0:1]], axis=1)).logpdf(y[:, 1])

    # Test computation of GPAR.
    yield eq, gpar.logpdf(x, y), logpdf1 + logpdf2
    yield eq, gpar.logpdf(x, y, only_last_layer=True), logpdf2

    # Test resuming computation.
    x_int, x_ind_int = gpar.logpdf(x, y, return_inputs=True, outputs=[0])
    yield eq, gpar.logpdf(x_int, y, x_ind=x_ind_int, outputs=[1]), logpdf2

    # Test that sampling missing values gives a stochastic estimate.
    y[1, 0] = np.nan
    yield ge, \
        B.abs(gpar.logpdf(x, y, sample_missing=True) -
              gpar.logpdf(x, y, sample_missing=True)).numpy(), \
        1e-3
import logging

import numpy as np
from lab.torch import B
from plum import Dispatcher
from varz import Vars
from varz.torch import minimise_l_bfgs_b
from wbml.out import Counter

from .model import GPAR, per_output

__all__ = ['GPARRegressor', 'log_transform', 'squishing_transform']

log = logging.getLogger(__name__)

_dispatch = Dispatcher()

#: Log transform for the data.
log_transform = (B.log, B.exp)

#: Squishing transform for the data.
squishing_transform = (lambda x: B.sign(x) * B.log(1 + B.abs(x)),
                       lambda x: B.sign(x) * (B.exp(B.abs(x)) - 1))


def _vector_from_init(init, length):
    # If only a single value is given, create ones.
    if np.size(init) == 1:
        return init * np.ones(length)

    # Multiple values are given. Check that enough values are available.
    init_squeezed = np.squeeze(init)
    if np.ndim(init_squeezed) != 1:
        raise ValueError('Incorrect shape {} of hyperparameters.'
                         ''.format(np.shape(init)))
    if np.size(init_squeezed) < length:  # Squeezing does not change the size.
        raise ValueError('Not enough hyperparameters specified.')

    # Return the first `length` values.
    return np.array(init_squeezed)[:length]
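
# Round-trip sketch for the transforms above (illustrative only; this helper
# is an assumption, not part of the module's API). Each transform is a
# (forward, inverse) pair applied to the data, so the inverse must undo the
# forward map up to numerical error.
def _sketch_transform_roundtrip():
    y_pos = np.array([0.5, 1.0, 3.0])   # The log transform needs positive data.
    y_any = np.array([-2.0, 0.0, 3.0])  # The squishing transform handles any sign.
    f, f_inv = log_transform
    np.testing.assert_allclose(f_inv(f(y_pos)), y_pos)
    g, g_inv = squishing_transform
    np.testing.assert_allclose(g_inv(g(y_any)), y_any)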
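
# Usage sketch for `_vector_from_init` (illustrative only; this helper is an
# assumption, not part of the module's API): a scalar init is broadcast to a
# vector, and a vector init is truncated to the first `length` values.
def _sketch_vector_from_init():
    np.testing.assert_allclose(_vector_from_init(2.0, 3), [2.0, 2.0, 2.0])
    np.testing.assert_allclose(_vector_from_init([1.0, 2.0, 3.0], 2), [1.0, 2.0])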