Example #1
def test_boxing_inplace(generate, Numeric):
    k = EQ()
    x = generate(10)

    # Test in-place addition.
    num = generate()
    num += generate()
    assert isinstance(num, Numeric)

    num += k
    assert isinstance(num, Element)

    num = generate()
    num += k(x)
    assert isinstance(num, Element)

    # Test in-place multiplication.
    num = generate()
    num *= generate()
    assert isinstance(num, Numeric)

    num *= k
    assert isinstance(num, Element)

    num = generate()
    num *= k(x)
    assert isinstance(num, Element)
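The boxing tests above exercise stheno's kernel algebra: combining a plain
number with a kernel or with an evaluated kernel matrix promotes ("boxes") the
number into the algebra. A minimal sketch of the behaviour, assuming stheno's
`EQ`:

from stheno import EQ

k = EQ()
boxed = 1.0 + k     # The float is boxed into the kernel algebra.
print(type(boxed))  # An algebra Element (a sum kernel), not a float.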
Example #2
    def __init__(self, process_dimension, cnn='simple', unit_density=32):
        super(ConvCNP, self).__init__()

        # Initialise length scales at double the grid spacing.
        self.kernel_x_length_scale = nn.Parameter(
            torch.tensor(2. / unit_density))
        self.kernel_rho_length_scale = nn.Parameter(
            torch.tensor(2. / unit_density))
        self.kernel_x = EQ() > self.kernel_x_length_scale
        self.kernel_rho = EQ() > self.kernel_rho_length_scale
        self.unit_density = unit_density
        # self.kernel_x = EQKernel(length_scale=.15, trainable=True)
        # self.kernel_rho = EQKernel(length_scale=.15, trainable=True)
        if cnn == 'simple':
            self.rho_cnn = SimpleCNN(process_dimension + 1, process_dimension)
        elif cnn == 'xl':
            self.rho_cnn = XLCNN(process_dimension + 1, process_dimension)
        else:
            raise ValueError(f'Unknown CNN architecture "{cnn}".')
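In the constructor above, `EQ() > s` is stheno shorthand for `EQ().stretch(s)`,
i.e. an exponentiated-quadratic kernel with length scale `s`. A quick sanity
check of that equivalence, assuming a NumPy backend:

import lab as B
import numpy as np
from stheno import EQ

x = np.random.randn(5, 1)
# `>` stretches the kernel's inputs, which sets its length scale.
assert np.allclose(B.dense((EQ() > 0.5)(x)), B.dense(EQ().stretch(0.5)(x)))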
Example #3
def test_boxing(generate, Numeric):
    k = EQ()
    num = generate()
    x = generate(10)

    # Test addition and multiplication.
    assert isinstance(num + k, Element)
    assert isinstance(num + k(x), Element)
    assert isinstance(num + num, Numeric)
    assert isinstance(num * k, Element)
    assert isinstance(num * k(x), Element)
    assert isinstance(num * num, Numeric)
Example #4
def test_boxing_direct(generate, Numeric):
    k = EQ()
    num = generate()
    x = generate(10)

    # Test addition and multiplication.
    assert isinstance(num + k, Element)
    assert isinstance(num + Dense(k(x)), AbstractMatrix)
    assert isinstance(num + num, Numeric)
    assert isinstance(num * k, Element)
    assert isinstance(num * Dense(k(x)), AbstractMatrix)
    assert isinstance(num * num, Numeric)
Example #5
def test_boxing_autograd():
    k = EQ()
    objs = []

    def objective(x):
        x = x + 2
        x = x * 2
        objs.append(x + k)
        objs.append(x + k(x))
        objs.append(x * k)
        objs.append(x * k(x))
        return B.sum(x)

    grad(objective)(B.randn(10))

    for obj in objs:
        assert isinstance(obj, Element)
Example #6
def test_boxing_objective(grad):
    k = EQ()
    objs = []

    def objective(x):
        x = x + 2
        x = x * 2
        objs.append(x[0] + k)
        objs.append(x[0] + Dense(k(x)))
        objs.append(x[0] * k)
        objs.append(x[0] * Dense(k(x)))
        return B.sum(x)

    grad(objective)(B.randn(10))

    for obj in objs:
        assert isinstance(obj, (Element, AbstractMatrix))
Example #7
def test_boxing(dtype, Numeric):
    k = EQ()
    num = B.randn(dtype)
    x = B.randn(dtype, 10)

    # Test addition and multiplication.
    assert isinstance(num + k, Element)
    assert isinstance(num + k(x), Element)
    assert isinstance(num + num, Numeric)
    assert isinstance(num * k, Element)
    assert isinstance(num * k(x), Element)
    assert isinstance(num * num, Numeric)

    # Test in-place addition.
    num = B.randn(dtype)
    num += B.randn(dtype)
    assert isinstance(num, Numeric)

    num += k
    assert isinstance(num, Element)

    num = B.randn(dtype)
    num += k(x)
    assert isinstance(num, Element)

    # Test in-place multiplication.
    num = B.randn(dtype)
    num *= B.randn(dtype)
    assert isinstance(num, Numeric)

    num *= k
    assert isinstance(num, Element)

    num = B.randn(dtype)
    num *= k(x)
    assert isinstance(num, Element)
Example #8
    def model():
        # Start with zero kernels.
        kernel_inputs = ZeroKernel()  # Kernel over inputs.
        kernel_outputs = ZeroKernel()  # Kernel over outputs.

        # Determine indices corresponding to the inputs and outputs.
        m_inds, p_inds, p_num = _determine_indices(m, pi, markov)

        # Add nonlinear kernel over the inputs.
        variance = vs.bnd(name='{}/input/var'.format(pi), init=1.)
        scales = vs.bnd(name='{}/input/scales'.format(0 if scale_tie else pi),
                        init=_vector_from_init(scale, m))
        if rq:
            k = RQ(
                vs.bnd(name='{}/input/alpha'.format(pi), init=1e-2,
                       lower=1e-3, upper=1e3))
        else:
            k = EQ()
        kernel_inputs += variance * k.stretch(scales)

        # Add a locally periodic kernel over the inputs.
        if per:
            variance = vs.bnd(name='{}/input/per/var'.format(pi), init=1.)
            scales = vs.bnd(name='{}/input/per/scales'.format(pi),
                            init=_vector_from_init(per_scale, 2 * m))
            periods = vs.bnd(name='{}/input/per/pers'.format(pi),
                             init=_vector_from_init(per_period, m))
            decays = vs.bnd(name='{}/input/per/decay'.format(pi),
                            init=_vector_from_init(per_decay, m))
            kernel_inputs += variance * \
                             EQ().stretch(scales).periodic(periods) * \
                             EQ().stretch(decays)

        # Add a linear kernel over the inputs.
        if input_linear:
            scales = vs.bnd(name='{}/input/lin/scales'.format(pi),
                            init=_vector_from_init(input_linear_scale, m))
            const = vs.get(name='{}/input/lin/const'.format(pi), init=1.)
            kernel_inputs += Linear().stretch(scales) + const

        # Add linear kernel over the outputs.
        if linear and pi > 0:
            scales = vs.bnd(name='{}/output/lin/scales'.format(pi),
                            init=_vector_from_init(linear_scale, p_num))
            kernel_outputs += Linear().stretch(scales)

        # Add nonlinear kernel over the outputs.
        if nonlinear and pi > 0:
            variance = vs.bnd(name='{}/output/nonlin/var'.format(pi), init=1.)
            scales = vs.bnd(name='{}/output/nonlin/scales'.format(pi),
                            init=_vector_from_init(nonlinear_scale, p_num))
            if rq:
                k = RQ(
                    vs.bnd(name='{}/output/nonlin/alpha'.format(pi),
                           init=1e-2,
                           lower=1e-3,
                           upper=1e3))
            else:
                k = EQ()
            kernel_outputs += variance * k.stretch(scales)

        # Construct noise kernel.
        variance = vs.bnd(name='{}/noise'.format(pi),
                          init=_vector_from_init(noise, pi + 1)[pi],
                          lower=1e-8)  # Allow noise to be small.
        kernel_noise = variance * Delta()

        # Construct model and return.
        graph = Graph()
        f = GP(kernel_inputs.select(m_inds) + kernel_outputs.select(p_inds),
               graph=graph)
        e = GP(kernel_noise, graph=graph)
        return f, e
Example #9
    def model():
        # Start with zero kernels.
        kernel_inputs = ZeroKernel()  # Kernel over inputs.
        kernel_outputs = ZeroKernel()  # Kernel over outputs.

        # Determine indices corresponding to the inputs and outputs.
        m_inds, p_inds, p_num = _determine_indices(m, pi, markov)

        # Add nonlinear kernel over the inputs.
        variance = vs.bnd(name=f"{pi}/input/var", init=1.0)
        scales = vs.bnd(
            name=f"{0 if scale_tie else pi}/input/scales",
            init=_vector_from_init(scale, m),
        )
        if rq:
            k = RQ(
                vs.bnd(name=f"{pi}/input/alpha",
                       init=1e-2,
                       lower=1e-3,
                       upper=1e3))
        else:
            k = EQ()
        kernel_inputs += variance * k.stretch(scales)

        # Add a locally periodic kernel over the inputs.
        if per:
            variance = vs.bnd(name=f"{pi}/input/per/var", init=1.0)
            scales = vs.bnd(
                name=f"{pi}/input/per/scales",
                init=_vector_from_init(per_scale, 2 * m),
            )
            periods = vs.bnd(
                name=f"{pi}/input/per/pers",
                init=_vector_from_init(per_period, m),
            )
            decays = vs.bnd(
                name=f"{pi}/input/per/decay",
                init=_vector_from_init(per_decay, m),
            )
            kernel_inputs += (variance *
                              EQ().stretch(scales).periodic(periods) *
                              EQ().stretch(decays))

        # Add a linear kernel over the inputs.
        if input_linear:
            scales = vs.bnd(
                name=f"{pi}/input/lin/scales",
                init=_vector_from_init(input_linear_scale, m),
            )
            const = vs.get(name=f"{pi}/input/lin/const", init=1.0)
            kernel_inputs += Linear().stretch(scales) + const

        # Add linear kernel over the outputs.
        if linear and pi > 0:
            scales = vs.bnd(
                name=f"{pi}/output/lin/scales",
                init=_vector_from_init(linear_scale, p_num),
            )
            kernel_outputs += Linear().stretch(scales)

        # Add nonlinear kernel over the outputs.
        if nonlinear and pi > 0:
            variance = vs.bnd(name=f"{pi}/output/nonlin/var", init=1.0)
            scales = vs.bnd(
                name=f"{pi}/output/nonlin/scales",
                init=_vector_from_init(nonlinear_scale, p_num),
            )
            if rq:
                k = RQ(
                    vs.bnd(
                        name=f"{pi}/output/nonlin/alpha",
                        init=1e-2,
                        lower=1e-3,
                        upper=1e3,
                    ))
            else:
                k = EQ()
            kernel_outputs += variance * k.stretch(scales)

        # Construct noise kernel.
        variance = vs.bnd(
            name=f"{pi}/noise",
            init=_vector_from_init(noise, pi + 1)[pi],
            lower=1e-8,
        )  # Allow noise to be small.
        kernel_noise = variance * Delta()

        # Construct model and return.
        prior = Measure()
        f = GP(kernel_inputs.select(m_inds) + kernel_outputs.select(p_inds),
               measure=prior)
        e = GP(kernel_noise, measure=prior)
        return f, e
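Examples #8 and #9 build the same model against two stheno APIs: older
versions attach GPs to a `Graph` via `graph=`, newer versions use a `Measure`
via `measure=`. A minimal sketch of the newer form:

from stheno import EQ, GP, Measure

prior = Measure()            # Replaces the older Graph().
f = GP(EQ(), measure=prior)  # Older stheno: GP(EQ(), graph=graph).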
Example #10
@parametrised
def model_alternative(vs, scale: Positive, variance: Positive,
                      noise: Positive):
    """Equivalent to :func:`model`, but with `@parametrised`."""
    kernel = variance * EQ().stretch(scale)
    return GP(kernel), noise
Example #11
def model(vs):
    """Construct a model with learnable parameters."""
    p = vs.struct  # Varz handles positivity (and other) constraints.
    kernel = p.variance.positive() * EQ().stretch(p.scale.positive())
    return GP(kernel), p.noise.positive()
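A minimal usage sketch for the two constructors above, assuming varz with a
PyTorch backend (`Vars`, `parametrised`, and `Positive` are varz exports):

import torch
from varz import Vars

vs = Vars(torch.float64)  # Variable container; parameters are created lazily.
f, noise = model(vs)      # p.variance.positive() etc. register constrained variables.
f_alt, noise_alt = model_alternative(vs)  # @parametrised fills the annotated arguments.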
Example #12
import torch
import matplotlib.pyplot as plt

from stheno import EQ  # Assumed import: the other examples use stheno's EQ.
from src.utils import kernel_evaluate

x = torch.linspace(1, 10, 10).unsqueeze(0).unsqueeze(-1)
y = torch.linspace(1, 10, 10).unsqueeze(0).unsqueeze(-1)

# x = torch.tensor([1.,2.,3.,6.,10.]).unsqueeze(0).unsqueeze(-1)
# y = torch.tensor([5.,6.,4.,3.,7.]).unsqueeze(0).unsqueeze(-1)

# Append a density channel of ones to the observed values.
y = torch.cat((y, torch.ones_like(y)), dim=2)

kernel = EQ() > 0.1  # An EQ kernel stretched to length scale 0.1.

x_grid = torch.linspace(0, 11, 1000).unsqueeze(0).unsqueeze(-1)

y_grid = kernel_evaluate(y, x, x_grid, kernel)

print(y.squeeze())
print(y.squeeze().shape)

plt.scatter(x.squeeze(), y.squeeze()[:, 0], label='data')
plt.plot(x_grid.squeeze(), y_grid.squeeze()[:, 0], label='h1')
plt.plot(x_grid.squeeze(), y_grid.squeeze()[:, 1], label='h0')
plt.plot(x_grid.squeeze(),
         y_grid.squeeze()[:, 0] / (y_grid.squeeze()[:, 1] + 1e-6),
         label='h1/h0')
plt.legend()
plt.show()
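`kernel_evaluate` lives in the author's `src.utils` and is not shown. A
plausible, purely hypothetical stand-in is a SetConv-style kernel-weighted sum
of the observations at each grid point; dividing the value channel `h1` by the
appended density channel `h0`, as in the last plot, then yields a normalised
(Nadaraya-Watson) estimate:

import lab as B

def kernel_evaluate_sketch(y, x, x_grid, kernel):
    # Hypothetical stand-in for src.utils.kernel_evaluate.
    # y: (1, n, channels), x: (1, n, 1), x_grid: (1, m, 1).
    w = B.dense(kernel(x_grid[0], x[0]))  # (m, n) kernel weights.
    return (w @ y[0]).unsqueeze(0)        # (1, m, channels).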
Example #13
                else:
                    y_target_mu, y_target_sigma, _, _, _ = model.forward(x_context, y_context, x_target, y_target)

                # Select only the first element of the batch.
                y_context = y_context[0].data.squeeze()
                x_context = x_context[0].data.squeeze()
                x_target = x_target[0].data.squeeze()
                y_target = y_target[0].data.squeeze() 

                name = f'epoch_{epoch}_sample_{i}'

                # Fit the relevant GP to the data.
                if args.GP_type == 'RBF':
                    kernel = EQ().stretch(params[0].squeeze()) * (params[1].squeeze() ** 2)
                elif args.GP_type == 'Matern':
                    kernel = Matern52().stretch(params[0].squeeze()) * (params[1].squeeze() ** 2)

                f = GP(kernel)
                e = GP(Delta()) * kernel_noise
                gp = f + e | (x_context, y_context)
                preds = gp(x_target)
                gp_mean, gp_lower, gp_upper = preds.marginals()
                gp_std = (gp_upper - gp_mean) / 2
                
                if args.model in ['ANP', 'NP']:
                    plot_compare_processes_gp_latent(
                        x_target,
                        y_target,
                        x_context,