Exemplo n.º 1
0
    def model():
        """Build the GP model for output `pi`.

        Accumulates a kernel over the inputs (nonlinear EQ/RQ, optionally a
        locally periodic component and a linear component), a kernel over the
        previous outputs (linear and/or nonlinear), and a noise kernel, then
        wraps them in GPs on a shared graph.

        Returns:
            tuple: Pair `(f, e)` of the latent GP and the noise GP.
        """
        # Start with zero kernels; components below are summed onto them.
        kernel_inputs = ZeroKernel()  # Kernel over inputs.
        kernel_outputs = ZeroKernel()  # Kernel over outputs.

        # Determine indices corresponding to the inputs and outputs.
        m_inds, p_inds, p_num = _determine_indices(m, pi, markov)

        # Add nonlinear kernel over the inputs.
        variance = vs.bnd(name='{}/input/var'.format(pi), init=1.)
        scales = vs.bnd(name='{}/input/scales'.format(0 if scale_tie else pi),
                        init=_vector_from_init(scale, m))
        if rq:
            # Bug fix: the original passed the unformatted literal
            # '{}/input/alpha', so all outputs silently shared one alpha
            # parameter under a malformed name. Format `pi` in, like every
            # other parameter here and like the equivalent code elsewhere
            # in this file.
            k = RQ(
                vs.bnd(name='{}/input/alpha'.format(pi), init=1e-2,
                       lower=1e-3, upper=1e3))
        else:
            k = EQ()
        kernel_inputs += variance * k.stretch(scales)

        # Add a locally periodic kernel over the inputs: a periodic EQ
        # modulated by a decaying EQ envelope.
        if per:
            variance = vs.bnd(name='{}/input/per/var'.format(pi), init=1.)
            scales = vs.bnd(name='{}/input/per/scales'.format(pi),
                            init=_vector_from_init(per_scale, 2 * m))
            periods = vs.bnd(name='{}/input/per/pers'.format(pi),
                             init=_vector_from_init(per_period, m))
            decays = vs.bnd(name='{}/input/per/decay'.format(pi),
                            init=_vector_from_init(per_decay, m))
            kernel_inputs += variance * \
                             EQ().stretch(scales).periodic(periods) * \
                             EQ().stretch(decays)

        # Add a linear kernel over the inputs.
        if input_linear:
            scales = vs.bnd(name='{}/input/lin/scales'.format(pi),
                            init=_vector_from_init(input_linear_scale, m))
            const = vs.get(name='{}/input/lin/const'.format(pi), init=1.)
            kernel_inputs += Linear().stretch(scales) + const

        # Add linear kernel over the outputs (only past the first output).
        if linear and pi > 0:
            scales = vs.bnd(name='{}/output/lin/scales'.format(pi),
                            init=_vector_from_init(linear_scale, p_num))
            kernel_outputs += Linear().stretch(scales)

        # Add nonlinear kernel over the outputs (only past the first output).
        if nonlinear and pi > 0:
            variance = vs.bnd(name='{}/output/nonlin/var'.format(pi), init=1.)
            scales = vs.bnd(name='{}/output/nonlin/scales'.format(pi),
                            init=_vector_from_init(nonlinear_scale, p_num))
            if rq:
                k = RQ(
                    vs.bnd(name='{}/output/nonlin/alpha'.format(pi),
                           init=1e-2,
                           lower=1e-3,
                           upper=1e3))
            else:
                k = EQ()
            kernel_outputs += variance * k.stretch(scales)

        # Construct noise kernel.
        variance = vs.bnd(name='{}/noise'.format(pi),
                          init=_vector_from_init(noise, pi + 1)[pi],
                          lower=1e-8)  # Allow noise to be small.
        kernel_noise = variance * Delta()

        # Construct model and return.
        graph = Graph()
        f = GP(kernel_inputs.select(m_inds) + kernel_outputs.select(p_inds),
               graph=graph)
        e = GP(kernel_noise, graph=graph)
        return f, e
Exemplo n.º 2
0
    def model():
        """Assemble the GP model for output `pi`.

        Sums kernel components over the inputs and over the previous
        outputs, builds a noise kernel, and returns the latent GP and the
        noise GP under a shared measure.

        Returns:
            tuple: Pair `(f, e)` of the latent GP and the noise GP.
        """
        # Accumulators for the input and output kernels.
        k_in = ZeroKernel()
        k_out = ZeroKernel()

        # Indices of the inputs and of the previous outputs.
        m_inds, p_inds, p_num = _determine_indices(m, pi, markov)

        # Nonlinear kernel over the inputs: RQ if requested, otherwise EQ.
        var_in = vs.bnd(name=f"{pi}/input/var", init=1.0)
        ls_in = vs.bnd(
            name=f"{0 if scale_tie else pi}/input/scales",
            init=_vector_from_init(scale, m),
        )
        if rq:
            alpha = vs.bnd(
                name=f"{pi}/input/alpha", init=1e-2, lower=1e-3, upper=1e3
            )
            base = RQ(alpha)
        else:
            base = EQ()
        k_in = k_in + var_in * base.stretch(ls_in)

        # Optionally add a locally periodic kernel over the inputs: a
        # periodic EQ times a decaying EQ envelope.
        if per:
            var_per = vs.bnd(name=f"{pi}/input/per/var", init=1.0)
            ls_per = vs.bnd(
                name=f"{pi}/input/per/scales",
                init=_vector_from_init(per_scale, 2 * m),
            )
            pers = vs.bnd(
                name=f"{pi}/input/per/pers",
                init=_vector_from_init(per_period, m),
            )
            dec = vs.bnd(
                name=f"{pi}/input/per/decay",
                init=_vector_from_init(per_decay, m),
            )
            periodic_part = EQ().stretch(ls_per).periodic(pers)
            envelope = EQ().stretch(dec)
            k_in = k_in + var_per * periodic_part * envelope

        # Optionally add a linear kernel over the inputs.
        if input_linear:
            ls_lin = vs.bnd(
                name=f"{pi}/input/lin/scales",
                init=_vector_from_init(input_linear_scale, m),
            )
            bias = vs.get(name=f"{pi}/input/lin/const", init=1.0)
            k_in = k_in + Linear().stretch(ls_lin) + bias

        # Linear kernel over the previous outputs.
        if linear and pi > 0:
            ls_out_lin = vs.bnd(
                name=f"{pi}/output/lin/scales",
                init=_vector_from_init(linear_scale, p_num),
            )
            k_out = k_out + Linear().stretch(ls_out_lin)

        # Nonlinear kernel over the previous outputs.
        if nonlinear and pi > 0:
            var_out = vs.bnd(name=f"{pi}/output/nonlin/var", init=1.0)
            ls_out = vs.bnd(
                name=f"{pi}/output/nonlin/scales",
                init=_vector_from_init(nonlinear_scale, p_num),
            )
            if rq:
                alpha_out = vs.bnd(
                    name=f"{pi}/output/nonlin/alpha",
                    init=1e-2,
                    lower=1e-3,
                    upper=1e3,
                )
                base_out = RQ(alpha_out)
            else:
                base_out = EQ()
            k_out = k_out + var_out * base_out.stretch(ls_out)

        # Noise kernel; the lower bound allows the noise to become small.
        noise_var = vs.bnd(
            name=f"{pi}/noise",
            init=_vector_from_init(noise, pi + 1)[pi],
            lower=1e-8,
        )
        k_noise = noise_var * Delta()

        # Wrap everything in GPs under a shared measure and return.
        prior = Measure()
        f = GP(
            k_in.select(m_inds) + k_out.select(p_inds),
            measure=prior,
        )
        e = GP(k_noise, measure=prior)
        return f, e
def model(vs):
    """Construct a model with learnable parameters."""
    params = vs.struct  # Varz enforces positivity (and other) constraints.
    amplitude = params.variance.positive()
    lengthscale = params.scale.positive()
    return GP(amplitude * EQ().stretch(lengthscale)), params.noise.positive()
def model_alternative(vs, scale: Positive, variance: Positive,
                      noise: Positive):
    """Equivalent to :func:`model`, but with `@parametrised`."""
    stretched = EQ().stretch(scale)
    return GP(variance * stretched), noise
Exemplo n.º 5
0
                # Select only the first element of the batch.
                y_context = y_context[0].data.squeeze()
                x_context = x_context[0].data.squeeze()
                x_target = x_target[0].data.squeeze()
                y_target = y_target[0].data.squeeze() 

                name = f'epoch_{epoch}_sample_{i}'

                # Fit the relevant GP to the data
                if args.GP_type == 'RBF':
                    kernel = EQ().stretch(params[0].squeeze()) * (params[1].squeeze() ** 2)
                elif args.GP_type == 'Matern':
                    kernel = Matern52().stretch(params[0].squeeze()) * (params[1].squeeze() ** 2)

                f = GP(kernel)
                e = GP(Delta()) * kernel_noise
                gp = f + e | (x_context, y_context)
                preds = gp(x_target)
                gp_mean , gp_lower, gp_upper = preds.marginals()
                gp_std = (gp_upper - gp_mean) / 2
                
                if args.model in ['ANP', 'NP']:
                    plot_compare_processes_gp_latent(
                        x_target,
                        y_target,
                        x_context,
                        y_context,
                        y_target_mu.data.squeeze(),
                        y_target_sigma.data.squeeze(),
                        gp_mean,