def __init__(self,
             input_dim,
             locs_poi,
             typeIndicator,
             active_dims=None,
             name=None,
             typIdx=1,
             lengthscale=None,
             effects=None,
             locs=None,
             mindist=0.5,
             kernel_type="linear"):
    super().__init__(input_dim, active_dims, name=name)
    # Default effect size: a random draw in [0.5, 1.0) if none is given.
    effects = np.random.uniform(low=0.5, high=1.0) if effects is None else effects
    MeanFunction.__init__(self)
    self.locs_poi = locs_poi.astype(np.float64)
    self.typeIndicator = typeIndicator.astype(np.float64)
    self.typIdx = typIdx
    # Keep only the POI locations whose one-hot type matches typIdx.
    self.locs_poi_j = self.locs_poi[self.typeIndicator[:, self.typIdx] == 1, :]
    self.kernel_type = kernel_type

    # The lengthscale parameter is disabled; the commented-out version shows
    # how it would be constrained to (mindist, 10) with a Logistic transform.
    self.lengthscale = 0
    # self.lengthscale = Parameter(2, transform=transforms.Logistic(a=mindist, b=10),
    #                              dtype=settings.float_type)
    self.effects = Parameter(effects,
                             transform=transforms.Logistic(a=0.01, b=1),
                             dtype=settings.float_type)
    self.distmax = Parameter(0.5,
                             transform=transforms.Logistic(a=0, b=1.5),
                             dtype=settings.float_type)
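
The constructor above is shown without its enclosing class. A minimal usage sketch, assuming a hypothetical class name PoiKernel and toy 2-D POI data; everything except the constructor signature is an assumption:

import numpy as np

# Hypothetical usage -- PoiKernel is an assumed name for the class above.
locs_poi = np.random.rand(20, 2)              # 20 POI locations in 2-D
typeIndicator = np.zeros((20, 3))             # one-hot POI types, 3 categories
typeIndicator[np.arange(20), np.random.randint(0, 3, size=20)] = 1

kern = PoiKernel(input_dim=2,
                 locs_poi=locs_poi,
                 typeIndicator=typeIndicator,
                 typIdx=1)                    # model effects of type-1 POIs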
Example #2
def __init__(self,
             input_dim,
             variance=1.0,
             frequency=np.array([1.0, 1.0]),
             lengthscale=1.0,
             correlation=0.0,
             max_freq=1.0,
             active_dims=None):
    # The derivations are valid only for one-dimensional input.
    assert input_dim == 1
    Kernel.__init__(self, input_dim=input_dim, active_dims=active_dims)
    self.variance = Param(variance, transforms.positive)
    # Constrain each frequency to the open interval (0, max_freq).
    self.frequency = Param(frequency, transforms.Logistic(0.0, max_freq))
    self.lengthscale = Param(lengthscale, transforms.positive)
    # Clip away from 0 and 1 for numerical stability, then constrain to (0, 1).
    correlation = np.clip(correlation, 1e-4, 1 - 1e-4)
    self.correlation = Param(correlation, transforms.Logistic())
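
Both examples rely on GPflow 1.x's transforms.Logistic(a, b) to keep a parameter inside an open interval. A minimal NumPy sketch of the forward map as I understand it (this is an illustration, not GPflow's code):

import numpy as np

def logistic_forward(x, a=0.0, b=1.0):
    # sigmoid maps R -> (0, 1); scaling and shifting maps (0, 1) -> (a, b)
    return a + (b - a) / (1.0 + np.exp(-x))

print(logistic_forward(-10.0, a=0.0, b=2.0))  # ~0.0 (near lower bound)
print(logistic_forward(0.0, a=0.0, b=2.0))    # 1.0 (midpoint)
print(logistic_forward(10.0, a=0.0, b=2.0))   # ~2.0 (near upper bound)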
Example #3
def __init__(self, num_classes, epsilon=1e-3, **kwargs):
    super().__init__(**kwargs)
    # epsilon is constrained to (0, 1), held fixed during optimization, and
    # given a Beta(0.2, 5) prior that concentrates its mass near zero.
    self.epsilon = Parameter(epsilon,
                             transforms.Logistic(),
                             trainable=False,
                             dtype=settings.float_type,
                             prior=priors.Beta(0.2, 5.))
    self.num_classes = num_classes
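
This signature matches GPflow's RobustMax-style multiclass link, where epsilon is a small "mislabeling" probability. The source shows only the constructor; the rule sketched below (argmax class gets 1 - epsilon, the rest share epsilon equally) is the standard robust-max behavior as I understand it:

import numpy as np

def robust_max_probs(f, epsilon=1e-3):
    # f: latent function values for K classes at one input.
    K = f.shape[-1]
    p = np.full_like(f, epsilon / (K - 1))   # non-argmax classes share epsilon
    p[np.argmax(f)] = 1.0 - epsilon          # argmax class gets the rest
    return p

f = np.array([0.2, 1.5, -0.3])               # latent values for 3 classes
print(robust_max_probs(f))                   # [eps/2, 1 - eps, eps/2]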
Example #4
def __init__(self, delta=1e-3, a=0., **kwargs):
    super().__init__(**kwargs)
    # delta is constrained to (0, 1), held fixed, and given a Beta(0.2, 5)
    # prior that keeps it close to zero.
    self.delta = Parameter(delta,
                           transforms.Logistic(),
                           trainable=False,
                           dtype=settings.float_type,
                           prior=priors.Beta(0.2, 5.))
    # a is constrained to be positive and is not optimized.
    self.a = Parameter(a,
                       transforms.positive,
                       trainable=False,
                       dtype=settings.float_type)
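
The Beta(0.2, 5) prior used for both epsilon and delta puts almost all of its mass near zero, so these nuisance parameters stay small under the prior. A quick check with SciPy:

from scipy.stats import beta

prior = beta(0.2, 5.0)
print(prior.mean())        # 0.2 / 5.2, roughly 0.038
print(prior.cdf(0.1))      # the bulk of the mass sits below 0.1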
Example #5
def initialize_model(x_s,
                     y,
                     m,
                     q,
                     mu=None,
                     s=None,
                     joint=False,
                     infer_type="diag",
                     xi_kern="RBF",
                     x_st_infer=None,
                     x_st_test=None):
    """

    :param use_kronecker:
    :param t: Inputs (i.e. simulator inputs)
    :param x_s: spatiotemporal inputs (just temporal for KO...)
    :type x_s: Iterable of 2D np.ndarrays
    :param y: outputs
    :param m:
    :param q:
    :param joint: if true then we're training a joint model with 2 channels
        (output dimensions).  Theya re assumed to be provided as
        column-concatenated in y.
    :param infer_type: How to infer latent variables.  Options:
        * "diag": VI with diagonal Gaussian variational posterior
        * "full": VI will full Gaussian variational posterior
        * "mcmc": MCMC particle inference
    :return:
    """

    # Total number of spatiotemporal points:
    n_s = np.prod([x_si.shape[0] for x_si in x_s])
    # If no latent means are provided, initialize them (and their variances)
    # via PCA and train the KL term; otherwise the provided latents are
    # treated as fixed (supervised) inputs.
    if mu is None:
        if joint:
            y_pca = y[:, :n_s]  # Inputs only
        else:
            y_pca = y
        mu = pca(y_pca, q)
        s = 0.1 * np.ones(mu.shape)
        supervised = False
        train_kl = True
    else:
        supervised = True
        train_kl = False
    # Use the latent means as inducing inputs when the counts match;
    # otherwise let the model choose its own.
    if m == mu.shape[0]:
        z = mu
    else:
        z = None

    x = [mu] + x_s
    d_in = [x_i.shape[1] for x_i in x]

    with gpflow.defer_build():
        # X -> Y kernels
        if xi_kern == "Linear":
            kern_list = [Linear(d_in[0], variance=0.01, ARD=True)]
            kern_list[-1].variance.transform = transforms.Logistic(
                1.0e-12, 1.0)
        elif xi_kern == "RBF":
            kern_list = [RBF(d_in[0], lengthscales=np.sqrt(d_in[0]), ARD=True)]
            kern_list[-1].lengthscales.transform = transforms.Logistic(
                1.0e-12, 1000.0)
            kern_list[-1].lengthscales.prior = Gamma(mu=2.0, var=1.0)
        elif xi_kern == "Sum":
            kern_list = [
                gpflow.kernels.Sum([
                    RBF(d_in[0], lengthscales=np.sqrt(d_in[0]), ARD=True),
                    Linear(d_in[0], variance=0.01, ARD=True)
                ])
            ]
            kern_list[-1].kernels[0].lengthscales.transform = \
                transforms.Logistic(1.0e-12, 1000.0)
            kern_list[-1].kernels[0].lengthscales.prior = Gamma(mu=2.0,
                                                                var=1.0)
            kern_list[-1].kernels[1].variance.transform = \
                transforms.Logistic(1.0e-12, 1.0)
        else:
            raise NotImplementedError("Unknown xi kernel {}".format(xi_kern))
        # Each spatiotemporal dimension gets an Exponential kernel; start
        # all of their lengthscales at a common small value.
        for d_in_i in d_in[1:]:
            kern_list.append(
                Exponential(d_in_i, lengthscales=np.sqrt(d_in_i), ARD=True))
        for kern in kern_list[1:]:
            kern.lengthscales = 0.1

        # Restructure the inputs for the SGPLVM:
        if joint:
            y_structured = np.concatenate(
                (y[:, :n_s].reshape((-1, 1)), y[:, n_s:].reshape((-1, 1))), 1)
        else:
            y_structured = y.reshape((-1, 1))

        # Initialize model:
        model_types = {"diag": Sgplvm, "full": SgplvmFullCovInfer}
        if infer_type not in model_types:
            raise NotImplementedError(
                "No support for infer_type {}".format(infer_type))
        kgplvm = model_types[infer_type]
        model = kgplvm(x,
                       s,
                       y_structured,
                       kern_list,
                       m,
                       z,
                       train_kl=train_kl,
                       x_st_infer=x_st_infer,
                       x_st_test=x_st_test)
        model.likelihood.variance = 1.0e-2 * np.var(y)
        if supervised:
            # Lock provided inputs
            model.X0.trainable = False
            model.h_s.trainable = False
    model.compile()

    return model
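
A hedged usage sketch with toy shapes; Sgplvm, SgplvmFullCovInfer, and pca are repo-local, and the exact shape conventions are assumptions, so this only illustrates the calling pattern:

# Hypothetical usage -- data and shapes are made up; the repo's own modules
# (Sgplvm, pca, etc.) are assumed to be importable wherever this runs.
import numpy as np

n, n_t = 50, 30                              # simulations, time points
x_s = [np.linspace(0, 1, n_t)[:, None]]      # temporal inputs, shape (30, 1)
y = np.random.randn(n, n_t)                  # one row of outputs per simulation

model = initialize_model(x_s, y, m=16, q=3, infer_type="diag", xi_kern="RBF")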