def test_expand(available_kernels, kernel_to_tree, tree_to_str):
    """For every available base kernel, check that ``expand_kernel`` yields
    the expected one-step expansions (the kernel itself, each base kernel,
    and the sum/product combinations), and that ``base_kernels_to_exclude``
    removes the corresponding entries.
    """
    for kernel_name, kernel in available_kernels.items():
        # Without deferring the build, the TensorFlow graph-build time this
        # takes is unbearable.
        with gpflow.defer_build():
            res = [
                tree_to_str(kernel_to_tree(k))
                for k in expand_kernel(kernel(1))
            ]
        # Expected order: the kernel itself, the five base kernels, then for
        # each base kernel b: k + b, k * b, k * (b + constant).
        assert res == [
            f'{kernel_name}',
            'constant',
            'linear',
            'periodic',
            'rbf',
            'white',
            f'{kernel_name} + constant',
            f'{kernel_name} * constant',
            f'{kernel_name} * (constant + constant)',
            f'{kernel_name} + linear',
            f'{kernel_name} * linear',
            f'{kernel_name} * (linear + constant)',
            f'{kernel_name} + periodic',
            f'{kernel_name} * periodic',
            f'{kernel_name} * (periodic + constant)',
            f'{kernel_name} + rbf',
            f'{kernel_name} * rbf',
            f'{kernel_name} * (rbf + constant)',
            f'{kernel_name} + white',
            f'{kernel_name} * white',
            f'{kernel_name} * (white + constant)',
        ]

        # Same expansion with 'constant' excluded from the base kernels.
        with gpflow.defer_build():
            res_strs = [
                tree_to_str(kernel_to_tree(k))
                for k in expand_kernel(kernel(1),
                                       base_kernels_to_exclude=['constant'])
            ]

        # When the kernel under test is itself 'constant' it is excluded
        # entirely, so only the remaining base kernels should appear.
        res_start_should_be = [f'{kernel_name}'
                               ] if kernel_name != 'constant' else []
        res_complex_should_be = [
            f'{kernel_name} + linear',
            f'{kernel_name} * linear',
            f'{kernel_name} * (linear + constant)',
            f'{kernel_name} + periodic',
            f'{kernel_name} * periodic',
            f'{kernel_name} * (periodic + constant)',
            f'{kernel_name} + rbf',
            f'{kernel_name} * rbf',
            f'{kernel_name} * (rbf + constant)',
            f'{kernel_name} + white',
            f'{kernel_name} * white',
            f'{kernel_name} * (white + constant)',
        ] if kernel_name != 'constant' else []

        assert res_strs == res_start_should_be + [
            'linear', 'periodic', 'rbf', 'white'
        ] + res_complex_should_be
# Example #2 (snippet separator from scraped source)
def ST_Model(X, y, use_priors=False, e_s=0):
    """Build a (deferred) spatio-temporal VGP model with a Poisson likelihood.

    Parameters
    ----------
    X : array-like
        Inputs; column 0 drives the time kernel, columns 1-2 the space
        kernel (see ``active_dims`` below).
    y : array-like
        Observed counts for the Poisson likelihood.
    use_priors : bool
        If True, place StudentT(0, 1, df=4) priors on both RBF variances
        and lengthscales.
    e_s : float
        ``binsize`` passed to the Poisson likelihood.

    Returns
    -------
    gpflow.models.VGP
        Built under ``defer_build``; the caller is expected to compile it.
    """

    with gpflow.defer_build():

        like = gpflow.likelihoods.Poisson(binsize=e_s)

        # Additive decomposition: space effect (dims 1, 2) + time effect (dim 0).
        kern_s_effect = gpflow.kernels.RBF(2,
                                           active_dims=[1, 2],
                                           name='space_effect')
        kern_t_effect = gpflow.kernels.RBF(1,
                                           active_dims=[0],
                                           name='time_effect')

        full_kern = kern_t_effect + kern_s_effect

        m = gpflow.models.VGP(X,
                              y,
                              full_kern,
                              likelihood=like,
                              mean_function=None)

        t_prior = gpflow.priors.StudentT(mean=0, scale=1, deg_free=4)

        if use_priors:
            # rbf_1 / rbf_2 are the auto-generated names of the two summed
            # RBF kernels inside the combination kernel.
            m.kern.rbf_1.variance.prior = t_prior
            m.kern.rbf_2.variance.prior = t_prior

            m.kern.rbf_1.lengthscales.prior = t_prior
            m.kern.rbf_2.lengthscales.prior = t_prior

        return m
# Example #3 (snippet separator from scraped source)
def evalMCMC(X, Y):  # type: (Any, {shape}) -> Tuple[DataFrame, GPR]
    """Fit a Matern-5/2 GPR with Beta priors and draw HMC samples from it.

    Parameters: ``X``, ``Y`` are the GPR training inputs and targets.
    Returns ``(traces, m)``: the HMC sample DataFrame and the compiled model.
    """
    with gpflow.defer_build():
        k = gpflow.kernels.Matern52(1, lengthscales=0.3)
        meanf = gpflow.mean_functions.Zero()
        m = gpflow.models.GPR(X, Y, k, meanf)
        # NOTE(review): clear() before attaching priors - presumably needed
        # so the priors are picked up by the subsequent compile; confirm
        # against the gpflow build lifecycle.
        m.clear()

        m.kern.lengthscales.prior = gpflow.priors.Beta(1., 3.)
        m.kern.variance.prior = gpflow.priors.Beta(1., 3.)
        m.likelihood.variance.prior = gpflow.priors.Beta(1., 3.)
        # m.mean_function.A.prior = gpflow.priors.Gaussian(0., 10.)
        # m.mean_function.b.prior = gpflow.priors.Gaussian(0., 10.)

    m.compile()
    print(m.as_pandas_table())

    # 2000 HMC samples after a 1000-step burn-in.
    sampler = gpflow.train.HMC()
    traces = sampler.sample(m,
                            num_samples=2000,
                            burn=1000,
                            epsilon=0.05,
                            lmin=1,
                            lmax=3,
                            logprobs=False)
    return traces, m
# Example #4 (snippet separator from scraped source)
    def test_construction(self):
        """Exercise ParamList construction, rejection of invalid inputs,
        item assignment under defer_build, and append semantics.
        """
        with self.test_context():
            # Valid constructions: empty, Params, and mixed coercible values.
            gpflow.ParamList([])
            gpflow.ParamList([gpflow.Param(1)])
            gpflow.ParamList([1.0, np.array([1, 2]), gpflow.Param(1.0)])
            with self.assertRaises(ValueError):
                gpflow.ParamList([gpflow.Param(1), 'stringsnotallowed'])
            with self.assertRaises(ValueError):
                # tuples not valid in constuctor:
                gpflow.ParamList((gpflow.Param(1),))
            with self.assertRaises(ValueError):
                # param objects not valid in constructor (must be in list)
                gpflow.ParamList(gpflow.Param(1))

            with gpflow.defer_build():
                # While deferred, only Param objects may be assigned by index.
                p = gpflow.ParamList([0.0])
                p[0] = gpflow.Param(1.0)
                with self.assertRaises(ValueError):
                    p[0] = 1.0
                with self.assertRaises(ValueError):
                    p[0] = "test"

            # append() accepts Params and coercible numbers, not strings.
            p = gpflow.ParamList([])
            p.append(gpflow.Param(1.0))
            p.append(gpflow.Param(2.0))
            p.append(2.0)
            self.assertEqual(len(p), 3)
            with self.assertRaises(ValueError):
                p.append("test")
# Example #5 (snippet separator from scraped source)
    def __init__(self, X, y, hyperparameters=None, optimize_hyperparameters=False,
                 learning_rate=0.001, maxiter=5000, **kwargs):
        """Build (and optionally optimize) a GPRC model on (X, y).

        Parameters
        ----------
        X, y : array-like
            Training inputs (N x D) and targets.
        hyperparameters : dict or None
            Pre-computed hyperparameter values; kernel entries are merged
            into the kernel kwargs, and a likelihood-variance entry (keyed
            by ``self._LIKELIHOOD_HP_KEY``) is assigned on the model.
        optimize_hyperparameters : bool
            If True, run Adam for ``maxiter`` iterations after compiling.
        learning_rate : float
            Adam learning rate (used only when optimizing).
        maxiter : int
            Number of Adam iterations (used only when optimizing).
        **kwargs
            Forwarded to kernel-kwarg construction and kernel building.
        """
        # Store model kwargs
        self._model_kwargs = {
            'hyperparameters': hyperparameters,
            'optimize_hyperparameters': optimize_hyperparameters,
            'learning_rate': learning_rate,
            'maxiter': maxiter,
        }

        # Store kernel kwargs
        kernel_kwargs = self._get_kernel_kwargs(X_dim=X.shape[1], **kwargs)
        if hyperparameters is not None:
            self._assign_kernel_hyperparams(hyperparameters, kernel_kwargs)
        # Deep-copy so later mutation of kernel_kwargs cannot leak into the
        # stored copy.
        self._kernel_kwargs = copy.deepcopy(kernel_kwargs)

        # Build the kernels and the model
        with gpflow.defer_build():
            k = self._build_kernel(kernel_kwargs, optimize_hyperparameters=optimize_hyperparameters, **kwargs)
            m = GPRC(X, y, kern=k)
            if hyperparameters is not None and self._LIKELIHOOD_HP_KEY in hyperparameters:
                m.likelihood.variance = hyperparameters[self._LIKELIHOOD_HP_KEY]
        m.compile()

        # If enabled, optimize the hyperparameters
        if optimize_hyperparameters:
            opt = gpflow.train.AdamOptimizer(learning_rate)
            opt.minimize(m, maxiter=maxiter)
        self.model = m
# Example #6 (snippet separator from scraped source)
def train_arrival_time_gp(X, Y, number, kernel="rbf_linear"):
    """GP which maps (%, v) -> arrival_time.

    Parameters
    ----------
    X, Y : array-like
        Training inputs and targets for the GPR.
    number : int
        Identifier used only for the log message.
    kernel : str or None
        One of None / "white", "matern32", "linear", "rbf", "rbf_linear".

    Returns
    -------
    gpflow.models.GPR
        The compiled and Scipy-optimized model.

    Raises
    ------
    ValueError
        If *kernel* does not name a known kernel.
    """
    with gpflow.defer_build():
        if kernel is None or kernel == "white":
            kern = gpflow.kernels.White(input_dim=1)
        elif kernel == "matern32":
            kern = gpflow.kernels.Matern32(input_dim=1)
        elif kernel == "linear":
            kern = gpflow.kernels.Linear(input_dim=1)
        elif kernel == "rbf":
            kern = gpflow.kernels.RBF(input_dim=1)
        elif kernel == "rbf_linear":
            kern = gpflow.kernels.RBF(
                input_dim=1, lengthscales=0.1) + gpflow.kernels.Linear(
                    input_dim=1, variance=500)
        else:
            # ValueError instead of bare Exception so callers can catch the
            # misconfiguration precisely.
            raise ValueError("Kernel {} unknown!".format(kernel))

        m = gpflow.models.GPR(X, Y, kern=kern)
        m.likelihood.variance = 1  #10
        m.compile()
        opt = gpflow.train.ScipyOptimizer()
        opt.minimize(m)
        logger.info("Arrival Time Pred GP #{} trained.".format(number))
    return m
def smooth_direction_time(weights,X,Y,freqs, M=None,minibatch=None,iterations=1000, n_kern = 1):
    """Fit a weighted deep GP over (time, direction) inputs and return
    smoothed predictions.

    Parameters: ``weights`` per-datum weights; ``X`` (N, 3) inputs with time
    in column 0 and direction in columns 1-2 (per the kernels' active_dims);
    ``Y`` (N, num_latent) targets; ``freqs`` passed to make_likelihood;
    ``M`` number of inducing points (defaults to N); ``minibatch`` minibatch
    size; ``iterations`` Adam steps; ``n_kern`` number of DGP layers/kernels.

    Returns (mean log density, predictive mean, predictive std).
    """

    N, num_latent = Y.shape
    M = M or N
    # Inducing inputs drawn at random from the data rows.
    Z = np.random.choice(X,size=M,replace=False)
#     Z = kmeans2(X, M, minit='points')[0]

    with gp.defer_build():
        # Per-layer kernel: Matern32 over direction x Periodic(time) x Matern32(time).
        kern = [gp.kernels.Matern32(2,active_dims=[1,2])*gp.kernels.Periodic(1,active_dims=[0])*gp.kernels.Matern32(1,active_dims = [0],variance=0.1**2) for _ in range(n_kern)]
        for k in kern:
            # Fix the two Matern variances; only the periodic variance is
            # learned (with a tight Gaussian prior).
            k.children['matern32_1'].variance.set_trainable(False)
            k.children['periodic'].variance.prior = gp.priors.Gaussian(0.,0.1)
            k.children['matern32_2'].variance.set_trainable(False)

        likelihood = make_likelihood(freqs)
        mean = gp.mean_functions.Constant()
        #X, Y, Z, kernels, likelihood,
        model = WeightedDGP(weights,X, Y, Z, kern, likelihood,
                    mean_function = mean, minibatch_size=minibatch,
                    num_outputs=num_latent-1)
        # Shrink the initial variational covariance of the inner layers.
        for layer in model.layers[:-1]:
            layer.q_sqrt = layer.q_sqrt.value * 1e-5

        model.likelihood.variance = np.pi/30.
        model.compile()

    opt = gp.train.AdamOptimizer(5e-3)
    opt.minimize(model, maxiter=iterations)

    # Batched prediction to bound memory; S is the number of MC samples.
    ystar,varstar = batch_predict_y(model,X,batch_size=100,S=200)
    l = batch_predict_density(model,X,Y,batch_size=100,S=200)
    return l.mean(), ystar, np.sqrt(varstar)
# Example #8 (snippet separator from scraped source)
    def test_construction(self):
        """Exercise ParamList construction, invalid-input rejection, deferred
        item assignment, and append semantics (duplicate of an earlier test).
        """
        with self.test_context():
            # Valid constructions: empty, Params, and mixed coercible values.
            gpflow.ParamList([])
            gpflow.ParamList([gpflow.Param(1)])
            gpflow.ParamList([1.0, np.array([1, 2]), gpflow.Param(1.0)])
            with self.assertRaises(ValueError):
                gpflow.ParamList([gpflow.Param(1), 'stringsnotallowed'])
            with self.assertRaises(ValueError):
                # tuples not valid in constuctor:
                gpflow.ParamList((gpflow.Param(1), ))
            with self.assertRaises(ValueError):
                # param objects not valid in constructor (must be in list)
                gpflow.ParamList(gpflow.Param(1))

            with gpflow.defer_build():
                # While deferred, only Param objects may be assigned by index.
                p = gpflow.ParamList([0.0])
                p[0] = gpflow.Param(1.0)
                with self.assertRaises(ValueError):
                    p[0] = 1.0
                with self.assertRaises(ValueError):
                    p[0] = "test"

            # append() accepts Params and coercible numbers, not strings.
            p = gpflow.ParamList([])
            p.append(gpflow.Param(1.0))
            p.append(gpflow.Param(2.0))
            p.append(2.0)
            self.assertEqual(len(p), 3)
            with self.assertRaises(ValueError):
                p.append("test")
# Example #9 (snippet separator from scraped source)
def test_monitor(session_tf):
    """Smoke-test the monitor actions: run an SVGP training loop for a few
    iterations with printing, sleeping, session-saving, TensorBoard and
    callback actions attached.
    """
    np.random.seed(0)
    # Toy regression data: noisy sine on [0, 10).
    X = np.random.rand(10000, 1) * 10
    Y = np.sin(X) + np.random.randn(*X.shape)

    with gpflow.defer_build():
        m = gpflow.models.SVGP(X, Y, gpflow.kernels.RBF(1), gpflow.likelihoods.Gaussian(),
                               Z=np.linspace(0, 10, 5)[:, None],
                               minibatch_size=100, name="SVGP")
        m.likelihood.variance = 0.01
    m.compile()

    global_step = tf.Variable(0, trainable=False, name="global_step")
    session_tf.run(global_step.initializer)

    adam = gpflow.train.AdamOptimizer(0.01).make_optimize_action(m, global_step=global_step)

    # create a filewriter for summaries
    fw = tf.summary.FileWriter('./model_tensorboard', m.graph)

    # Each action fires on an iteration (or total-time) trigger; the
    # itertools.count(...) sequences control how often.
    print_lml = mon.PrintTimings(itertools.count(), mon.Trigger.ITER, single_line=True, global_step=global_step)
    sleep = mon.SleepAction(itertools.count(), mon.Trigger.ITER, 0.0)
    saver = mon.StoreSession(itertools.count(step=3), mon.Trigger.ITER, session_tf,
                             hist_path="./monitor-saves/checkpoint", global_step=global_step)
    tensorboard = mon.ModelTensorBoard(itertools.count(step=3), mon.Trigger.ITER, m, fw, global_step=global_step)
    lml_tensorboard = mon.LmlTensorBoard(itertools.count(step=5), mon.Trigger.ITER, m, fw, global_step=global_step)
    callback = mon.CallbackAction(mon.seq_exp_lin(2.0, np.inf, 1e-3), mon.Trigger.TOTAL_TIME, lambda x, b: x, m)

    actions = [adam, print_lml, tensorboard, lml_tensorboard, saver, sleep, callback]

    # Run the action loop for 11 optimization steps.
    gpflow.actions.Loop(actions, stop=11)()
# Example #10 (snippet separator from scraped source)
    def run(self, save_path, init_design_size=1, n_iter=24, plot=False, likelihood_uncert=1.,design = None):
        self.save_path = save_path
        self.iter = 0
        
        self.X = [] + self.init_X
        self.Y = [] + self.init_Y
        
        domain = np.sum(self.domain)

#        if self.first_tries is not None:
#            Y = [self.objective(x).flatten() for x in self.first_tries]
#            self.Y = self.Y + Y
#            self.X = self.X + list(self.first_tries)

        if init_design_size > 0:
            if design == 'latin':
                design = LatinHyperCube(init_design_size,domain)
            if design == 'fac':
                design = FactorialDesign(2, domain)
            X = list(design.generate())
            Y = [self.objective(x).flatten() for x in X]
            self.Y = self.Y + Y
            self.X = self.X + X
        
        
        self.X, self.Y = np.stack(self.X,axis=0), np.stack(self.Y,axis=0)
        
        u = np.concatenate([self.X,self.Y],axis=1)
        u, idx = np.unique(u,return_index=True,axis=0)
        self.X = list(self.X[idx,:])
        self.Y = list(self.Y[idx,:])
        
        self.burnin = len(self.X)
        
        logging.warning("Beginnig search")
#         logging.warning("Initial solutions\n{}".format(list(zip(self.X, self.Y))))
        with gp.defer_build():
            kern = gp.kernels.Matern52(domain.size, ARD=True)# + gp.kernels.White(domain.size)
            m = gp.models.GPR(np.stack(self.X,axis=0), np.stack(self.Y,axis=0), kern)
            lik_var = log_normal_solve(likelihood_uncert**2, 0.5*likelihood_uncert**2)
            m.likelihood.variance = likelihood_uncert**2#np.exp(lik_var[0])
            m.likelihood.variance.prior = gp.priors.LogNormal(lik_var[0], lik_var[1]**2)
            m.likelihood.variance.trainable = False
            m.compile()
        self.ei = MomentGeneratingFunctionImprovement(m, self.t)
        opt = optim.StagedOptimizer([optim.MCOptimizer(domain, 5000), optim.SciPyOptimizer(domain)])
        optimizer = BayesianOptimizer(domain, self.ei, optimizer=opt, hyper_draws=1)

        #with optimizer.silent():
        result = optimizer.optimize(self.objective, n_iter=n_iter)
        logging.warning(result)
        if plot:
            self.plot_results()
        result = self.get_kwargs(result.x)
        self.print_top_k(5)
        return result
# Example #11 (snippet separator from scraped source)
    def _build_model(self, Y_var, freqs, X, Y, kern_params=None, Z=None, q_mu = None, q_sqrt = None, M=None, P=None, L=None, W=None, num_data=None, jitter=1e-6, tec_scale=None, W_trainable=False, use_mc=False, **kwargs):
        """
        Build the model from the data.
        X,Y: tensors the X and Y of data

        Y_var: per-datum observation variances; freqs: observation
        frequencies for the phase likelihood; Z: inducing inputs shared per
        latent; q_mu, q_sqrt: initial variational parameters (rescaled by
        tec_scale below); M, P, L: inducing / output / latent counts;
        W: (P, L) mixing matrix; tec_scale: scaling between TEC and model
        units.  NOTE(review): ``kern_params``, ``M`` and ``use_mc`` are not
        used in this body - confirm whether they are still needed.

        Returns:
        gpflow.models.Model
        """

        settings.numerics.jitter = jitter

        with gp.defer_build():
            # Define the likelihood
            likelihood = ComplexHarmonicPhaseOnlyGaussianEncodedHetero(tec_scale=tec_scale)
#            likelihood.variance = 0.3**2#(5.*np.pi/180.)**2
#            likelihood_var = log_normal_solve((5.*np.pi/180.)**2, 0.5*(5.*np.pi/180.)**2)
#            likelihood.variance.prior = LogNormal(likelihood_var[0],likelihood_var[1]**2)
#            likelihood.variance.transform = gp.transforms.positiveRescale(np.exp(likelihood_var[0]))
            likelihood.variance.trainable = False

            # Rescale the initial variational parameters into model units.
            q_mu = q_mu/tec_scale #M, L
            q_sqrt = q_sqrt/tec_scale# L, M, M

            # One kernel per latent process, mixed through W; each kernel's
            # variance is initialized from the variance of its q_mu column.
            kern = mk.SeparateMixedMok([self._build_kernel(None, None, None, #kern_params[l].w, kern_params[l].mu, kern_params[l].v,
                kern_var = np.var(q_mu[:,l]), **kwargs.get("priors",{})) for l in range(L)], W)
            kern.W.trainable = W_trainable
            kern.W.prior = gp.priors.Gaussian(W, 0.01**2)

            # Separate inducing-point sets, one per latent.
            feature = mf.MixedKernelSeparateMof([InducingPoints(Z) for _ in range(L)])
            mean = Zero()
            model = HeteroscedasticPhaseOnlySVGP(Y_var, freqs, X, Y, kern, likelihood,
                        feat = feature,
                        mean_function=mean,
                        minibatch_size=None,
                        num_latent = P,
                        num_data = num_data,
                        whiten = False,
                        q_mu = None,
                        q_sqrt = None,
                        q_diag = True)
            for feat in feature.feat_list:
                feat.Z.trainable = True #True
            model.q_mu.trainable = True
            model.q_mu.prior = gp.priors.Gaussian(0., 0.05**2)
            model.q_sqrt.trainable = True
#            model.q_sqrt.prior = gp.priors.Gaussian(0., (0.005/tec_scale)**2)
            model.compile()
            # Log the mixing matrix and variational mean as image summaries.
            tf.summary.image('W',kern.W.constrained_tensor[None,:,:,None])
            tf.summary.image('q_mu',model.q_mu.constrained_tensor[None,:,:,None])
#            tf.summary.image('q_sqrt',model.q_sqrt.constrained_tensor[:,:,:,None])

            return model
# Example #12 (snippet separator from scraped source)
 def prepare(self, autobuild=True):
     """Create a Foo model whose X/Y/Z DataHolders hold fresh 2x2 draws.

     Returns the model (compiled when ``autobuild`` is True) and the
     RandomState used to draw the data.
     """
     rng = np.random.RandomState()
     with gpflow.defer_build():
         model = Foo()
         for name in ('X', 'Y', 'Z'):
             setattr(model, name, gpflow.DataHolder(rng.randn(2, 2)))
     if autobuild:
         model.compile()
     return model, rng
 def prepare(self, autobuild=True):
     """Build a Foo model with three 2x2 random DataHolders (X, Y, Z).

     Returns (model, rng), where rng is the RandomState used to draw the
     data; the model is compiled only when ``autobuild`` is True.
     """
     rng = np.random.RandomState()
     with gpflow.defer_build():
         m = Foo()
         m.X = gpflow.DataHolder(rng.randn(2, 2))
         m.Y = gpflow.DataHolder(rng.randn(2, 2))
         m.Z = gpflow.DataHolder(rng.randn(2, 2))
     if autobuild:
         m.compile()
     return m, rng
def test_expand_combinations_simple_combs(tree_to_str, kernel_to_tree, k1, k2,
                                          k3, k4):
    """Check that _expand_combinations splits (k1 * k2 + k3) * k4 into its
    top-level product operands: 'k1 * k2 + k3' and 'k4'.
    """
    with gpflow.defer_build():
        res_strs = [
            tree_to_str(kernel_to_tree(k))
            for k in _expand_combinations((k1 * k2 + k3) * k4)
        ]
        assert res_strs == [
            f'{k1.name.lower()} * {k2.name.lower()} + {k3.name.lower()}',
            f'{k4.name.lower()}'
        ]
# Example #15 (snippet separator from scraped source)
 def _make_model(self, X, Y):
     """Construct a GPR over (X, Y) with an ARD RBF kernel.

     When ``self.min_noise_variance`` is set, the likelihood variance is
     given a Log1pe transform bounded below by that value.  The model is
     built (not compiled) before being returned.
     """
     num_input_dimensions = X.shape[1]
     with gpflow.defer_build():
         kernel = gpflow.kernels.RBF(input_dim=num_input_dimensions,
                                     ARD=True)
         model = gpflow.models.GPR(X, Y, kern=kernel)
         min_noise = self.min_noise_variance
         if min_noise is not None:
             model.likelihood.variance.transform = gpflow.transforms.Log1pe(
                 lower=min_noise)
     model.build()
     return model
def train_inv_f1_gp(X_train, Y_train, logger):
    """GP which maps Tau -> lng or lat."""
    with gpflow.defer_build():
        model = gpflow.models.GPR(X_train, Y_train,
                                  kern=gpflow.kernels.RBF(1))
        model.compile()
        gpflow.train.ScipyOptimizer().minimize(model)
        # Log progress, the fitted model, and completion.
        logger.info("Model optimized")
        logger.info(model)
        logger.info("Inverse f1 GP trained.")
    return model
# Example #17 (snippet separator from scraped source)
 def __init__(self, N, country, plot=False):
     """Load WHO case-count data for *country* and fit GPs to the
     cumulative case and death counts.

     Reads 'data/previous-case-counts-%s.csv' % country, derives day
     offsets and differential counts, then fits one Matern-5/2 GPR to
     'Total Cases' (self.mc) and one to 'Total Deaths' (self.md).
     """
     df = pd.read_csv('data/previous-case-counts-%s.csv' % country)
     df['WHO report date'] = pd.to_datetime(df['WHO report date'],
                                            format="%d/%m/%Y")
     # Days since the first WHO report, used as the GP input.
     df['delta_time_days'] = (df['WHO report date'] -
                              df['WHO report date'].min()).dt.days
     df = df.sort_values('delta_time_days')
     print(df)
     self.df = df
     self.N = N
     self.country = country
     self.plot = plot
     # Differential case counts
     self.delta_cases = df['Total Cases'].values[1:] - df[
         'Total Cases'].values[:-1]
     # Differential death counts
     self.delta_deaths = df['Total Deaths'].values[1:] - df[
         'Total Deaths'].values[:-1]
     # GP fits: identical model/optimizer for cases and deaths, so the
     # shared construction lives in _fit_counts_gp.
     self.mc = self._fit_counts_gp('Total Cases')
     self.md = self._fit_counts_gp('Total Deaths')

 def _fit_counts_gp(self, column):
     """Fit and Scipy-optimize a Matern-5/2 GPR mapping day offset to the
     cumulative counts in *column*; the observation noise is fixed at
     300**2.  Returns the compiled model.
     """
     with gpflow.defer_build():
         k = gpflow.kernels.Matern52(1)
         m = gpflow.models.GPR(
             self.df['delta_time_days'].values[:, np.newaxis].astype('float'),
             self.df[column].values[:, np.newaxis].astype('float'),
             kern=k)
         m.likelihood.variance.trainable = False
         m.likelihood.variance = 300**2
     m.compile()
     gpflow.train.ScipyOptimizer().minimize(m)
     print(m.as_pandas_table())
     return m
def test_gpflow():
    """Smoke-test gpflow: build (without compiling) a GPR on toy data."""
    import gpflow
    import numpy as np
    with gpflow.defer_build():
        X = np.random.rand(20, 1)
        noise = np.random.randn(20, 1) * 0.01
        Y = np.sin(12 * X) + 0.66 * np.cos(25 * X) + noise
        kernel = gpflow.kernels.Matern32(1) + gpflow.kernels.Linear(1)
        m = gpflow.models.GPR(X, Y, kern=kernel)
    print(m)
# Example #19 (snippet separator from scraped source)
def train_GP(X_tau, Y, session, number, gp_name):
    """GP which maps tau -> lng or lat."""
    with gpflow.defer_build():
        model = gpflow.models.GPR(X_tau, Y, kern=gpflow.kernels.RBF(1))
        # Fix the observation noise at a small constant value.
        model.likelihood.variance = 1e-03
        model.likelihood.variance.trainable = False
        model.compile()
        gpflow.train.ScipyOptimizer().minimize(model)
        logger.info("{} GP #{} trained.".format(gp_name, number))
    session.save(BASE_DIR + "GP/model_{}_{}".format(number, gp_name), model)
    return model
# Example #20 (snippet separator from scraped source)
def SafeMatern32_Model(X,
                       y,
                       use_priors=False,
                       e_s=0,
                       period=12,
                       partial=False):
    """Build a (deferred) spatio-temporal VGP with a Poisson likelihood and
    a SafeMatern32 space kernel.

    Parameters
    ----------
    X : array-like
        Inputs; column 0 is used by the time and periodic kernels,
        columns 1-2 by the space kernel (see ``active_dims``).
    y : array-like
        Observed counts for the Poisson likelihood.
    use_priors : bool
        If True, place StudentT(0, 5, df=4) priors on kernel variances
        and lengthscales.
    e_s : float
        ``binsize`` for the Poisson likelihood.
    period : float
        Initial period of the periodic kernel.
    partial : bool
        If True, build a PartialVGP instead of a gpflow VGP.

    Returns
    -------
    The (unbuilt) model; the caller is expected to compile it.
    """

    with gpflow.defer_build():

        like = gpflow.likelihoods.Poisson(binsize=e_s)
        kern_s_effect = gpflow.kernels.SafeMatern32(input_dim=2,
                                                    active_dims=[1, 2],
                                                    name='space_effect')
        kern_t_effect = gpflow.kernels.RBF(1,
                                           active_dims=[0],
                                           name='time_effect')

        ## Will have to write custom kernel to match Flaxman 2014
        kern_p_effect = gpflow.kernels.Periodic(1,
                                                active_dims=[0],
                                                name='periodic_effect')
        # Space-time interaction as a product kernel.
        kern_st_effect = gpflow.kernels.Product([kern_s_effect, kern_t_effect])

        # Additive model: time + space + periodic + space*time interaction.
        full_kern = kern_t_effect + kern_s_effect + kern_p_effect + kern_st_effect

        if partial:
            m = PartialVGP(X,
                           y,
                           full_kern,
                           likelihood=like,
                           mean_function=None)
        else:
            m = gpflow.models.VGP(X,
                                  y,
                                  full_kern,
                                  likelihood=like,
                                  mean_function=None)

        # Initialize the period and keep it trainable.
        m.kern.periodic.period = period
        m.kern.periodic.period.trainable = True

        t_prior = gpflow.priors.StudentT(mean=0, scale=5, deg_free=4)

        if use_priors:
            m.kern.safematern32.variance.prior = t_prior
            m.kern.periodic.variance.prior = t_prior
            m.kern.rbf.variance.prior = t_prior

            m.kern.safematern32.lengthscales.prior = t_prior
            m.kern.rbf.lengthscales.prior = t_prior
            m.kern.periodic.lengthscales.prior = t_prior

    return m
# Example #21 (snippet separator from scraped source)
    def __init__(self,
                 data,
                 n_latent_dims=1,
                 n_inducing_points=10,
                 kernel=None,
                 mData=None,
                 latent_prior_mean=None,
                 latent_prior_var=1.,
                 latent_mean=None,
                 latent_var=0.1,
                 inducing_inputs=None,
                 dtype='float64'):
        """Set up a Bayesian GPLVM wrapper around the given data.

        Parameters
        ----------
        data : array-like
            Observations; stored via ``set_Y`` as self.Y (N x D).
        n_latent_dims : int
            Latent dimensionality Q.
        n_inducing_points : int
            Number of inducing points M.
        kernel : dict or None
            Kernel spec with keys 'name', 'ls', 'var'; defaults to
            {'name': 'RBF', 'ls': 1.0, 'var': 1.0}.
        mData : optional
            Auxiliary data, stored as-is.
        latent_prior_mean, latent_prior_var : prior over latent X.
        latent_mean, latent_var : initial variational posterior over X.
        inducing_inputs : optional initial inducing inputs Z.
        dtype : str
            NOTE(review): not used in this body - confirm whether the
            helpers are expected to consume it.
        """
        # Avoid the mutable-default-argument pitfall: build the default
        # kernel spec fresh on every call instead of sharing one dict.
        if kernel is None:
            kernel = {'name': 'RBF', 'ls': 1.0, 'var': 1.0}

        self.Y = None
        self.Q = n_latent_dims
        self.M = n_inducing_points
        self.kern = None
        self.mData = mData
        self.X_prior_mean = None
        self.X_prior_var = None
        self.X_mean = None
        self.X_var = None
        self.Z = None

        self.set_Y(data)
        self.N, self.D = self.Y.shape

        self.set_kern(kernel)

        self.set_X_prior_mean(latent_prior_mean)
        self.set_X_prior_var(latent_prior_var)

        self.set_X_mean(latent_mean)
        self.set_X_var(latent_var)

        self.set_inducing_inputs(inducing_inputs)

        self.fitting_time = 0

        with gpflow.defer_build():
            # Copies guard the model against later in-place mutation of the
            # stored arrays.
            self.m = gpflow.models.BayesianGPLVM(
                Y=self.Y,
                kern=self.kern,
                X_prior_mean=self.X_prior_mean,
                X_prior_var=self.X_prior_var,
                X_mean=self.X_mean.copy(),
                X_var=self.X_var.copy(),
                Z=self.Z.copy(),
                M=self.M)
            self.m.likelihood.variance = 0.01
# Example #22 (snippet separator from scraped source)
def sparse_mcmc_example():
    """Sparse GP multiclass-classification demo: build an SGPMC model on a
    synthetic 1-D, 3-class problem, optimize it, draw HMC samples, and plot
    the lengthscale posterior.
    """
    #%matplotlib inline
    plt.style.use('ggplot')

    np.random.seed(1)

    # Make a one dimensional classification problem.
    X = np.random.rand(100, 1)
    K = np.exp(-0.5 * np.square(X - X.T) / 0.01) + np.eye(100) * 1e-6
    f = np.dot(np.linalg.cholesky(K), np.random.randn(100, 3))
    Y = np.array(np.argmax(f, 1).reshape(-1, 1), dtype=float)

    plt.figure(figsize=(12, 6))
    plt.plot(X, f, '.')

    with gpflow.defer_build():
        model = gpflow.models.SGPMC(
            X,
            Y,
            kern=gpflow.kernels.Matern32(1, lengthscales=0.1) +
            gpflow.kernels.White(1, variance=0.01),
            likelihood=gpflow.likelihoods.MultiClass(3),
            Z=X[::5].copy(),
            num_latent=3)
        model.kern.kernels[0].variance.prior = gpflow.priors.Gamma(1.0, 1.0)
        model.kern.kernels[0].lengthscales.prior = gpflow.priors.Gamma(
            2.0, 2.0)
        # Fix the White-kernel variance.  The attribute is `trainable`
        # (the original wrote `trainables`, which silently did nothing).
        model.kern.kernels[1].variance.trainable = False

    model.compile()
    print(model.as_pandas_table())

    # Short MAP optimization before sampling.
    opt = gpflow.train.ScipyOptimizer()
    opt.minimize(model, maxiter=10)
    print(model.kern.as_pandas_table())

    hmc = gpflow.train.HMC()
    samples = hmc.sample(model,
                         num_samples=500,
                         epsilon=0.04,
                         lmax=15,
                         logprobs=False)  # pands.DataFrame.
    #print('Columns =', samples.columns.values)

    plot_from_samples(model, samples)
    print(samples.head())

    _ = plt.hist(np.vstack(
        samples['SGPMC/kern/kernels/0/lengthscales']).flatten(),
                 50,
                 density=True)
    plt.xlabel('lengthscale')
def restore():
    """Rebuild the deferred model and restore its variables from checkpoint.

    Returns the compiled model with parameters loaded from
    ``save_dir + "/model.ckpt"``.
    """
    with gpflow.defer_build():
        m = init()

    # Note: the original created tf.local/global_variables_initializer()
    # ops here without ever running them (dead code), and fetched the graph
    # into an unused variable; both removed.  Saver.restore below loads all
    # variable values, so no initializer run is needed.
    tf_session = m.enquire_session()
    m.compile(tf_session)

    saver = tf.train.Saver()
    save_path = saver.restore(tf_session, save_dir + "/model.ckpt")
    print("Model loaded from path: %s" % save_path)

    return m
def test_train(iterations):
    """Train the deferred model with Adam, checkpointing the first time the
    ELBO crosses each milestone, then save the final model.

    Returns the trained model anchored to its session.
    """
    with gpflow.defer_build():
        m = init()

    # Note: the original created (but never ran) local/global variable
    # initializer ops here; those dead lines are removed - m.compile()
    # initializes the model's variables.
    tf_session = m.enquire_session()
    m.compile(tf_session)

    op_adam = AdamOptimizer(0.01).make_optimize_tensor(m)

    # ELBO milestone -> checkpoint name tag.  Each tag is saved at most
    # once; the table replaces the original triple-nested copy-pasted
    # save blocks (equivalent because the thresholds are ordered).
    milestones = [(-2400, '2800'), (-2300, '2600'), (-2200, '2500')]
    saved_tags = set()

    def _checkpoint(tag):
        # One fresh Saver per save, matching the original behaviour.
        saver = tf.train.Saver()
        save_path = saver.save(tf_session,
                               save_dir + '/model' + tag + '.ckpt')
        print("Model saved in path: %s" % save_path)

    for it in range(iterations):
        tf_session.run(op_adam)
        if it % 100 == 0:
            likelihood = tf_session.run(m.likelihood_tensor)
            print('{}, ELBO={:.4f}'.format(it, likelihood))
            for threshold, tag in milestones:
                if likelihood >= threshold and tag not in saved_tags:
                    _checkpoint(tag)
                    saved_tags.add(tag)

    saver = tf.train.Saver()
    save_path = saver.save(tf_session, save_dir + "/model.ckpt")
    print("Model saved in path: %s" % save_path)

    m.anchor(tf_session)

    return m
def main():
    """Fit (or reload) an FVGP model with Gamma hyperpriors on a synthetic
    dataset, then plot posterior function samples and the kernel
    variance/lengthscale posteriors.
    """
    np.random.seed(1234)
    X, y = generate_dataset(200)

    float_type = gpflow.settings.float_type
    prior = Gamma(float_type(0.01), float_type(0.01))

    # Only the kernel construction is deferred; priors are attached to its
    # parameters via the `priors` dict passed to FVGP.
    with gpflow.defer_build():
        kern = gpflow.kernels.RBF(1)
    likelihood = gpflow.likelihoods.Gaussian()
    priors = {kern.variance: prior, kern.lengthscales: prior}
    Z = np.linspace(min(X), max(X), 10)[:, np.newaxis]
    model = FVGP(X, y, kern, likelihood, Z=Z, priors=priors)

    # Reuse saved parameters when available; otherwise optimize and save.
    result_path = Path('advi_model_params.npz')
    if result_path.exists():
        print('loading parameters values from {}'.format(result_path))
        params = dict(np.load(str(result_path)))
        model.assign(params)
    else:
        optimizer = gpflow.training.AdamOptimizer(0.01)
        optimizer.minimize(model, maxiter=5000)
        np.savez(str(result_path), **model.read_values())

    # Repeated read_value calls draw from the (stochastic) posterior over
    # the hyperparameters.
    sess = model.enquire_session()
    var_samples = [kern.variance.read_value(sess) for _ in range(2000)]
    len_samples = [kern.lengthscales.read_value(sess) for _ in range(2000)]

    xs = np.linspace(X[0, 0], X[-1, 0], 500)[:, np.newaxis]
    f_samples = [model.predict_f_samples(xs, 1) for _ in range(200)]
    f_samples = np.vstack(f_samples)[..., 0]

    # TODO fix model printing?
    # print(model)

    fig, axes = plt.subplots(1, 3, figsize=(16, 6))
    axes[0].grid()
    axes[0].plot(X, y, 'o')
    axes[0].plot(xs, f_samples.T, 'k', alpha=0.1)
    axes[0].plot(xs, np.mean(f_samples, 0), lw=2)
    axes[1].set_title('kernel variance posterior')
    axes[1].grid()
    sb.distplot(var_samples, ax=axes[1])
    axes[2].set_title('kernel lengthscale posterior')
    axes[2].grid()
    sb.distplot(len_samples, ax=axes[2])
    fig.tight_layout()
    plt.show()
# Example #26 (snippet separator from scraped source)
    def _build_model(self, Y_var, X, Y, Z=None, q_mu = None, q_sqrt = None, M=None, P=None, L=None, W=None, num_data=None, jitter=1e-6, tec_scale=None, W_diag=False, **kwargs):
        """
        Build the model from the data.
        X,Y: tensors the X and Y of data

        Y_var: per-datum observation variances; Z: inducing inputs shared
        per latent; q_mu, q_sqrt: initial variational parameters (rescaled
        by tec_scale below); M, P, L: inducing / output / latent counts;
        W: (P, L) mixing matrix; W_diag: if True, W is kept fixed, otherwise
        it is reparameterized (lower-triangular, squared) and trained.
        NOTE(review): ``M`` is not used in this body - confirm.

        Returns:
        gpflow.models.Model
        """

        settings.numerics.jitter = jitter

        with gp.defer_build():
            # Define the likelihood
            likelihood = GaussianTecHetero(tec_scale=tec_scale)

            # Rescale the initial variational parameters into model units.
            q_mu = q_mu/tec_scale #M, L
            q_sqrt = q_sqrt/tec_scale# L, M, M

            # One kernel per latent process, mixed through W; each kernel's
            # variance is initialized from the variance of its q_mu column.
            kern = mk.SeparateMixedMok([self._build_kernel(kern_var = np.var(q_mu[:,l]), **kwargs.get("priors",{})) for l in range(L)], W)
            if W_diag:
#                kern.W.transform = Reshape(W.shape,(P,L,L))(gp.transforms.DiagMatrix(L)(gp.transforms.positive))
                kern.W.trainable = False
            else:
                kern.W.transform = Reshape(W.shape,(P//L,L,L))(MatrixSquare()(gp.transforms.LowerTriangular(L,P//L)))
                kern.W.trainable = True

            # Separate inducing-point sets, one per latent.
            feature = mf.MixedKernelSeparateMof([InducingPoints(Z) for _ in range(L)])
            mean = Zero()
            model = HeteroscedasticTecSVGP(Y_var, X, Y, kern, likelihood,
                        feat = feature,
                        mean_function=mean,
                        minibatch_size=None,
                        num_latent = P,
                        num_data = num_data,
                        whiten = False,
                        q_mu = q_mu,
                        q_sqrt = q_sqrt)
            for feat in feature.feat_list:
                feat.Z.trainable = True
            model.q_mu.trainable = True
            model.q_sqrt.trainable = True
#            model.q_sqrt.prior = gp.priors.Gaussian(q_sqrt, 0.005**2)
            model.compile()
            # Log mixing matrix and variational parameters as image summaries.
            tf.summary.image('W',kern.W.constrained_tensor[None,:,:,None])
            tf.summary.image('q_mu',model.q_mu.constrained_tensor[None,:,:,None])
            tf.summary.image('q_sqrt',model.q_sqrt.constrained_tensor[:,:,:,None])
        return model
# Example #27 (snippet separator from scraped source)
def init_gsm(x, y, M, Q, max_freq=1.0, max_len=1.0, ell=1.0, n_inits=10,
             minibatch_size=256, noise_var=10.0, ARD=False, likelihood=None):
    """Initialize a spectral SVGP by random restarts.

    Draws ``n_inits`` random parameter sets, builds and compiles a candidate
    model for each, and keeps the one with the highest log-likelihood.
    Cholesky failures (tf InvalidArgumentError) simply skip that restart.

    Returns the best model found, or None if every restart failed.
    """
    print('Initializing GSM...')
    best_loglik = -np.inf
    best_m = None
    N, input_dim = x.shape
    for k in range(n_inits):
        print('init:', k)
        try:
            with gpflow.defer_build():
                Z = random_Z(x, N, M)
                params = _gsm_rand_params(M, Q, input_dim, max_freq=max_freq,
                                          max_len=max_len, ell=ell, ARD=ARD)
                if likelihood is None:
                    # Default: Gaussian likelihood with a log-normal prior
                    # on the noise variance.
                    likhood = gpflow.likelihoods.Gaussian(noise_var)
                    likhood.variance.prior = gpflow.priors.LogNormal(mu=0, var=1)
                else:
                    likhood = likelihood
                candidate = SpectralSVGP(X=x, Y=y, Z=Z, ARD=ARD, likelihood=likhood,
                                         max_freq=max_freq,
                                         minibatch_size=minibatch_size,
                                         variances=params['variances'],
                                         frequencies=params['frequencies'],
                                         lengthscales=params['lengthscales'],
                                         Kvar=params['Kvar'], Kfreq=params['Kfreq'],
                                         Klen=params['Klen'])
                candidate.feature.Z.prior = gpflow.priors.Gaussian(0, 1)
            candidate.compile()
            loglik = candidate.compute_log_likelihood()
            print('loglik:', loglik)
            if loglik > best_loglik:
                best_loglik = loglik
                best_m = candidate
            del candidate
            gc.collect()
        except tf.errors.InvalidArgumentError:
            # Cholesky decomposition may fail for unlucky initializations.
            pass
    print('Best initialization: %f' % best_loglik)
    print(best_m)
    return best_m
Exemple #28
0
    def test_read_values(self):
        """read_values() must return the expected dict both before and after
        compile, and must raise when reading from a session in which the
        model's variables were never initialized."""
        def check_values(values, expected_dict, unexpected_dicts):
            # Equal to the expected dict, distinct from every near-miss.
            self.assertTrue(values == expected_dict)
            for unexpected_dict in unexpected_dicts:
                self.assertFalse(values == unexpected_dict)

        expected_dict = {'p/a': 10., 'p/b': 11., 'p/c/d': 12.}
        # Near-misses: wrong key name, wrong value, missing keys.
        unexpected_dicts = [{
            'p': 10.,
            'p/b': 11.,
            'p/c/d': 12.
        }, {
            'p/a': 11.,
            'p/b': 11.,
            'p/c/d': 12.
        }, {
            'p/a': 11.
        }]

        with self.test_context() as session:
            # A second, distinct session on the same graph.
            session_new = tf.Session(graph=session.graph)
            self.assertNotEqual(session_new, session)
            with session_new.as_default():
                with gpflow.defer_build():
                    p = self.create_layout()
                    values = p.read_values()
                    check_values(values, expected_dict, unexpected_dicts)
                    p.compile()
                    values = p.read_values()
                    check_values(values, expected_dict, unexpected_dicts)
                    # Reading from a session where the variables were never
                    # initialized must fail.
                    with self.assertRaises(tf.errors.FailedPreconditionError):
                        p.read_values(session=session)

        with self.test_context() as session_fail:
            self.assertFalse(session == session_fail)
            with self.assertRaises(tf.errors.FailedPreconditionError):
                p.read_values(session=session_fail)

        with self.test_context() as session_intialize:
            # After explicit initialization the read succeeds.
            p.initialize(session=session_intialize)
            values = p.read_values(session=session_intialize)
            check_values(values, expected_dict, unexpected_dicts)

        values = p.read_values(session=session_new)
        check_values(values, expected_dict, unexpected_dicts)
        session_new.close()
 def test_keep_custom_parameters(self):
     """Custom priors and transforms must survive a clear()/compile() cycle."""
     transforms = (gpflow.transforms.Log1pe, gpflow.transforms.Identity,
                   gpflow.transforms.Exp, gpflow.transforms.Logistic)
     for transform_cls in transforms:
         with self.test_context():
             with gpflow.defer_build():
                 model = self.get_model()
                 model.q_mu.prior = gpflow.priors.Gamma(3., 1./3.)
                 model.q_sqrt.transform = transform_cls()
             model.compile()
             # Swap in new data, then rebuild from scratch.
             model.X = np.random.rand(11, 1)
             model.Y = np.random.rand(11, 1)
             model.clear()
             model.compile()
             # Custom prior and transform survived the rebuild.
             self.assertIsInstance(model.q_mu.prior, gpflow.priors.Gamma)
             self.assertIsInstance(model.q_sqrt.transform, transform_cls)
             self.assertTupleEqual(model.q_mu.shape, (11, 1))
             self.assertTupleEqual(model.q_sqrt.shape, (1, 11, 11))
Exemple #30
0
def evalMLE(X, Y):
    """Build a GPR model with Beta(1, 3) priors and fit it by maximum likelihood.

    Uses a Matern52 kernel with a zero mean function, trains with Adam, and
    returns a (None, model) tuple.
    """
    with gpflow.defer_build():
        kernel = gpflow.kernels.Matern52(1, lengthscales=0.3)
        mean_fn = gpflow.mean_functions.Zero()
        m = gpflow.models.GPR(X, Y, kernel, mean_fn)
        m.clear()

        # Weakly-informative Beta priors on all hyperparameters.
        for param in (m.kern.lengthscales, m.kern.variance,
                      m.likelihood.variance):
            param.prior = gpflow.priors.Beta(1., 3.)

    m.compile()
    print(m.as_pandas_table())

    gpflow.train.AdamOptimizer(learning_rate=.05, beta1=.5,
                               beta2=.99).minimize(m)
    return None, m
Exemple #31
0
 def test_append(self):
     """ParamList.append accepts Params before compile (deferred or not),
     raises GPflowError once compiled, and ValueError for non-Params."""
     with self.test_context():
         pre_param = gpflow.Param(1.2)
         late_param = gpflow.Param(np.array([3.4, 5.6], settings.float_type))
         with gpflow.defer_build():
             deferred_param = gpflow.Param(1.2)
             param_list = gpflow.ParamList([pre_param])
             param_list.append(deferred_param)
         post_param = gpflow.Param(1.2)
         param_list.append(post_param)
         param_list.compile()
         # The list is compiled now; appending a further Param must fail.
         with self.assertRaises(gpflow.GPflowError):
             param_list.append(late_param)
         for param in (pre_param, deferred_param, post_param):
             self.assertTrue(param in param_list.params)
         self.assertFalse(late_param in param_list.params)
         # Non-Param values are rejected outright.
         with self.assertRaises(ValueError):
             param_list.append('foo')
0
 def test_append(self):
     """ParamList.append accepts Params before compile (deferred or not),
     raises GPflowError once compiled, and ValueError for non-Params."""
     with self.test_context():
         p1 = gpflow.Param(1.2)
         p4 = gpflow.Param(np.array([3.4, 5.6], settings.float_type))
         with gpflow.defer_build():
             p2 = gpflow.Param(1.2)
             param_list = gpflow.ParamList([p1])
             param_list.append(p2)
         p3 = gpflow.Param(1.2)
         param_list.append(p3)
         param_list.compile()
         # The list is compiled now; appending a new Param must fail.
         with self.assertRaises(gpflow.GPflowError):
             param_list.append(p4)
         self.assertTrue(p1 in param_list.params)
         self.assertTrue(p2 in param_list.params)
         self.assertTrue(p3 in param_list.params)
         self.assertFalse(p4 in param_list.params)
         # Non-Param values are rejected outright.
         with self.assertRaises(ValueError):
             param_list.append('foo')
Exemple #33
0
    def deep_structure():
        """Build and compile a three-level Parameterized tree (p -> p.c -> p.c.c)."""
        top_a, top_b = gp.Param(1), gp.Param(2)
        mid_a, mid_b = gp.Param(3), gp.Param(4)

        with gp.defer_build():
            root = gp.Parameterized()
            root.c = gp.Parameterized()
            root.c.c = gp.Parameterized()
            # Leaves created inside the deferred context...
            root.c.c.a = gp.Param(3)
            root.c.c.b = gp.Param(4)
            # ...mixed with parameters created before it.
            root.a = top_a
            root.b = top_b
            root.c.a = mid_a
            root.c.b = mid_b

        root.compile()
        return root
def train_f1_gp(X_train, Y_train, logger):
    """Fit a GPR with a Matern32 kernel mapping lng, lat -> Tau.

    X_train should be standardised and should not contain any stops.
    Logs the data, the model before/after optimization, and returns the
    trained model.
    """
    with gpflow.defer_build():
        logger.info(X_train.shape)

        logger.info(X_train)
        logger.info(Y_train)

        model = gpflow.models.GPR(X_train, Y_train,
                                  kern=gpflow.kernels.Matern32(2))
        logger.info(model)

        model.compile()
        gpflow.train.ScipyOptimizer().minimize(model)
        logger.info("Model optimized")
        logger.info(model)
        logger.info("f1 GP trained.")
    return model
Exemple #35
0
    def test_read_values(self):
        """read_values() must return the expected dict both before and after
        compile, and must raise when reading from a session in which the
        model's variables were never initialized."""
        def check_values(values, expected_dict, unexpected_dicts):
            # Equal to the expected dict, distinct from every near-miss.
            self.assertTrue(values == expected_dict)
            for unexpected_dict in unexpected_dicts:
                self.assertFalse(values == unexpected_dict)

        expected_dict = {'p/a': 10., 'p/b': 11., 'p/c/d': 12.}
        # Near-misses: wrong key name, wrong value, missing keys.
        unexpected_dicts = [
            {'p': 10., 'p/b': 11., 'p/c/d': 12.},
            {'p/a': 11., 'p/b': 11., 'p/c/d': 12.},
            {'p/a': 11.}
        ]

        with self.test_context() as session:
            # A second, distinct session on the same graph.
            session_new = tf.Session(graph=session.graph)
            self.assertNotEqual(session_new, session)
            with session_new.as_default():
                with gpflow.defer_build():
                    p = self.create_layout()
                    values = p.read_values()
                    check_values(values, expected_dict, unexpected_dicts)
                    p.compile()
                    values = p.read_values()
                    check_values(values, expected_dict, unexpected_dicts)
                    # Reading from a session where the variables were never
                    # initialized must fail.
                    with self.assertRaises(tf.errors.FailedPreconditionError):
                        p.read_values(session=session)

        with self.test_context() as session_fail:
            self.assertFalse(session == session_fail)
            with self.assertRaises(tf.errors.FailedPreconditionError):
                p.read_values(session=session_fail)

        with self.test_context() as session_intialize:
            # After explicit initialization the read succeeds.
            p.initialize(session=session_intialize)
            values = p.read_values(session=session_intialize)
            check_values(values, expected_dict, unexpected_dicts)

        values = p.read_values(session=session_new)
        check_values(values, expected_dict, unexpected_dicts)
        session_new.close()
Exemple #36
0
 def prepare(self):
     """Return an uncompiled single-point GPR with a Matern52 kernel."""
     with gpflow.defer_build():
         X = np.ones((1, 1))
         Y = np.ones((1, 1))
         kernel = gpflow.kernels.Matern52(1)
         return gpflow.models.GPR(X, Y, kern=kernel)
 def prepare(self):
     """Return an uncompiled GPR on 100 noisy sine samples with an RBF kernel."""
     with gpflow.defer_build():
         inputs = np.random.rand(100, 1)
         targets = np.sin(inputs) + 0.01 * np.random.randn(*inputs.shape)
         return gpflow.models.GPR(inputs, targets, gpflow.kernels.RBF(1))
Exemple #38
0
 def setUp(self):
     with self.test_context(), gpflow.defer_build():
         self.m = gpflow.params.Parameterized(name='m')
         self.m.p = gpflow.params.Parameterized()
         self.m.b = gpflow.params.Parameterized()