Example #1
def rbf_lin_sum_kern2():
    return kernels.Sum([
        kernels.Linear(Data.D_in, variance=rng.rand()),
        kernels.RBF(Data.D_in, variance=rng.rand(), lengthscales=rng.rand() + 1.),
        kernels.Linear(Data.D_in, variance=rng.rand()),
        kernels.RBF(Data.D_in, variance=rng.rand(), lengthscales=rng.rand() + 1.),
    ])
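A Sum kernel's Gram matrix is simply the elementwise sum of its children's Gram matrices. Below is a minimal NumPy sketch of what the composite above evaluates to (an illustration of the math only, not the GPflow API; all names are made up):

import numpy as np

def rbf_gram(X, variance, lengthscale):
    # Squared-exponential Gram matrix: variance * exp(-||x - x'||^2 / (2 l^2))
    sqdist = np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)
    return variance * np.exp(-0.5 * sqdist / lengthscale ** 2)

def linear_gram(X, variance):
    # Linear kernel Gram matrix: variance * X X^T
    return variance * X @ X.T

X = np.random.rand(5, 2)
K_sum = (linear_gram(X, 0.5) + rbf_gram(X, 0.5, 1.2)
         + linear_gram(X, 0.3) + rbf_gram(X, 0.3, 1.5))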
Example #2
    def setUp(self):
        self.test_graph = tf.Graph()
        with self.test_context():
            self.rng = np.random.RandomState(0)

            self.N = 4
            self.D = 2
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(3, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov_pairwise = t.forward(unconstrained)
            self.Xcov = self.Xcov_pairwise[0]  # no cross-covariances

            variance = 0.3 + self.rng.rand()

            k1 = ekernels.RBF(1, variance, active_dims=[0])
            k2 = ekernels.RBF(1, variance, active_dims=[1])
            klin = ekernels.Linear(1, variance, active_dims=[1])
            self.ekernels = [k1, k2, klin]

            k1 = ekernels.RBF(2, variance)
            k2 = ekernels.RBF(2, variance)
            klin = ekernels.Linear(2, variance)
            self.pekernels = [k1, k2, klin]

            k1 = kernels.RBF(1, variance, active_dims=[0])
            klin = kernels.Linear(1, variance, active_dims=[1])
            self.kernels = [k1, klin]

            k1 = kernels.RBF(2, variance)
            klin = kernels.Linear(2, variance)
            self.pkernels = [k1, klin]
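The ekernels variants evaluate kernel expectations under the Gaussian input distribution in closed form rather than by quadrature. For an ARD RBF kernel with a diagonal input covariance, E[k(x, z)] with x ~ N(mu, diag(S)) has the standard closed form sketched below in plain NumPy (an illustration of the identity being tested, not GPflow's implementation):

import numpy as np

def expected_rbf(mu, S, z, variance, lengthscales):
    # E[k(x, z)] for x ~ N(mu, diag(S)) and k an ARD RBF kernel:
    #   variance * prod_d (1 + S_d / l_d^2)^(-1/2)
    #            * exp(-sum_d (mu_d - z_d)^2 / (2 (l_d^2 + S_d)))
    l2 = lengthscales ** 2
    scale = np.prod(1.0 + S / l2) ** -0.5
    expo = np.exp(-0.5 * np.sum((mu - z) ** 2 / (l2 + S)))
    return variance * scale * expo

mu, S = np.zeros(2), 0.1 * np.ones(2)
print(expected_rbf(mu, S, np.array([0.3, -0.2]), 1.0, np.ones(2)))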
Example #3
    def setUp(self):
        with self.test_session():
            self.rng = np.random.RandomState(0)

            self.N = 4
            self.D = 2
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(3, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov = t.forward(unconstrained)

            variance = 0.3 + self.rng.rand()

            k1 = ekernels.RBF(1, variance, active_dims=[0])
            k2 = ekernels.RBF(1, variance, active_dims=[1])
            klin = ekernels.Linear(1, variance, active_dims=[1])
            self.ekernels = [k1, k2, klin]

            k1 = ekernels.RBF(2, variance)
            k2 = ekernels.RBF(2, variance)
            klin = ekernels.Linear(2, variance)
            self.pekernels = [k1, k2, klin]

            k1 = kernels.RBF(1, variance, active_dims=[0])
            klin = kernels.Linear(1, variance, active_dims=[1])
            self.kernels = [k1, klin]

            k1 = kernels.RBF(2, variance)
            klin = kernels.Linear(2, variance)
            self.pkernels = [k1, klin]
Example #4
    def setUp(self):
        self.test_graph = tf.Graph()
        with self.test_context():
            self.N = 4
            self.D = 2
            self.rng = np.random.RandomState(0)
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(3, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov = t.forward(unconstrained)

            variance = 0.3 + self.rng.rand()

            k1 = ekernels.RBF(1, variance, active_dims=[0])
            k2 = ekernels.RBF(1, variance, active_dims=[1])
            klin = ekernels.Linear(1, variance, active_dims=[1])
            self.ekernels = [k1, k2, klin]  # kernels computing the expectation in closed form, with slicing

            k1 = ekernels.RBF(1, variance)
            k2 = ekernels.RBF(1, variance)
            klin = ekernels.Linear(1, variance)
            self.pekernels = [k1, k2, klin]  # kernels computing the expectation in closed form, without slicing

            k1 = kernels.RBF(1, variance, active_dims=[0])
            klin = kernels.Linear(1, variance, active_dims=[1])
            self.kernels = [k1, klin]

            k1 = kernels.RBF(1, variance)
            klin = kernels.Linear(1, variance)
            self.pkernels = [k1, klin]
Example #5
    def setUp(self):
        self.test_graph = tf.Graph()
        self.rng = np.random.RandomState(1)  # this seed works with 60 GH points
        self.N = 4
        self.D = 2
        self.Xmu = self.rng.rand(self.N, self.D)
        self.Z = self.rng.rand(2, self.D)

        unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
        t = TriDiagonalBlockRep()
        self.Xcov = t.forward(unconstrained)

        # Set up "normal" kernels
        ekernel_classes = [ekernels.RBF, ekernels.Linear]
        kernel_classes = [kernels.RBF, kernels.Linear]
        params = [(self.D, 0.3 + self.rng.rand(),
                   self.rng.rand(2) + [0.5, 1.5], None, True),
                  (self.D, 0.3 + self.rng.rand(), None)]
        self.ekernels = [c(*p) for c, p in zip(ekernel_classes, params)]
        self.kernels = [c(*p) for c, p in zip(kernel_classes, params)]

        # Test summed kernels, non-overlapping
        rbfvariance = 0.3 + self.rng.rand()
        rbfard = [self.rng.rand() + 0.5]
        linvariance = 0.3 + self.rng.rand()
        self.kernels.append(
            kernels.Add([
                kernels.RBF(1, rbfvariance, rbfard, [1], False),
                kernels.Linear(1, linvariance, [0])
            ]))
        self.kernels[-1].input_size = self.kernels[-1].input_dim
        for k in self.kernels[-1].kern_list:
            k.input_size = self.kernels[-1].input_size
        self.ekernels.append(
            ekernels.Add([
                ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                ekernels.Linear(1, linvariance, [0])
            ]))
        self.ekernels[-1].input_size = self.ekernels[-1].input_dim
        for k in self.ekernels[-1].kern_list:
            k.input_size = self.ekernels[-1].input_size

        # Test summed kernels, overlapping
        rbfvariance = 0.3 + self.rng.rand()
        rbfard = [self.rng.rand() + 0.5]
        linvariance = 0.3 + self.rng.rand()
        self.kernels.append(
            kernels.Add([
                kernels.RBF(self.D, rbfvariance, rbfard, active_dims=[0, 1]),
                kernels.Linear(self.D, linvariance, active_dims=[0, 1])
            ]))
        self.ekernels.append(
            ekernels.Add([
                ekernels.RBF(self.D, rbfvariance, rbfard, active_dims=[0, 1]),
                ekernels.Linear(self.D, linvariance, active_dims=[0, 1])
            ]))

        self.assertTrue(self.ekernels[-2].on_separate_dimensions)
        self.assertTrue(not self.ekernels[-1].on_separate_dimensions)
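The on_separate_dimensions flag asserted above is what allows the Add kernel to keep closed-form expectations: the summed terms must act on disjoint active dimensions. A minimal sketch of such a check (a hypothetical helper, not GPflow's actual logic):

def on_separate_dims(active_dims_list):
    # True iff no input dimension is claimed by more than one term.
    seen = set()
    for dims in active_dims_list:
        if seen & set(dims):
            return False
        seen |= set(dims)
    return True

print(on_separate_dims([[1], [0]]))        # True:  non-overlapping sum
print(on_separate_dims([[0, 1], [0, 1]]))  # False: overlapping sum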
Example #6
    def setUp(self):
        with self.test_session():
            self._threshold = 0.5
            self.rng = np.random.RandomState(0)
            self.N = 4
            self.D = 2

            # Test product kernels on non-overlapping dims
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()

            self.kernel = kernels.Prod([
                kernels.RBF(1, rbfvariance, rbfard, [1], False),
                kernels.Linear(1, linvariance, [0])
            ])

            self.ekernel = ekernels.Prod([
                ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                ekernels.Linear(1, linvariance, [0])
            ])

            self.Xmu = self.rng.rand(self.N, self.D)
            self.Xcov = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(2, self.D)
Example #7
def rbf_lin_prod_kern():
    return kernels.Product([
        kernels.RBF(1,
                    variance=rng.rand(),
                    lengthscales=rng.rand() + 1.,
                    active_dims=[0]),
        kernels.Linear(1, variance=rng.rand(), active_dims=[1])
    ])
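A Product kernel multiplies its children's Gram matrices elementwise; because the two factors above act on different columns, this yields a separable kernel. A standalone NumPy sketch of the same construction (illustration only):

import numpy as np

def rbf_gram_1d(x, variance, lengthscale):
    return variance * np.exp(-0.5 * (x[:, None] - x[None, :]) ** 2 / lengthscale ** 2)

def linear_gram_1d(x, variance):
    return variance * np.outer(x, x)

X = np.random.rand(5, 2)
# Each factor only sees its active dimension; the product is elementwise:
K_prod = rbf_gram_1d(X[:, 0], 0.4, 1.3) * linear_gram_1d(X[:, 1], 0.6)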
Example #8
def lin_kern_act_dim_1():
    return kernels.Linear(1, variance=rng.rand(), active_dims=[1])
Example #9
def lin_kern():
    return kernels.Linear(Data.D_in, variance=rng.rand())
Example #10
    def test_kernelsActiveDims(self):
        ''' Test sum and product compositional kernels '''
        with self.test_context():
            Q = 2  # latent dimensions
            X_mean = gpflow.models.PCA_reduce(self.Y, Q)
            kernsQuadratu = [
                kernels.RBF(1, active_dims=[0]) + kernels.Linear(1, active_dims=[1]),
                kernels.RBF(1, active_dims=[0]) + kernels.Periodic(1, active_dims=[1]),
                kernels.RBF(1, active_dims=[0]) * kernels.Linear(1, active_dims=[1]),
                kernels.RBF(Q) + kernels.Linear(Q)]  # overlapping dims
            kernsAnalytic = [
                ekernels.Sum([
                    ekernels.RBF(1, active_dims=[0]),
                    ekernels.Linear(1, active_dims=[1])]),
                ekernels.Sum([
                    ekernels.RBF(1, active_dims=[0]),
                    kernels.Periodic(1, active_dims=[1])]),
                ekernels.Product([
                    ekernels.RBF(1, active_dims=[0]),
                    ekernels.Linear(1, active_dims=[1])]),
                ekernels.Sum([
                    ekernels.RBF(Q),
                    ekernels.Linear(Q)])
            ]
            fOnSeparateDims = [True, True, True, False]
            Z = np.random.permutation(X_mean.copy())[:self.M]
            # Also test that the default N(0,1) prior is used
            X_prior_mean = np.zeros((self.N, Q))
            X_prior_var = np.ones((self.N, Q))
            Xtest = self.rng.randn(10, Q)

        for kq, ka, sepDims in zip(kernsQuadratu, kernsAnalytic, fOnSeparateDims):
            with self.test_context():
                kq.num_gauss_hermite_points = 20  # speed up quadrature for tests
                # RBF should throw error if quadrature is used
                ka.kern_list[0].num_gauss_hermite_points = 0
                if sepDims:
                    self.assertTrue(
                        ka.on_separate_dimensions,
                        'analytic kernel must not use quadrature')
                mq = gpflow.models.BayesianGPLVM(
                    X_mean=X_mean,
                    X_var=np.ones((self.N, Q)),
                    Y=self.Y,
                    kern=kq,
                    M=self.M,
                    Z=Z,
                    X_prior_mean=X_prior_mean,
                    X_prior_var=X_prior_var)
                ma = gpflow.models.BayesianGPLVM(
                    X_mean=X_mean,
                    X_var=np.ones((self.N, Q)),
                    Y=self.Y,
                    kern=ka,
                    M=self.M,
                    Z=Z)
                ql = mq.compute_log_likelihood()
                al = ma.compute_log_likelihood()
                self.assertTrue(np.allclose(ql, al, atol=1e-2),
                                'Likelihood not equal %f<>%f' % (ql, al))
                mu_f_a, var_f_a = ma.predict_f(Xtest)
                mu_f_q, var_f_q = mq.predict_f(Xtest)
                self.assertTrue(np.allclose(mu_f_a, mu_f_q, atol=1e-4),
                                ('Posterior means different', mu_f_a-mu_f_q))
                self.assertTrue(np.allclose(var_f_a, var_f_q, atol=1e-4),
                                ('Posterior vars different', var_f_a - var_f_q))
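num_gauss_hermite_points above controls how many nodes the quadrature fallback uses for kernel expectations. A one-dimensional sketch of that approximation (the idea only, not GPflow's implementation):

import numpy as np

def gh_expectation(f, mu, var, n_points=20):
    # E[f(x)] for x ~ N(mu, var) via Gauss-Hermite quadrature:
    #   E[f(x)] ~= (1 / sqrt(pi)) * sum_i w_i * f(mu + sqrt(2 var) t_i)
    t, w = np.polynomial.hermite.hermgauss(n_points)
    return np.sum(w * f(mu + np.sqrt(2.0 * var) * t)) / np.sqrt(np.pi)

# Expectation of an unnormalised RBF evaluation under x ~ N(0.5, 0.1):
print(gh_expectation(lambda x: np.exp(-0.5 * x ** 2), 0.5, 0.1))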
Example #11
    "gauss_diag":
    DiagonalGaussian(Xmu, rng.rand(num_data, D_in)),
    "dirac_diag":
    DiagonalGaussian(Xmu, np.zeros((num_data, D_in))),
    "dirac_markov_gauss":
    MarkovGaussian(Xmu_markov, np.zeros((2, num_data + 1, D_in, D_in))),
    "markov_gauss":
    markov_gauss(),
}

_kerns = {
    "rbf":
    kernels.SquaredExponential(variance=rng.rand(),
                               lengthscales=rng.rand() + 1.0),
    "lin":
    kernels.Linear(variance=rng.rand()),
    "matern":
    kernels.Matern32(variance=rng.rand()),
    "rbf_act_dim_0":
    kernels.SquaredExponential(variance=rng.rand(),
                               lengthscales=rng.rand() + 1.0,
                               active_dims=[0]),
    "rbf_act_dim_1":
    kernels.SquaredExponential(variance=rng.rand(),
                               lengthscales=rng.rand() + 1.0,
                               active_dims=[1]),
    "lin_act_dim_0":
    kernels.Linear(variance=rng.rand(), active_dims=[0]),
    "lin_act_dim_1":
    kernels.Linear(variance=rng.rand(), active_dims=[1]),
    "rbf_lin_sum":
Example #12
    def setUp(self):
        with self.test_session():
            self.rng = np.random.RandomState(0)
            self.N = 4
            self.D = 2
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(2, self.D)

            self.Xcov_diag = 0.05 + self.rng.rand(self.N, self.D)
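            # Embed the per-dimension variances into full (N, D, D)
            # covariance matrices with zeros off the diagonal: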
            self.Xcov = np.zeros(
                (self.Xcov_diag.shape[0], self.Xcov_diag.shape[1],
                 self.Xcov_diag.shape[1]))
            self.Xcov[
                (np.s_[:], ) +
                np.diag_indices(self.Xcov_diag.shape[1])] = self.Xcov_diag

            # Set up "normal" kernels
            ekernel_classes = [ekernels.RBF, ekernels.Linear]
            kernel_classes = [kernels.RBF, kernels.Linear]
            params = [(self.D, 0.3 + self.rng.rand(),
                       self.rng.rand(2) + [0.5, 1.5], None, True),
                      (self.D, 0.3 + self.rng.rand(), None)]
            self.ekernels = [c(*p) for c, p in zip(ekernel_classes, params)]
            self.kernels = [c(*p) for c, p in zip(kernel_classes, params)]

            # Test summed kernels, non-overlapping
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()
            self.kernels.append(
                kernels.Add([
                    kernels.RBF(1, rbfvariance, rbfard, [1], False),
                    kernels.Linear(1, linvariance, [0])
                ]))
            self.kernels[-1].input_size = self.kernels[-1].input_dim
            for k in self.kernels[-1].kern_list:
                k.input_size = self.kernels[-1].input_size
            self.ekernels.append(
                ekernels.Add([
                    ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                    ekernels.Linear(1, linvariance, [0])
                ]))
            self.ekernels[-1].input_size = self.ekernels[-1].input_dim
            for k in self.ekernels[-1].kern_list:
                k.input_size = self.ekernels[-1].input_size

            # Test summed kernels, overlapping
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()
            self.kernels.append(
                kernels.Add([
                    kernels.RBF(self.D, rbfvariance, rbfard),
                    kernels.Linear(self.D, linvariance)
                ]))
            self.ekernels.append(
                ekernels.Add([
                    ekernels.RBF(self.D, rbfvariance, rbfard),
                    ekernels.Linear(self.D, linvariance)
                ]))

            self.assertTrue(self.ekernels[-2].on_separate_dimensions)
            self.assertTrue(not self.ekernels[-1].on_separate_dimensions)
Example #13
def bayesian_gplvm_example():
    np.random.seed(42)

    pods.datasets.overide_manual_authorize = True  # Don't ask to authorize.
    gpflow.settings.numerics.quadrature = 'error'  # Throw error if quadrature is used for kernel expectations.

    # Data.
    data = pods.datasets.oil_100()
    Y = data['X']
    print('Number of points x Number of dimensions', Y.shape)
    print(data['citation'])

    # Model construction.
    Q = 5
    M = 20  # Number of inducing pts.
    N = Y.shape[0]
    X_mean = gpflow.models.PCA_reduce(Y, Q)  # Initialise via PCA.
    Z = np.random.permutation(X_mean.copy())[:M]

    fHmmm = False
    if fHmmm:
        k = (kernels.RBF(3, ARD=True, active_dims=slice(0, 3)) +
             kernels.Linear(2, ARD=False, active_dims=slice(3, 5)))
    else:
        k = (kernels.RBF(3, ARD=True, active_dims=[0, 1, 2]) +
             kernels.Linear(2, ARD=False, active_dims=[3, 4]))

    m = gpflow.models.BayesianGPLVM(X_mean=X_mean,
                                    X_var=0.1 * np.ones((N, Q)),
                                    Y=Y,
                                    kern=k,
                                    M=M,
                                    Z=Z)
    m.likelihood.variance = 0.01

    opt = gpflow.train.ScipyOptimizer()
    m.compile()
    opt.minimize(m)  #, options=dict(disp=True, maxiter=100))

    # Compute sensitivity to the inputs.
    # Sensitivity is a measure of the importance of each latent dimension.
    kern = m.kern.kernels[0]
    sens = np.sqrt(kern.variance.read_value()) / kern.lengthscales.read_value()
    print(m.kern)
    print(sens)
    fig, ax = plt.subplots()
    ax.bar(np.arange(len(kern.lengthscales.read_value())),
           sens,
           0.1,
           color='y')
    ax.set_title('Sensitivity to latent inputs')

    # Plot vs PCA.
    XPCAplot = gpflow.models.PCA_reduce(data['X'], 2)
    f, ax = plt.subplots(1, 2, figsize=(10, 6))
    labels = data['Y'].argmax(axis=1)
    colors = cm.rainbow(np.linspace(0, 1, len(np.unique(labels))))

    for i, c in zip(np.unique(labels), colors):
        ax[0].scatter(XPCAplot[labels == i, 0],
                      XPCAplot[labels == i, 1],
                      color=c,
                      label=i)
        ax[0].set_title('PCA')
        ax[1].scatter(m.X_mean.read_value()[labels == i, 1],
                      m.X_mean.read_value()[labels == i, 2],
                      color=c,
                      label=i)
        ax[1].set_title('Bayesian GPLVM')
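The sensitivity heuristic above is the usual ARD relevance measure: a latent dimension with a large lengthscale barely moves the kernel, so sqrt(variance) / lengthscale is small. A tiny numeric illustration with made-up values:

import numpy as np

variance = 1.0
lengthscales = np.array([0.5, 2.0, 50.0])  # hypothetical ARD lengthscales
sens = np.sqrt(variance) / lengthscales
print(sens)  # [2.0, 0.5, 0.02] -> the third latent dimension is effectively pruned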
Example #14
data = np.load('data/three_phase_oil_flow.npz')
Y = data['Y']
labels = data['labels']

print('Number of points x Number of dimensions', Y.shape)

Q = 5
M = 20  # number of inducing pts
N = Y.shape[0]
X_mean = gpflow.models.PCA_reduce(Y, Q)  # Initialise via PCA
Z = np.random.permutation(X_mean.copy())[:M]

fHmmm = False
if fHmmm:
    k = (kernels.RBF(3, ARD=True, active_dims=slice(0, 3)) +
         kernels.Linear(2, ARD=False, active_dims=slice(3, 5)))
else:
    k = (kernels.RBF(3, ARD=True, active_dims=[0, 1, 2]) +
         kernels.Linear(2, ARD=False, active_dims=[3, 4]))

m = gpflow.models.BayesianGPLVM(X_mean=X_mean,
                                X_var=0.1 * np.ones((N, Q)),
                                Y=Y,
                                kern=k,
                                M=M,
                                Z=Z)
m.likelihood.variance = 0.01

opt = gpflow.train.ScipyOptimizer()
m.compile()
Example #15
class Data:
    rng = np.random.RandomState(1)
    num_data = 5
    num_ind = 4
    D_in = 2
    D_out = 2

    Xmu = rng.randn(num_data, D_in)
    L = gen_L(rng, num_data, D_in, D_in)
    Xvar = np.array([l @ l.T for l in L])
    Z = rng.randn(num_ind, D_in)

    # distributions don't need to be compiled (No Parameter objects)
    # but the members should be Tensors created in the same graph
    graph = tf.Graph()
    with test_util.session_context(graph) as sess:
        gauss = Gaussian(tf.constant(Xmu), tf.constant(Xvar))
        dirac = Gaussian(tf.constant(Xmu),
                         tf.constant(np.zeros((num_data, D_in, D_in))))
        gauss_diag = DiagonalGaussian(tf.constant(Xmu),
                                      tf.constant(rng.rand(num_data, D_in)))
        dirac_diag = DiagonalGaussian(tf.constant(Xmu),
                                      tf.constant(np.zeros((num_data, D_in))))
        dirac_markov_gauss = MarkovGaussian(
            tf.constant(Xmu), tf.constant(np.zeros((2, num_data, D_in, D_in))))

        # create the covariance for the pairwise markov-gaussian
        dummy_gen = lambda rng, n, *shape: np.array(
            [rng.randn(*shape) for _ in range(n)])
        L_mg = dummy_gen(rng, num_data, D_in, 2 * D_in)  # N+1 x D x 2D
        LL = np.concatenate((L_mg[:-1], L_mg[1:]), 1)  # N x 2D x 2D
        Xcov = LL @ np.transpose(LL, (0, 2, 1))
        Xc = np.concatenate((Xcov[:, :D_in, :D_in], Xcov[-1:, D_in:, D_in:]),
                            0)  # N+1 x D x D
        Xcross = np.concatenate(
            (Xcov[:, :D_in, D_in:], np.zeros(
                (1, D_in, D_in))), 0)  # N+1 x D x D
        Xcc = np.stack([Xc, Xcross])  # 2 x N+1 x D x D

        markov_gauss = MarkovGaussian(tf.constant(Xmu), tf.constant(Xcc))

    with gpflow.decors.defer_build():
        # features
        ip = features.InducingPoints(Z)
        # kernels
        rbf_prod_seperate_dims = kernels.Product([
            kernels.RBF(1,
                        variance=rng.rand(),
                        lengthscales=rng.rand(),
                        active_dims=[0]),
            kernels.RBF(1,
                        variance=rng.rand(),
                        lengthscales=rng.rand(),
                        active_dims=[1])
        ])

        rbf_lin_sum = kernels.Sum([
            kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand()),
            kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand()),
            kernels.Linear(D_in, variance=rng.rand())
        ])

        rbf = kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand())

        lin_kern = kernels.Linear(D_in, variance=rng.rand())

        # mean functions
        lin = mean_functions.Linear(rng.rand(D_in, D_out), rng.rand(D_out))
        iden = mean_functions.Identity(D_in)  # Note: Identity can only be used if D_in == D_out
        zero = mean_functions.Zero(output_dim=D_out)
        const = mean_functions.Constant(rng.rand(D_out))
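The gen_L helper used by the Data class above is not shown in this snippet; a minimal stand-in consistent with its use (hypothetical, the real helper may differ) produces a batch of lower-triangular factors so that each l @ l.T is a valid covariance:

import numpy as np

def gen_L(rng, n, *shape):
    # Hypothetical stand-in: n lower-triangular matrices, so that the
    # products l @ l.T above are positive semi-definite covariances.
    return np.array([np.tril(rng.randn(*shape)) for _ in range(n)])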