Code example #1
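A parameterized test that exercises an RBF kernel through the MXFusionFunction interface: the kernel matrix obtained from the factor's `eval()` must match a direct call to `RBF.K`, first for a single input `X` and then for the cross-covariance between `X` and `X2`.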
        def test_kernel_as_MXFusionFunction(self, dtype, X, X_isSamples, X2,
            X2_isSamples, lengthscale, lengthscale_isSamples, variance,
            variance_isSamples, num_samples, input_dim, ARD):

            X_mx = prepare_mxnet_array(X, X_isSamples, dtype)
            X2_mx = prepare_mxnet_array(X2, X2_isSamples, dtype)
            var_mx = prepare_mxnet_array(variance, variance_isSamples, dtype)
            l_mx = prepare_mxnet_array(lengthscale, lengthscale_isSamples,
                                       dtype)

            X_mf = Variable(shape=X.shape)
            l_mf = Variable(shape=lengthscale.shape)
            var_mf = Variable(shape=variance.shape)
            rbf = RBF(input_dim, ARD, 1., 1., 'rbf', None, dtype)
            eval = rbf(X_mf, rbf_lengthscale=l_mf, rbf_variance=var_mf).factor
            variables = {eval.X.uuid: X_mx, eval.rbf_lengthscale.uuid: l_mx, eval.rbf_variance.uuid: var_mx}
            res_eval = eval.eval(F=mx.nd, variables=variables)
            kernel_params = rbf.fetch_parameters(variables)
            res_direct = rbf.K(F=mx.nd, X=X_mx, **kernel_params)
            assert np.allclose(res_eval.asnumpy(), res_direct.asnumpy())

            X_mf = Variable(shape=X.shape)
            X2_mf = Variable(shape=X2.shape)
            l_mf = Variable(shape=lengthscale.shape)
            var_mf = Variable(shape=variance.shape)
            rbf = RBF(input_dim, ARD, 1., 1., 'rbf', None, dtype)
            eval = rbf(X_mf, X2_mf, rbf_lengthscale=l_mf, rbf_variance=var_mf).factor
            variables = {eval.X.uuid: X_mx, eval.X2.uuid: X2_mx, eval.rbf_lengthscale.uuid: l_mx, eval.rbf_variance.uuid: var_mx}
            res_eval = eval.eval(F=mx.nd, variables=variables)
            kernel_params = rbf.fetch_parameters(variables)
            res_direct = rbf.K(F=mx.nd, X=X_mx, X2=X2_mx, **kernel_params)
            assert np.allclose(res_eval.asnumpy(), res_direct.asnumpy())
Code example #2
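A negative test: a Gluon network supplies the mean for the conditioning inputs only, so calling `ConditionalGaussianProcess.define_variable` with `mean_cond` but no `mean` is expected to raise a `ModelSpecificationError`.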
    def test_mean_argument(self):

        with pytest.raises(ModelSpecificationError):
            dtype = 'float64'

            net = nn.HybridSequential(prefix='nn_')
            with net.name_scope():
                net.add(
                    nn.Dense(1,
                             flatten=False,
                             activation="tanh",
                             in_units=2,
                             dtype=dtype))
            net.initialize(mx.init.Xavier(magnitude=3))

            rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)
            X_var = Variable(shape=(5, 2))
            X_cond_var = Variable(shape=(8, 2))
            Y_cond_var = Variable(shape=(8, 1))
            mean_func = MXFusionGluonFunction(net,
                                              num_outputs=1,
                                              broadcastable=True)
            mean_var = mean_func(X_var)
            mean_cond_var = mean_func(X_cond_var)
            gp = ConditionalGaussianProcess.define_variable(
                X=X_var,
                X_cond=X_cond_var,
                Y_cond=Y_cond_var,
                mean_cond=mean_cond_var,
                kernel=rbf,
                shape=(5, 1),
                dtype=dtype)
Code example #3
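A helper that assembles a plain `GPRegression` model: 3-dimensional inputs of variable length `m.N`, a positively-constrained noise variance, and an ARD RBF kernel initialized from the given lengthscale and variance.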
 def gen_mxfusion_model(self,
                        dtype,
                        D,
                        noise_var,
                        lengthscale,
                        variance,
                        rand_gen=None):
     m = Model()
     m.N = Variable()
     m.X = Variable(shape=(m.N, 3))
     m.noise_var = Variable(transformation=PositiveTransformation(),
                            initial_value=mx.nd.array(noise_var,
                                                      dtype=dtype))
     kernel = RBF(input_dim=3,
                  ARD=True,
                  variance=mx.nd.array(variance, dtype=dtype),
                  lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                  dtype=dtype)
     m.Y = GPRegression.define_variable(X=m.X,
                                        kernel=kernel,
                                        noise_var=m.noise_var,
                                        shape=(m.N, D),
                                        dtype=dtype,
                                        rand_gen=rand_gen)
     return m
Code example #4
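The sparse variational variant of the helper above: it adds inducing inputs `m.Z`, builds the output with `SVGPRegression`, and lowers the log-pdf jitter to 1e-8 before returning both the model and the GP module.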
 def gen_mxfusion_model(self,
                        dtype,
                        D,
                        Z,
                        noise_var,
                        lengthscale,
                        variance,
                        rand_gen=None):
     m = Model()
     m.N = Variable()
     m.X = Variable(shape=(m.N, 3))
     m.Z = Variable(shape=(3, 3), initial_value=mx.nd.array(Z, dtype=dtype))
     m.noise_var = Variable(transformation=PositiveTransformation(),
                            initial_value=mx.nd.array(noise_var,
                                                      dtype=dtype))
     kernel = RBF(input_dim=3,
                  ARD=True,
                  variance=mx.nd.array(variance, dtype=dtype),
                  lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                  dtype=dtype)
     m.Y = SVGPRegression.define_variable(X=m.X,
                                          kernel=kernel,
                                          noise_var=m.noise_var,
                                          inducing_inputs=m.Z,
                                          shape=(m.N, D),
                                          dtype=dtype)
     gp = m.Y.factor
     gp.svgp_log_pdf.jitter = 1e-8
     return m, gp
Code example #5
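An end-to-end test with a random input: `X` gets a standard Normal prior, the model is trained with stochastic variational inference over a Gaussian mean-field posterior, forward samples are drawn, and predictions are made both with the default module prediction algorithm and with `GPRegressionSamplingPrediction` configured for full-covariance sampling.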
    def test_with_samples(self):
        from mxfusion.common import config
        config.DEFAULT_DTYPE = 'float64'
        dtype = 'float64'

        D, X, Y, noise_var, lengthscale, variance = self.gen_data()

        m = Model()
        m.N = Variable()
        m.X = Normal.define_variable(mean=0, variance=1, shape=(m.N, 3))
        m.noise_var = Variable(transformation=PositiveTransformation(),
                               initial_value=mx.nd.array(noise_var,
                                                         dtype=dtype))
        kernel = RBF(input_dim=3,
                     ARD=True,
                     variance=mx.nd.array(variance, dtype=dtype),
                     lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                     dtype=dtype)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, D))

        q = create_Gaussian_meanfield(model=m, observed=[m.Y])

        infr = GradBasedInference(
            inference_algorithm=StochasticVariationalInference(
                model=m, posterior=q, num_samples=10, observed=[m.Y]))
        infr.run(Y=mx.nd.array(Y, dtype='float64'),
                 max_iter=2,
                 learning_rate=0.1,
                 verbose=True)

        infr2 = Inference(
            ForwardSamplingAlgorithm(model=m, observed=[m.X], num_samples=5))
        infr2.run(X=mx.nd.array(X, dtype='float64'))

        infr_pred = TransferInference(ModulePredictionAlgorithm(
            model=m, observed=[m.X], target_variables=[m.Y]),
                                      infr_params=infr.params)
        xt = np.random.rand(13, 3)
        res = infr_pred.run(X=mx.nd.array(xt, dtype=dtype))[0]

        gp = m.Y.factor
        gp.attach_prediction_algorithms(
            targets=gp.output_names,
            conditionals=gp.input_names,
            algorithm=GPRegressionSamplingPrediction(gp._module_graph,
                                                     gp._extra_graphs[0],
                                                     [gp._module_graph.X]),
            alg_name='gp_predict')
        gp.gp_predict.diagonal_variance = False
        gp.gp_predict.jitter = 1e-6

        infr_pred2 = TransferInference(ModulePredictionAlgorithm(
            model=m, observed=[m.X], target_variables=[m.Y]),
                                       infr_params=infr.params)
        xt = np.random.rand(13, 3)
        res = infr_pred2.run(X=mx.nd.array(xt, dtype=dtype))[0]
Code example #6
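A minimal `GPRegression` model factory with unit initial values for the kernel parameters and the noise variance.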
 def make_gpregr_model(self):
     m = Model()
     m.N = Variable()
     m.X = Variable(shape=(m.N, 3))
     m.noise_var = Variable(transformation=PositiveTransformation(), initial_value=mx.nd.array([1.]))
     kernel = RBF(input_dim=3, variance=mx.nd.array([1.]), lengthscale=mx.nd.array([1.]))
     m.Y = GPRegression.define_variable(X=m.X, kernel=kernel, noise_var=m.noise_var, shape=(m.N, 2))
     return m
Code example #7
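This test fixes the variational parameters (`qU_mean`, `qU_cov_W`, `qU_cov_diag`) of an `SVGPRegression` module by hand, mirrors them in a `GPy.core.SVGP` reference model, and checks that noise-free and noisy diagonal predictions agree between the two.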
    def test_prediction(self):
        np.random.seed(0)
        X = np.random.rand(10, 3)
        Y = np.random.rand(10, 1)
        Z = np.random.rand(3, 3)
        qU_mean = np.random.rand(3, 1)
        qU_cov_W = np.random.rand(3, 3)
        qU_cov_diag = np.random.rand(3,)
        noise_var = np.random.rand(1)
        lengthscale = np.random.rand(3)
        variance = np.random.rand(1)
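        # Build a PSD covariance W W^T + diag(d) and take its Cholesky factor; GPy stores q(U) via a flattened triangular parameterization.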
        qU_chol = np.linalg.cholesky(qU_cov_W.dot(qU_cov_W.T) + np.diag(qU_cov_diag))[None, :, :]
        Xt = np.random.rand(5, 3)

        m_gpy = GPy.core.SVGP(X=X, Y=Y, Z=Z, kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance), likelihood=GPy.likelihoods.Gaussian(variance=noise_var))
        m_gpy.q_u_mean = qU_mean
        m_gpy.q_u_chol = GPy.util.choleskies.triang_to_flat(qU_chol)

        dtype = 'float64'
        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.Z = Variable(shape=(3, 3), initial_value=mx.nd.array(Z, dtype=dtype))
        m.noise_var = Variable(transformation=PositiveTransformation(), initial_value=mx.nd.array(noise_var, dtype=dtype))
        kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype)
        m.Y = SVGPRegression.define_variable(X=m.X, kernel=kernel, noise_var=m.noise_var, inducing_inputs=m.Z, shape=(m.N, 1), dtype=dtype)
        gp = m.Y.factor

        observed = [m.X, m.Y]
        infr = Inference(MAP(model=m, observed=observed), dtype=dtype)
        infr.initialize(X=X.shape, Y=Y.shape)
        infr.params[gp._extra_graphs[0].qU_mean] = mx.nd.array(qU_mean, dtype=dtype)
        infr.params[gp._extra_graphs[0].qU_cov_W] = mx.nd.array(qU_cov_W, dtype=dtype)
        infr.params[gp._extra_graphs[0].qU_cov_diag] = mx.nd.array(qU_cov_diag, dtype=dtype)

        loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype))

        # noise_free, diagonal
        mu_gpy, var_gpy = m_gpy.predict_noiseless(Xt)

        infr2 = TransferInference(ModulePredictionAlgorithm(m, observed=[m.X], target_variables=[m.Y]), infr_params=infr.params, dtype=np.float64)
        res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0]
        mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0]

        assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf)
        assert np.allclose(var_gpy[:, 0], var_mf), (var_gpy[:, 0], var_mf)

        # noisy, diagonal
        mu_gpy, var_gpy = m_gpy.predict(Xt)

        infr2 = TransferInference(ModulePredictionAlgorithm(m, observed=[m.X], target_variables=[m.Y]), infr_params=infr.params, dtype=np.float64)
        infr2.inference_algorithm.model.Y.factor.svgp_predict.noise_free = False
        res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0]
        mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0]

        assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf)
        assert np.allclose(var_gpy[:, 0], var_mf), (var_gpy[:, 0], var_mf)
Code example #8
File: gpregression_test.py Project: pgmoren/MXFusion
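A sampling-based prediction test for `GPRegression`: after a MAP fit, a `GPRegressionSamplingPrediction` algorithm is attached under the name `gp_predict` and five posterior sample paths are drawn at the test inputs `Xt`.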
    def test_sampling_prediction(self):
        np.random.seed(0)
        X = np.random.rand(10, 3)
        Xt = np.random.rand(20, 3)
        Y = np.random.rand(10, 1)
        noise_var = np.random.rand(1)
        lengthscale = np.random.rand(3)
        variance = np.random.rand(1)

        m_gpy = GPy.models.GPRegression(X=X,
                                        Y=Y,
                                        kernel=GPy.kern.RBF(
                                            3,
                                            ARD=True,
                                            lengthscale=lengthscale,
                                            variance=variance),
                                        noise_var=noise_var)

        dtype = 'float64'
        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.noise_var = Variable(transformation=PositiveTransformation(),
                               initial_value=mx.nd.array(noise_var,
                                                         dtype=dtype))
        kernel = RBF(input_dim=3,
                     ARD=True,
                     variance=mx.nd.array(variance, dtype=dtype),
                     lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                     dtype=dtype)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, 1),
                                           dtype=dtype)

        observed = [m.X, m.Y]
        infr = Inference(MAP(model=m, observed=observed), dtype=dtype)

        loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype),
                           Y=mx.nd.array(Y, dtype=dtype))

        infr_pred = TransferInference(ModulePredictionAlgorithm(
            model=m, observed=[m.X], target_variables=[m.Y], num_samples=5),
                                      infr_params=infr.params)
        gp = m.Y.factor
        gp.attach_prediction_algorithms(
            targets=gp.output_names,
            conditionals=gp.input_names,
            algorithm=GPRegressionSamplingPrediction(gp._module_graph,
                                                     gp._extra_graphs[0],
                                                     [gp._module_graph.X]),
            alg_name='gp_predict')
        gp.gp_predict.diagonal_variance = False
        gp.gp_predict.jitter = 1e-6

        y_samples = infr_pred.run(X=mx.nd.array(Xt, dtype=dtype))[0].asnumpy()
Code example #9
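A smoke test that a model containing a `SparseGPRegression` module survives `Model.clone()`.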
    def test_module_clone(self):
        D, X, Y, Z, noise_var, lengthscale, variance = self.gen_data()
        dtype = 'float64'

        m = Model()
        m.N = Variable()
        kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype)
        m.Y = SparseGPRegression.define_variable(X=mx.nd.zeros((2, 3)), kernel=kernel, noise_var=mx.nd.ones((1,)), dtype=dtype)
        m.clone()
Code example #10
File: gp_test.py Project: vishalbelsare/MXFusion
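This test clones a model holding a `GaussianProcess` module and verifies that `log_pdf` evaluated on the clone matches a per-sample numpy reference built from `GPy.kern.RBF` and `multivariate_normal.logpdf`, including the sample-dimension bookkeeping.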
    def test_clone_gp(self, dtype, X, X_isSamples, rbf_lengthscale,
                      rbf_lengthscale_isSamples, rbf_variance,
                      rbf_variance_isSamples, rv, rv_isSamples, num_samples):
        X_mx = prepare_mxnet_array(X, X_isSamples, dtype)
        rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale,
                                                 rbf_lengthscale_isSamples,
                                                 dtype)
        rbf_variance_mx = prepare_mxnet_array(rbf_variance,
                                              rbf_variance_isSamples, dtype)
        rv_mx = prepare_mxnet_array(rv, rv_isSamples, dtype)
        rv_shape = rv.shape[1:] if rv_isSamples else rv.shape

        rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)

        m = Model()
        m.X_var = Variable(shape=(5, 2))
        m.Y = GaussianProcess.define_variable(X=m.X_var,
                                              kernel=rbf,
                                              shape=rv_shape,
                                              dtype=dtype)

        gp = m.clone().Y.factor

        variables = {
            gp.X.uuid: X_mx,
            gp.rbf_lengthscale.uuid: rbf_lengthscale_mx,
            gp.rbf_variance.uuid: rbf_variance_mx,
            gp.random_variable.uuid: rv_mx
        }
        log_pdf_rt = gp.log_pdf(F=mx.nd, variables=variables).asnumpy()

        log_pdf_np = []
        for i in range(num_samples):
            X_i = X[i] if X_isSamples else X
            lengthscale_i = rbf_lengthscale[
                i] if rbf_lengthscale_isSamples else rbf_lengthscale
            variance_i = rbf_variance[
                i] if rbf_variance_isSamples else rbf_variance
            rv_i = rv[i] if rv_isSamples else rv
            rbf_np = GPy.kern.RBF(input_dim=2, ARD=True)
            rbf_np.lengthscale = lengthscale_i
            rbf_np.variance = variance_i
            K_np = rbf_np.K(X_i)
            log_pdf_np.append(
                multivariate_normal.logpdf(rv_i[:, 0], mean=None, cov=K_np))
        log_pdf_np = np.array(log_pdf_np)
        isSamples_any = any([
            X_isSamples, rbf_lengthscale_isSamples, rbf_variance_isSamples,
            rv_isSamples
        ])
        assert np.issubdtype(log_pdf_rt.dtype, dtype)
        assert array_has_samples(mx.nd, log_pdf_rt) == isSamples_any
        if isSamples_any:
            assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
        assert np.allclose(log_pdf_np, log_pdf_rt)
Code example #11
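The SVGP counterpart of the sampling-prediction test: variational parameters are set manually, an `SVGPRegressionSamplingPrediction` algorithm is attached under `svgp_predict`, and sample paths are drawn at `Xt` with full covariance and a small jitter.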
    def test_sampling_prediction(self):
        np.random.seed(0)
        X = np.random.rand(10, 3)
        Y = np.random.rand(10, 1)
        Z = np.random.rand(3, 3)
        qU_mean = np.random.rand(3, 1)
        qU_cov_W = np.random.rand(3, 3)
        qU_cov_diag = np.random.rand(3,)
        noise_var = np.random.rand(1)
        lengthscale = np.random.rand(3)
        variance = np.random.rand(1)
        qU_chol = np.linalg.cholesky(qU_cov_W.dot(qU_cov_W.T) + np.diag(qU_cov_diag))[None, :, :]
        Xt = np.random.rand(5, 3)

        m_gpy = GPy.core.SVGP(X=X, Y=Y, Z=Z, kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance), likelihood=GPy.likelihoods.Gaussian(variance=noise_var))
        m_gpy.q_u_mean = qU_mean
        m_gpy.q_u_chol = GPy.util.choleskies.triang_to_flat(qU_chol)

        dtype = 'float64'
        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.Z = Variable(shape=(3, 3), initial_value=mx.nd.array(Z, dtype=dtype))
        m.noise_var = Variable(transformation=PositiveTransformation(), initial_value=mx.nd.array(noise_var, dtype=dtype))
        kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype)
        m.Y = SVGPRegression.define_variable(X=m.X, kernel=kernel, noise_var=m.noise_var, inducing_inputs=m.Z, shape=(m.N, 1), dtype=dtype)
        gp = m.Y.factor

        observed = [m.X, m.Y]
        infr = Inference(MAP(model=m, observed=observed), dtype=dtype)
        infr.initialize(X=X.shape, Y=Y.shape)
        infr.params[gp._extra_graphs[0].qU_mean] = mx.nd.array(qU_mean, dtype=dtype)
        infr.params[gp._extra_graphs[0].qU_cov_W] = mx.nd.array(qU_cov_W, dtype=dtype)
        infr.params[gp._extra_graphs[0].qU_cov_diag] = mx.nd.array(qU_cov_diag, dtype=dtype)

        loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype))

        # noise_free, diagonal
        infr_pred = TransferInference(ModulePredictionAlgorithm(model=m, observed=[m.X], target_variables=[m.Y], num_samples=5),
                                      infr_params=infr.params)
        gp = m.Y.factor
        gp.attach_prediction_algorithms(
            targets=gp.output_names, conditionals=gp.input_names,
            algorithm=SVGPRegressionSamplingPrediction(
                gp._module_graph, gp._extra_graphs[0], [gp._module_graph.X]),
            alg_name='svgp_predict')
        gp.svgp_predict.diagonal_variance = False
        gp.svgp_predict.jitter = 1e-6

        y_samples = infr_pred.run(X=mx.nd.array(Xt, dtype=dtype))[0].asnumpy()
Code example #12
File: gp_test.py Project: vishalbelsare/MXFusion
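This test draws GP prior samples through a mocked random generator and compares them with the numpy construction: Cholesky-factor the RBF kernel matrix and multiply it into the same standard-normal draws.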
    def test_draw_samples(self, dtype, X, X_isSamples, rbf_lengthscale,
                          rbf_lengthscale_isSamples, rbf_variance,
                          rbf_variance_isSamples, rv_shape, num_samples):
        X_mx = prepare_mxnet_array(X, X_isSamples, dtype)
        rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale,
                                                 rbf_lengthscale_isSamples,
                                                 dtype)
        rbf_variance_mx = prepare_mxnet_array(rbf_variance,
                                              rbf_variance_isSamples, dtype)

        rand = np.random.randn(num_samples, *rv_shape)
        rand_gen = MockMXNetRandomGenerator(
            mx.nd.array(rand.flatten(), dtype=dtype))

        rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)
        X_var = Variable(shape=(5, 2))
        gp = GaussianProcess.define_variable(X=X_var,
                                             kernel=rbf,
                                             shape=rv_shape,
                                             dtype=dtype,
                                             rand_gen=rand_gen).factor

        variables = {
            gp.X.uuid: X_mx,
            gp.rbf_lengthscale.uuid: rbf_lengthscale_mx,
            gp.rbf_variance.uuid: rbf_variance_mx
        }
        samples_rt = gp.draw_samples(F=mx.nd,
                                     variables=variables,
                                     num_samples=num_samples).asnumpy()

        samples_np = []
        for i in range(num_samples):
            X_i = X[i] if X_isSamples else X
            lengthscale_i = rbf_lengthscale[
                i] if rbf_lengthscale_isSamples else rbf_lengthscale
            variance_i = rbf_variance[
                i] if rbf_variance_isSamples else rbf_variance
            rand_i = rand[i]
            rbf_np = GPy.kern.RBF(input_dim=2, ARD=True)
            rbf_np.lengthscale = lengthscale_i
            rbf_np.variance = variance_i
            K_np = rbf_np.K(X_i)
            L_np = np.linalg.cholesky(K_np)
            sample_np = L_np.dot(rand_i)
            samples_np.append(sample_np)
        samples_np = np.array(samples_np)

        assert np.issubdtype(samples_rt.dtype, dtype)
        assert get_num_samples(mx.nd, samples_rt) == num_samples
        assert np.allclose(samples_np, samples_rt)
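For reference, the numpy side of this comparison can be pulled out into a standalone helper. A minimal sketch, not MXFusion API: the name `sample_gp_prior` and the optional `jitter` term are illustrative additions (the test above factors the raw kernel matrix directly).

import numpy as np

def sample_gp_prior(K, rand, jitter=0.0):
    # Samples from N(0, K): scale standard-normal draws by a Cholesky factor of K.
    # K: (N, N) kernel matrix; rand: (num_samples, N, D) standard-normal draws.
    L = np.linalg.cholesky(K + jitter * np.eye(K.shape[0]))
    return L @ rand  # broadcasts over the sample axis, matching L_np.dot(rand_i) above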
Code example #13
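This test checks that forward samples from the model produced by `gen_mxfusion_model_w_mean` coincide with samples drawn directly from a `GaussianProcess` that uses an RBF-plus-White kernel, the same Gluon mean network, and the same mock random generator.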
    def test_draw_samples_w_mean(self):
        D, X, Y, noise_var, lengthscale, variance = self.gen_data()
        dtype = 'float64'

        rand_gen = MockMXNetRandomGenerator(
            mx.nd.array(np.random.rand(20 * D), dtype=dtype))

        m, net = self.gen_mxfusion_model_w_mean(dtype, D, noise_var,
                                                lengthscale, variance,
                                                rand_gen)

        observed = [m.X]
        infr = Inference(ForwardSamplingAlgorithm(m,
                                                  observed,
                                                  num_samples=2,
                                                  target_variables=[m.Y]),
                         dtype=dtype)

        samples = infr.run(X=mx.nd.array(X, dtype=dtype),
                           Y=mx.nd.array(Y, dtype=dtype))[0].asnumpy()

        kern = RBF(3, True, name='rbf', dtype=dtype) + White(3, dtype=dtype)
        X_var = Variable(shape=(10, 3))
        mean_func = MXFusionGluonFunction(net,
                                          num_outputs=1,
                                          broadcastable=True)
        mean_var = mean_func(X_var)
        gp = GaussianProcess.define_variable(X=X_var,
                                             kernel=kern,
                                             mean=mean_var,
                                             shape=(10, D),
                                             dtype=dtype,
                                             rand_gen=rand_gen).factor

        variables = {
            gp.X.uuid:
            mx.nd.expand_dims(mx.nd.array(X, dtype=dtype), axis=0),
            gp.add_rbf_lengthscale.uuid:
            mx.nd.expand_dims(mx.nd.array(lengthscale, dtype=dtype), axis=0),
            gp.add_rbf_variance.uuid:
            mx.nd.expand_dims(mx.nd.array(variance, dtype=dtype), axis=0),
            gp.add_white_variance.uuid:
            mx.nd.expand_dims(mx.nd.array(noise_var, dtype=dtype), axis=0),
            mean_var.uuid:
            mx.nd.expand_dims(net(mx.nd.array(X, dtype=dtype)), axis=0)
        }
        samples_2 = gp.draw_samples(F=mx.nd,
                                    variables=variables,
                                    num_samples=2).asnumpy()

        assert np.allclose(samples, samples_2), (samples, samples_2)
Code example #14
File: gpregression_test.py Project: pgmoren/MXFusion
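The basic log-pdf test for `GPRegression`: the negated MAP loss must match `GPy.models.GPRegression.log_likelihood()` on identical data and parameters.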
    def test_log_pdf(self):
        np.random.seed(0)
        D = 2
        X = np.random.rand(10, 3)
        Y = np.random.rand(10, D)
        noise_var = np.random.rand(1)
        lengthscale = np.random.rand(3)
        variance = np.random.rand(1)

        m_gpy = GPy.models.GPRegression(X=X,
                                        Y=Y,
                                        kernel=GPy.kern.RBF(
                                            3,
                                            ARD=True,
                                            lengthscale=lengthscale,
                                            variance=variance),
                                        noise_var=noise_var)

        l_gpy = m_gpy.log_likelihood()

        dtype = 'float64'
        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.noise_var = Variable(transformation=PositiveTransformation(),
                               initial_value=mx.nd.array(noise_var,
                                                         dtype=dtype))
        kernel = RBF(input_dim=3,
                     ARD=True,
                     variance=mx.nd.array(variance, dtype=dtype),
                     lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                     dtype=dtype)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, D),
                                           dtype=dtype)

        observed = [m.X, m.Y]
        infr = Inference(MAP(model=m, observed=observed), dtype=dtype)

        loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype),
                           Y=mx.nd.array(Y, dtype=dtype))
        l_mf = -loss

        assert np.allclose(l_mf.asnumpy(), l_gpy)
Code example #15
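A helper that builds a `SparseGPRegression` model with a Gluon `Dense` mean function, inducing inputs `Z`, and a reduced log-pdf jitter, returning both the model and the network.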
    def gen_mxfusion_model_w_mean(self, dtype, D, Z, noise_var, lengthscale,
                                  variance, rand_gen=None):
        net = nn.HybridSequential(prefix='nn_')
        with net.name_scope():
            net.add(nn.Dense(D, flatten=False, activation="tanh",
                             in_units=3, dtype=dtype))
        net.initialize(mx.init.Xavier(magnitude=3))

        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.Z = Variable(shape=(3, 3), initial_value=mx.nd.array(Z, dtype=dtype))
        m.noise_var = Variable(transformation=PositiveTransformation(), initial_value=mx.nd.array(noise_var, dtype=dtype))
        kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype)
        m.mean_func = MXFusionGluonFunction(net, num_outputs=1,
                                            broadcastable=True)
        m.Y = SparseGPRegression.define_variable(X=m.X, kernel=kernel, noise_var=m.noise_var, mean=m.mean_func(m.X), inducing_inputs=m.Z, shape=(m.N, D), dtype=dtype)
        m.Y.factor.sgp_log_pdf.jitter = 1e-8
        return m, net
Code example #16
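This test exercises `SVGPRegression.log_pdf` when the noise variance is a full `(N, D)` positive variable rather than a scalar, running a single optimization step to check that the shape is handled.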
    def test_log_pdf_w_samples_of_noise_var(self):
        D, X, Y, Z, noise_var, lengthscale, variance, qU_mean, \
            qU_cov_W, qU_cov_diag, qU_chol = self.gen_data()
        dtype = 'float64'
        D = 2
        Y = np.random.rand(10, D)
        qU_mean = np.random.rand(3, D)

        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.Z = Variable(shape=(3, 3), initial_value=mx.nd.array(Z, dtype=dtype))
        m.noise_var = Variable(transformation=PositiveTransformation(),
                               shape=(m.N, D))
        kernel = RBF(input_dim=3,
                     ARD=True,
                     variance=mx.nd.array(variance, dtype=dtype),
                     lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                     dtype=dtype)
        m.Y = SVGPRegression.define_variable(X=m.X,
                                             kernel=kernel,
                                             noise_var=m.noise_var,
                                             inducing_inputs=m.Z,
                                             shape=(m.N, D),
                                             dtype=dtype)
        gp = m.Y.factor
        gp.svgp_log_pdf.jitter = 1e-8

        observed = [m.X, m.Y]
        infr = Inference(MAP(model=m, observed=observed), dtype=dtype)
        infr.initialize(X=X.shape, Y=Y.shape)
        infr.params[gp._extra_graphs[0].qU_mean] = mx.nd.array(qU_mean,
                                                               dtype=dtype)
        infr.params[gp._extra_graphs[0].qU_cov_W] = mx.nd.array(qU_cov_W,
                                                                dtype=dtype)
        infr.params[gp._extra_graphs[0].qU_cov_diag] = mx.nd.array(qU_cov_diag,
                                                                   dtype=dtype)

        loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype),
                           Y=mx.nd.array(Y, dtype=dtype),
                           max_iter=1)
Code example #17
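The SVGP log-pdf test: with the variational parameters pinned to known values, the negated MAP loss is compared against the variational lower bound returned by `GPy.core.SVGP.log_likelihood()`.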
    def test_log_pdf(self):
        np.random.seed(0)
        D = 2
        X = np.random.rand(10, 3)
        Y = np.random.rand(10, D)
        Z = np.random.rand(3, 3)
        qU_mean = np.random.rand(3, D)
        qU_cov_W = np.random.rand(3, 3)
        qU_cov_diag = np.random.rand(3,)
        noise_var = np.random.rand(1)
        lengthscale = np.random.rand(3)
        variance = np.random.rand(1)
        qU_chol = np.linalg.cholesky(qU_cov_W.dot(qU_cov_W.T) + np.diag(qU_cov_diag))[None, :, :]

        m_gpy = GPy.core.SVGP(X=X, Y=Y, Z=Z, kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance), likelihood=GPy.likelihoods.Gaussian(variance=noise_var))
        m_gpy.q_u_mean = qU_mean
        m_gpy.q_u_chol = GPy.util.choleskies.triang_to_flat(qU_chol)

        l_gpy = m_gpy.log_likelihood()

        dtype = 'float64'
        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.Z = Variable(shape=(3, 3), initial_value=mx.nd.array(Z, dtype=dtype))
        m.noise_var = Variable(transformation=PositiveTransformation(), initial_value=mx.nd.array(noise_var, dtype=dtype))
        kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype)
        m.Y = SVGPRegression.define_variable(X=m.X, kernel=kernel, noise_var=m.noise_var, inducing_inputs=m.Z, shape=(m.N, D), dtype=dtype)
        gp = m.Y.factor

        observed = [m.X, m.Y]
        infr = Inference(MAP(model=m, observed=observed), dtype=dtype)
        infr.initialize(X=X.shape, Y=Y.shape)
        infr.params[gp._extra_graphs[0].qU_mean] = mx.nd.array(qU_mean, dtype=dtype)
        infr.params[gp._extra_graphs[0].qU_cov_W] = mx.nd.array(qU_cov_W, dtype=dtype)
        infr.params[gp._extra_graphs[0].qU_cov_diag] = mx.nd.array(qU_cov_diag, dtype=dtype)

        loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype))
        l_mf = -loss

        assert np.allclose(l_mf.asnumpy(), l_gpy)
Code example #18
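A model-fitting routine from a model-based RL example: state/action pairs are wrapped in a `GPRegression` model with an ARD RBF kernel and optimized by gradient-based MAP inference.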
    def fit_model(self,
                  state_list,
                  action_list,
                  win_in,
                  verbose=True,
                  max_iter=1000):
        """
        Fits a Gaussian Process model to the state / action pairs passed in.
        This creates a model of the environment which is used during
        policy optimization instead of querying the environment directly.

        See mxfusion.gp_modules for additional types of GP models to fit,
        including Sparse GP and Stochastic Variational Inference Sparse GP.
        """
        X, Y = self.prepare_data(state_list, action_list, win_in)

        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, X.shape[-1]))
        m.noise_var = Variable(shape=(1, ),
                               transformation=PositiveTransformation(),
                               initial_value=0.01)
        m.kernel = RBF(input_dim=X.shape[-1],
                       variance=1,
                       lengthscale=1,
                       ARD=True)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=m.kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, Y.shape[-1]))
        m.Y.factor.gp_log_pdf.jitter = 1e-6

        infr = GradBasedInference(
            inference_algorithm=MAP(model=m, observed=[m.X, m.Y]))
        infr.run(X=mx.nd.array(X),
                 Y=mx.nd.array(Y),
                 max_iter=max_iter,
                 learning_rate=0.1,
                 verbose=verbose)
        return m, infr, X, Y
Code example #19
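A self-contained `GPRegression` model factory that performs its MXFusion imports locally and is parameterized by the initial lengthscale, variance, and noise variance.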
    def make_gpregr_model(self, lengthscale, variance, noise_var):
        from mxfusion.models import Model
        from mxfusion.components.variables import Variable, PositiveTransformation
        from mxfusion.modules.gp_modules import GPRegression
        from mxfusion.components.distributions.gp.kernels import RBF

        dtype = 'float64'
        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.noise_var = Variable(transformation=PositiveTransformation(),
                               initial_value=mx.nd.array(noise_var,
                                                         dtype=dtype))
        kernel = RBF(input_dim=3,
                     ARD=True,
                     variance=mx.nd.array(variance, dtype=dtype),
                     lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                     dtype=dtype)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, 1),
                                           dtype=dtype)
        return m
Code example #20
File: kernel_test.py Project: tdiethe/MXFusion
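A kernel-combination helper that nests one sum inside another: rbf + (rbf + linear).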
 def create_rbf_plus_linear():
     return RBF(input_dim, True, 1., 1., 'rbf', None, dtype) + (
         RBF(input_dim, True, 1., 1., 'rbf', None, dtype) +
         Linear(input_dim, True, 1, 'linear', None, dtype))
Code example #21
File: kernel_test.py Project: tdiethe/MXFusion
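A one-line RBF factory that picks up `input_dim`, `ARD`, and `dtype` from the enclosing module scope.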
 def create_rbf():
     return RBF(input_dim, ARD, 1., 1., 'rbf', None, dtype)
Code example #22
File: gp_test.py Project: vishalbelsare/MXFusion
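Like the plain draw-samples test but with a Gluon mean function: the numpy reference adds the network output to the Cholesky-based samples before comparing.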
    def test_draw_samples_w_mean(self, dtype, X, X_isSamples, rbf_lengthscale,
                                 rbf_lengthscale_isSamples, rbf_variance,
                                 rbf_variance_isSamples, rv_shape,
                                 num_samples):

        net = nn.HybridSequential(prefix='nn_')
        with net.name_scope():
            net.add(
                nn.Dense(rv_shape[-1],
                         flatten=False,
                         activation="tanh",
                         in_units=X.shape[-1],
                         dtype=dtype))
        net.initialize(mx.init.Xavier(magnitude=3))

        X_mx = prepare_mxnet_array(X, X_isSamples, dtype)
        rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale,
                                                 rbf_lengthscale_isSamples,
                                                 dtype)
        rbf_variance_mx = prepare_mxnet_array(rbf_variance,
                                              rbf_variance_isSamples, dtype)
        mean_mx = net(X_mx)
        mean_np = mean_mx.asnumpy()

        rand = np.random.randn(num_samples, *rv_shape)
        rand_gen = MockMXNetRandomGenerator(
            mx.nd.array(rand.flatten(), dtype=dtype))

        rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)
        X_var = Variable(shape=(5, 2))
        mean_func = MXFusionGluonFunction(net,
                                          num_outputs=1,
                                          broadcastable=True)
        mean_var = mean_func(X_var)
        gp = GaussianProcess.define_variable(X=X_var,
                                             kernel=rbf,
                                             shape=rv_shape,
                                             mean=mean_var,
                                             dtype=dtype,
                                             rand_gen=rand_gen).factor

        variables = {
            gp.X.uuid: X_mx,
            gp.rbf_lengthscale.uuid: rbf_lengthscale_mx,
            gp.rbf_variance.uuid: rbf_variance_mx,
            gp.mean.uuid: mean_mx
        }
        samples_rt = gp.draw_samples(F=mx.nd,
                                     variables=variables,
                                     num_samples=num_samples).asnumpy()

        samples_np = []
        for i in range(num_samples):
            X_i = X[i] if X_isSamples else X
            lengthscale_i = rbf_lengthscale[
                i] if rbf_lengthscale_isSamples else rbf_lengthscale
            variance_i = rbf_variance[
                i] if rbf_variance_isSamples else rbf_variance
            rand_i = rand[i]
            rbf_np = GPy.kern.RBF(input_dim=2, ARD=True)
            rbf_np.lengthscale = lengthscale_i
            rbf_np.variance = variance_i
            K_np = rbf_np.K(X_i)
            L_np = np.linalg.cholesky(K_np)
            sample_np = L_np.dot(rand_i)
            samples_np.append(sample_np)
        samples_np = np.array(samples_np) + mean_np

        assert np.issubdtype(samples_rt.dtype, dtype)
        assert get_num_samples(mx.nd, samples_rt) == num_samples
        assert np.allclose(samples_np, samples_rt)
Code example #23
File: gp_test.py Project: vishalbelsare/MXFusion
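The log-pdf counterpart of the test above: the network mean is subtracted from the random variable before evaluating the numpy `multivariate_normal.logpdf` reference.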
    def test_log_pdf_w_mean(self, dtype, X, X_isSamples, rbf_lengthscale,
                            rbf_lengthscale_isSamples, rbf_variance,
                            rbf_variance_isSamples, rv, rv_isSamples,
                            num_samples):

        net = nn.HybridSequential(prefix='nn_')
        with net.name_scope():
            net.add(
                nn.Dense(rv.shape[-1],
                         flatten=False,
                         activation="tanh",
                         in_units=X.shape[-1],
                         dtype=dtype))
        net.initialize(mx.init.Xavier(magnitude=3))

        X_mx = prepare_mxnet_array(X, X_isSamples, dtype)
        rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale,
                                                 rbf_lengthscale_isSamples,
                                                 dtype)
        rbf_variance_mx = prepare_mxnet_array(rbf_variance,
                                              rbf_variance_isSamples, dtype)
        rv_mx = prepare_mxnet_array(rv, rv_isSamples, dtype)
        rv_shape = rv.shape[1:] if rv_isSamples else rv.shape
        mean_mx = net(X_mx)
        mean_np = mean_mx.asnumpy()

        rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)
        X_var = Variable(shape=(5, 2))
        mean_func = MXFusionGluonFunction(net,
                                          num_outputs=1,
                                          broadcastable=True)
        mean_var = mean_func(X_var)
        gp = GaussianProcess.define_variable(X=X_var,
                                             kernel=rbf,
                                             shape=rv_shape,
                                             mean=mean_var,
                                             dtype=dtype).factor

        variables = {
            gp.X.uuid: X_mx,
            gp.rbf_lengthscale.uuid: rbf_lengthscale_mx,
            gp.rbf_variance.uuid: rbf_variance_mx,
            gp.random_variable.uuid: rv_mx,
            gp.mean.uuid: mean_mx
        }
        log_pdf_rt = gp.log_pdf(F=mx.nd, variables=variables).asnumpy()

        log_pdf_np = []
        for i in range(num_samples):
            X_i = X[i] if X_isSamples else X
            lengthscale_i = rbf_lengthscale[
                i] if rbf_lengthscale_isSamples else rbf_lengthscale
            variance_i = rbf_variance[
                i] if rbf_variance_isSamples else rbf_variance
            rv_i = rv[i] if rv_isSamples else rv
            rv_i = rv_i - mean_np[i] if X_isSamples else rv_i - mean_np[0]
            rbf_np = GPy.kern.RBF(input_dim=2, ARD=True)
            rbf_np.lengthscale = lengthscale_i
            rbf_np.variance = variance_i
            K_np = rbf_np.K(X_i)
            log_pdf_np.append(
                multivariate_normal.logpdf(rv_i[:, 0], mean=None, cov=K_np))
        log_pdf_np = np.array(log_pdf_np)
        isSamples_any = any([
            X_isSamples, rbf_lengthscale_isSamples, rbf_variance_isSamples,
            rv_isSamples
        ])
        assert np.issubdtype(log_pdf_rt.dtype, dtype)
        assert array_has_samples(mx.nd, log_pdf_rt) == isSamples_any
        if isSamples_any:
            assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
        assert np.allclose(log_pdf_np, log_pdf_rt)
Code example #24
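This test draws samples from a `ConditionalGaussianProcess` with mean functions on both the prediction and the conditioning inputs, rebuilding the conditional posterior in numpy with LAPACK triangular solves (`dtrtrs`).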
    def test_draw_samples_w_mean(self, dtype, X, X_isSamples, X_cond,
                                 X_cond_isSamples, Y_cond, Y_cond_isSamples,
                                 rbf_lengthscale, rbf_lengthscale_isSamples,
                                 rbf_variance, rbf_variance_isSamples,
                                 rv_shape, num_samples):
        net = nn.HybridSequential(prefix='nn_')
        with net.name_scope():
            net.add(
                nn.Dense(rv_shape[-1],
                         flatten=False,
                         activation="tanh",
                         in_units=X.shape[-1],
                         dtype=dtype))
        net.initialize(mx.init.Xavier(magnitude=3))

        from scipy.linalg.lapack import dtrtrs
        X_mx = prepare_mxnet_array(X, X_isSamples, dtype)
        X_cond_mx = prepare_mxnet_array(X_cond, X_cond_isSamples, dtype)
        Y_cond_mx = prepare_mxnet_array(Y_cond, Y_cond_isSamples, dtype)
        rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale,
                                                 rbf_lengthscale_isSamples,
                                                 dtype)
        rbf_variance_mx = prepare_mxnet_array(rbf_variance,
                                              rbf_variance_isSamples, dtype)
        mean_mx = net(X_mx)
        mean_np = mean_mx.asnumpy()
        mean_cond_mx = net(X_cond_mx)
        mean_cond_np = mean_cond_mx.asnumpy()

        rand = np.random.randn(num_samples, *rv_shape)
        rand_gen = MockMXNetRandomGenerator(
            mx.nd.array(rand.flatten(), dtype=dtype))

        rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)
        X_var = Variable(shape=(5, 2))
        X_cond_var = Variable(shape=(8, 2))
        Y_cond_var = Variable(shape=(8, 1))
        mean_func = MXFusionGluonFunction(net,
                                          num_outputs=1,
                                          broadcastable=True)
        mean_var = mean_func(X_var)
        mean_cond_var = mean_func(X_cond_var)
        gp = ConditionalGaussianProcess.define_variable(
            X=X_var,
            X_cond=X_cond_var,
            Y_cond=Y_cond_var,
            mean=mean_var,
            mean_cond=mean_cond_var,
            kernel=rbf,
            shape=rv_shape,
            dtype=dtype,
            rand_gen=rand_gen).factor

        variables = {
            gp.X.uuid: X_mx,
            gp.X_cond.uuid: X_cond_mx,
            gp.Y_cond.uuid: Y_cond_mx,
            gp.rbf_lengthscale.uuid: rbf_lengthscale_mx,
            gp.rbf_variance.uuid: rbf_variance_mx,
            gp.mean.uuid: mean_mx,
            gp.mean_cond.uuid: mean_cond_mx
        }
        samples_rt = gp.draw_samples(F=mx.nd,
                                     variables=variables,
                                     num_samples=num_samples).asnumpy()

        samples_np = []
        for i in range(num_samples):
            X_i = X[i] if X_isSamples else X
            X_cond_i = X_cond[i] if X_cond_isSamples else X_cond
            Y_cond_i = Y_cond[i] if Y_cond_isSamples else Y_cond
            Y_cond_i = Y_cond_i - mean_cond_np[
                i] if X_cond_isSamples else Y_cond_i - mean_cond_np[0]
            lengthscale_i = rbf_lengthscale[
                i] if rbf_lengthscale_isSamples else rbf_lengthscale
            variance_i = rbf_variance[
                i] if rbf_variance_isSamples else rbf_variance
            rand_i = rand[i]
            rbf_np = GPy.kern.RBF(input_dim=2, ARD=True)
            rbf_np.lengthscale = lengthscale_i
            rbf_np.variance = variance_i
            K_np = rbf_np.K(X_i)
            Kc_np = rbf_np.K(X_cond_i, X_i)
            Kcc_np = rbf_np.K(X_cond_i)

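            # Conditional posterior via triangular solves: mu = Kxc Kcc^{-1} y_cond, cov = Kxx - Kxc Kcc^{-1} Kcx.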
            L = np.linalg.cholesky(Kcc_np)
            LInvY = dtrtrs(L, Y_cond_i, lower=1, trans=0)[0]
            LinvKxt = dtrtrs(L, Kc_np, lower=1, trans=0)[0]

            mu = LinvKxt.T.dot(LInvY)
            cov = K_np - LinvKxt.T.dot(LinvKxt)
            L_cov_np = np.linalg.cholesky(cov)
            sample_np = mu + L_cov_np.dot(rand_i)
            samples_np.append(sample_np)
        samples_np = np.array(samples_np) + mean_np
        assert np.issubdtype(samples_rt.dtype, dtype)
        assert get_num_samples(mx.nd, samples_rt) == num_samples
        assert np.allclose(samples_np, samples_rt)
Code example #25
File: gpregression_test.py Project: pgmoren/MXFusion
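The full prediction test for `GPRegression`: each of the four combinations of noise-free/noisy and diagonal/full-covariance prediction is compared against the corresponding `GPy` call.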
    def test_prediction(self):
        np.random.seed(0)
        X = np.random.rand(10, 3)
        Xt = np.random.rand(20, 3)
        Y = np.random.rand(10, 1)
        noise_var = np.random.rand(1)
        lengthscale = np.random.rand(3)
        variance = np.random.rand(1)

        m_gpy = GPy.models.GPRegression(X=X,
                                        Y=Y,
                                        kernel=GPy.kern.RBF(
                                            3,
                                            ARD=True,
                                            lengthscale=lengthscale,
                                            variance=variance),
                                        noise_var=noise_var)

        dtype = 'float64'
        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.noise_var = Variable(transformation=PositiveTransformation(),
                               initial_value=mx.nd.array(noise_var,
                                                         dtype=dtype))
        kernel = RBF(input_dim=3,
                     ARD=True,
                     variance=mx.nd.array(variance, dtype=dtype),
                     lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                     dtype=dtype)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, 1),
                                           dtype=dtype)

        observed = [m.X, m.Y]
        infr = Inference(MAP(model=m, observed=observed), dtype=dtype)

        loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype),
                           Y=mx.nd.array(Y, dtype=dtype))

        # noise_free, diagonal
        mu_gpy, var_gpy = m_gpy.predict_noiseless(Xt)

        infr2 = TransferInference(ModulePredictionAlgorithm(
            m, observed=[m.X], target_variables=[m.Y]),
                                  infr_params=infr.params,
                                  dtype=np.float64)
        res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0]
        mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0]

        assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf)
        assert np.allclose(var_gpy[:, 0], var_mf), (var_gpy[:, 0], var_mf)

        # noisy, diagonal
        mu_gpy, var_gpy = m_gpy.predict(Xt)

        infr2 = TransferInference(ModulePredictionAlgorithm(
            m, observed=[m.X], target_variables=[m.Y]),
                                  infr_params=infr.params,
                                  dtype=np.float64)
        infr2.inference_algorithm.model.Y.factor.gp_predict.noise_free = False
        res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0]
        mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0]

        assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf)
        assert np.allclose(var_gpy[:, 0], var_mf), (var_gpy[:, 0], var_mf)

        # noise_free, full_cov
        mu_gpy, var_gpy = m_gpy.predict_noiseless(Xt, full_cov=True)

        infr2 = TransferInference(ModulePredictionAlgorithm(
            m, observed=[m.X], target_variables=[m.Y]),
                                  infr_params=infr.params,
                                  dtype=np.float64)
        infr2.inference_algorithm.model.Y.factor.gp_predict.diagonal_variance = False
        infr2.inference_algorithm.model.Y.factor.gp_predict.noise_free = True
        res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0]
        mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0]

        assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf)
        assert np.allclose(var_gpy, var_mf), (var_gpy, var_mf)

        # noisy, full_cov
        mu_gpy, var_gpy = m_gpy.predict(Xt, full_cov=True)

        infr2 = TransferInference(ModulePredictionAlgorithm(
            m, observed=[m.X], target_variables=[m.Y]),
                                  infr_params=infr.params,
                                  dtype=np.float64)
        infr2.inference_algorithm.model.Y.factor.gp_predict.diagonal_variance = False
        infr2.inference_algorithm.model.Y.factor.gp_predict.noise_free = False
        res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0]
        mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0]
        print((var_gpy, var_mf))

        assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf)
        assert np.allclose(var_gpy, var_mf), (var_gpy, var_mf)
Code example #26
File: gpregression_test.py Project: pgmoren/MXFusion
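This test verifies that forward samples from the `GPRegression` module equal samples from a `GaussianProcess` whose kernel is the RBF plus a White kernel carrying the noise variance, given the same mock random generator.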
    def test_draw_samples(self):
        np.random.seed(0)
        X = np.random.rand(10, 3)
        Y = np.random.rand(10, 1)
        noise_var = np.random.rand(1)
        lengthscale = np.random.rand(3)
        variance = np.random.rand(1)
        dtype = 'float64'

        rand_gen = MockMXNetRandomGenerator(
            mx.nd.array(np.random.rand(20), dtype=dtype))

        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.noise_var = Variable(transformation=PositiveTransformation(),
                               initial_value=mx.nd.array(noise_var,
                                                         dtype=dtype))
        kernel = RBF(input_dim=3,
                     ARD=True,
                     variance=mx.nd.array(variance, dtype=dtype),
                     lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                     dtype=dtype)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, 1),
                                           dtype=dtype,
                                           rand_gen=rand_gen)

        observed = [m.X]
        infr = Inference(ForwardSamplingAlgorithm(m,
                                                  observed,
                                                  num_samples=2,
                                                  target_variables=[m.Y]),
                         dtype=dtype)

        samples = infr.run(X=mx.nd.array(X, dtype=dtype),
                           Y=mx.nd.array(Y, dtype=dtype))[0].asnumpy()

        kern = RBF(3, True, name='rbf', dtype=dtype) + White(3, dtype=dtype)
        X_var = Variable(shape=(10, 3))
        gp = GaussianProcess.define_variable(X=X_var,
                                             kernel=kern,
                                             shape=(10, 1),
                                             dtype=dtype,
                                             rand_gen=rand_gen).factor

        variables = {
            gp.X.uuid:
            mx.nd.expand_dims(mx.nd.array(X, dtype=dtype), axis=0),
            gp.add_rbf_lengthscale.uuid:
            mx.nd.expand_dims(mx.nd.array(lengthscale, dtype=dtype), axis=0),
            gp.add_rbf_variance.uuid:
            mx.nd.expand_dims(mx.nd.array(variance, dtype=dtype), axis=0),
            gp.add_white_variance.uuid:
            mx.nd.expand_dims(mx.nd.array(noise_var, dtype=dtype), axis=0)
        }
        samples_2 = gp.draw_samples(F=mx.nd,
                                    variables=variables,
                                    num_samples=2).asnumpy()

        assert np.allclose(samples, samples_2), (samples, samples_2)
Code example #27
File: kernel_test.py Project: tdiethe/MXFusion
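A sum-kernel helper in which both addends select explicit active dimensions ([2, 3] for the RBF and [4, 1, 5] for the linear kernel).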
 def create_rbf_plus_linear():
     return RBF(2, True, 1., 1., 'rbf', [2, 3], dtype) + Linear(
         3, True, 1, 'linear', [4, 1, 5], dtype)
Code example #28
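The log-pdf test for `ConditionalGaussianProcess`: the numpy reference forms the conditional mean and covariance via triangular solves and evaluates `multivariate_normal.logpdf` per sample.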
    def test_log_pdf(self, dtype, X, X_isSamples, X_cond, X_cond_isSamples,
                     Y_cond, Y_cond_isSamples, rbf_lengthscale,
                     rbf_lengthscale_isSamples, rbf_variance,
                     rbf_variance_isSamples, rv, rv_isSamples, num_samples):
        from scipy.linalg.lapack import dtrtrs
        X_mx = prepare_mxnet_array(X, X_isSamples, dtype)
        X_cond_mx = prepare_mxnet_array(X_cond, X_cond_isSamples, dtype)
        Y_cond_mx = prepare_mxnet_array(Y_cond, Y_cond_isSamples, dtype)
        rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale,
                                                 rbf_lengthscale_isSamples,
                                                 dtype)
        rbf_variance_mx = prepare_mxnet_array(rbf_variance,
                                              rbf_variance_isSamples, dtype)
        rv_mx = prepare_mxnet_array(rv, rv_isSamples, dtype)
        rv_shape = rv.shape[1:] if rv_isSamples else rv.shape

        rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)
        X_var = Variable(shape=(5, 2))
        X_cond_var = Variable(shape=(8, 2))
        Y_cond_var = Variable(shape=(8, 1))
        gp = ConditionalGaussianProcess.define_variable(X=X_var,
                                                        X_cond=X_cond_var,
                                                        Y_cond=Y_cond_var,
                                                        kernel=rbf,
                                                        shape=rv_shape,
                                                        dtype=dtype).factor

        variables = {
            gp.X.uuid: X_mx,
            gp.X_cond.uuid: X_cond_mx,
            gp.Y_cond.uuid: Y_cond_mx,
            gp.rbf_lengthscale.uuid: rbf_lengthscale_mx,
            gp.rbf_variance.uuid: rbf_variance_mx,
            gp.random_variable.uuid: rv_mx
        }
        log_pdf_rt = gp.log_pdf(F=mx.nd, variables=variables).asnumpy()

        log_pdf_np = []
        for i in range(num_samples):
            X_i = X[i] if X_isSamples else X
            X_cond_i = X_cond[i] if X_cond_isSamples else X_cond
            Y_cond_i = Y_cond[i] if Y_cond_isSamples else Y_cond
            lengthscale_i = rbf_lengthscale[
                i] if rbf_lengthscale_isSamples else rbf_lengthscale
            variance_i = rbf_variance[
                i] if rbf_variance_isSamples else rbf_variance
            rv_i = rv[i] if rv_isSamples else rv
            rbf_np = GPy.kern.RBF(input_dim=2, ARD=True)
            rbf_np.lengthscale = lengthscale_i
            rbf_np.variance = variance_i
            K_np = rbf_np.K(X_i)
            Kc_np = rbf_np.K(X_cond_i, X_i)
            Kcc_np = rbf_np.K(X_cond_i)

            L = np.linalg.cholesky(Kcc_np)
            LInvY = dtrtrs(L, Y_cond_i, lower=1, trans=0)[0]
            LinvKxt = dtrtrs(L, Kc_np, lower=1, trans=0)[0]

            mu = LinvKxt.T.dot(LInvY)
            cov = K_np - LinvKxt.T.dot(LinvKxt)
            log_pdf_np.append(
                multivariate_normal.logpdf(rv_i[:, 0], mean=mu[:, 0], cov=cov))
        log_pdf_np = np.array(log_pdf_np)
        isSamples_any = any([
            X_isSamples, rbf_lengthscale_isSamples, rbf_variance_isSamples,
            rv_isSamples
        ])
        assert np.issubdtype(log_pdf_rt.dtype, dtype)
        assert array_has_samples(mx.nd, log_pdf_rt) == isSamples_any
        if isSamples_any:
            assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
        assert np.allclose(log_pdf_np, log_pdf_rt)
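The conditional-posterior algebra shared by these tests can be restated as a compact helper. A sketch only: the function name `conditional_gaussian` is illustrative, and `scipy.linalg.solve_triangular` stands in for the raw LAPACK `dtrtrs` wrapper used above; `K_cx` follows the test code's convention `K(X_cond, X)`.

import numpy as np
from scipy.linalg import solve_triangular

def conditional_gaussian(K_xx, K_cx, K_cc, y_c):
    # Posterior of a GP at X given noise-free observations y_c at X_cond:
    #   mu  = K_xc K_cc^{-1} y_c
    #   cov = K_xx - K_xc K_cc^{-1} K_cx
    L = np.linalg.cholesky(K_cc)
    A = solve_triangular(L, K_cx, lower=True)  # L^{-1} K_cx
    b = solve_triangular(L, y_c, lower=True)   # L^{-1} y_c
    return A.T @ b, K_xx - A.T @ A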
Code example #29
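The draw-samples counterpart of the previous test: conditional posterior samples mu + L_cov z are reproduced in numpy from the same mock random draws.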
    def test_draw_samples(self, dtype, X, X_isSamples, X_cond,
                          X_cond_isSamples, Y_cond, Y_cond_isSamples,
                          rbf_lengthscale, rbf_lengthscale_isSamples,
                          rbf_variance, rbf_variance_isSamples, rv_shape,
                          num_samples):
        from scipy.linalg.lapack import dtrtrs
        X_mx = prepare_mxnet_array(X, X_isSamples, dtype)
        X_cond_mx = prepare_mxnet_array(X_cond, X_cond_isSamples, dtype)
        Y_cond_mx = prepare_mxnet_array(Y_cond, Y_cond_isSamples, dtype)
        rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale,
                                                 rbf_lengthscale_isSamples,
                                                 dtype)
        rbf_variance_mx = prepare_mxnet_array(rbf_variance,
                                              rbf_variance_isSamples, dtype)

        rand = np.random.randn(num_samples, *rv_shape)
        rand_gen = MockMXNetRandomGenerator(
            mx.nd.array(rand.flatten(), dtype=dtype))

        rbf = RBF(2, True, 1., 1., 'rbf', None, dtype)
        X_var = Variable(shape=(5, 2))
        X_cond_var = Variable(shape=(8, 2))
        Y_cond_var = Variable(shape=(8, 1))
        gp = ConditionalGaussianProcess.define_variable(
            X=X_var,
            X_cond=X_cond_var,
            Y_cond=Y_cond_var,
            kernel=rbf,
            shape=rv_shape,
            dtype=dtype,
            rand_gen=rand_gen).factor

        variables = {
            gp.X.uuid: X_mx,
            gp.X_cond.uuid: X_cond_mx,
            gp.Y_cond.uuid: Y_cond_mx,
            gp.rbf_lengthscale.uuid: rbf_lengthscale_mx,
            gp.rbf_variance.uuid: rbf_variance_mx
        }
        samples_rt = gp.draw_samples(F=mx.nd,
                                     variables=variables,
                                     num_samples=num_samples).asnumpy()

        samples_np = []
        for i in range(num_samples):
            X_i = X[i] if X_isSamples else X
            X_cond_i = X_cond[i] if X_cond_isSamples else X_cond
            Y_cond_i = Y_cond[i] if Y_cond_isSamples else Y_cond
            lengthscale_i = rbf_lengthscale[
                i] if rbf_lengthscale_isSamples else rbf_lengthscale
            variance_i = rbf_variance[
                i] if rbf_variance_isSamples else rbf_variance
            rand_i = rand[i]
            rbf_np = GPy.kern.RBF(input_dim=2, ARD=True)
            rbf_np.lengthscale = lengthscale_i
            rbf_np.variance = variance_i
            K_np = rbf_np.K(X_i)
            Kc_np = rbf_np.K(X_cond_i, X_i)
            Kcc_np = rbf_np.K(X_cond_i)

            L = np.linalg.cholesky(Kcc_np)
            LInvY = dtrtrs(L, Y_cond_i, lower=1, trans=0)[0]
            LinvKxt = dtrtrs(L, Kc_np, lower=1, trans=0)[0]

            mu = LinvKxt.T.dot(LInvY)
            cov = K_np - LinvKxt.T.dot(LinvKxt)
            L_cov_np = np.linalg.cholesky(cov)
            sample_np = mu + L_cov_np.dot(rand_i)
            samples_np.append(sample_np)
        samples_np = np.array(samples_np)
        assert np.issubdtype(samples_rt.dtype, dtype)
        assert get_num_samples(mx.nd, samples_rt) == num_samples
        assert np.allclose(samples_np, samples_rt)