Example #1
    def test_score_function_rb_minibatch(self):
        dtype = get_default_dtype()
        x = np.random.rand(1000, 1)
        y = np.random.rand(1000, 1)
        x_nd, y_nd = mx.nd.array(x, dtype=dtype), mx.nd.array(y, dtype=dtype)

        self.net = self.make_net()
        self.net(x_nd)

        m = self.make_bnn_model(self.net)

        from mxfusion.inference.meanfield import create_Gaussian_meanfield
        from mxfusion.inference.grad_based_inference import GradBasedInference
        from mxfusion.inference import MinibatchInferenceLoop
        observed = [m.y, m.x]
        q = create_Gaussian_meanfield(model=m, observed=observed)
        alg = ScoreFunctionRBInference(num_samples=3,
                                       model=m,
                                       observed=observed,
                                       posterior=q)
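        # rv_scaling rescales the log-likelihood of m.y by N / batch_size
        # (1000 / 100 = 10) so each minibatch gradient remains an unbiased
        # estimate of the full-batch objective.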
        infr = GradBasedInference(inference_algorithm=alg,
                                  grad_loop=MinibatchInferenceLoop(
                                      batch_size=100, rv_scaling={m.y: 10}))

        infr.initialize(y=(100, 1), x=(100, 1))
        infr.run(max_iter=1, learning_rate=1e-2, y=y_nd, x=x_nd)
Example #2
    def get_ppca_grad(self, x_train, inf_type, num_samples=100):
        import random
        dtype = get_default_dtype()
        random.seed(0)
        np.random.seed(0)
        mx.random.seed(0)
        m = self.make_ppca_model()
        q = self.make_ppca_post(m)
        observed = [m.x]
        alg = inf_type(num_samples=num_samples,
                       model=m,
                       posterior=q,
                       observed=observed)

        from mxfusion.inference.grad_based_inference import GradBasedInference
        from mxfusion.inference import BatchInferenceLoop

        infr = GradBasedInference(inference_algorithm=alg,
                                  grad_loop=BatchInferenceLoop())
        infr.initialize(x=mx.nd.array(x_train, dtype=dtype))
        infr.run(max_iter=1,
                 learning_rate=1e-2,
                 x=mx.nd.array(x_train, dtype=dtype),
                 verbose=False)
        return infr, q.post_mean
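A hedged sketch of how the helper above might be called from the same test class; both algorithm classes appear in the other examples here, and the data shape is a placeholder:

    def test_compare_estimators_sketch(self):
        # Hypothetical test: run two gradient estimators on identical data.
        x_train = np.random.rand(100, 5)  # placeholder shape
        infr_sf, mean_sf = self.get_ppca_grad(x_train, ScoreFunctionInference)
        infr_svi, mean_svi = self.get_ppca_grad(x_train, StochasticVariationalInference)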
Example #3
    def test_with_samples(self):
        from mxfusion.common import config
        config.DEFAULT_DTYPE = 'float64'
        dtype = 'float64'

        D, X, Y, noise_var, lengthscale, variance = self.gen_data()

        m = Model()
        m.N = Variable()
        m.X = Normal.define_variable(mean=0, variance=1, shape=(m.N, 3))
        m.noise_var = Variable(transformation=PositiveTransformation(),
                               initial_value=mx.nd.array(noise_var,
                                                         dtype=dtype))
        kernel = RBF(input_dim=3,
                     ARD=True,
                     variance=mx.nd.array(variance, dtype=dtype),
                     lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                     dtype=dtype)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, D))

        q = create_Gaussian_meanfield(model=m, observed=[m.Y])

        infr = GradBasedInference(
            inference_algorithm=StochasticVariationalInference(
                model=m, posterior=q, num_samples=10, observed=[m.Y]))
        infr.run(Y=mx.nd.array(Y, dtype=dtype),
                 max_iter=2,
                 learning_rate=0.1,
                 verbose=True)

        infr2 = Inference(
            ForwardSamplingAlgorithm(model=m, observed=[m.X], num_samples=5))
        infr2.run(X=mx.nd.array(X, dtype=dtype))

        infr_pred = TransferInference(ModulePredictionAlgorithm(
            model=m, observed=[m.X], target_variables=[m.Y]),
                                      infr_params=infr.params)
        xt = np.random.rand(13, 3)
        res = infr_pred.run(X=mx.nd.array(xt, dtype=dtype))[0]

        gp = m.Y.factor
        gp.attach_prediction_algorithms(
            targets=gp.output_names,
            conditionals=gp.input_names,
            algorithm=GPRegressionSamplingPrediction(gp._module_graph,
                                                     gp._extra_graphs[0],
                                                     [gp._module_graph.X]),
            alg_name='gp_predict')
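        # Use the full predictive covariance (not just its diagonal) and add
        # a small jitter for numerical stability of the Cholesky factorization.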
        gp.gp_predict.diagonal_variance = False
        gp.gp_predict.jitter = 1e-6

        infr_pred2 = TransferInference(ModulePredictionAlgorithm(
            model=m, observed=[m.X], target_variables=[m.Y]),
                                       infr_params=infr.params)
        res = infr_pred2.run(X=mx.nd.array(xt, dtype=dtype))[0]
Example #4
    def test_inference_outcome_passing_success(self):
        observed = [self.m.y, self.m.x]
        alg = MAP(model=self.m, observed=observed)
        infr = GradBasedInference(inference_algorithm=alg)
        infr.run(y=mx.nd.array(np.random.rand(self.D)),
                 x=mx.nd.array(np.random.rand(self.D)),
                 max_iter=1)

        infr2 = VariationalPosteriorForwardSampling(10, [self.m.x], infr,
                                                    [self.m.y])
        infr2.run(x=mx.nd.array(np.random.rand(self.D)))
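A short hedged continuation of the test above, assuming `run(...)` returns indexable outcomes, as in the prediction calls of Example #3:

        # Index the returned outcomes to get the drawn samples.
        samples = infr2.run(x=mx.nd.array(np.random.rand(self.D)))[0]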
Example #5
    def test_one_map_example(self):
        """
        Tests that a simple MAP inference example runs end to end.
        """
        from mxfusion.inference.map import MAP
        from mxfusion.inference.grad_based_inference import GradBasedInference
        from mxfusion.inference import BatchInferenceLoop
        observed = [self.m.y]
        alg = MAP(model=self.m, observed=observed)
        infr = GradBasedInference(inference_algorithm=alg,
                                  grad_loop=BatchInferenceLoop())
        infr.run(y=mx.nd.array(np.random.rand(10)), max_iter=10)
Example #6
    def test_change_default_dtype(self):
        from mxfusion.common import config
        config.DEFAULT_DTYPE = 'float64'

        np.random.seed(0)
        mean_groundtruth = 3.
        variance_groundtruth = 5.
        N = 100
        data = np.random.randn(N)*np.sqrt(variance_groundtruth) + mean_groundtruth

        m = Model()
        m.mu = Variable()
        m.s = Variable(transformation=PositiveTransformation())
        m.Y = Normal.define_variable(mean=m.mu, variance=m.s, shape=(N,))

        infr = GradBasedInference(inference_algorithm=MAP(model=m, observed=[m.Y]))
        infr.run(Y=mx.nd.array(data, dtype='float64'), learning_rate=0.1, max_iter=2)
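        # Read the MAP estimates back out; infr.params is indexable by model
        # variable (as in the MXFusion getting-started tutorial).
        mean_estimated = infr.params[m.mu].asnumpy()
        variance_estimated = infr.params[m.s].asnumpy()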

        config.DEFAULT_DTYPE = 'float32'
Example #7
    def test_inference_basic_run(self, v2, v3):
        # TODO test correctness

        m = self.make_model()
        observed = [m.v2, m.v3]
        target_variables = [m.v5]

        infr = GradBasedInference(
            ExpectationScoreFunctionAlgorithm(
                m, observed, num_samples=10,
                target_variables=target_variables))

        infr.run(max_iter=1, v2=v2, v3=v3, verbose=True)
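        # TransferInference reuses the parameters learned by the run above
        # (passed via infr_params) instead of re-initializing them.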

        infr2 = TransferInference(ExpectationAlgorithm(
            m, observed, num_samples=10, target_variables=target_variables),
                                  infr_params=infr.params)

        infr2.run(max_iter=1, v2=v2, v3=v3, verbose=True)
Example #8
    def fit_model(self,
                  state_list,
                  action_list,
                  win_in,
                  verbose=True,
                  max_iter=1000):
        """
        Fits a Gaussian Process model to the state / action pairs passed in.
        This creates a model of the environment which is used during
        policy optimization instead of querying the environment directly.

        See mxfusion.gp_modules for additional types of GP models to fit,
        including Sparse GP and Stochastic Variational Inference Sparse GP.
        """
        X, Y = self.prepare_data(state_list, action_list, win_in)

        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, X.shape[-1]))
        m.noise_var = Variable(shape=(1, ),
                               transformation=PositiveTransformation(),
                               initial_value=0.01)
        m.kernel = RBF(input_dim=X.shape[-1],
                       variance=1,
                       lengthscale=1,
                       ARD=True)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=m.kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, Y.shape[-1]))
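        # A small jitter keeps the Cholesky factorization in the GP log-pdf
        # numerically stable.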
        m.Y.factor.gp_log_pdf.jitter = 1e-6

        infr = GradBasedInference(
            inference_algorithm=MAP(model=m, observed=[m.X, m.Y]))
        infr.run(X=mx.nd.array(X),
                 Y=mx.nd.array(Y),
                 max_iter=max_iter,
                 learning_rate=0.1,
                 verbose=verbose)
        return m, infr, X, Y
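A hedged usage sketch; `agent` stands in for whatever object exposes `fit_model`, and the rollout shapes are placeholders only:

    # One placeholder rollout: 50 time steps, 4 state dims, 1 action dim.
    states = [np.random.rand(50, 4)]
    actions = [np.random.rand(50, 1)]
    model, infr, X, Y = agent.fit_model(states, actions, win_in=1,
                                        verbose=False, max_iter=10)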
Example #9
    def test_score_function_batch(self):
        x = np.random.rand(1000, 1)
        y = np.random.rand(1000, 1)
        x_nd, y_nd = mx.nd.array(x), mx.nd.array(y)

        self.net = self.make_net()
        self.net(x_nd)

        m = self.make_bnn_model(self.net)

        from mxfusion.inference.meanfield import create_Gaussian_meanfield
        from mxfusion.inference.grad_based_inference import GradBasedInference
        from mxfusion.inference import BatchInferenceLoop
        observed = [m.y, m.x]
        q = create_Gaussian_meanfield(model=m, observed=observed)
        alg = ScoreFunctionInference(num_samples=3,
                                     model=m,
                                     observed=observed,
                                     posterior=q)
        infr = GradBasedInference(inference_algorithm=alg,
                                  grad_loop=BatchInferenceLoop())
        infr.initialize(y=y_nd, x=x_nd)
        infr.run(max_iter=1, learning_rate=1e-2, y=y_nd, x=x_nd)