Example #1
    def test_log_pdf(self, dtype, mean, mean_isSamples, var, var_isSamples,
                     rv, rv_isSamples, num_samples):
        """Check that Normal.log_pdf matches scipy.stats.norm.logpdf."""
        from scipy.stats import norm

        isSamples_any = any([mean_isSamples, var_isSamples, rv_isSamples])
        rv_shape = rv.shape[1:] if rv_isSamples else rv.shape
        n_dim = 1 + len(rv.shape) if isSamples_any and not rv_isSamples else len(rv.shape)
        mean_np = numpy_array_reshape(mean, mean_isSamples, n_dim)
        var_np = numpy_array_reshape(var, var_isSamples, n_dim)
        rv_np = numpy_array_reshape(rv, rv_isSamples, n_dim)
        log_pdf_np = norm.logpdf(rv_np, mean_np, np.sqrt(var_np))
        normal = Normal.define_variable(shape=rv_shape, dtype=dtype).factor
        mean_mx = mx.nd.array(mean, dtype=dtype)
        if not mean_isSamples:
            mean_mx = add_sample_dimension(mx.nd, mean_mx)
        var_mx = mx.nd.array(var, dtype=dtype)
        if not var_isSamples:
            var_mx = add_sample_dimension(mx.nd, var_mx)
        rv_mx = mx.nd.array(rv, dtype=dtype)
        if not rv_isSamples:
            rv_mx = add_sample_dimension(mx.nd, rv_mx)
        variables = {normal.mean.uuid: mean_mx, normal.variance.uuid: var_mx, normal.random_variable.uuid: rv_mx}
        log_pdf_rt = normal.log_pdf(F=mx.nd, variables=variables)

        assert np.issubdtype(log_pdf_rt.dtype, dtype)
        assert array_has_samples(mx.nd, log_pdf_rt) == isSamples_any
        if isSamples_any:
            assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
        if np.issubdtype(dtype, np.float64):
            rtol, atol = 1e-7, 1e-10
        else:
            rtol, atol = 1e-4, 1e-5
        assert np.allclose(log_pdf_np, log_pdf_rt.asnumpy(), rtol=rtol, atol=atol)
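
The reference values come from scipy, which parameterizes the Gaussian by its standard deviation rather than its variance, hence the np.sqrt(var_np) above. A minimal, self-contained check of that identity (plain numpy/scipy, no MXFusion needed):

    import numpy as np
    from scipy.stats import norm

    mean, var = 0.5, 2.0
    x = np.linspace(-3.0, 3.0, 5)

    # scipy's `scale` argument is the standard deviation, so pass sqrt(var).
    manual = -0.5 * np.log(2 * np.pi * var) - (x - mean) ** 2 / (2 * var)
    assert np.allclose(manual, norm.logpdf(x, loc=mean, scale=np.sqrt(var)))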
Example #2
    def test_draw_samples(self, dtype, mean, mean_isSamples, var,
                          var_isSamples, rv_shape, num_samples):
        """Check draw_samples against mean + eps * sqrt(var) using a mocked RNG."""
        n_dim = 1 + len(rv_shape)
        mean_np = numpy_array_reshape(mean, mean_isSamples, n_dim)
        var_np = numpy_array_reshape(var, var_isSamples, n_dim)

        rand = np.random.randn(num_samples, *rv_shape)
        rv_samples_np = mean_np + rand * np.sqrt(var_np)

        rand_gen = MockMXNetRandomGenerator(mx.nd.array(rand.flatten(), dtype=dtype))

        normal = Normal.define_variable(shape=rv_shape, dtype=dtype,
                                        rand_gen=rand_gen).factor
        mean_mx = mx.nd.array(mean, dtype=dtype)
        if not mean_isSamples:
            mean_mx = add_sample_dimension(mx.nd, mean_mx)
        var_mx = mx.nd.array(var, dtype=dtype)
        if not var_isSamples:
            var_mx = add_sample_dimension(mx.nd, var_mx)
        variables = {normal.mean.uuid: mean_mx, normal.variance.uuid: var_mx}
        rv_samples_rt = normal.draw_samples(
            F=mx.nd, variables=variables, num_samples=num_samples)

        assert np.issubdtype(rv_samples_rt.dtype, dtype)
        assert array_has_samples(mx.nd, rv_samples_rt)
        assert get_num_samples(mx.nd, rv_samples_rt) == num_samples

        if np.issubdtype(dtype, np.float64):
            rtol, atol = 1e-7, 1e-10
        else:
            rtol, atol = 1e-4, 1e-5
        assert np.allclose(rv_samples_np, rv_samples_rt.asnumpy(), rtol=rtol, atol=atol)
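
The mocked generator simply replays a fixed draw of eps ~ N(0, 1), so the expected samples are the location-scale transform x = mean + eps * sqrt(var). A self-contained sketch of that transform and its effect on the sample moments (numpy only; the sample size and tolerances are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    mean, var = 0.5, 2.0
    eps = rng.standard_normal(100000)  # eps ~ N(0, 1)

    # Location-scale transform: x ~ N(mean, var).
    x = mean + eps * np.sqrt(var)
    assert abs(x.mean() - mean) < 0.05
    assert abs(x.var() - var) < 0.05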
Example #3
    def test_draw_samples_non_mock(self, plot=False):
        """Make sure the non-mock (default MXNet) sampler works as well."""
        dtype = np.float32
        num_samples = 100000

        mean = np.array([0.5])
        variance = np.array([2])

        rv_shape = (1,)

        mean_mx = add_sample_dimension(mx.nd, mx.nd.array(mean, dtype=dtype))
        variance_mx = add_sample_dimension(mx.nd, mx.nd.array(variance, dtype=dtype))

        rand_gen = None  # fall back to the default MXNet random generator
        var = Normal.define_variable(shape=rv_shape, rand_gen=rand_gen, dtype=dtype).factor
        variables = {var.mean.uuid: mean_mx, var.variance.uuid: variance_mx}
        rv_samples_rt = var.draw_samples(F=mx.nd, variables=variables, num_samples=num_samples)

        assert array_has_samples(mx.nd, rv_samples_rt)
        assert get_num_samples(mx.nd, rv_samples_rt) == num_samples
        assert rv_samples_rt.dtype == dtype

        from scipy.stats import norm
        if plot:
            plot_univariate(samples=rv_samples_rt, dist=norm, loc=mean[0], scale=np.sqrt(variance[0]))

        mean_est, scale_est = norm.fit(rv_samples_rt.asnumpy().ravel())
        mean_tol = 1e-1
        variance_tol = 1e-1
        assert np.abs(mean[0] - mean_est) < mean_tol
        assert np.abs(variance[0] - scale_est ** 2) < variance_tol
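
scipy's norm.fit returns the maximum-likelihood (loc, scale) pair, i.e. the sample mean and standard deviation; squaring scale_est recovers the variance, which is what the last assertion compares against the ground truth. A standalone illustration of the same check:

    import numpy as np
    from scipy.stats import norm

    rng = np.random.default_rng(0)
    samples = rng.normal(loc=0.5, scale=np.sqrt(2.0), size=100000)

    # norm.fit returns the MLE (loc, scale) = (sample mean, sample std).
    loc_est, scale_est = norm.fit(samples)
    assert abs(loc_est - 0.5) < 1e-1
    assert abs(scale_est ** 2 - 2.0) < 1e-1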
Example #4
    def test_with_samples(self):
        """Run SVI, forward sampling, and prediction on a GP module with sampled inputs."""
        from mxfusion.common import config
        config.DEFAULT_DTYPE = 'float64'
        dtype = 'float64'

        D, X, Y, noise_var, lengthscale, variance = self.gen_data()

        m = Model()
        m.N = Variable()
        m.X = Normal.define_variable(mean=0, variance=1, shape=(m.N, 3))
        m.noise_var = Variable(transformation=PositiveTransformation(),
                               initial_value=mx.nd.array(noise_var,
                                                         dtype=dtype))
        kernel = RBF(input_dim=3,
                     ARD=True,
                     variance=mx.nd.array(variance, dtype=dtype),
                     lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                     dtype=dtype)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, D))

        q = create_Gaussian_meanfield(model=m, observed=[m.Y])

        infr = GradBasedInference(
            inference_algorithm=StochasticVariationalInference(
                model=m, posterior=q, num_samples=10, observed=[m.Y]))
        infr.run(Y=mx.nd.array(Y, dtype=dtype),
                 max_iter=2,
                 learning_rate=0.1,
                 verbose=True)

        infr2 = Inference(
            ForwardSamplingAlgorithm(model=m, observed=[m.X], num_samples=5))
        infr2.run(X=mx.nd.array(X, dtype=dtype))

        infr_pred = TransferInference(ModulePredictionAlgorithm(
            model=m, observed=[m.X], target_variables=[m.Y]),
                                      infr_params=infr.params)
        xt = np.random.rand(13, 3)
        res = infr_pred.run(X=mx.nd.array(xt, dtype=dtype))[0]

        gp = m.Y.factor
        gp.attach_prediction_algorithms(
            targets=gp.output_names,
            conditionals=gp.input_names,
            algorithm=GPRegressionSamplingPrediction(gp._module_graph,
                                                     gp._extra_graphs[0],
                                                     [gp._module_graph.X]),
            alg_name='gp_predict')
        gp.gp_predict.diagonal_variance = False
        gp.gp_predict.jitter = 1e-6

        # Predict again, now with the sampling-based prediction algorithm attached.
        infr_pred2 = TransferInference(ModulePredictionAlgorithm(
            model=m, observed=[m.X], target_variables=[m.Y]),
                                       infr_params=infr.params)
        xt = np.random.rand(13, 3)
        res = infr_pred2.run(X=mx.nd.array(xt, dtype=dtype))[0]
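
For reference, the prediction step exercised here reduces, in the exact GP regression case, to the standard conditional Gaussian formulas. A small numpy sketch with a hypothetical rbf helper (the kernel form, jitter value, and shapes mirror the test, but the names are illustrative, not MXFusion's API):

    import numpy as np

    def rbf(A, B, variance=1.0, lengthscale=1.0):
        # k(a, b) = variance * exp(-||a - b||^2 / (2 * lengthscale^2))
        sq = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
        return variance * np.exp(-0.5 * sq / lengthscale ** 2)

    rng = np.random.default_rng(0)
    X = rng.random((20, 3))
    y = np.sin(X.sum(axis=1))
    Xt = rng.random((13, 3))
    noise_var, jitter = 0.1, 1e-6

    K = rbf(X, X) + (noise_var + jitter) * np.eye(len(X))
    Ks = rbf(Xt, X)
    mean_pred = Ks @ np.linalg.solve(K, y)                   # predictive mean
    cov_pred = rbf(Xt, Xt) - Ks @ np.linalg.solve(K, Ks.T)   # predictive covariance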
Example #5
    def test_change_default_dtype(self):
        """Check that inference runs after switching DEFAULT_DTYPE to float64."""
        from mxfusion.common import config
        config.DEFAULT_DTYPE = 'float64'

        np.random.seed(0)
        mean_groundtruth = 3.
        variance_groundtruth = 5.
        N = 100
        data = np.random.randn(N)*np.sqrt(variance_groundtruth) + mean_groundtruth

        m = Model()
        m.mu = Variable()
        m.s = Variable(transformation=PositiveTransformation())
        m.Y = Normal.define_variable(mean=m.mu, variance=m.s, shape=(N,))

        infr = GradBasedInference(inference_algorithm=MAP(model=m, observed=[m.Y]))
        infr.run(Y=mx.nd.array(data, dtype='float64'), learning_rate=0.1, max_iter=2)

        config.DEFAULT_DTYPE = 'float32'  # restore the default so later tests are unaffected
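
With no explicit priors attached to m.mu and m.s, MAP here effectively coincides with maximum likelihood, so the optimum the gradient steps move toward has a closed form: the sample mean and the biased sample variance. A numpy check of those targets on the same synthetic data:

    import numpy as np

    np.random.seed(0)
    mean_gt, var_gt, N = 3., 5., 100
    data = np.random.randn(N) * np.sqrt(var_gt) + mean_gt

    # Gaussian MLE: mu_hat = sample mean, s_hat = biased sample variance.
    mu_hat = data.mean()
    s_hat = ((data - mu_hat) ** 2).mean()
    print(mu_hat, s_hat)  # close to 3 and 5 up to sampling noise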