Example #1
    def test_set_parameters(self):

        # A toy inference algorithm that writes fixed values into the
        # inference parameters of the model.
        class SetValue(InferenceAlgorithm):
            def __init__(self, x, y, model, observed, extra_graphs=None):
                self.x_val = x
                self.y_val = y
                super(SetValue, self).__init__(
                    model=model, observed=observed, extra_graphs=extra_graphs)

            def compute(self, F, variables):
                self.set_parameter(variables, self.model.x, self.x_val)
                self.set_parameter(variables, self.model.y, self.y_val)

        m = Model()
        m.x = Variable(shape=(2,))
        m.y = Variable(shape=(3, 4))

        dtype = 'float64'

        np.random.seed(0)
        x_np = np.random.rand(2)
        y_np = np.random.rand(3, 4)
        x_mx = mx.nd.array(x_np, dtype=dtype)
        y_mx = mx.nd.array(y_np, dtype=dtype)

        infr = Inference(SetValue(x_mx, y_mx, m, []), dtype=dtype)
        infr.run()
        x_res = infr.params[m.x]
        y_res = infr.params[m.y]

        assert np.allclose(x_res.asnumpy(), x_np)
        assert np.allclose(y_res.asnumpy(), y_np)
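These snippets omit their imports. A minimal header that would make Example #1 self-contained might look like the following; the module paths are assumptions based on MXFusion's public API, not part of the original test:

import numpy as np
import mxnet as mx
from mxfusion import Model, Variable
from mxfusion.inference import Inference
from mxfusion.inference.inference_alg import InferenceAlgorithm  # assumed path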
Example #2
 def _test_operator(self, operator, inputs, properties=None):
     """
     inputs are mx.nd.array
     properties are just the operator properties needed at model def time.
     """
     properties = properties if properties is not None else {}
     m = Model()
     variables = [Variable() for _ in inputs]
     m.r = operator(*variables, **properties)
     vs = [v for v in m.r.factor.inputs]
     variables = {v[1].uuid: inputs[i] for i, v in enumerate(vs)}
     evaluation = m.r.factor.eval(mx.nd, variables=variables)
     return evaluation
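A hypothetical call to this helper from within the same test class; the operator import path is an assumption about MXFusion's layout, not taken from the original:

from mxfusion.components.functions import operators as ops  # assumed path

a = mx.nd.array([1., 2., 3.])
b = mx.nd.array([4., 5., 6.])
# Builds a one-factor model r = add(a, b) and evaluates the factor eagerly.
out = self._test_operator(ops.add, [a, b])
assert np.allclose(out.asnumpy(), (a + b).asnumpy())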
Example #3
    def test_operator_replicate(self, mxf_operator, mxnet_operator, inputs,
                                properties):

        properties = properties if properties is not None else {}
        m = Model()
        variables = [Variable() for _ in inputs]
        m.r = mxf_operator(*variables, **properties)
        vs = [v for v in m.r.factor.inputs]
        variables = {v[1].uuid: inputs[i] for i, v in enumerate(vs)}
        evaluation = m.r.factor.eval(mx.nd, variables=variables)

        r_clone = m.extract_distribution_of(m.r)
        vs = [v for v in r_clone.factor.inputs]
        variables = {v[1].uuid: inputs[i] for i, v in enumerate(vs)}
        evaluation2 = r_clone.factor.eval(mx.nd, variables=variables)

        assert np.allclose(evaluation.asnumpy(),
                           evaluation2.asnumpy()), (evaluation, evaluation2)
Example #4
    def test_change_default_dtype(self):
        from mxfusion.common import config
        config.DEFAULT_DTYPE = 'float64'

        np.random.seed(0)
        mean_groundtruth = 3.
        variance_groundtruth = 5.
        N = 100
        data = np.random.randn(N)*np.sqrt(variance_groundtruth) + mean_groundtruth

        m = Model()
        m.mu = Variable()
        m.s = Variable(transformation=PositiveTransformation())
        m.Y = Normal.define_variable(mean=m.mu, variance=m.s, shape=(100,))

        infr = GradBasedInference(inference_algorithm=MAP(model=m, observed=[m.Y]))
        infr.run(Y=mx.nd.array(data, dtype='float64'),
                 learning_rate=0.1, max_iter=2)

        config.DEFAULT_DTYPE = 'float32'
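Since the test mutates global state, a try/finally guard would restore the default dtype even if inference raises. This is a sketch of the pattern, not part of the original test:

from mxfusion.common import config

old_dtype = config.DEFAULT_DTYPE
config.DEFAULT_DTYPE = 'float64'
try:
    ...  # build the model and run inference as above
finally:
    config.DEFAULT_DTYPE = old_dtype  # restore the global default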
Example #5
 def make_simple_gluon_model(self):
     net = self.make_net()
     m = Model()
     m.x = Variable(shape=(1, 1))
     m.f = MXFusionGluonFunction(net, num_outputs=1)
     m.y = m.f(m.x)
     return m
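make_net is defined elsewhere on the test class; a minimal stand-in method consistent with the (1, 1) input shape might look like this (layer sizes and initializer are illustrative assumptions):

import mxnet as mx
from mxnet.gluon import nn

def make_net(self):
    net = nn.HybridSequential(prefix='net_')
    with net.name_scope():
        net.add(nn.Dense(10, activation='tanh', flatten=False))
        net.add(nn.Dense(1, flatten=False))
    net.initialize(mx.init.Xavier())
    return net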
Example #6
 def make_ppca_model(self):
     dtype = get_default_dtype()
     m = Model()
     m.w = Variable(shape=(self.K, self.D),
                    initial_value=mx.nd.array(np.random.randn(self.K, self.D)))
     dot = nn.HybridLambda(function='dot')
     m.dot = mf.functions.MXFusionGluonFunction(dot, num_outputs=1,
                                                broadcastable=False)
     # One K x K identity covariance, broadcast across all N latent points.
     cov = mx.nd.broadcast_to(
         mx.nd.expand_dims(mx.nd.array(np.eye(self.K, self.K), dtype=dtype), 0),
         shape=(self.N, self.K, self.K))
     m.z = mf.distributions.MultivariateNormal.define_variable(
         mean=mx.nd.zeros(shape=(self.N, self.K), dtype=dtype),
         covariance=cov, shape=(self.N, self.K))
     sigma_2 = Variable(shape=(1,), transformation=PositiveTransformation())
     m.x = mf.distributions.Normal.define_variable(
         mean=m.dot(m.z, m.w), variance=sigma_2, shape=(self.N, self.D))
     return m
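This snippet assumes self.N, self.K and self.D are set on the fixture, e.g. by a setUp such as the following (the values are illustrative):

import numpy as np

def setUp(self):
    np.random.seed(0)
    self.N = 100  # number of observations
    self.K = 2    # latent dimensionality
    self.D = 5    # observed dimensionality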
Example #7
    def make_model(self):
        class Func(mx.gluon.HybridBlock):
            def hybrid_forward(self, F, v2, v3, v4, v1):
                return -(F.sum(v2 * F.minimum(v4, v1) - v3 * v1))

        m = Model()
        N = 1
        m.v1 = Variable(shape=(N, ))
        m.v2 = Variable(shape=(N, ))
        m.v3 = Variable(shape=(N, ))
        m.v4 = mf.components.distributions.Gamma.define_variable(
            alpha=mx.nd.array([1]), beta=mx.nd.array([0.1]), shape=(N, ))
        v5 = mf.components.functions.MXFusionGluonFunction(Func(),
                                                           num_outputs=1)
        m.v5 = v5(m.v2, m.v3, m.v4, m.v1)
        return m
Example #8
    def test_gluon_parameters(self):
        self.setUp()

        m = Model()
        m.x = Variable(shape=(1, 1))
        m.f = MXFusionGluonFunction(self.net, num_outputs=1)
        m.y = m.f(m.x)

        infr = Inference(ForwardSamplingAlgorithm(m, observed=[m.x]))
        infr.run(x=mx.nd.ones((1, 1)))
        # Every Gluon parameter of the wrapped net should be registered
        # (keyed by uuid) in the inference parameter dict.
        assert all([
            v.uuid in infr.params.param_dict for v in m.f.parameters.values()
        ])
Example #9
    def fit_model(self,
                  state_list,
                  action_list,
                  win_in,
                  verbose=True,
                  max_iter=1000):
        """
        Fits a Gaussian process model to the state/action pairs passed in.
        This creates a model of the environment that is used during
        policy optimization instead of querying the environment directly.

        See mxfusion.gp_modules for additional types of GP models to fit,
        including Sparse GP and Stochastic Variational Inference Sparse GP.
        """
        X, Y = self.prepare_data(state_list, action_list, win_in)

        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, X.shape[-1]))
        m.noise_var = Variable(shape=(1, ),
                               transformation=PositiveTransformation(),
                               initial_value=0.01)
        m.kernel = RBF(input_dim=X.shape[-1],
                       variance=1,
                       lengthscale=1,
                       ARD=True)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=m.kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, Y.shape[-1]))
        m.Y.factor.gp_log_pdf.jitter = 1e-6

        infr = GradBasedInference(
            inference_algorithm=MAP(model=m, observed=[m.X, m.Y]))
        infr.run(X=mx.nd.array(X),
                 Y=mx.nd.array(Y),
                 max_iter=max_iter,
                 learning_rate=0.1,
                 verbose=verbose)
        return m, infr, X, Y
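A usage sketch; the agent object and the episode data are hypothetical placeholders for whatever collects state/action trajectories:

model, inference, X, Y = agent.fit_model(
    state_list=episode_states,    # hypothetical list of per-episode states
    action_list=episode_actions,  # hypothetical list of per-episode actions
    win_in=1,
    verbose=False,
    max_iter=200)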
Example #10
    def make_gpregr_model(self, lengthscale, variance, noise_var):
        from mxfusion.models import Model
        from mxfusion.components.variables import Variable, PositiveTransformation
        from mxfusion.modules.gp_modules import GPRegression
        from mxfusion.components.distributions.gp.kernels import RBF

        dtype = 'float64'
        m = Model()
        m.N = Variable()
        m.X = Variable(shape=(m.N, 3))
        m.noise_var = Variable(transformation=PositiveTransformation(),
                               initial_value=mx.nd.array(noise_var,
                                                         dtype=dtype))
        kernel = RBF(input_dim=3,
                     ARD=True,
                     variance=mx.nd.array(variance, dtype=dtype),
                     lengthscale=mx.nd.array(lengthscale, dtype=dtype),
                     dtype=dtype)
        m.Y = GPRegression.define_variable(X=m.X,
                                           kernel=kernel,
                                           noise_var=m.noise_var,
                                           shape=(m.N, 1),
                                           dtype=dtype)
        return m
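A minimal sketch of fitting this model with MAP from within the same test class, mirroring the other examples; the hyperparameter values and the synthetic data are assumptions, not from the original test:

import numpy as np
import mxnet as mx
from mxfusion.inference import GradBasedInference, MAP

m = self.make_gpregr_model(lengthscale=np.array([1., 1., 1.]),
                           variance=np.array([1.]),
                           noise_var=np.array([0.1]))
X, Y = np.random.rand(20, 3), np.random.rand(20, 1)
infr = GradBasedInference(
    inference_algorithm=MAP(model=m, observed=[m.X, m.Y]), dtype='float64')
infr.run(X=mx.nd.array(X, dtype='float64'),
         Y=mx.nd.array(Y, dtype='float64'), max_iter=10)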
Example #11
    def test_operators_variable_builtins(self, mxf_operator, mxnet_operator,
                                         inputs, case):

        m = Model()
        v1 = Variable()
        v2 = Variable()
        variables = [v1, v2] if len(inputs) > 1 else [v1]
        m.r = mxf_operator(*variables)
        vs = [v for v in m.r.factor.inputs]
        variables_rt = {v[1].uuid: inputs[i] for i, v in enumerate(vs)}
        r_eval = m.r.factor.eval(mx.nd, variables=variables_rt)

        m2 = Model()
        v12 = Variable()
        v22 = Variable()
        variables2 = [v12, v22] if len(inputs) > 1 else [v12]
        if case == "add":
            m2.r = v12 + v22
        elif case == "sub":
            m2.r = v12 - v22
        elif case == "mul":
            m2.r = v12 * v22
        elif case == "div":
            m2.r = v12 / v22
        elif case == "pow":
            m2.r = v12**v22
        elif case == "transpose":
            m2.r = transpose(v12)
        vs2 = [v for v in m2.r.factor.inputs]
        variables_rt2 = {v[1].uuid: inputs[i] for i, v in enumerate(vs2)}
        p_eval = m2.r.factor.eval(mx.nd, variables=variables_rt2)

        assert np.allclose(r_eval.asnumpy(),
                           p_eval.asnumpy()), (r_eval, p_eval)
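The test above is presumably driven by pytest parametrization; a sketch of matching parameters follows, shown as a free function for self-containment (the operator import path and the exact tuples are assumptions):

import pytest
import mxnet as mx
from mxfusion.components.functions import operators as ops  # assumed path

TWO_VECTORS = [mx.nd.array([1., 2.]), mx.nd.array([3., 4.])]

@pytest.mark.parametrize("mxf_operator, mxnet_operator, inputs, case", [
    (ops.add, mx.nd.add, TWO_VECTORS, "add"),
    (ops.multiply, mx.nd.multiply, TWO_VECTORS, "mul"),
    (ops.transpose, mx.nd.transpose,
     [mx.nd.array([[1., 2.], [3., 4.]])], "transpose"),
])
def test_operators_variable_builtins(mxf_operator, mxnet_operator, inputs, case):
    ...  # body as in the example above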