    def __init__(self, model, num_sub_groups=1):
        self.model = model
        self.glmm_par = self.model.glmm_par
        self.num_sub_groups = num_sub_groups

        self.full_indices = obj_lib.make_index_param(self.glmm_par)

        self.global_par = get_global_parameters(self.model.beta_dim)
        self.global_indices = obj_lib.make_index_param(self.global_par)

        self.group_par = get_group_parameters(
            self.model.beta_dim, self.num_sub_groups)
        self.group_indices = obj_lib.make_index_param(self.group_par)

        self.set_group_parameters(np.arange(0, self.num_sub_groups))

        self.group_rows = [self.model.y_g_vec == g
                           for g in range(np.max(self.model.y_g_vec) + 1)]

        self.kl_objective = obj_lib.Objective(
            self.group_par, self.get_group_kl)

        self.data_kl_objective = obj_lib.Objective(
            self.group_par, self.get_group_data_elbo)
        self.get_data_kl_objective_jac = autograd.jacobian(
            self.data_kl_objective.fun_vector)

    def __init__(self, model):
        self.model = model
        self.glmm_par = self.model.glmm_par
        self.full_indices = obj_lib.make_index_param(self.glmm_par)
        self.global_par = get_global_parameters(self.model.beta_dim)
        self.global_indices = obj_lib.make_index_param(self.global_par)

        self.kl_objective = obj_lib.Objective(
            self.global_par, self.get_global_kl)
Example #3
    def test_packing(self):
        dense_mat = np.zeros((3, 3))
        dense_mat[0, 0] = 2.0
        dense_mat[0, 1] = 3.0
        dense_mat[2, 1] = 4.0

        sparse_mat = sp.sparse.csr_matrix(dense_mat)
        sparse_mat_packed = obj_lib.pack_csr_matrix(sparse_mat)
        sparse_mat_unpacked = obj_lib.unpack_csr_matrix(sparse_mat_packed)

        np_test.assert_array_almost_equal(dense_mat,
                                          sparse_mat_unpacked.todense())
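
# A minimal sketch of the round trip that test_packing exercises above.  It
# assumes that obj_lib.pack_csr_matrix records the standard CSR components
# (data, indices, indptr, shape); the dict layout here is hypothetical, not
# the library's actual format.
import numpy as np
import scipy.sparse

def pack_csr(mat):
    # Keep the three CSR arrays plus the shape so the matrix can be rebuilt.
    return {'data': mat.data, 'indices': mat.indices,
            'indptr': mat.indptr, 'shape': mat.shape}

def unpack_csr(packed):
    return scipy.sparse.csr_matrix(
        (packed['data'], packed['indices'], packed['indptr']),
        shape=packed['shape'])

dense = np.zeros((3, 3))
dense[0, 0], dense[0, 1], dense[2, 1] = 2.0, 3.0, 4.0
packed = pack_csr(scipy.sparse.csr_matrix(dense))
assert np.allclose(unpack_csr(packed).todense(), dense)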
    def get_sparse_weight_vec_jacobian(self, free_par, print_every_n=None):
        vector_param_size = self.glmm_par.vector_size()
        n_obs = self.model.x_mat.shape[0]
        weight_indices = np.arange(0, n_obs)
        sparse_weight_jacobian = \
            osp.sparse.csr_matrix((n_obs, vector_param_size))
        if print_every_n is None:
            print_every_n = self.model.num_groups - 1
        self.glmm_par.set_free(free_par)
        for g in range(self.model.num_groups):
            if g % print_every_n == 0:
                print('Group {} of {}'.format(g, self.model.num_groups - 1))
            group_weight_indices = weight_indices[self.group_rows[g]]
            group_par_vec, group_indices = self.set_group_parameters([g])
            group_obs_jac = np.atleast_2d(
                self.get_data_kl_objective_jac(group_par_vec))
            sparse_weight_jacobian += \
                obj_lib.get_sparse_sub_matrix(
                    sub_matrix = group_obs_jac,
                    col_indices = group_indices,
                    row_indices = group_weight_indices,
                    col_dim = vector_param_size,
                    row_dim = n_obs)

        return sparse_weight_jacobian
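
# A stand-alone sketch of what obj_lib.get_sparse_sub_matrix appears to do in
# the loop above: scatter a dense sub-block into a large sparse matrix at the
# given row and column indices.  This is an illustration with plain scipy, not
# the library's implementation.
import numpy as np
import scipy.sparse

def sparse_sub_matrix(sub_matrix, row_indices, col_indices, row_dim, col_dim):
    rows, cols = np.meshgrid(row_indices, col_indices, indexing='ij')
    return scipy.sparse.csr_matrix(
        (np.ravel(sub_matrix), (np.ravel(rows), np.ravel(cols))),
        shape=(row_dim, col_dim))

block = np.arange(6.0).reshape(2, 3)
full = sparse_sub_matrix(block, [4, 7], [0, 2, 5], row_dim=10, col_dim=8)
assert full[7, 5] == block[1, 2]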
    def __init__(
        self, glmm_par, prior_par, x_mat, y_vec, y_g_vec, num_gh_points,
        use_prior=True):

        self.glmm_par = copy.deepcopy(glmm_par)
        self.prior_par = copy.deepcopy(prior_par)
        self.x_mat = np.array(x_mat)
        self.y_vec = np.array(y_vec)
        self.y_g_vec = np.array(y_g_vec)
        self.set_gh_points(num_gh_points)
        self.use_prior = use_prior

        self.beta_dim = self.x_mat.shape[1]
        self.num_groups = np.max(self.y_g_vec) + 1

        self.use_weights = False
        self.weights = np.full(self.x_mat.shape[0], 1.0)

        assert np.min(y_g_vec) == 0
        assert np.max(y_g_vec) == self.glmm_par['u'].size() - 1

        self.objective = obj_lib.Objective(self.glmm_par, self.get_kl)

        self.get_prior_model_grad = \
            autograd.grad(self.get_e_log_prior_from_args, argnum=0)
        self.get_prior_hess = \
            autograd.jacobian(self.get_prior_model_grad, argnum=1)

        self.group_model = SubGroupsModel(self, num_sub_groups=1)
        self.global_model = GlobalModel(self)

        self.moment_wrapper = MomentWrapper(self.glmm_par)

    def get_sparse_kl_vec_hessian(self, free_par, print_every_n=None):
        get_kl_re_grad = autograd.grad(
            self.get_group_kl_from_vectors, argnum=1)
        get_kl_offdiag_hess = autograd.jacobian(get_kl_re_grad, argnum=0)
        get_kl_re_hess = autograd.hessian(
            self.get_group_kl_from_vectors, argnum=1)

        full_hess_dim = self.glmm_par.vector_size()
        sparse_group_hess = \
            osp.sparse.csr_matrix((full_hess_dim, full_hess_dim))

        self.glmm_par.set_free(free_par)
        global_par_vec, global_indices = self.set_global_parameters()
        if print_every_n is None:
            print_every_n = self.model.num_groups - 1
        for g in range(self.model.num_groups):
            if g % print_every_n == 0:
                print('Group {} of {}.'.format(g, self.model.num_groups - 1))
            # Set the global parameters within the group.
            self.set_group_parameters([g])
            re_par_vec, re_indices = self.set_re_parameters([g])
            offdiag_hessian = \
                np.atleast_2d(get_kl_offdiag_hess(global_par_vec, re_par_vec))
            re_hessian = \
                np.atleast_2d(get_kl_re_hess(global_par_vec, re_par_vec))

            sp_offdiag_hessian = obj_lib.get_sparse_sub_matrix(
                sub_matrix = offdiag_hessian,
                row_indices = re_indices,
                col_indices = global_indices,
                row_dim = full_hess_dim,
                col_dim = full_hess_dim)

            sp_re_hessian = obj_lib.get_sparse_sub_matrix(
                sub_matrix = re_hessian,
                row_indices = re_indices,
                col_indices = re_indices,
                row_dim = full_hess_dim,
                col_dim = full_hess_dim)

            sparse_group_hess += \
                sp_offdiag_hessian + sp_offdiag_hessian.T + sp_re_hessian

        return sparse_group_hess
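
# The loop above assembles the full KL Hessian group by group: each group
# contributes a (random effect x global) cross block, its transpose, and a
# (random effect x random effect) diagonal block.  A dense toy version of the
# same bookkeeping, purely for illustration (the index layout is made up):
import numpy as np

full_dim = 5
global_idx = np.array([0, 1])      # positions of the global parameters
re_idx = np.array([2, 3, 4])       # positions of one group's random effects

hess = np.zeros((full_dim, full_dim))
cross = np.arange(6.0).reshape(3, 2)   # d^2 KL / (d re)(d global)
re_block = np.eye(3)                   # d^2 KL / (d re)^2

hess[np.ix_(re_idx, global_idx)] += cross
hess[np.ix_(global_idx, re_idx)] += cross.T
hess[np.ix_(re_idx, re_idx)] += re_block

# The accumulated matrix is symmetric, as a Hessian should be.
assert np.allclose(hess, hess.T)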
    def __init__(self, num_gh_points=50, dim=2,
                 num_components=3, num_draws=10000):

        self.mix_par = get_mixture_parameters(
            dim=dim, num_components=num_components)
        self.moment_par = mml.get_moment_params(dim)
        self.true_var = mml.get_moment_params(dim)
        self.dim = dim
        self.num_components = num_components

        self.set_monte_carlo_draws(num_draws)
        self.zero_tilt()

        self.q = mml.FactorizingNormalApproximation(dim=dim, num_draws=1000)
        self.map_approx = mml.MultivariateMAPApproximation(dim=dim)

        self.kl_objective = obj_lib.Objective(self.q.par, self.get_kl)
        self.map_objective = obj_lib.Objective(
            self.map_approx.map_par, self.map_loss_function)

    def get_sparse_kl_vec_hessian(self, free_par, print_every_n=None):
        full_hess_dim = self.glmm_par.vector_size()
        self.glmm_par.set_free(free_par)
        global_par_vec, global_indices = self.set_global_parameters()
        global_vec_hessian = \
            self.kl_objective.fun_vector_hessian(global_par_vec)
        sparse_global_hess = obj_lib.get_sparse_sub_hessian(
            sub_hessian = global_vec_hessian,
            full_indices = global_indices,
            full_hess_dim = full_hess_dim)
        return sparse_global_hess
Example #9
    def test_two_parameter_objective(self):
        model = TwoParamModel()
        model.set_random()

        objective_full = obj_lib.Objective(model.par, model.fun)

        objective_x = obj_lib.Objective(model.par['x'], model.fun)
        objective_y = obj_lib.Objective(model.par['y'], model.fun)

        objective = obj_lib.TwoParameterObjective(model.par['x'],
                                                  model.par['y'], model.fun)

        par_free = model.par.get_free()
        par_vec = model.par.get_vector()
        x_free = model.par['x'].get_free()
        y_free = model.par['y'].get_free()
        x_vec = model.par['x'].get_vector()
        y_vec = model.par['y'].get_vector()

        np_test.assert_array_almost_equal(model.fun(),
                                          objective.fun_free(x_free, y_free))
        np_test.assert_array_almost_equal(model.fun(),
                                          objective.fun_free(x_free, y_free))
        np_test.assert_array_almost_equal(model.fun(),
                                          objective.fun_vector(x_vec, y_vec))

        np_test.assert_array_almost_equal(
            objective.ag_fun_free_grad1(x_free, y_free),
            objective_x.ag_fun_free_grad(x_free))

        np_test.assert_array_almost_equal(
            objective.ag_fun_free_grad2(x_free, y_free),
            objective_y.ag_fun_free_grad(y_free))

        np_test.assert_array_almost_equal(
            objective.ag_fun_vector_grad1(x_vec, y_vec),
            objective_x.ag_fun_vector_grad(x_vec))

        np_test.assert_array_almost_equal(
            objective.ag_fun_vector_grad2(x_vec, y_vec),
            objective_y.ag_fun_vector_grad(y_vec))
Example #10
    def test_index_params(self):
        dim = 3
        param = vb.ModelParamsDict('test')
        param.push_param(vb.VectorParam('x', size=dim, lb=-2.0, ub=5.0))
        param.push_param(vb.VectorParam('y', size=dim, lb=-2.0, ub=5.0))

        index_par = obj_lib.make_index_param(param)
        param.set_free(np.random.random(param.free_size()))
        param_vec = param.get_vector()

        for d in range(dim):
            for pname in ['x', 'y']:
                self.assertAlmostEqual(param[pname].get()[d],
                                       param_vec[index_par[pname].get()[d]])
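
# The idea behind obj_lib.make_index_param, sketched with plain numpy: build
# an object shaped like the parameter whose entries hold the positions of that
# parameter's values inside the flattened vector.  The dict of index arrays
# below is only an illustration of the bookkeeping, not the vb API.
import numpy as np

sizes = {'x': 3, 'y': 3}
index_par, start = {}, 0
for name, size in sizes.items():
    index_par[name] = np.arange(start, start + size)
    start += size

param_vec = np.random.random(start)          # the flat parameter vector
x_values = param_vec[index_par['x']]         # recover the 'x' block by index
assert np.array_equal(x_values, param_vec[0:3])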
def get_pickle_dictionary(model, kl_hess, moment_jac):
    pickle_result_dict = {
        'glmm_par_free': model.glmm_par.get_free(),
        'glmm_par_vector': model.glmm_par.get_vector(),
        'kl_hess_packed': obj_lib.pack_csr_matrix(kl_hess),
        'moment_jac': np.squeeze(moment_jac),
        'prior_par_vec': model.prior_par.get_vector(),
        'num_groups': model.num_groups,
        'beta_dim': model.beta_dim,
        'num_gh_points': model.num_gh_points,
        'y_g_vec': model.y_g_vec,
        'y_vec': model.y_vec,
        'x_mat': model.x_mat }

    return pickle_result_dict
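
# get_pickle_dictionary gathers everything needed to restore a fit: both
# parameterizations of glmm_par, the packed KL Hessian, the moment Jacobian,
# the prior, and the raw data.  A minimal save/load sketch; the file name and
# the surrounding model, kl_hess, and moment_jac objects are assumed here, not
# part of the source.
import pickle

def save_results(model, kl_hess, moment_jac, path='glmm_results.pkl'):
    with open(path, 'wb') as f:
        pickle.dump(get_pickle_dictionary(model, kl_hess, moment_jac), f)

def load_results(path='glmm_results.pkl'):
    with open(path, 'rb') as f:
        return pickle.load(f)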
Example #12
    def test_parameter_converter(self):
        model = TwoParamModel()
        model.set_random()
        model.set_y_from_x()
        x_free = model.par['x'].get_free()
        y_free = model.par['y'].get_free()
        x_vec = model.par['x'].get_vector()
        y_vec = model.par['y'].get_vector()

        param_converter = obj_lib.ParameterConverter(model.par['x'],
                                                     model.par['y'],
                                                     model.set_y_from_x)

        np_test.assert_array_almost_equal(
            param_converter.converter_vec_to_vec(x_vec), y_vec)
        np_test.assert_array_almost_equal(
            param_converter.converter_vec_to_free(x_vec), y_free)
        np_test.assert_array_almost_equal(
            param_converter.converter_free_to_vec(x_free), y_vec)
        np_test.assert_array_almost_equal(
            param_converter.converter_free_to_free(x_free), y_free)

        # The function convert_y_to_x corresponds to the vector-to-vector
        # map.  Use the free-to-vec Jacobians to convert to the other maps.
        get_converter_jacobian = autograd.jacobian(model.convert_y_to_x)
        x_free_to_vec_jac = \
            model.par['x'].free_to_vector_jac(x_free).todense()
        y_free_to_vec_jac = \
            model.par['y'].free_to_vector_jac(y_free).todense()

        vec_to_vec_jac = get_converter_jacobian(x_vec)
        np_test.assert_array_almost_equal(
            vec_to_vec_jac, param_converter.vec_to_vec_jacobian(x_vec))

        free_to_vec_jac = np.matmul(vec_to_vec_jac, x_free_to_vec_jac)
        np_test.assert_array_almost_equal(
            free_to_vec_jac, param_converter.free_to_vec_jacobian(x_free))

        np_test.assert_array_almost_equal(
            np.linalg.solve(y_free_to_vec_jac, vec_to_vec_jac),
            param_converter.vec_to_free_jacobian(x_vec))

        np_test.assert_array_almost_equal(
            np.linalg.solve(y_free_to_vec_jac, free_to_vec_jac),
            param_converter.free_to_free_jacobian(x_free))
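
# The Jacobian identities checked above are just the chain rule.  A scalar toy
# example, assuming exp() as the free-to-vector transform for both x and y and
# y_vec = 2 * x_vec as the converter (none of this is the library's code):
import numpy as np

x_free = 0.3
x_vec = np.exp(x_free)            # x free -> vector
y_vec = 2.0 * x_vec               # the converter: vector to vector
y_free = np.log(y_vec)            # y vector -> free

vec_to_vec = 2.0                  # d y_vec / d x_vec
x_free_to_vec = np.exp(x_free)    # d x_vec / d x_free
y_free_to_vec = np.exp(y_free)    # d y_vec / d y_free

free_to_vec = vec_to_vec * x_free_to_vec
free_to_free = free_to_vec / y_free_to_vec    # "solve" with y's Jacobian
# Analytically, d y_free / d x_free = d log(2 exp(t)) / dt = 1.
assert np.isclose(free_to_free, 1.0)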
Example #13
    def test_objective(self):
        model = Model(dim=3)
        objective = obj_lib.Objective(par=model.x, fun=model.f)

        model.set_inits()
        x_free = model.x.get_free()
        x_vec = model.x.get_vector()

        model.set_opt()
        self.assertTrue(objective.fun_free(x_free) > 0.0)
        np_test.assert_array_almost_equal(objective.fun_free(x_free),
                                          objective.fun_vector(x_vec))

        grad = objective.fun_free_grad(x_free)
        hess = objective.fun_free_hessian(x_free)
        np_test.assert_array_almost_equal(np.matmul(hess, grad),
                                          objective.fun_free_hvp(x_free, grad))

        self.assertTrue(objective.fun_vector(x_vec) > 0.0)
        grad = objective.fun_vector_grad(x_vec)
        hess = objective.fun_vector_hessian(x_vec)
        np_test.assert_array_almost_equal(
            np.matmul(hess, grad), objective.fun_vector_hvp(x_vec, grad))

        # Test Jacobians.
        vec_objective = obj_lib.Objective(par=model.x, fun=model.get_x_vec)
        vec_jac = vec_objective.fun_vector_jacobian(x_vec)
        np_test.assert_array_almost_equal(model.b_mat, vec_jac)

        free_jac = vec_objective.fun_free_jacobian(x_free)
        x_free_to_vec_jac = \
            model.x.free_to_vector_jac(x_free).todense()
        np_test.assert_array_almost_equal(
            np.matmul(model.b_mat, np.transpose(x_free_to_vec_jac)), free_jac)

        # Test the preconditioning
        preconditioner = 2.0 * np.eye(model.dim)
        preconditioner[model.dim - 1, 0] = 0.1  # Add asymmetry for testing!
        objective.preconditioner = preconditioner

        np_test.assert_array_almost_equal(
            objective.fun_free_cond(x_free),
            objective.fun_free(np.matmul(preconditioner, x_free)),
            err_msg='Conditioned function values')

        fun_free_cond_grad = autograd.grad(objective.fun_free_cond)
        grad_cond = objective.fun_free_grad_cond(x_free)
        np_test.assert_array_almost_equal(
            fun_free_cond_grad(x_free),
            grad_cond,
            err_msg='Conditioned gradient values')

        fun_free_cond_hessian = autograd.hessian(objective.fun_free_cond)
        hess_cond = objective.fun_free_hessian_cond(x_free)
        np_test.assert_array_almost_equal(fun_free_cond_hessian(x_free),
                                          hess_cond,
                                          err_msg='Conditioned Hessian values')

        fun_free_cond_hvp = autograd.hessian_vector_product(
            objective.fun_free_cond)
        np_test.assert_array_almost_equal(
            fun_free_cond_hvp(x_free, grad_cond),
            objective.fun_free_hvp_cond(x_free, grad_cond),
            err_msg='Conditioned Hessian vector product values')
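
# What the conditioned checks above verify: with f_c(y) = f(P y), the chain
# rule gives grad f_c(y) = P^T grad f(P y) and hess f_c(y) = P^T hess f(P y) P.
# A quick numerical check on a quadratic f(x) = 0.5 x^T A x with the same kind
# of asymmetric preconditioner as the test (A and the dimension are made up):
import numpy as np

dim = 3
A = np.diag([1.0, 2.0, 3.0])
P = 2.0 * np.eye(dim)
P[dim - 1, 0] = 0.1

f = lambda x: 0.5 * x @ A @ x
f_cond = lambda y: f(P @ y)

y = np.array([0.5, -1.0, 2.0])
grad_cond = P.T @ (A @ (P @ y))               # P^T grad f(P y)

eps = 1e-6
num_grad = np.array([(f_cond(y + eps * e) - f_cond(y)) / eps
                     for e in np.eye(dim)])
assert np.allclose(num_grad, grad_cond, atol=1e-4)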
Example #14
def safe_matmul_todense(a, b):
    # Multiply with obj_lib.safe_matmul and always return a dense ndarray.
    result = obj_lib.safe_matmul(a, b)
    if sp.sparse.issparse(result):
        return np.asarray(result.todense())
    else:
        return np.asarray(result)
Example #15
    def run_optimization_tests(self, use_sparse=False):
        model = Model(dim=3)
        objective = obj_lib.Objective(par=model.x, fun=model.f)
        preconditioner = 2.0 * np.eye(model.dim)
        preconditioner[model.dim - 1, 0] = 0.1  # Add asymmetry for testing!

        if use_sparse:
            objective.preconditioner = sp.sparse.csr_matrix(preconditioner)
        else:
            objective.preconditioner = preconditioner

        model.set_inits()
        x0 = model.x.get_free()
        y0 = np.linalg.solve(preconditioner, x0)

        # Unconditioned
        opt_result = sp.optimize.minimize(fun=objective.fun_free,
                                          jac=objective.fun_free_grad,
                                          hessp=objective.fun_free_hvp,
                                          x0=x0,
                                          method='trust-ncg',
                                          options={
                                              'maxiter': 100,
                                              'disp': False,
                                              'gtol': 1e-6
                                          })
        self.assertTrue(opt_result.success)
        model.x.set_free(opt_result.x)
        np_test.assert_array_almost_equal(model.opt_x,
                                          model.x.get_vector(),
                                          err_msg='Trust-NCG Unconditioned')

        # Conditioned:
        opt_result = sp.optimize.minimize(fun=objective.fun_free_cond,
                                          jac=objective.fun_free_grad_cond,
                                          hessp=objective.fun_free_hvp_cond,
                                          x0=y0,
                                          method='trust-ncg',
                                          options={
                                              'maxiter': 100,
                                              'disp': False,
                                              'gtol': 1e-6
                                          })
        self.assertTrue(opt_result.success)
        model.x.set_free(objective.uncondition_x(opt_result.x))
        np_test.assert_array_almost_equal(model.opt_x,
                                          model.x.get_vector(),
                                          err_msg='Trust-NCG')

        opt_result = sp.optimize.minimize(
            fun=lambda par: objective.fun_free_cond(par, verbose=False),
            jac=objective.fun_free_grad_cond,
            x0=y0,
            method='BFGS',
            options={
                'maxiter': 100,
                'disp': False,
                'gtol': 1e-6
            })
        self.assertTrue(opt_result.success)
        model.x.set_free(objective.uncondition_x(opt_result.x))
        np_test.assert_array_almost_equal(model.opt_x,
                                          model.x.get_vector(),
                                          err_msg='BFGS')

        opt_result = sp.optimize.minimize(
            fun=lambda par: objective.fun_free_cond(par, verbose=False),
            jac=objective.fun_free_grad_cond,
            hess=objective.fun_free_hessian_cond,
            x0=y0,
            method='Newton-CG',
            options={
                'maxiter': 100,
                'disp': False
            })
        self.assertTrue(opt_result.success)
        model.x.set_free(objective.uncondition_x(opt_result.x))
        np_test.assert_array_almost_equal(model.opt_x,
                                          model.x.get_vector(),
                                          err_msg='Newton')
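
# The conditioned optimizations above work in the preconditioned variable y
# and map back with x = P y, which is what objective.uncondition_x appears to
# do.  A stand-alone version of the same pattern with scipy on a badly scaled
# quadratic (the objective and P here are made up for illustration):
import numpy as np
import scipy.optimize

dim = 3
A = np.diag([1.0, 10.0, 100.0])
target = np.array([1.0, 2.0, 3.0])
f = lambda x: 0.5 * (x - target) @ A @ (x - target)

P = 2.0 * np.eye(dim)
P[dim - 1, 0] = 0.1
f_cond = lambda y: f(P @ y)                  # optimize in y with x = P y

y_opt = scipy.optimize.minimize(f_cond, np.zeros(dim), method='BFGS').x
x_opt = P @ y_opt                            # "uncondition" back to x
assert np.allclose(x_opt, target, atol=1e-3)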