Example #1
def approximate_mixture_data():
    num_loc_proposals = 2
    num_imp_samp = 1000
    n_comp = 2
    p_comp = np.array([0.7, 0.3])
    dim = 1
    num_obs = 100
    obs = None
    
    means = []
    
    for i in range(n_comp):
        means.append([20*i]*dim)
        # np.int was removed in NumPy 1.24; the builtin int is the correct replacement
        if obs is None:
            obs = dist.mvt(means[-1], np.eye(dim), 30).rvs(int(np.round(num_obs * p_comp[i])))
        else:
            obs = np.vstack([obs, dist.mvt(means[-1], np.eye(dim), 30).rvs(int(np.round(num_obs * p_comp[i])))])

    count = {"local_lpost": 0, "local_llhood": 0, "naive_lpost": 0, "naive_llhood": 0, "standard_lpost": 0, "standard_llhood": 0}
    print(means)
    #return
    def count_closure(name):
        def rval():
            count[name] = count[name] + 1
        return rval
    
    initial_samples = []
    for _ in range(10):
        initial_samples.append(DirCatTMM(obs, [1]*n_comp, dist.mvt(np.mean(means,0), np.eye(dim)*5, dim),
                                  dist.invwishart(np.eye(dim) * 5, dim+1 ),
                                  stats.gamma(1,scale=1)))
#    (naive_samp, naive_lpost) = pmc.sample(num_imp_samp, initial_samples,
#                               DirCatTMMProposal(naive_multi_proposals = num_loc_proposals,
#                                                     lpost_count = count_closure("naive_lpost"),
#                                                     llhood_count =  count_closure("naive_llhood")),
#                               population_size = 4)
    (infl_samp, infl_lpost) = pmc.sample(num_imp_samp, initial_samples,
                               DirCatTMMProposal(num_local_proposals = num_loc_proposals,
                                                     lpost_count = count_closure("local_lpost"),
                                                     llhood_count =  count_closure("local_llhood")),
                               population_size = 4)
                               
    (stand_samp, stand_lpost) = pmc.sample(num_imp_samp * num_loc_proposals, initial_samples,
                               DirCatTMMProposal(lpost_count = count_closure("standard_lpost"),
                                                     llhood_count =  count_closure("standard_llhood")),
                               population_size = 4)

    print("===============\n",p_comp, means,
#          "\n\n--NAIVE--\n",
#          naive_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, naive_samp[-1].comp_indic.sum(0))+1, count["naive_llhood"], count["naive_lpost"],
          "\n\n--LOCAL--\n",
          infl_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, infl_samp[-1].comp_indic.sum(0))+1, count["local_llhood"], count["local_lpost"],
          "\n\n--STANDARD--\n",
          stand_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, stand_samp[-1].comp_indic.sum(0))+1, count["standard_llhood"], count["standard_lpost"],"\n\n")   
    return {"infl":(infl_samp, infl_lpost), "standard":(stand_samp, stand_lpost)}
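The count_closure helper is a plain closure counter: each returned zero-argument callback increments a named slot in the shared count dict, which is how the samplers report how many log-posterior and log-likelihood evaluations they spent. A minimal, self-contained sketch of the pattern:

count = {"llhood": 0, "lpost": 0}

def count_closure(name):
    def rval():
        count[name] += 1  # closes over the shared dict
    return rval

tick = count_closure("llhood")
tick()
tick()
assert count["llhood"] == 2 and count["lpost"] == 0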
Example #2
def test_DirCatTMMProposal():
    num_loc_proposals = 2
    num_imp_samp = 1000
    n_comp = 2
    p_comp = np.array([0.7, 0.3])
    dim = 1
    num_obs = 100
    obs = None
    
    means = []
    
    for i in range(n_comp):
        means.append([20*i]*dim)
        if obs is None:
            obs = dist.mvt(means[-1], np.eye(dim), 30).rvs(int(np.round(num_obs * p_comp[i])))
        else:
            obs = np.vstack([obs, dist.mvt(means[-1], np.eye(dim), 30).rvs(int(np.round(num_obs * p_comp[i])))])

    count = {"local_lpost": 0, "local_llhood": 0, "naive_lpost": 0, "naive_llhood": 0, "standard_lpost": 0, "standard_llhood": 0}
    print(means)
    #return
    def count_closure(name):
        def rval():
            count[name] = count[name] + 1
        return rval
    
    initial_samples = []
    for _ in range(10):
        initial_samples.append(dis.DirCatTMM(obs, [1]*n_comp, dist.mvt(np.mean(means,0), np.eye(dim)*5, dim),
                                  dist.invwishart(np.eye(dim) * 5, dim+1 ),
                                  stats.gamma(1,scale=1)))
#    (naive_samp, naive_lpost) = pmc.sample(num_imp_samp, initial_samples,
#                               dis.DirCatTMMProposal(naive_multi_proposals = num_loc_proposals,
#                                                     lpost_count = count_closure("naive_lpost"),
#                                                     llhood_count =  count_closure("naive_llhood")),
#                               population_size = 4)
    (infl_samp, infl_lpost) = pmc.sample(num_imp_samp, initial_samples,
                               dis.DirCatTMMProposal(num_local_proposals = num_loc_proposals,
                                                     lpost_count = count_closure("local_lpost"),
                                                     llhood_count =  count_closure("local_llhood")),
                               population_size = 4)
                               
    (stand_samp, stand_lpost) = pmc.sample(num_imp_samp * num_loc_proposals, initial_samples,
                               dis.DirCatTMMProposal(lpost_count = count_closure("standard_lpost"),
                                                     llhood_count =  count_closure("standard_llhood")),
                               population_size = 4)

    print("===============\n",p_comp, means,
#          "\n\n--NAIVE--\n",
#          naive_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, naive_samp[-1].comp_indic.sum(0))+1, count["naive_llhood"], count["naive_lpost"],
          "\n\n--LOCAL--\n",
          infl_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, infl_samp[-1].comp_indic.sum(0))+1, count["local_llhood"], count["local_lpost"],
          "\n\n--STANDARD--\n",
          stand_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, stand_samp[-1].comp_indic.sum(0))+1, count["standard_llhood"], count["standard_lpost"],"\n\n")   
Example #3
def __init__(self, lpost_and_grad_func, dim, lrate=0.1):
    self.lpost_and_grad = lpost_and_grad_func
    self.lpost = lambda x: self.lpost_and_grad(x, False)
    self.lrate = lrate
    self.jump_dist = mvt([0] * dim, np.eye(dim) * 5, dim)
    # np.float was removed in NumPy 1.24; the builtin float is equivalent here
    self.compute_conj_dir = lambda anc, current: max(0, float(current.sample.T.dot(current.sample - anc.sample) / anc.sample.T.dot(anc.sample)))
    self.back_off_count = 0
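The compute_conj_dir lambda has the form of a Polak-Ribiere+ coefficient (clipped at zero from below), applied to sample vectors rather than gradients. A standalone sketch of the same formula, assuming 1-D NumPy arrays:

import numpy as np

def pr_plus_coefficient(anc, current):
    # beta = max(0, current . (current - anc) / (anc . anc))
    return max(0.0, float(current.dot(current - anc) / anc.dot(anc)))

beta = pr_plus_coefficient(np.array([1.0, 2.0]), np.array([1.5, 1.5]))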
Example #4
def __init__(self, lpost_and_grad_func, dim, lrate=0.1):
    self.lpost_and_grad = lpost_and_grad_func
    self.lpost = lambda x: self.lpost_and_grad(x, False)
    self.lrate = lrate
    self.jump_dist = mvt([0] * dim, np.eye(dim) * 5, dim)
    self.compute_conj_dir = lambda anc, current: max(0, float(current.sample.T.dot(current.sample - anc.sample) / anc.sample.T.dot(anc.sample)))
    self.back_off_count = 0
Example #5
def gen_proposal(self, ancestor):
    if not isinstance(ancestor, pmc.PmcSample):
        ancestor = pmc.PmcSample(sample=ancestor)
    rval = []
    lprop_prob = 0
    proto = deepcopy(ancestor.sample)

    # Propose mixture weights from the conjugate Dirichlet update.
    dirichl = dist.dirichlet(proto.dir_param + proto.cat_param.sum(0))
    proto.cat_param = dirichl.rvs()
    lprop_prob = lprop_prob + dirichl.logpdf(proto.cat_param)

    # Propose a component indicator for each observation.
    for o in range(ancestor.sample.num_obs):
        def lpost_func(comp_indic):
            self.lpc()
            return proto.lpost_comp_indic(comp_indic, observation_idx=o)
        cp = pmc.LatentClassProposal(lpost_func,
                                     ancestor.sample.comp_indic.shape[1]).gen_proposal()
        proto.comp_indic[o] = cp.sample
        lprop_prob = lprop_prob + cp.lprop

    for i in range(self.naive_multi_proposal):
        # Draw num_local_proposals candidate (mu, cov, df) triples per component.
        comp_param_lprops = []
        for comp_idx in range(len(proto.comp_param)):
            comp_param_lprops.append([])
            for k in range(self.num_local_proposals):
                (anc_mu, anc_cov, anc_df) = proto.comp_param[comp_idx]
                dim = anc_mu.size
                prop_mu = pmc.NaiveRandomWalkProposal(lambda x: None,
                                                      dist.mvt(np.zeros(dim), np.eye(dim)*5, 20)).gen_proposal(mean=proto.comp_param[comp_idx][0])
                prop_cov = pmc.InvWishartRandomWalkProposal(anc_cov.shape[0] + 2, anc_cov.shape[0]).gen_proposal(mean=proto.comp_param[comp_idx][1])
                pdist_df = stats.gamma(proto.comp_param[comp_idx][2] + 1, scale=1)
                prop_df = pdist_df.rvs()
                lprop_df = pdist_df.logpdf(prop_df)
                param_all = [prop_mu.sample, prop_cov.sample, prop_df]
                self.llhc()
                prop = {"param": param_all,
                        "llhood": proto.llhood_comp_param(param_all, comp_idx),
                        "lprop": prop_mu.lprop + prop_cov.lprop + lprop_df}
                comp_param_lprops[-1].append(prop)

        # Each combination of per-component candidates becomes one proposal,
        # with importance weight lweight = lpost - lprop.
        for combination in itertools.product(*comp_param_lprops):
            rval.append(pmc.PmcSample(ancestor))
            rval[-1].sample = copy(proto)
            rval[-1].sample.update_comp_dists([c["param"] for c in combination])
            rval[-1].lprop = lprop_prob + np.sum([c["lprop"] for c in combination])
            rval[-1].lpost = rval[-1].sample.lprior() + np.sum([c["llhood"] for c in combination])
            rval[-1].lweight = rval[-1].lpost - rval[-1].lprop
    if False:
        if self.naive_multi_proposal > 1:
            print("Multiple proposals:", len(rval))
        elif self.num_local_proposals > 1:
            print("Combined proposals:", len(rval))
    return rval
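Note the combination step: itertools.product over the per-component candidate lists turns num_local_proposals draws per component into num_local_proposals ** n_components combined samples, each weighted by the sum of its parts' log-proposal and log-likelihood terms. A toy illustration:

import itertools

per_component = [["mu_a", "mu_b"], ["nu_a", "nu_b"]]
combos = list(itertools.product(*per_component))
# 2 candidates x 2 components -> 4 combined proposals:
# [('mu_a', 'nu_a'), ('mu_a', 'nu_b'), ('mu_b', 'nu_a'), ('mu_b', 'nu_b')]
assert len(combos) == 2 ** 2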
Example #6
def __init__(self, lpost_and_grad_func, dim, lrate=0.1, prop_mean_on_line=0.5, main_var_scale=1, other_var=0.5, fix_main_var=None):
    self.lpost_and_grad = lpost_and_grad_func
    self.lpost = lambda x: self.lpost_and_grad(x, False)
    self.lrate = lrate
    self.jump_dist = mvt([0] * dim, np.eye(dim) * 5, dim)
    self.back_off_count = 0
    self.prop_mean_on_line = prop_mean_on_line
    self.main_var_scale = main_var_scale
    self.other_var = other_var
    self.fix_main_var = fix_main_var
Example #7
def __init__(self, lpost_and_grad_func, dim, lrate=0.1, prop_mean_on_line=0.5, main_var_scale=1, other_var=0.5, fix_main_var=None):
    self.lpost_and_grad = lpost_and_grad_func
    self.lpost = lambda x: self.lpost_and_grad(x, False)
    self.lrate = lrate
    self.jump_dist = mvt([0] * dim, np.eye(dim) * 5, dim)
    self.back_off_count = 0
    self.prop_mean_on_line = prop_mean_on_line
    self.main_var_scale = main_var_scale
    self.other_var = other_var
    self.fix_main_var = fix_main_var
Example #8
def test_DirCatTMM():
    num_obs = 1000
    for dim in range(2,4):
        mu = np.array([11 * (i+1) for i in range(dim)])
        K = np.eye(dim) * 5
        df = dim + 1
        obs_dist = dist.mvt(mu, K, df)
        obs = obs_dist.rvs(num_obs)
        dctmm = dis.DirCatTMM(obs, [1]*dim, obs_dist,
                                      dist.invwishart(np.eye(dim) * 5, dim + 1),
                                      stats.gamma(1, scale=1, loc=dim+1))
        orig_cat_param = dctmm.cat_param
        dctmm.cat_param = np.zeros(dim)
        for i in range(dim):
            dctmm.cat_param[i] = 1
            ### Test DirCatTMM.lpost_comp_indic ###
            for j in range(dim):
                c_indic = np.zeros(dim)
                c_indic[j] = 1
                for o in range(obs.shape[0]):
                    if i == j:
                        assert(dctmm.lpost_comp_indic(c_indic, o) > -np.inf)
                    else:
                        assert(dctmm.lpost_comp_indic(c_indic, o) == -np.inf)
                c_indic[j] = 0
            ### Test DirCatTMM.llhood_comp_param ###
            highest = dctmm.llhood_comp_param((mu, K, df), i)
            assert(highest >= dctmm.llhood_comp_param((-mu, K, df), i))
            assert(highest >= dctmm.llhood_comp_param((mu, K*5, df), i))
            assert(highest >= dctmm.llhood_comp_param((mu, K/2, df), i))
            assert(highest >= dctmm.llhood_comp_param((mu, K, df+10), i))
            dctmm.cat_param[i] = 0
        
        
        ### Test DirCatTMM.lprior ###
        dctmm.cat_param = np.array(dctmm.dir_param / dctmm.dir_param.sum())
        dctmm.comp_indic = dist.categorical(dctmm.cat_param).rvs(num_obs, indic = True)
        dctmm.update_comp_dists([(mu, K, df)] * dim)
        highest = dctmm.lprior()
        
        c_param = dctmm.dir_param + np.arange(dim)
        dctmm.cat_param = np.array(c_param / c_param.sum())
        ch_cat_param = dctmm.lprior()
        assert(highest > ch_cat_param)
        dctmm.update_comp_dists([(-mu, K, df)] * dim)
        assert(ch_cat_param > dctmm.lprior())
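The block of llhood_comp_param assertions encodes a simple sanity property: the likelihood of data drawn from (mu, K, df) should not increase when the parameters are grossly perturbed. For intuition, the same check written against SciPy's multivariate t (names here are illustrative, not from the codebase); with a shift this large the assertion holds essentially surely:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
mu, K, df = np.zeros(2), np.eye(2), 3
obs = stats.multivariate_t(mu, K, df).rvs(1000, random_state=rng)
ll = lambda m, c, d: stats.multivariate_t(m, c, d).logpdf(obs).sum()
assert ll(mu, K, df) >= ll(mu + 10, K, df)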
Example #9
def approximate_iris_mixture_data():
    from sklearn.datasets import load_iris
    num_imp_samp = 100
    num_loc_proposals = 3
    n_comp = 3
    p_comp = np.array([1/n_comp] * n_comp)
    dim = 4
    iris = load_iris()
    obs = iris["data"]
    labels = iris["target"]
    means = np.array([obs[i*50:(i+1)*50].mean(0) for i in range(3)])

    count = {"local_lpost": 0, "local_llhood": 0, "naive_lpost": 0, "naive_llhood": 0, "standard_lpost": 0, "standard_llhood": 0}

    def count_closure(name):
        def rval():
            count[name] = count[name] + 1
        return rval
    
    initial_samples = []
    for _ in range(10):
        initial_samples.append(DirCatTMM(obs, [1]*n_comp, dist.mvt(obs.mean(0), np.diag(obs.var(0)), 20),
                                  dist.invwishart(np.eye(dim), 50),
                                  stats.gamma(500, scale=0.1)))
        
    (infl_samp, infl_lpost) = pmc.sample(num_imp_samp, initial_samples,
                               DirCatTMMProposal(num_local_proposals = num_loc_proposals,
                                                     lpost_count = count_closure("local_lpost"),
                                                     llhood_count =  count_closure("local_llhood")),
                               population_size = 4)
                               
    (stand_samp, stand_lpost) = pmc.sample(num_imp_samp * num_loc_proposals, initial_samples,
                               DirCatTMMProposal(lpost_count = count_closure("standard_lpost"),
                                                     llhood_count =  count_closure("standard_llhood")),
                               population_size = 4)

    print("===============\n",p_comp, means,
          "\n\n--LOCAL--\n",
          infl_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, infl_samp[-1].cat_param.flatten()), count["local_llhood"], count["local_lpost"],
          "\n\n--STANDARD--\n",
          stand_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, stand_samp[-1].cat_param.flatten()), count["standard_llhood"], count["standard_lpost"],"\n\n")   
    return {"infl":(infl_samp, infl_lpost), "standard":(stand_samp, stand_lpost)}
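With two arguments, scipy.stats.entropy returns the Kullback-Leibler divergence KL(p || q) (after normalizing q), so the printed figure measures how far the recovered component weights sit from the true ones:

from scipy import stats

stats.entropy([0.7, 0.3], [0.5, 0.5])  # KL(p || q) ~= 0.0823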
Example #10
def test_mvt_mvn_logpdf_n_grad():
    # values from R-package bayesm, function dmvt(6.1, a, a)
    for (mu, var, df, lpdf) in [(np.array((1,1)), np.eye(2),   3, -1.83787707) ,
                                (np.array((1,2)), np.eye(2)*3, 3, -2.93648936)]:
        for dist in [mvt(mu,var,df), mvnorm(mu,var)]:
            ad = np.mean(np.abs(dist.logpdf(mu) - lpdf))
            assert(ad < 10**-8)
            assert(np.all(opt.check_grad(dist.logpdf, dist.logpdf_grad, mu-1) < 10**-7))
    
            al = [(5,4), (3,3), (1,1)]
            (cpdf, cgrad) = dist.log_pdf_and_grad(al)
            (spdf, sgrad) = zip(*[dist.log_pdf_and_grad(m) for m in al])
            (spdf, sgrad) = (np.array(spdf), np.array(sgrad)) 
            # the original asserted the pdf condition twice; the second clause
            # evidently meant to compare the gradients
            assert(np.all(cpdf == spdf) and np.all(cgrad == sgrad))
            assert(sgrad.shape == cgrad.shape)
            
    mu = np.array([ 11.56966913,   8.66926112])
    obs = np.array([[ 1.31227875, -2.88454287],[ 2.14283061, -2.97526902]])
    var = np.array([[ 1.44954579, -1.43116137], [-1.43116137,  3.6207941 ]])
    dist = mvnorm(mu, var)
    
    assert(np.all(dist.logpdf(obs) - stats.multivariate_normal(mu, var).logpdf(obs) < 10**-7))
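The first reference value is just the bivariate density at its own mode, -log(2*pi) ~= -1.83787707; for d = 2, df = 3 the Student-t and Gaussian log-densities coincide at the mean, which is why one reference value serves both distributions in the loop. Recent SciPy (>= 1.6) reproduces it directly, a handy cross-check for the mvt implementation under test:

import numpy as np
from scipy import stats

mu = np.array([1.0, 1.0])
stats.multivariate_t(mu, np.eye(2), df=3).logpdf(mu)  # ~= -1.83787707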
Example #11
        self.p = stats.bernoulli(p)
        self.d1 = mvnorm(mu1, K1)
        self.d2 = mvnorm(mu2, K2)

    def logpdf(self, x):
        return logsumexp([
            self.p.logpmf(1) + self.d1.logpdf(x),
            self.p.logpmf(0) + self.d2.logpdf(x)
        ])


post = mvnorm(mu_true,
              K_true)  # Post(mu_true, K_true, 0.3, mu_true + offset, K_true)#
exp_true = mu_true

prop = mvt(mu_true, K_true, 2.0000001)

check_tails(post, exp_true, prop)

n_estimates = []
bu_estimates = []
bu_indiv_sets_estimates = []

for x in [prop.rvs(100) for _ in range(20)]:
    n_estimates.append(np.linalg.norm(est_plain(x, post, prop) - exp_true, 2))
    bu_estimates.append(np.linalg.norm(est_bu(x, post, prop) - exp_true, 2))
    bu_indiv_sets_estimates.append(
        np.linalg.norm(est_bu_indiv_sets(x, post, prop) - exp_true, 2,
                       1).mean(0))

n_estimates = np.array(n_estimates)
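The two-component logpdf above relies on logsumexp for numerical stability: log(w * p1(x) + (1 - w) * p2(x)) is assembled entirely in log space. A self-contained sketch with SciPy frozen distributions standing in for mvnorm:

import numpy as np
from scipy.special import logsumexp
from scipy.stats import multivariate_normal

def mixture_logpdf(x, w, d1, d2):
    # log( w * d1.pdf(x) + (1 - w) * d2.pdf(x) ), without leaving log space
    return logsumexp([np.log(w) + d1.logpdf(x), np.log(1 - w) + d2.logpdf(x)])

d1 = multivariate_normal(np.zeros(2), np.eye(2))
d2 = multivariate_normal(np.ones(2) * 3, np.eye(2))
mixture_logpdf(np.zeros(2), 0.3, d1, d2)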
Example #12
def llhood_comp_param(self, x, component_idx):
    candidate_dist = dist.mvt(*x)
    relevant_data = (component_idx == np.argmax(self.comp_indic, 1))
    return np.sum(candidate_dist.logpdf(self.data[relevant_data]))
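llhood_comp_param scores a candidate (mu, cov, df) triple only against the observations currently assigned to that component; the boolean mask comes from an argmax over the one-hot indicator matrix:

import numpy as np

comp_indic = np.array([[1, 0], [0, 1], [1, 0]])  # one-hot: obs 0 and 2 in component 0
mask = (0 == np.argmax(comp_indic, 1))
# mask == array([ True, False,  True]) -> the rows of self.data used for component 0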
Example #13
def update_comp_dists(self, comp_params):
    assert(len(comp_params) == self.comp_indic.shape[1])
    self.comp_param = comp_params
    self.comp_dist = [dist.mvt(*p) for p in self.comp_param]
Example #14
def set_params(self, params):
    self.params = params
    # softplus, log(exp(y) + 1), maps the unconstrained params to positive
    # scale and degrees-of-freedom values
    self.dens = dist.mvt(0, log(exp(params[0]) + 1),
                         log(exp(params[1]) + 1))
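A quick numeric check of the softplus reparameterization used here (the input values are purely illustrative):

import numpy as np

np.log(np.exp(-5.0) + 1)  # ~= 0.0067, small but strictly positive
np.log(np.exp(2.0) + 1)   # ~= 2.13, close to the identity for large inputs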
Example #15
    def gen_proposal(self, ancestor):
        if not isinstance(ancestor, pmc.PmcSample):
            ancestor = pmc.PmcSample(sample=ancestor)
        rval = []
        lprop_prob = 0
        proto = deepcopy(ancestor.sample)

        dirichl = dist.dirichlet(proto.dir_param + proto.cat_param.sum(0))
        proto.cat_param = dirichl.rvs()
        lprop_prob = lprop_prob + dirichl.logpdf(proto.cat_param)

        for o in range(ancestor.sample.num_obs):

            def lpost_func(comp_indic):
                self.lpc()
                return proto.lpost_comp_indic(comp_indic, observation_idx=o)

            cp = pmc.LatentClassProposal(
                lpost_func,
                ancestor.sample.comp_indic.shape[1]).gen_proposal()
            proto.comp_indic[o] = cp.sample
            lprop_prob = lprop_prob + cp.lprop

        for i in range(self.naive_multi_proposal):

            comp_param_lprops = []
            for comp_idx in range(len(proto.comp_param)):
                comp_param_lprops.append([])
                for k in range(self.num_local_proposals):
                    (anc_mu, anc_cov, anc_df) = proto.comp_param[comp_idx]
                    dim = anc_mu.size
                    prop_mu = pmc.NaiveRandomWalkProposal(
                        lambda x: None,
                        dist.mvt(np.zeros(dim),
                                 np.eye(dim) * 5, 20)).gen_proposal(
                                     mean=proto.comp_param[comp_idx][0])
                    prop_cov = pmc.InvWishartRandomWalkProposal(
                        anc_cov.shape[0] + 2, anc_cov.shape[0]).gen_proposal(
                            mean=proto.comp_param[comp_idx][1])
                    pdist_df = stats.gamma(proto.comp_param[comp_idx][2] + 1,
                                           scale=1)
                    prop_df = pdist_df.rvs()
                    lprop_df = pdist_df.logpdf(prop_df)
                    param_all = [prop_mu.sample, prop_cov.sample, prop_df]
                    self.llhc()
                    prop = {
                        "param": param_all,
                        "llhood": proto.llhood_comp_param(param_all, comp_idx),
                        "lprop": prop_mu.lprop + prop_cov.lprop + lprop_df
                    }
                    comp_param_lprops[-1].append(prop)

            for combination in itertools.product(*comp_param_lprops):
                rval.append(pmc.PmcSample(ancestor))
                rval[-1].sample = copy(proto)
                rval[-1].sample.update_comp_dists(
                    [c["param"] for c in combination])
                rval[-1].lprop = lprop_prob + np.sum(
                    [c["lprop"] for c in combination])
                rval[-1].lpost = rval[-1].sample.lprior() + np.sum(
                    [c["llhood"] for c in combination])
                rval[-1].lweight = rval[-1].lpost - rval[-1].lprop

    #            assert()
        if False:
            if self.naive_multi_proposal > 1:
                print("Multiple proposals:", len(rval))
            elif self.num_local_proposals > 1:
                print("Combined proposals:", len(rval))
        return rval
Example #16
bu_estimates = []
bu_indiv_sets_estimates = []
bu_rew_estimates = []

if True:
    M = 100
    K = 2
    log_evid = -1000
    (mu_true, K_true, offset) = (np.ones(K), np.eye(K)*2, 5)
    
    post_param = (mu_true, K_true)
    post = mvnorm(*post_param)
    post_lpdf = lambda x: post.logpdf(x) + log_evid

    prop_param = (mu_true+offset, K_true, 20)
    prop = mvt(*prop_param)
    prop_lpdf = lambda x: prop.logpdf(x)

    #check_tails(post, mu_true, prop)


    perm_x = []
    perm_weights = []
    for x in [prop.rvs(M) for _ in range(200)]:
        #plain_weights = log_imp_weight(x)
        perm_x.append(permutations(x))
        perm_weights.append([log_imp_weight(p, post_lpdf, prop_lpdf) for p in perm_x[-1]])
    with open("Gaussian_test_standard_imp_samp_200_M100_with_logevid_off_center_"+str(offset)+".pickle", "w") as f:
        obj = {"post":post_param, "prop":prop_param,
               "perm_x":perm_x,
               "log_importance_weights":perm_weights,
Example #17
def llhood_comp_param(self, x, component_idx):
    candidate_dist = dist.mvt(*x)
    relevant_data = (component_idx == np.argmax(self.comp_indic, 1))
    return np.sum(candidate_dist.logpdf(self.data[relevant_data]))
Example #18
def update_comp_dists(self, comp_params):
    assert(len(comp_params) == self.comp_indic.shape[1])
    self.comp_param = comp_params
    self.comp_dist = [dist.mvt(*p) for p in self.comp_param]
Example #19
def approximate_iris_mixture_data():
    from sklearn.datasets import load_iris
    num_imp_samp = 100
    num_loc_proposals = 3
    n_comp = 3
    p_comp = np.array([1 / n_comp] * n_comp)
    dim = 4
    iris = load_iris()
    obs = iris["data"]
    labels = iris["target"]
    means = np.array([obs[i * 50:(i + 1) * 50].mean(0) for i in range(3)])

    count = {
        "local_lpost": 0,
        "local_llhood": 0,
        "naive_lpost": 0,
        "naive_llhood": 0,
        "standard_lpost": 0,
        "standard_llhood": 0
    }

    def count_closure(name):
        def rval():
            count[name] = count[name] + 1

        return rval

    initial_samples = []
    for _ in range(10):
        initial_samples.append(
            DirCatTMM(obs, [1] * n_comp,
                      dist.mvt(obs.mean(0), np.diag(obs.var(0)), 20),
                      dist.invwishart(np.eye(dim), 50),
                      stats.gamma(500, scale=0.1)))

    (infl_samp, infl_lpost) = pmc.sample(
        num_imp_samp,
        initial_samples,
        DirCatTMMProposal(num_local_proposals=num_loc_proposals,
                          lpost_count=count_closure("local_lpost"),
                          llhood_count=count_closure("local_llhood")),
        population_size=4)

    (stand_samp, stand_lpost) = pmc.sample(
        num_imp_samp * num_loc_proposals,
        initial_samples,
        DirCatTMMProposal(lpost_count=count_closure("standard_lpost"),
                          llhood_count=count_closure("standard_llhood")),
        population_size=4)

    print("===============\n", p_comp, means, "\n\n--LOCAL--\n",
          infl_samp[-1].comp_indic.sum(0),
          stats.entropy(p_comp, infl_samp[-1].cat_param.flatten()),
          count["local_llhood"], count["local_lpost"], "\n\n--STANDARD--\n",
          stand_samp[-1].comp_indic.sum(0),
          stats.entropy(p_comp, stand_samp[-1].cat_param.flatten()),
          count["standard_llhood"], count["standard_lpost"], "\n\n")
    return {
        "infl": (infl_samp, infl_lpost),
        "standard": (stand_samp, stand_lpost)
    }