Example #1
import numpy as np
from pymc import Normal, Uniform, Beta, Gamma

def toy_model(tau=10000, prior='Beta0.5'):
    b_obs = 200
    f_AB = 400
    f_CB = 1000
    f_CA = 600

    A = np.array([0, f_AB, f_CA, 0, f_CB, 0])

    if prior == 'Normal':
        ABp = Normal('ABp', mu=0.5, tau=100, trace=True)
        CBp = Normal('CBp', mu=0.5, tau=100, trace=True)
        CAp = Normal('CAp', mu=0.5, tau=100, trace=True)
    elif prior == 'Uniform':
        ABp = Uniform('ABp', lower=0.0, upper=1.0, trace=True)
        CBp = Uniform('CBp', lower=0.0, upper=1.0, trace=True)
        CAp = Uniform('CAp', lower=0.0, upper=1.0, trace=True)
    elif prior == 'Beta0.25':
        ABp = Beta('ABp', alpha=0.25, beta=0.25, trace=True)
        CBp = Beta('CBp', alpha=0.25, beta=0.25, trace=True)
        CAp = Beta('CAp', alpha=0.25, beta=0.25, trace=True)
    elif prior == 'Beta0.5':
        ABp = Beta('ABp', alpha=0.5, beta=0.5, trace=True)
        CBp = Beta('CBp', alpha=0.5, beta=0.5, trace=True)
        CAp = Beta('CAp', alpha=0.5, beta=0.5, trace=True)
    elif prior == 'Beta2':
        ABp = Beta('ABp', alpha=2, beta=2, trace=True)
        CBp = Beta('CBp', alpha=2, beta=2, trace=True)
        CAp = Beta('CAp', alpha=2, beta=2, trace=True)
    elif prior == 'Gamma':
        ABp = Gamma('ABp', alpha=1, beta=0.5, trace=True)
        CBp = Gamma('CBp', alpha=1, beta=0.5, trace=True)
        CAp = Gamma('CAp', alpha=1, beta=0.5, trace=True)

    AB1 = ABp
    AB3 = 1 - ABp
    CB4 = CBp
    CB5 = 1 - CBp
    CA42 = CAp
    CA52 = 1 - CAp

    b = Normal('b',
               mu=f_AB * AB3 + f_CB * CB4 + f_CA * CA42,
               tau=tau,
               value=b_obs,
               observed=True,
               trace=True)

    # print [x.value for x in [ABp,CBp,CAp]]
    # print b.logp

    return locals()
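A minimal sketch of how this PyMC2-style model might be fit; the MCMC call and the iteration counts here are illustrative assumptions, not part of the original example:

import pymc

model = pymc.MCMC(toy_model(tau=10000, prior='Beta0.5'))
model.sample(iter=20000, burn=5000)   # illustrative sample/burn-in sizes
print(model.trace('ABp')[:].mean())   # posterior mean of the AB split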
Example #2
    def test_mixture_list_of_poissons(self):
        with Model() as model:
            w = Dirichlet("w",
                          floatX(np.ones_like(self.pois_w)),
                          shape=self.pois_w.shape)
            mu = Gamma("mu", 1.0, 1.0, shape=self.pois_w.size)
            Mixture(
                "x_obs",
                w,
                [Poisson.dist(mu[0]), Poisson.dist(mu[1])],
                observed=self.pois_x)
            step = Metropolis()
            trace = sample(5000,
                           step,
                           random_seed=self.random_seed,
                           progressbar=False,
                           chains=1)

        assert_allclose(np.sort(trace["w"].mean(axis=0)),
                        np.sort(self.pois_w),
                        rtol=0.1,
                        atol=0.1)
        assert_allclose(np.sort(trace["mu"].mean(axis=0)),
                        np.sort(self.pois_mu),
                        rtol=0.1,
                        atol=0.1)
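Note the np.sort on both sides of each assertion: mixture components are exchangeable, so the sampler can recover them in any order (label switching); sorting the posterior means compares the recovered components to the true ones regardless of their labels.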
Example #3
from itertools import chain

def generate_route_flows_from_incidence_matrix(M, alpha=1):
    """
    Generate blocks of route flows/splits (x) from an incidence matrix
    :param M: sparse incidence matrix (one row per block, one column per route)
    :param alpha: Gamma shape parameter shared by routes within a block
    :return: route splits (x) in the original column order
    """
    x_blocks = []
    order, block_sizes = [], []
    m, n = M.shape
    assert M.getnnz() == n  # every route (column) appears in exactly one block
    # Construct a Gamma distribution for each row in the incidence matrix
    for i in range(m):
        block_ind = M.getrow(i).nonzero()[1]
        order.extend(block_ind)
        size = len(block_ind)
        block_sizes.append(size)

        block = Gamma('x%d' % i, np.array([alpha] * size), 1, shape=size)
        x_blocks.append(block)

    # Normalize each Gamma block onto the simplex (a Dirichlet construction):
    # keep the first size-1 fractions and let the last one close the sum to 1.
    x_blocks_expanded = [[x[xi] / x.sum() for xi in range(size - 1)]
                         for (size, x) in zip(block_sizes, x_blocks)]
    for x in x_blocks_expanded:
        x.append(1 - sum(x))
    x_pri = list(chain(*x_blocks_expanded))
    # reorder back to the original route (column) order
    x_pri = list(zip(*sorted(zip(x_pri, order), key=lambda pair: pair[1])))[0]
    return x_pri
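A usage sketch under stated assumptions: M is a scipy.sparse matrix (the function relies on getnnz and getrow), each route belongs to exactly one block, and the Gamma call is PyMC3-style, so it must run inside a model context:

import numpy as np
from scipy.sparse import csr_matrix
from pymc3 import Model

# Hypothetical incidence matrix: 2 blocks, 5 routes, one block per route
M = csr_matrix(np.array([[1, 1, 1, 0, 0],
                         [0, 0, 0, 1, 1]]))
with Model():
    x_pri = generate_route_flows_from_incidence_matrix(M, alpha=1)
# Each block of normalized Gammas is a Dirichlet draw: splits sum to 1 per block.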
Example #4
    def test_normal_mixture(self):
        with Model() as model:
            w = Dirichlet("w", floatX(np.ones_like(self.norm_w)), shape=self.norm_w.size)
            mu = Normal("mu", 0.0, 10.0, shape=self.norm_w.size)
            tau = Gamma("tau", 1.0, 1.0, shape=self.norm_w.size)
            NormalMixture("x_obs", w, mu, tau=tau, observed=self.norm_x)
            step = Metropolis()
            trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)

        assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.norm_w), rtol=0.1, atol=0.1)
        assert_allclose(
            np.sort(trace["mu"].mean(axis=0)), np.sort(self.norm_mu), rtol=0.1, atol=0.1
        )
Example #5
def test_find_MAP_issue_4488():
    # Test for https://github.com/pymc-devs/pymc/issues/4488
    with Model() as m:
        x = Gamma("x", alpha=3, beta=10, observed=np.array([1, np.nan]))
        y = Deterministic("y", x + 1)
        map_estimate = find_MAP()

    assert not set.difference({"x_missing", "x_missing_log__", "y"},
                              set(map_estimate.keys()))
    np.testing.assert_allclose(map_estimate["x_missing"],
                               0.2,
                               rtol=1e-4,
                               atol=1e-4)
    np.testing.assert_allclose(map_estimate["y"],
                               [2.0, map_estimate["x_missing"][0] + 1])

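The 0.2 reference value follows from the prior: the missing entry contributes only its Gamma(alpha=3, beta=10) log-density, so find_MAP drives x_missing to the Gamma mode (alpha - 1) / beta = 2 / 10 = 0.2.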
Example #6
pathways, features, path_dict, reverse_path_dict, evidence, metfrag_evidence = data
for c, v in evidence.items():
    print(c, v)
for c, v in metfrag_evidence.items():
    print(c, v)
print("num_pathways:", len(pathways))
print("num_features:", len(features))
print("num_evidence:", len(evidence))
print("num_metfrag: ", len(metfrag_evidence))
rate_prior = 0.5

#eps = Beta('eps', 0.005, 1)
eps = 0.0001
ap =  {p : Gamma('p_' + p, rate_prior, 1) for p in pathways}
bmp = {
    p: {
        feat: Gamma('b_{' + p + ',' + feat + '}', ap[p], 1)
        for feat in path_dict[p]
    }
    for p in pathways
}
y_bmp = {}
g = {}

def logp_f(f, b, eps):
    if f in evidence:
        return math.log(1 - math.e ** (-1 * b) + eps)
    if f in metfrag_evidence:
        a_p = (1.0 / (1 - metfrag_evidence[f])) - 1
        return a_p * math.log(1 - math.e ** (-1 * b) + eps) - b
    return math.log(eps) - b
psi = {}
for feat, pathways in reverse_path_dict.items():
    y_bmp[feat] = sum([bmp[pname][feat] for pname in pathways])
    g[feat] = Bernoulli('g_' + feat, 1 - math.e ** (-y_bmp[feat]))
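The Bernoulli probability 1 - exp(-y) is the probability that a Poisson(y) count is at least one, so g models "the feature is generated by at least one pathway". A quick numeric check of that identity, with an arbitrary illustrative rate:

import numpy as np

y = 2.0
analytic = 1 - np.exp(-y)                                # P(N >= 1) for N ~ Poisson(y)
monte_carlo = (np.random.poisson(y, 200000) >= 1).mean()
print(analytic, monte_carlo)                             # should agree to ~2 decimals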
Example #7
def create_model():
    all_vars = []

    # b = 0.02
    # target_mean = 1/0.1**2
    # a = b*target_mean

    b = 0.001
    target_mean = 10/0.1**2
    a = b*target_mean
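    # Assumed rationale for the line above: Gamma(a, b) has mean a/b and
    # sd sqrt(a)/b, so a = b * target_mean pins the prior mean at target_mean.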

    # print a/b
    # print np.sqrt(a/(b**2))

    # tau_qd = Normal('tau_qd', 0.05,1/(0.15**2))
    # tau_qd = Exponential('tau_qd', 10)
    tau_qd = Gamma('tau_qd', a, b)
    plot_dist(tau_qd, transform=lambda x: 1/np.sqrt(x))
    print(np.mean([1 / np.sqrt(tau_qd.random()) for i in range(1000)]))
    print(np.std([1 / np.sqrt(tau_qd.random()) for i in range(1000)]))
    # pot = TruncPotential('tau_qd_potential', 0, 0.2, tau_qd)
    # all_vars.append(pot)
    # tau_qd.value = 1/0.05**2
    all_vars.append(tau_qd)

    # plot_dist(tau_qd, 0, 0.2)

    mu_sc = Normal('mu_sc', 0.5, 1/(0.25**2))
    mu_sc.value=0.5
    pot = TruncPotential('mu_sc_potential', 0, 1, mu_sc)
    all_vars.append(pot)

    # tau_sc = Normal('tau_sc', 0.2, 1/(0.25**2))
    # tau_sc = Exponential('tau_sc', 10)

    tau_sc = Gamma('tau_sc', a, b)
    # plot_dist(tau_sc)
    # tau_sc.value = 1/0.1**2
    # pot = TruncPotential('tau_sc_potential', 0, 0.3, tau_sc)
    # all_vars.append(pot)

    # tau_bias = Normal('tau_bias', 0.05, 1/(0.25**2))
    # tau_bias = Exponential('tau_bias', 10)
    tau_bias = Gamma('tau_bias', a, b)
    # pot = TruncPotential('tau_bias_potential', 0, 0.2, tau_bias)
    # all_vars.append(pot)
    # tau_bias.value = 1/0.01**2

    # tau_student_handin_capabilities = Normal('tau_student_handin_capabilities', 0.05, 1/(0.15**2))
    # tau_student_handin_capabilities = Exponential('tau_student_handin_capabilities', 10)
    tau_student_handin_capabilities = Gamma('tau_student_handin_capabilities', a, b)
    # tau_student_handin_capabilities.value = 1/0.05**2
    # pot = TruncPotential('tau_student_handin_capabilities_potential', 0, 0.3, tau_student_handin_capabilities)
    # all_vars.append(pot)


    # tau_student_question_capabilities = Normal('tau_student_question_capabilities', 0.05,1/(0.15**2))
    # tau_student_question_capabilities = Exponential('tau_student_question_capabilities', 10)
    tau_student_question_capabilities = Gamma('tau_student_question_capabilities', a, b)
    # plot_dist(tau_student_question_capabilities)
    # tau_student_question_capabilities.value = 1/0.05**2
    # pot = TruncPotential('tau_student_question_capabilities_potential', 0, 0.15, tau_student_question_capabilities)
    # all_vars.append(pot)

    # plot_dist(tau_student_question_capabilities, 0, 0.2)


    all_vars.append(mu_sc)
    all_vars.append(tau_sc)
    all_vars.append(tau_bias)
    all_vars.append(tau_student_handin_capabilities)
    all_vars.append(tau_student_question_capabilities)

    for i in range(num_assignments):
        questions = []
        for j in range(num_questions_pr_handin):
            # tau = pymc.Lambda('tau_%i_%i'% (i,j), lambda a=tau_qd: 1/ (tau_qd.value*tau_qd.value))
            tau = tau_qd
            difficulty = Normal('difficulty_q_%i_%i'% (i,j), 0, tau)
            q = Question(difficulty)
            questions.append(q)
            all_vars.append(difficulty)
        assignment = Assignment(questions)
        assignments.append(assignment)




    for i in range(num_students):
        # tau = pymc.Lambda('tau1_%i'%i, lambda a=tau_sc: 1/(tau_sc.value*tau_sc.value))
        tau = tau_sc
        student_capabilities = Normal('student_capabilities_s_%i'%i, mu_sc, tau)
        # pot = TruncPotential('student_capabilities_potential_s_%i'%i, 0, 1, student_capabilities)
        all_vars.append(student_capabilities)
        # all_vars.append(pot)

        # grading_bias = Normal('grading_bias_s_%i'%i, 0, 1/tau_bias)
        # tau = pymc.Lambda('tau2_%i'%i, lambda a=tau_bias: 1/ (tau_bias.value*tau_bias.value))
        tau = tau_bias
        grading_bias = Normal('grading_bias_s_%i'%i, 0, tau)
        all_vars.append(grading_bias)

        s = Student(student_capabilities, grading_bias)
        students.append(s)
        for j, assignment in enumerate(assignments):
            # student_handin_capabilities = Normal('student_handin_capabilities_sh_%i_%i' % (i,j), 0, 1/tau_student_handin_capabilities)
            tau = tau_student_handin_capabilities
            student_handin_capabilities = Normal('student_handin_capabilities_sh_%i_%i' % (i,j), 0, tau)
            all_vars.append(student_handin_capabilities)
            question_capabilities = []
            for k, q in enumerate(assignment.questions):
                tau = tau_student_question_capabilities
                student_question_capabilities = Normal('student_question_capabilities_shq_%i_%i_%i' % (i,j,k ), 0, tau)
                all_vars.append(student_question_capabilities)
                question_capabilities.append(student_question_capabilities)
            handins.append(Handin(s, assignment, student_handin_capabilities, question_capabilities))


    # assign grader
    all_grades = []
    for handin in handins:
        potential_graders = list(range(len(students)))
        potential_graders.remove(students.index(handin.student))
        idx = np.random.randint(0, len(potential_graders), num_graders_pr_handin)
        graders = [students[i] for i in idx]
        grades = handin.grade(graders)
        all_grades.append(grades)

    grade_list = sum(sum(all_grades, []),[])


    b = 1.0
    target_mean = 1/np.sqrt(0.01)
    a = b*target_mean
    tau_exo_grade = Gamma('tau_exo_grade', a, b)
    # plt.hist([1/tau_exo_grade.random()**2 for i in xrange(50000)])
    # tau_exo_grade = Exponential('mu_exo_grade', 20)
    # tau_exo_grade = Normal('mu_exo_grade', 0.05, 1/(0.1**2))
    # pot = TruncPotential('tau_exo_grade', 0, 0.2, tau_exo_grade)
    # all_vars.append(pot)
    tau = tau_exo_grade
    all_vars.append(tau_exo_grade)

    print "Creating grade list"

    # grade_list_real = [g.value for g in grade_list]
    # print 1
    # grade_list_real = [min(max((g), 0), 1) for g in grade_list_real]
    # print 2
    # grade_list = [Normal('grade_%i'%i, g, tau, value=g_real, observed=True)  for i, (g_real, g) in enumerate(zip(grade_list_real, grade_list))]
    # print 3
    # grade_list_potentials = [TruncPotential('grade_potential_%i'%i, 0, 1, g) for i,g in enumerate(grade_list)]
    # print 4

    # take one MCMC step in order to make it more probable that all variables are in the allowed range
    # var_dict = {str(v):v for v in all_vars}
    # sampler = pymc.MCMC(var_dict)
    # sampler.sample(iter=1)

    grade_list_real = [g.value for g in grade_list]
    # plt.hist(grade_list_real)
    # plt.show()

    print "Number of illegal grades: %i (out of %i)" % (len([g for g in grade_list_real if g > 1 or g < 0]), len(grade_list))
    grade_list_real = [min(max((g), 0), 1) for g in grade_list_real]
    # grade_list = Normal('grade_%i'%i, np.array(grade_list), np.array([tau]*len(grade_list)), value=grade_list_real, observed=True)
    # grade_list = [Normal('grade_%i'%i, g, tau, value=g_real, observed=True)  for i, (g_real, g) in enumerate(zip(grade_list_real, grade_list))]
    grade_list_new = []
    for i, (g_real, g) in enumerate(zip(grade_list_real, grade_list)):
        grade_list_new.append(Normal('grade_%i'%i, g, tau, value=g_real, observed=True))
        if i % 100 == 0:
            print(i)
    # grade_list_potentials = [TruncPotential('grade_potential_%i'%i, 0, 1, g) for i,g in enumerate(grade_list)]
    grade_list = grade_list_new
    print "Grade list created"

    all_vars += grade_list
    # all_vars += grade_list_potentials

    all_vars = list(set(all_vars))
    print(len(all_vars))
    # print [str(v) for v in all_vars]
    return locals(), grade_list_real, all_vars
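A minimal sketch of sampling the returned variables, assuming PyMC2 and that the module-level names used inside create_model (num_assignments, students, handins, ...) are defined:

import pymc

model_locals, grade_list_real, all_vars = create_model()
sampler = pymc.MCMC(all_vars)                 # MCMC accepts an iterable of nodes
sampler.sample(iter=10000, burn=2000)         # illustrative iteration counts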
Example #8
features = read.get_metabolites(path_dict)
evidence = read.metlin(observation_file)
evidence |= read.hmdb(observation_file)
evidence -= cofactors
features -= cofactors
evidence &= features
reverse_path_dict = read.reverse_dict(path_dict)
metfrag = read.metfrag(observation_file)
metfrag_evidence = read.dict_of_set(
    read.metfrag_with_scores(observation_file, keep_zero_scores=False),
    metfrag & features - cofactors - evidence)
evidence = {e: 1 for e in evidence}

rate_prior = 0.5

ap = {p: Gamma('p_' + p, rate_prior, 1) for p in pathways}
bmp = {
    p: {
        feat: Gamma('b_{' + p + ',' + feat + '}', ap[p], 1)
        for feat in path_dict[p]
    }
    for p in pathways
}
y_bmp = {}
virtual = {}

se_count = 0
for feat, pathways in reverse_path_dict.items():
    #g_bmp[feat] = Poisson('g_' + feat, sum([bmp[pname][feat] for pname in pathways]))
    y_bmp[feat] = Bernoulli(
        'y_' + feat,
Example #9
    def test_normal_mixture_nd(self, nd, ncomp):
        nd = to_tuple(nd)
        ncomp = int(ncomp)
        comp_shape = nd + (ncomp,)
        test_mus = np.random.randn(*comp_shape)
        test_taus = np.random.gamma(1, 1, size=comp_shape)
        observed = generate_normal_mixture_data(
            w=np.ones(ncomp) / ncomp, mu=test_mus, sd=1 / np.sqrt(test_taus), size=10
        )

        with Model() as model0:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            mixture0 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape)
            obs0 = NormalMixture(
                "obs", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape, observed=observed
            )

        with Model() as model1:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            comp_dist = [
                Normal.dist(mu=mus[..., i], tau=taus[..., i], shape=nd) for i in range(ncomp)
            ]
            mixture1 = Mixture("m", w=ws, comp_dists=comp_dist, shape=nd)
            obs1 = Mixture("obs", w=ws, comp_dists=comp_dist, shape=nd, observed=observed)

        with Model() as model2:
            # Expected to fail if comp_shape is not provided,
            # nd is multidim and it does not broadcast with ncomp. If by chance
            # it does broadcast, an error is raised if the mixture is given
            # observed data.
            # Furthermore, the Mixture will also raise errors when the observed
            # data is multidimensional but it does not broadcast well with
            # comp_dists.
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            if len(nd) > 1:
                if nd[-1] != ncomp:
                    with pytest.raises(ValueError):
                        NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
                    mixture2 = None
                else:
                    mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
            else:
                mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
            observed_fails = False
            if len(nd) >= 1 and nd != (1,):
                try:
                    np.broadcast(np.empty(comp_shape), observed)
                except Exception:
                    observed_fails = True
            if observed_fails:
                with pytest.raises(ValueError):
                    NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)
                obs2 = None
            else:
                obs2 = NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)

        testpoint = model0.recompute_initial_point()
        testpoint["mus"] = test_mus
        testpoint["taus"] = test_taus
        assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
        assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
        assert_allclose(obs0.logp(testpoint), obs1.logp(testpoint))
        if mixture2 is not None and obs2 is not None:
            assert_allclose(model0.logp(testpoint), model2.logp(testpoint))
        if mixture2 is not None:
            assert_allclose(mixture0.logp(testpoint), mixture2.logp(testpoint))
        if obs2 is not None:
            assert_allclose(obs0.logp(testpoint), obs2.logp(testpoint))
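All three models parameterize the same mixture: model0 uses NormalMixture with an explicit comp_shape, model1 builds the equivalent list of per-component Normal.dist factors, and model2 probes the shape-inference failure modes; evaluating every logp at one shared test point is what makes the comparisons meaningful.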
Example #10
from pymc import Gamma, Poisson, InverseGamma
import pymc
import numpy as np
import gen

pathways = gen.pathways()
features = gen.features_dict()
detected = gen.detected_features()
evidence = gen.evidence()
ap = {p.name: Gamma('p_' + p.name, p.rate, 1) for p in pathways}
#bmp = [Gamma('b_{' + str(i) + '}', 1, ap[i]) for i in range(3)]
bmp = {
    p.name: {
        feat: Gamma('b_{' + p.name + ',' + str(feat) + '}', ap[p.name], 1)
        for feat in p.mets
    }
    for p in pathways
}
print(bmp)
#g_bmp = {feat : Poisson('g_' + str(feat), sum([bmp[pname][feat] for pname in pathways])) for feat, pathways in features.iteritems()}
g_bmp = {}
for feat, pathways in features.items():
    if detected(feat):
        print(feat, "was detected")
        g_bmp[feat] = Poisson('g_' + str(feat),
                              sum([bmp[pname][feat] for pname in pathways]),
                              value=1,
                              observed=True)
    else:
        print(feat, "was not detected")
        # latent count with the same pathway-sum rate (per the commented one-liner above)
        g_bmp[feat] = Poisson('g_' + str(feat),
                              sum([bmp[pname][feat] for pname in pathways]))
Example #11
def create_model():
    all_vars = []

    # b = 20
    # target_mean = 0.1
    b = 5
    target_mean = 0.2
    a = b*target_mean
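    # Same moment-matching idea as in the earlier example (assumed rationale):
    # Gamma(a, b) has mean a/b, so a = b * target_mean targets a mean of 0.2.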

    # sigma_qd = Normal('sigma_qd', 0.05,1/(0.15**2))
    # sigma_qd = Exponential('sigma_qd', 10)
    sigma_qd = Gamma('sigma_qd', a, b)
    # pot = TruncPotential('sigma_qd_potential', 0, 0.2, sigma_qd)
    # all_vars.append(pot)
    # sigma_qd.value = 0.05
    all_vars.append(sigma_qd)

    # plot_dist(sigma_qd, 0, 0.2)

    # mu_sc = Normal('mu_sc', 0.5, 1/(0.25**2))
    mu_sc = TruncNormal('mu_sc', 0, 1, 0.5, 1/(0.2**2))
    mu_sc.value=0.5
    # pot = TruncPotential('mu_sc_potential', 0, 1, mu_sc)
    # all_vars.append(pot)

    # sigma_sc = Normal('sigma_sc', 0.2, 1/(0.25**2))
    # sigma_sc = Exponential('sigma_sc', 10)
    sigma_sc = Gamma('sigma_sc', a, b)
    # plot_dist(sigma_sc)
    sigma_sc.value = 0.1
    # pot = TruncPotential('sigma_sc_potential', 0, 0.3, sigma_sc)
    # all_vars.append(pot)

    # sigma_bias = Normal('sigma_bias', 0.05, 1/(0.25**2))
    # sigma_bias = Exponential('sigma_bias', 10)
    sigma_bias = Gamma('sigma_bias', a, b)
    # pot = TruncPotential('sigma_bias_potential', 0, 0.2, sigma_bias)
    # all_vars.append(pot)
    sigma_bias.value = 0.1


    b_c = 5
    target_mean_c = 0.2
    a_c = b_c*target_mean_c
    # sigma_student_handin_capabilities = Normal('sigma_student_handin_capabilities', 0.05, 1/(0.15**2))
    # sigma_student_handin_capabilities = Exponential('sigma_student_handin_capabilities', 10)
    sigma_student_handin_capabilities = Gamma('sigma_student_handin_capabilities', a, b)
    sigma_student_handin_capabilities.value = 0.05
    # pot = TruncPotential('sigma_student_handin_capabilities_potential', 0, 0.3, sigma_student_handin_capabilities)
    # all_vars.append(pot)


    # sigma_student_question_capabilities = Normal('sigma_student_question_capabilities', 0.05,1/(0.15**2))
    # sigma_student_question_capabilities = Exponential('sigma_student_question_capabilities', 10)
    sigma_student_question_capabilities = Gamma('sigma_student_question_capabilities', a, b)
    # plot_dist(sigma_student_question_capabilities)
    sigma_student_question_capabilities.value = 0.05
    # pot = TruncPotential('sigma_student_question_capabilities_potential', 0, 0.15, sigma_student_question_capabilities)
    # all_vars.append(pot)

    # plot_dist(sigma_student_question_capabilities, 0, 0.2)


    all_vars.append(mu_sc)
    all_vars.append(sigma_sc)
    all_vars.append(sigma_bias)
    all_vars.append(sigma_student_handin_capabilities)
    all_vars.append(sigma_student_question_capabilities)

    for i in range(num_assignments):
        questions = []
        for j in range(num_questions_pr_handin):
            # Use the bound parent `a` inside the Lambda so pymc tracks the dependency
            tau = pymc.Lambda('tau_%i_%i' % (i, j), lambda a=sigma_qd: 1 / (a * a))
            difficulty = Normal('difficulty_q_%i_%i'% (i,j), 0, tau)
            q = Question(difficulty)
            questions.append(q)
            all_vars.append(difficulty)
        assignment = Assignment(questions)
        assignments.append(assignment)




    for i in range(num_students):
        tau = pymc.Lambda('tau1_%i' % i, lambda a=sigma_sc: 1 / (a * a))
        # student_capabilities = Normal('student_capabilities_s_%i'%i, mu_sc, tau)
        student_capabilities = TruncNormal('student_capabilities_s_%i'%i, 0, 1, mu_sc, tau)
        all_vars.append(student_capabilities)

        # pot = TruncPotential('student_capabilities_potential_s_%i'%i, 0, 1, student_capabilities)
        # all_vars.append(pot)

        # grading_bias = Normal('grading_bias_s_%i'%i, 0, 1/sigma_bias)
        tau = pymc.Lambda('tau2_%i' % i, lambda a=sigma_bias: 1 / (a * a))
        grading_bias = Normal('grading_bias_s_%i'%i, 0, tau)
        all_vars.append(grading_bias)

        s = Student(student_capabilities, grading_bias)
        students.append(s)
        for j, assignment in enumerate(assignments):
            # student_handin_capabilities = Normal('student_handin_capabilities_sh_%i_%i' % (i,j), 0, 1/sigma_student_handin_capabilities)
            tau = pymc.Lambda('tau2_%i_i%i' % (i, j), lambda a=sigma_student_handin_capabilities: 1 / (a * a))
            student_handin_capabilities = Normal('student_handin_capabilities_sh_%i_%i' % (i,j), 0, tau)
            all_vars.append(student_handin_capabilities)
            question_capabilities = []
            for k, q in enumerate(assignment.questions):
                tau = pymc.Lambda('tau2_%i_i%i_%i' % (i, j, k), lambda a=sigma_student_question_capabilities: 1 / (a * a))
                student_question_capabilities = Normal('student_question_capabilities_shq_%i_%i_%i' % (i,j,k ), 0, tau)
                all_vars.append(student_question_capabilities)
                question_capabilities.append(student_question_capabilities)
            handins.append(Handin(s, assignment, student_handin_capabilities, question_capabilities))


    # assign grader
    all_grades = []
    for handin in handins:
        potential_graders = list(range(len(students)))
        potential_graders.remove(students.index(handin.student))
        idx = np.random.randint(0, len(potential_graders), num_graders_pr_handin)
        graders = [students[i] for i in idx]
        grades = handin.grade(graders)
        all_grades.append(grades)

    grade_list = sum(sum(all_grades, []),[])


    b = 50
    target_mean = 0.05
    a = b*target_mean
    sigma_exo_grade = Gamma('sigma_exo_grade', a, b)
    # sigma_exo_grade = Exponential('mu_exo_grade', 20)
    # sigma_exo_grade = Normal('mu_exo_grade', 0.05, 1/(0.1**2))
    # pot = TruncPotential('sigma_exo_grade', 0, 0.2, sigma_exo_grade)
    # all_vars.append(pot)
    tau = pymc.Lambda('tau3', lambda a=sigma_exo_grade: 1 / (a * a))
    all_vars.append(sigma_exo_grade)

    print "Creating grade list"

    # grade_list_real = [g.value for g in grade_list]
    # print 1
    # grade_list_real = [min(max((g), 0), 1) for g in grade_list_real]
    # print 2
    # grade_list = [Normal('grade_%i'%i, g, tau, value=g_real, observed=True)  for i, (g_real, g) in enumerate(zip(grade_list_real, grade_list))]
    # print 3
    # grade_list_potentials = [TruncPotential('grade_potential_%i'%i, 0, 1, g) for i,g in enumerate(grade_list)]
    # print 4

    # take one MCMC step in order to make it more probable that all variables are in the allowed range
    # var_dict = {str(v):v for v in all_vars}
    # sampler = pymc.MCMC(var_dict)
    # sampler.sample(iter=1)

    grade_list_real = [g.value for g in grade_list]
    # plt.hist(grade_list_real)
    # plt.show()

    print "Number of illegal grades: %i (out of %i)" % (len([g for g in grade_list_real if g > 1 or g < 0]), len(grade_list))
    grade_list_real = [min(max((g), 0), 1) for g in grade_list_real]
    grade_list = [Normal('grade_%i'%i, g, tau, value=g_real, observed=True)  for i, (g_real, g) in enumerate(zip(grade_list_real, grade_list))]
    # grade_list = [Normal('grade_%i'%i, g, 1/(0.02^2), value=g_real, observed=True)  for i, (g_real, g) in enumerate(zip(grade_list_real, grade_list))]
    grade_list_potentials = [TruncPotential('grade_potential_%i'%i, 0, 1, g) for i,g in enumerate(grade_list)]

    print "Grade list created"

    all_vars += grade_list
    all_vars += grade_list_potentials

    all_vars = list(set(all_vars))
    print(len(all_vars))
    # print [str(v) for v in all_vars]
    return locals(), grade_list_real, all_vars
Example #12
from pymc import Normal, Uniform, Beta, Gamma

prior = 'Gamma'

if prior == 'Normal':
    ABp = Normal('ABp', mu=0.5, tau=100)
    CBp = Normal('CBp', mu=0.5, tau=100)
    CAp = Normal('CAp', mu=0.5, tau=100)
elif prior == 'Uniform':
    ABp = Uniform('ABp', lower=0.0, upper=1.0)
    CBp = Uniform('CBp', lower=0.0, upper=1.0)
    CAp = Uniform('CAp', lower=0.0, upper=1.0)
elif prior == 'Beta':
    ABp = Beta('ABp', alpha=0.5, beta=0.5)
    CBp = Beta('CBp', alpha=0.5, beta=0.5)
    CAp = Beta('CAp', alpha=0.5, beta=0.5)
elif prior == 'Gamma':
    ABp = Gamma('ABp', alpha=1, beta=0.5)
    CBp = Gamma('CBp', alpha=1, beta=0.5)
    CAp = Gamma('CAp', alpha=1, beta=0.5)

AB1 = ABp
AB3 = 1 - ABp
CB4 = CBp
CB5 = 1 - CBp
CA42 = CAp
CA52 = 1 - CAp

b = Normal('b',
           mu=400 * AB3 + 1000 * CB4 + 600 * CA42,
           tau=10000,
           value=200,
           observed=True)
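As with Example #1, this PyMC2-style snippet could be sampled with a sketch like the following (an assumption, not part of the original excerpt):

import pymc

sampler = pymc.MCMC([ABp, CBp, CAp, b])
sampler.sample(iter=20000, burn=5000)   # illustrative iteration counts
print(sampler.trace('ABp')[:].mean())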