Example #1
def run_Bernoulli_Normal():
    # Cluster 1
    Uh1 = pm.Uniform('UnifH1', lower=-50, upper=50)  # @UndefinedVariable
    Nc1 = pm.Normal('NormC1', mu=Uh1, tau=1)  #, observed=True, value=10  # @UndefinedVariable
    # Cluster 2
    Uh2 = pm.Uniform('UnifH2', lower=-50, upper=50)  # @UndefinedVariable
    Nc2 = pm.Normal('NormC2', mu=Uh2, tau=1)  #, observed=True, value=10  # @UndefinedVariable
    # Bernoulli nodes
    B1 = pm.Bernoulli('Bern1', 0.8)  # @UndefinedVariable
    B2 = pm.Bernoulli('Bern2', 0.8)  # @UndefinedVariable
    B3 = pm.Bernoulli('Bern3', 0.5)  # @UndefinedVariable
    # Points: each Bernoulli picks which cluster mean its point is drawn from
    p_N1 = pm.Lambda('p_Norm1', lambda k=B1, c1=Nc1, c2=Nc2: [c2, c1][int(k)])
    p_N2 = pm.Lambda('p_Norm2', lambda k=B2, c1=Nc1, c2=Nc2: [c1, c2][int(k)])
    p_N3 = pm.Lambda('p_Norm3', lambda k=B3, c1=Nc1, c2=Nc2: [c1, c2][int(k)])
    normalObs1 = pm.Normal('NormX1', mu=p_N1, tau=1, observed=True, value=-3)  # @UndefinedVariable
    normalObs2 = pm.Normal('NormX2', mu=p_N2, tau=1, observed=True, value=3)  # @UndefinedVariable
    normalObsZ = pm.Normal('NormZ', mu=p_N3, tau=1)  # @UndefinedVariable
    return [Nc1, Nc2, B1, B2, B3, Uh1, Uh2, normalObs1, normalObs2, normalObsZ]
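A minimal usage sketch (not part of the original example): the returned node list can be handed straight to PyMC2's pm.MCMC; the iteration counts below are arbitrary.

nodes = run_Bernoulli_Normal()
mcmc = pm.MCMC(nodes)
mcmc.sample(10000, burn=1000)
# posterior of the unobserved point and its cluster indicator
print mcmc.trace('NormZ')[:].mean(), mcmc.trace('Bern3')[:].mean()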
Example #2
def run_Bernoulli_Normal():
    # Cluster 1
    Uh1 = pm.Uniform('UnifH1', lower=-50, upper=50)  # @UndefinedVariable
    Nc1 = pm.Normal('NormC1', mu=Uh1, tau=1)  #, observed=True, value=10  # @UndefinedVariable
    # Cluster 2
    Uh2 = pm.Uniform('UnifH2', lower=-50, upper=50)  # @UndefinedVariable
    Nc2 = pm.Normal('NormC2', mu=Uh2, tau=1)  #, observed=True, value=10  # @UndefinedVariable
    # Bernoulli nodes
    B1 = pm.Bernoulli('Bern1', 0.8)  # @UndefinedVariable
    B2 = pm.Bernoulli('Bern2', 0.8)  # @UndefinedVariable
    B3 = pm.Bernoulli('Bern3', 0.5)  # @UndefinedVariable
    # Points
    p_N1 = pm.Lambda('p_Norm1', lambda k=B1, c1=Nc1, c2=Nc2: [c2, c1][int(k)])
    p_N2 = pm.Lambda('p_Norm2', lambda k=B2, c1=Nc1, c2=Nc2: [c1, c2][int(k)])
    p_N3 = pm.Lambda('p_Norm3', lambda k=B3, c1=Nc1, c2=Nc2: [c1, c2][int(k)])
    normalObs1 = pm.Normal('NormX1', mu=p_N1, tau=1, observed=True, value=-3)  # @UndefinedVariable
    normalObs2 = pm.Normal('NormX2', mu=p_N2, tau=1, observed=True, value=3)  # @UndefinedVariable
    normalObsZ = pm.Normal('NormZ', mu=p_N3, tau=1)  # @UndefinedVariable

#     @pm.stochastic(observed=True)
#     def normalObs1(name='NormX1', parentVar=B1, parentVals=[Nc1,Nc2], value=8):
#         return pm.normal_like(x=value, mu=parentVals[int(parentVar)], tau=1);
#     @pm.stochastic(observed=True)
#     def normalObs2(name='NormX2', parentVar=B2, parentVals=[Nc1,Nc2], value=2):
#         return pm.normal_like(x=value, mu=parentVals[int(parentVar)], tau=1);
#     @pm.stochastic(observed=False)
#     def normalObsZ(name='NormZ', parentVar=B3, parentVals=[Nc1,Nc2], value=1):
#         return pm.normal_like(x=value, mu=parentVals[int(parentVar)], tau=1);

#     normalObs1 = pm.Normal('NormX1', mu=[Nc1,Nc2][int(B1)], tau=1, observed=True, value=2);  # @UndefinedVariable
#     normalObs2 = pm.Normal('NormX2', mu=[Nc1,Nc2][int(B2)], tau=1, observed=True, value=8);  # @UndefinedVariable
#     normalObsZ = pm.Normal('NormZ', mu=[Nc1,Nc2][int(B3)], tau=1);  # @UndefinedVariable

    return [Nc1, Nc2, B1, B2, B3, Uh1, Uh2, normalObs1, normalObs2, normalObsZ]
Example #3
    def _create_bayesian_gbmodel(self, database, initial_parameters):
        """
        Generates the PyMC model to sample within one GB model (no Reversible Jump)

        Arguments
        ---------
        database : dict
            Database of FreeSolv solvation free energy data
        initial_parameters : dict
            Dict containing the starting set of parameters for the model

        Returns
        -------
        gbffmodel : dict
            A dict containing the nodes of a PyMC model to sample

        """
        gbffmodel = dict()
        log_sigma_min = math.log(0.01)  # kcal/mol
        log_sigma_max = math.log(10.0)  # kcal/mol
        log_sigma_guess = math.log(0.2)
        cid_list = database.keys()

        def RMSE(**args):
            nmolecules = len(cid_list)
            error = np.zeros([nmolecules], np.float64)
            for (molecule_index, cid) in enumerate(cid_list):
                entry = database[cid]
                error[molecule_index] = args['dg_gbsa'][
                    molecule_index] - float(entry['expt'])
            mse = np.mean((error - np.mean(error))**2)
            return np.sqrt(mse)

        gbffmodel['log_sigma'] = pymc.Uniform('log_sigma',
                                              lower=log_sigma_min,
                                              upper=log_sigma_max,
                                              value=log_sigma_guess)
        gbffmodel['sigma'] = pymc.Lambda(
            'sigma',
            lambda log_sigma=gbffmodel['log_sigma']: math.exp(log_sigma))
        gbffmodel['tau'] = pymc.Lambda(
            'tau', lambda sigma=gbffmodel['sigma']: sigma**(-2))

        gbffmodel.update(self.parameter_model)

        gbffmodel_with_mols = self._add_parallel_gbffmodel(database, gbffmodel)
        gbffmodel_with_mols['RMSE'] = pymc.Deterministic(
            eval=RMSE,
            name='RMSE',
            parents={'dg_gbsa': gbffmodel_with_mols['dg_gbsa']},
            doc='RMSE',
            dtype=float,
            trace=True,
            verbose=1)

        return gbffmodel_with_mols
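For reference, a self-contained sketch of the pymc.Deterministic pattern used for the RMSE node above (standard PyMC2 API; the toy x/half nodes are made up for illustration):

import pymc

x = pymc.Normal('x', mu=0.0, tau=1.0)

def eval_half(x):
    # called with the parent values bound to the keyword names in `parents`
    return x / 2.0

half = pymc.Deterministic(eval=eval_half, name='half', parents={'x': x},
                          doc='half of x', dtype=float, trace=True)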
Example #4
def mymodel():
    mu = pm.Normal('mu', 0, 1)
    N = [pm.Normal('N_%i' % i, mu, 1) for i in xrange(3)]
    z1 = pm.Lambda('z1', lambda n=N: np.sum(n))
    z2 = pm.Lambda('z2', lambda n=N: np.sum(n))

    @pm.potential
    def y(z1=z1, z2=z2, mu=mu):
        return 0

    return mu, N, z1, z2, y
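The zero-returning potential above adds nothing to the joint log-probability; it only demonstrates the wiring. As an illustrative variation (not from the original), a drop-in replacement for y that actually constrains the model, using pm.normal_like as a soft-equality penalty:

    @pm.potential
    def y(z1=z1, z2=z2):
        # log N(z1 - z2 | mu=0, tau=100): pulls the two sums together
        return pm.normal_like(z1 - z2, 0.0, 100.0)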
Example #5
 def __setup_obs(self):
     self.obs = pymc.Container(
         [pymc.Normal('obs_%s' % i,
                      mu=pymc.Lambda('omu_%s' % i,
                                     lambda cls=self.eqv[i]: self.mu[cls]),
                      tau=pymc.Lambda('otau_%s' % i,
                                      lambda cls=self.eqv[i]:
                                      1.0 / (self.sigma[cls]**2)),
                      value=self.logit(acc),
                      observed=True)
          for i, acc in enumerate(self.observations)])
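Note the keyword-default idiom: pm.Lambda treats the lambda's keyword defaults as the node's parents, so writing cls=self.eqv[i] both registers the dependency and captures the loop variable at definition time. A minimal sketch:

x = pymc.Normal('x', mu=0.0, tau=1.0)
double_x = pymc.Lambda('double_x', lambda v=x: 2.0 * v)  # parent: x; v is x's value at evaluation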
Example #6
    def create_nodes_for_normal_normal(self, add_shift, tau_0, mu_0, sigma_beta,
                                       sigma_y, true_mu, n_subjs, avg_samples):
        """ create the normal normal nodes"""

        mu = pm.Normal('mu',mu_0,tau_0)
        nodes = {'mu': mu}
        size = [None]*n_subjs
        x_values = [None]*n_subjs
        if add_shift:
            b = []
        else:
            b = None
        for i in range(n_subjs):
            size[i] = int(max(1, avg_samples + randn()*10))
            if add_shift:
                x_values[i] = randn()*sigma_beta
                value = randn(size[i]) * sigma_y + true_mu + x_values[i]
                x = pm.Lambda('x%d' % i, lambda x=x_values[i]:x)
                y = pm.Normal('y%d' % i,mu+x, sigma_y**-2, value=value,observed=True)
                nodes['x%d' % i] = x
                b.append(x)
            else:
                value = randn(size[i]) * sigma_y + true_mu
                y = pm.Normal('y%d' % i,mu, sigma_y**-2, value=value,observed=True)

            nodes['y%d' % i] = y

        return nodes, size, x_values
Example #7
def run_Categorical_Normal():
    nC = 3  # num. clusters
    aD = [0, 1, 8, 9, 20, 21]  # data points
    nPts = len(aD) + 1
    # Clusters
    aUh = [pm.Uniform('UnifH' + str(i), lower=-50, upper=50) for i in range(nC)]  # @UndefinedVariable
    aNc = [pm.Normal('NormC' + str(i), mu=aUh[i], tau=1) for i in range(nC)]  # @UndefinedVariable
    # Dirichlet & Categorical nodes
    Dir = pm.Dirichlet('Dirichlet', theta=[1] * nC)  # @UndefinedVariable
    aC = [pm.Categorical('Cat' + str(i), Dir) for i in range(nPts)]  # @UndefinedVariable
    aL = [pm.Lambda('p_Norm' + str(i), lambda k=aC[i], aNcl=aNc: aNcl[int(k)])
          for i in range(nPts)]  # @UndefinedVariable
    # Points
    aN = [pm.Normal('NormX' + str(i), mu=aL[i], tau=1, observed=True, value=aD[i])
          for i in range(nPts - 1)]  # @UndefinedVariable
    Nz = pm.Normal('NormZ', mu=aL[-1], tau=1)  # @UndefinedVariable
    return np.concatenate([[Nz, Dir], aUh, aNc, aC, aN])
Example #8
def model():
    # Priors
    sigma_y = pymc.Uniform('sigma_y', lower=0, upper=100)
    tau_y = pymc.Lambda('tau_y', lambda s=sigma_y: s**-2)

    xi = pymc.Uniform('xi', lower=0, upper=100, value=np.zeros(K))

    mu_raw = pymc.Normal('mu_raw', mu=0., tau=0.0001, value=np.zeros(K))
    Tau_B_raw = pymc.Wishart('Tau_B_raw', df, Tau=np.diag(np.ones(K)))
    B_raw = pymc.MvNormal('B_raw', mu_raw, Tau_B_raw, value=np.zeros((J, K)))

    # Model
    @pymc.deterministic(plot=True)
    def B(xi=xi, B_raw=B_raw):
        return xi * B_raw

    @pymc.deterministic
    def mu(xi=xi, mu_raw=mu_raw):
        return xi * mu_raw

    @pymc.deterministic(plot=False)
    def y_hat(B=B, X=X, i=index_c):
        return np.sum(B[i, ] * X, axis=1)

    # Likelihood
    @pymc.stochastic(observed=True)
    def y_i(value=y, mu=y_hat, tau=tau_y):
        return pymc.normal_like(value, mu, tau)

    return vars()
Example #9
def run_Bernoulli_Normal():
    B = pm.Bernoulli('Bern', 0.8)  # @UndefinedVariable
    p_N1 = pm.Lambda('p_Norm', lambda k=B: [-5, 5][int(k)])
    N = pm.Normal('Norm', mu=p_N1, tau=1)  # @UndefinedVariable
    return [B, N]
Example #10
def run_Categorical_Normal():
    C = pm.Categorical('Cat', [0.2, 0.4, 0.1, 0.3])  # @UndefinedVariable
    p_N = pm.Lambda('p_Norm', lambda node=C: [-5, 0, 5, 10][node])
    N = pm.Normal('Norm', mu=p_N, tau=1)  # @UndefinedVariable
    return [C, N]
Example #11
def test_age_pattern_model_sim():
    # simulate normal data
    a = np.arange(0, 100, 5)
    pi_true = .0001 * (a * (100. - a) + 100.)
    sigma_true = .025 * np.ones_like(pi_true)

    p = np.maximum(0., mc.rnormal(pi_true, 1. / sigma_true**2.))

    # create model and priors
    vars = {}

    vars.update(
        dismod_mr.model.spline.spline('test',
                                      ages=np.arange(101),
                                      knots=np.arange(0, 101, 5),
                                      smoothing=.1))

    vars['pi'] = mc.Lambda('pi', lambda mu=vars['mu_age'], a=a: mu[a])
    vars.update(
        dismod_mr.model.likelihood.normal('test', vars['pi'], 0., p,
                                          sigma_true))

    # fit model
    m = mc.MCMC(vars)
    m.sample(2)
Example #12
def getModel():
    nA, nK = 0.05, 4
    aDir = [nA / nK] * nK
    D = pm.Dirichlet('1-Dirichlet', theta=aDir)  # @UndefinedVariable
    C1 = pm.Categorical('2-Cat', D)  # @UndefinedVariable
#     C2 = pm.Categorical('10-Cat', D)  # @UndefinedVariable
#     C3 = pm.Categorical('11-Cat', D)  # @UndefinedVariable
#     C4 = pm.Categorical('14-Cat', D)  # @UndefinedVariable
#     C5 = pm.Categorical('15-Cat', D)  # @UndefinedVariable
#     G0_0 = pm.Gamma('4-Gamma0_1', alpha=1, beta=1.5)  # @UndefinedVariable
    N0_1 = pm.Normal('5-Norm0_1', mu=10, tau=1)  # @UndefinedVariable
    N0_2 = pm.Normal('6-Norm0_2', mu=-10, tau=1)  # @UndefinedVariable
    N0_3 = pm.Normal('7-Norm0_3', mu=30, tau=1)  # @UndefinedVariable
    N0_4 = pm.Normal('16-Norm0_3', mu=-30, tau=1)  # @UndefinedVariable
    # aMu snapshots the current values, so the Lambda below depends only on C1
    aMu = [N0_1.value, N0_2.value, N0_3.value, N0_4.value]
    p_N1 = pm.Lambda('p_Norm1', lambda n=C1: aMu[n], doc='Pr[Norm|Cat]')
#     p_N2 = pm.Lambda('p_Norm2', lambda n=C2: aMu[n], doc='Pr[Norm|Cat]')
#     p_N3 = pm.Lambda('p_Norm3', lambda n=C3: aMu[n], doc='Pr[Norm|Cat]')
#     p_N4 = pm.Lambda('p_Norm4', lambda n=C4: aMu[n], doc='Pr[Norm|Cat]')
#     p_N5 = pm.Lambda('p_Norm6', lambda n=C5: aMu[n], doc='Pr[Norm|Cat]')
    N = pm.Normal('3-Norm', mu=p_N1, tau=1)  # @UndefinedVariable
#     obsN1 = pm.Normal('8-Norm', mu=p_N2, tau=1, observed=True, value=40)  # @UndefinedVariable @UnusedVariable
#     obsN2 = pm.Normal('9-Norm', mu=p_N3, tau=1, observed=True, value=40)  # @UndefinedVariable @UnusedVariable
#     obsN3 = pm.Normal('12-Norm', mu=p_N4, tau=1, observed=True, value=-40)  # @UndefinedVariable @UnusedVariable
#     obsN4 = pm.Normal('13-Norm', mu=p_N5, tau=1, observed=True, value=-40)  # @UndefinedVariable @UnusedVariable
    return pm.Model([D, C1, N, N0_1, N0_2, N0_3, N0_4])
Example #13
 def __setup_predictive(self):
     for j in xrange(0, self.num_equiv):
         self.predictive.append(
             pymc.Lambda('predictive_%s' % j,
                         lambda p=pymc.Normal('log_pred_%s' % j,
                                              mu=self.mu[j],
                                              tau=1.0/self.sigma[j]**2):
                         self.inv_logit(p)))
Example #14
def performInference(graph, responses):
    # this is a really hacky solution for now, until I can spend more time figuring out how to do this more programmatically

    pStr = "p%s"

    def _build_probabilities(cid):
        #return memoized bernoulli variable
        if "_bp" in graph[cid]: return graph[cid]["_bp"]

        #hack since pymc seems to require ascii (and not unicode)
        cida = str(cid).encode('ascii')

        if graph[cid]["dependencies"]:
            # process dependencies first
            deps = map(_build_probabilities, graph[cid]["dependencies"])
            cp = calculateProbability(pStr % cida, deps)
            _bp = mc.Bernoulli(cida, cp, value=1)
        else:
            # roots get special treatment
            _bp = mc.Bernoulli(cida, .5, value=1)

        #memoize bernoulli variable
        graph[cid]["_bp"] = _bp
        return _bp

    concepts = map(_build_probabilities, graph)

    #variables = mc.Bernoulli('variables', .5, value=1)
    #concepts.append(variables);

    #pConditionals = calculateProbability('pConditionals', [variables])
    #conditionals = mc.Bernoulli('conditionals', pConditionals, value=1)
    #concepts.append(conditionals);

    otherQuestions = []
    for example in responses:
        # more pymc ascii hacks
        cida = str(example[0]).encode('ascii')

        tmp = graph[example[0]]["_bp"]
        prob = mc.Lambda(pStr % cida,
                         lambda tmp=tmp: pl.where(tmp, 1 - pS, pG))
        otherQuestions.append(
            mc.Bernoulli(cida, prob, value=example[1], observed=True))

    ##################some simple tests##########

    model = mc.Model(concepts + otherQuestions)

    samples = mc.MCMC(model)
    knownNodes = []
    samples.sample(1000)

    for concept in concepts:
        if concept.trace().mean() > 0.75:
            knownNodes.append(concept.__name__)
    return knownNodes
Example #15
def calculateProbability(name, dependencies, weights=0):
    # if no specific weights are given, weight all dependencies equally
    if weights == 0:
        weights = [1] * len(dependencies)
    assert len(weights) == len(dependencies)
    # combine the current values of the dependencies into a probability
    return mc.Lambda(name,
                     lambda dependencies=dependencies, weights=weights:
                     (pK - pM) * sum(pl.multiply(dependencies, weights))
                     / sum(weights) + pM)
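A hedged usage sketch (hypothetical node names; assumes pK and pM are defined at module level, since the lambda above reads them as globals):

pK, pM = .9, .1  # illustrative "mastered"/"guess"-style probabilities
parents = [mc.Bernoulli('dep_a', .5, value=1),
           mc.Bernoulli('dep_b', .5, value=1)]
p_child = calculateProbability('p_child', parents)
child = mc.Bernoulli('child', p_child, value=1)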
Example #16
 def _lambda_heats_model(self, q_name='q_n_model'):
     """Model the heat using expected_injection_heats, providing all input via a lambda function.

     q_name is the name for the model.
     """
     return pymc.Lambda(
         q_name,
         lambda P0=self.P0, Ls=self.Ls, DeltaG=self.DeltaG,
                DeltaH=self.DeltaH, DeltaH_0=self.DeltaH_0:
         self.expected_injection_heats(self.V0, self.DeltaVn, P0, Ls, DeltaG,
                                       DeltaH, DeltaH_0, self.beta, self.N))
Example #17
def main():
    x = pm.Normal("x", 4, 10)
    y = pm.Lambda("y", lambda x=x: 10 - x, trace=True)

    ex_mcmc = pm.MCMC(pm.Model([x, y]))
    ex_mcmc.sample(500)

    plt.plot(ex_mcmc.trace("x")[:])
    plt.plot(ex_mcmc.trace("y")[:])
    plt.title("Displaying (extreme) case of dependence between unknowns")
    plt.show()
Example #18
 def construct(self):
     # Rain/sprinkler/grass network: `first` ('F') plays the role of R (rain)
     # and `second` ('S') of S (sprinkler) in the conditionals below (the
     # original lambdas bound undefined names R and S).
     first = pymc.Bernoulli('F', .6, value=pl.ones(self.obs))
     p_first = pymc.Lambda('p_F',
                           lambda R=first: pl.where(R, .005, .8),
                           doc='Pr[S|R]')
     second = pymc.Bernoulli('S', p_first, value=pl.ones(self.obs))
     p_G = pymc.Lambda('p_G',
                       lambda S=second, R=first: pl.where(S, pl.where(R, .99, .9),
                                                          pl.where(R, .8, 0.)),
                       doc='Pr[G|S,R]')
     G = pymc.Bernoulli('G', p_G, value=G_obs, observed=True)
Example #19
def getModel():
    D = pm.Dirichlet('1-Dirichlet', theta=[3, 2, 4])  # @UndefinedVariable
    C1 = pm.Categorical('2-Cat', D)  # @UndefinedVariable
    C2 = pm.Categorical('10-Cat', D)  # @UndefinedVariable
    C3 = pm.Categorical('11-Cat', D)  # @UndefinedVariable
    W0_0 = pm.WishartCov('4-Wishart0_1', n=5, C=np.eye(2))  # @UndefinedVariable
    N0_1 = pm.MvNormalCov('5-Norm0_1', mu=[-20, -20], C=np.eye(2))  # @UndefinedVariable
    N0_2 = pm.MvNormalCov('6-Norm0_2', mu=[0, 0], C=np.eye(2))  # @UndefinedVariable
    N0_3 = pm.MvNormalCov('7-Norm0_3', mu=[20, 20], C=np.eye(2))  # @UndefinedVariable
    # aMu snapshots the current values, so each Lambda depends only on its Categorical
    aMu = [N0_1.value, N0_2.value, N0_3.value]
    fL1 = lambda n=C1: np.select([n == 0, n == 1, n == 2], aMu)
    fL2 = lambda n=C2: np.select([n == 0, n == 1, n == 2], aMu)
    fL3 = lambda n=C3: np.select([n == 0, n == 1, n == 2], aMu)
    p_N1 = pm.Lambda('p_Norm1', fL1, doc='Pr[Norm|Cat]')
    p_N2 = pm.Lambda('p_Norm2', fL2, doc='Pr[Norm|Cat]')
    p_N3 = pm.Lambda('p_Norm3', fL3, doc='Pr[Norm|Cat]')
    N = pm.MvNormalCov('3-Norm', mu=p_N1, C=W0_0)  # @UndefinedVariable
    obsN1 = pm.MvNormalCov('8-Norm', mu=p_N2, C=W0_0, observed=True, value=[-20, -20])  # @UndefinedVariable @UnusedVariable
    obsN2 = pm.MvNormalCov('9-Norm', mu=p_N3, C=W0_0, observed=True, value=[20, 20])  # @UndefinedVariable @UnusedVariable
    return pm.Model([D, C1, C2, C3, N, W0_0, N0_1, N0_2, N0_3, obsN1, obsN2])
Example #20
def run_Categorical_Normal():
    C = pm.Categorical('1-Cat', [0.2, 0.4, 0.1, 0.3])  # @UndefinedVariable
    p_N = pm.Lambda('p_Norm',
                    lambda node=C: np.select([node == 0, node == 1, node == 2, node == 3],
                                             [-5, 0, 5, 10]),
                    doc='Pr[Norm|Cat]')
    N = pm.Normal('2-Norm', mu=p_N, tau=1)  # @UndefinedVariable
    model = pm.Model([C, N])
    mcmc = pm.MCMC(model)
    mcmc.sample(5000, progress_bar=True)
    print "C:", C.stats()["mean"], C.value
    print "N:", N.stats()["mean"], N.value
    plot_Samples(mcmc, aBins=[2, 500])
Example #21
    def __init__(self, G=nx.balanced_tree(2, 3, create_using=nx.DiGraph())):
        self.G = G
        self.n = len(G)
        roots = [n for n, d in G.in_degree().items() if d == 0]
        # root random variables
        self.roots = [pm.Bernoulli(str(v), 0.5, value=0) for v in roots]

        self.node_top_sort = nx.topological_sort(G)
        node_variables = dict(zip(roots, self.roots))
        self.others = []

        for node in self.node_top_sort:
            if node not in roots:
                Pa_v = [
                    node_variables[key] for key in node_variables.keys()
                    if key in G.predecessors(node)
                ]
                weight = [
                    G[key][node]['weight'] for key in G.predecessors(node)
                ]
                bias = G.node[node]['bias']

                # energy =  w*Pa(v) + bias
                energy = pm.Lambda(
                    'energy_%d' % node,
                    lambda Pa_v=Pa_v, weight=weight, bias=bias:
                    pm.LinearCombination('weighted_sum', x=Pa_v, y=weight) + bias)

                # Pr(v=1) = sigmoid(energy)
                # use pymc.Lambda deterministic function
                sigmoid_cond_prob = pm.Lambda(
                    'sigmoid_%d_cond' % node,
                    lambda energy=energy: 1 / (1 + exp(-energy)))
                node_variables[node] = pm.Bernoulli(str(node),
                                                    sigmoid_cond_prob,
                                                    value=0)

                self.others.append(node_variables[node])

        self.node_variables = node_variables
        pm.MCMC.__init__(self, [self.roots, self.others])
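Since the constructor ends by initializing pm.MCMC over all nodes, an instance samples directly. A hedged usage sketch (SigmoidBeliefNet is a hypothetical name for the class above; note the default balanced tree carries no 'weight'/'bias' attributes, so a graph that has them must be supplied):

G = nx.DiGraph()
G.add_edge('a', 'b', weight=1.0)
G.add_edge('a', 'c', weight=-1.0)
G.node['b']['bias'] = 0.0  # networkx 1.x attribute style, matching the snippet
G.node['c']['bias'] = 0.5
net = SigmoidBeliefNet(G)
net.sample(5000, burn=500)
print net.trace('b')[:].mean()  # posterior Pr[b=1]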
Example #22
    def get_z_data(self, p, p_pos, q):
        K = 2  # Num topics
        M = p  # Num documents
        N = q  # Total num of unique words across all documents

        alpha = 1.0  # Concentration parameter for distribution over
        # distributions over words (one for each topic)
        beta = 1.0  # Concentration parameter for distribution over
        # distributions over topics (one for each
        # document)

        phi = pymc.Container([
            pymc.CompletedDirichlet(
                name="phi_" + str(k),
                D=pymc.Dirichlet(name="phi_temp_" + str(k),
                                 theta=beta * numpy.ones(N)),
            ) for k in range(K)
        ])

        theta = pymc.Container([
            pymc.CompletedDirichlet(
                name="theta_" + str(m),
                D=pymc.Dirichlet(name="theta_temp_" + str(m),
                                 theta=alpha * numpy.ones(K)),
            ) for m in range(M)
        ])

        z = pymc.Container([
            pymc.Categorical(name="z_" + str(m), p=theta[m], size=N)
            for m in range(M)
        ])

        w = pymc.Container([
            pymc.Categorical(
                name="w_" + str(m) + "_" + str(n),
                p=pymc.Lambda(
                    "phi_z_" + str(m) + str(n),
                    lambda z_in=z[m][n], phi_in=phi: phi_in[z_in],
                ),
            ) for m in range(M) for n in range(N)
        ])
        lda = pymc.Model([w, z, theta, phi])

        z_rvs = []
        for m in range(M):
            metadata = {"doc_idx": m, "num_unique_words": N}
            rv = WordCountVecRV(
                model=lda, name="w_0_0",
                metadata=metadata)  # Note: w_0_0 is just a dummy
            # argument that must be present in
            # the pymc.Model
            z_rvs += [rv]
        return z_rvs
Example #23
def run_HDP():
    nG, nA, nC = 2, 2, 2  # gamma, alpha & max no. clusters
    aDir = [nG / nC] * nC
    Dir0 = pm.Dirichlet('Dirichlet0', theta=aDir)  # @UndefinedVariable
    lDir0 = pm.Lambda('p_Dir0',
                      lambda d=Dir0: np.concatenate([d, [1 - sum(d)]]) * nA)  # @UndefinedVariable
    aNodes1 = get_DP('1', lDir0, [0, 1, 20, 21])
    aNodes2 = get_DP('2', lDir0, [50, 51, 70, 71, 72])
    return np.concatenate([[Dir0], aNodes1, aNodes2])
Example #24
def getModel():
    C = pm.Categorical('1-Cat', [0.2, 0.4, 0.1, 0.3])  # @UndefinedVariable
#     C = pm.Categorical('1-Cat', [0.2, 0.4, 0.1, 0.3], observed=True, value=3)  # @UndefinedVariable
    p_N = pm.Lambda('p_Norm',
                    lambda n=C: np.select([n == 0, n == 1, n == 2, n == 3],
                                          [-5, 0, 5, 10]),
                    doc='Pr[Norm|Cat]')
    N = pm.Normal('2-Norm', mu=p_N, tau=1)  # @UndefinedVariable
#     N = pm.Normal('2-Norm', mu=p_N, tau=1, observed=True, value=2.5)  # @UndefinedVariable
    return pm.Model([C, N])
Example #25
    def __init__(self, submod, V, eps_p_f, ti=None, tally=True, verbose=0):

        self.f_eval = submod.f_eval
        self.f = submod.f
        pm.StepMethod.__init__(self, [self.f, self.f_eval], tally=tally)

        self.children_no_data = copy.copy(self.children)
        if isinstance(eps_p_f, pm.Variable):
            self.children_no_data.remove(eps_p_f)
            self.eps_p_f = eps_p_f
        else:
            for epf in eps_p_f:
                self.children_no_data.remove(epf)
            self.eps_p_f = pm.Lambda('eps_p_f',
                                     lambda e=eps_p_f: np.hstack(e),
                                     trace=False)

        self.V = pm.Lambda('%s_vect' % V.__name__,
                           lambda V=V: np.resize(V, len(submod.mesh)))
        self.C_eval = submod.C_eval
        self.M_eval = submod.M_eval
        self.S_eval = submod.S_eval

        M_eval_shape = pm.utils.value(self.M_eval).shape
        C_eval_shape = pm.utils.value(self.C_eval).shape
        self.ti = ti or np.arange(M_eval_shape[0])

        # Work arrays
        self.scratch1 = np.asmatrix(np.empty(C_eval_shape, order='F'))
        self.scratch2 = np.asmatrix(np.empty(C_eval_shape, order='F'))
        self.scratch3 = np.empty(M_eval_shape)

        # Initialize hidden attributes
        self.accepted = 0.
        self.rejected = 0.
        self._state = ['rejected', 'accepted', 'proposal_distribution']
        self._tuning_info = []
        self.proposal_distribution = None

        self.verbose = verbose
Example #26
def getModel():
    C = pm.Categorical('1-Cat', [0.2, 0.4, 0.1, 0.3])  # @UndefinedVariable
#     C = pm.Categorical('1-Cat', [0.2, 0.4, 0.1, 0.3], observed=True, value=3)  # @UndefinedVariable
    p_N = pm.Lambda(
        'p_Norm',
        lambda n=C: np.select([n == 0, n == 1, n == 2, n == 3],
                              [[-5, -5], [0, 0], [5, 5], [10, 10]]),
        doc='Pr[Norm|Cat]')
    N = pm.MvNormal('2-Norm_2D', mu=p_N, tau=np.eye(2, 2))  # @UndefinedVariable
#     N = pm.MvNormal('2-Norm', mu=p_N, tau=np.eye(2, 2), observed=True, value=[2.5, 2.5])  # @UndefinedVariable
    return pm.Model([C, N])
Example #27
    def __init__(self, corpus, K=10, iterations=1000, burn=100):
        print("Building model ...")
        self.K = K
        self.V = corpus.wordCount + 1
        self.M = corpus.documentCount
        self.alpha = np.ones(self.K)
        self.beta = np.ones(self.V)
        self.corpus = corpus
        self.observations = np.array(corpus.observations)

        self.phi = np.empty(self.K, dtype=object)
        for i in range(self.K):
            self.phi[i] = pm.CompletedDirichlet(
                "Phi[%i]" % i, pm.Dirichlet("phi[%i]" % i, theta=self.beta))
        self.phi = pm.Container(self.phi)

        self.theta = np.empty(self.M, dtype=object)
        for i in range(self.M):
            self.theta[i] = pm.CompletedDirichlet(
                "Theta[%i]" % i, pm.Dirichlet("theta[%i]" % i,
                                              theta=self.alpha))
        self.theta = pm.Container(self.theta)

        self.z = np.empty(self.observations.shape, dtype=object)
        for i in range(self.M):
            self.z[i] = pm.Categorical("z[%i]" % i,
                                       size=len(self.observations[i]),
                                       p=self.theta[i],
                                       value=np.random.randint(
                                           self.K,
                                           size=len(self.observations[i])))
        self.z = pm.Container(self.z)

        self.w = []
        for i in range(self.M):
            self.w.append([])
            for j in range(len(self.observations[i])):
                self.w[i].append(
                    pm.Categorical(
                        "w[%i][%i]" % (i, j),
                        p=pm.Lambda(
                            "phi[z[%i][%i]]" % (i, j),
                            lambda z=self.z[i][j], phi=self.phi: phi[z]),
                        value=self.observations[i][j],
                        observed=True))
        self.w = pm.Container(self.w)

        self.mcmc = pm.MCMC(pm.Model([self.theta, self.phi, self.z, self.w]))

        print("Fitting model ...")
        self.mcmc.sample(iter=iterations, burn=burn)
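After fitting, posterior summaries come from the MCMC traces; a hedged sketch (lda stands for an instance of the class above):

topic_mix_doc0 = lda.mcmc.trace('Theta[0]')[:].mean(axis=0)  # mean topic mixture of document 0
word_dist_topic0 = lda.mcmc.trace('Phi[0]')[:].mean(axis=0)  # mean word distribution of topic 0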
Example #28
def getModel():
    D = pm.Dirichlet('1-Dirichlet', theta=[3, 2, 4])  # @UndefinedVariable
    C1 = pm.Categorical('2-Cat', D)  # @UndefinedVariable
    C2 = pm.Categorical('10-Cat', D)  # @UndefinedVariable
    C3 = pm.Categorical('11-Cat', D)  # @UndefinedVariable
    G0_0 = pm.Gamma('4-Gamma0_1', alpha=1, beta=1.5)  # @UndefinedVariable
    U1 = pm.Uniform('12-Unif', lower=-100, upper=500)  # @UndefinedVariable
    U2 = pm.Uniform('13-Unif', lower=-100, upper=500)  # @UndefinedVariable
    U3 = pm.Uniform('14-Unif', lower=-100, upper=500)  # @UndefinedVariable
    N0_1 = pm.Normal('5-Norm0_1', mu=U1, tau=1)  # @UndefinedVariable
    N0_2 = pm.Normal('6-Norm0_2', mu=U2, tau=1)  # @UndefinedVariable
    N0_3 = pm.Normal('7-Norm0_3', mu=U3, tau=1)  # @UndefinedVariable
    # aMu snapshots the current values, so each Lambda depends only on its Categorical
    aMu = [N0_1.value, N0_2.value, N0_3.value]
    fL1 = lambda n=C1: np.select([n == 0, n == 1, n == 2], aMu)
    fL2 = lambda n=C2: np.select([n == 0, n == 1, n == 2], aMu)
    fL3 = lambda n=C3: np.select([n == 0, n == 1, n == 2], aMu)
    p_N1 = pm.Lambda('p_Norm1', fL1, doc='Pr[Norm|Cat]')
    p_N2 = pm.Lambda('p_Norm2', fL2, doc='Pr[Norm|Cat]')
    p_N3 = pm.Lambda('p_Norm3', fL3, doc='Pr[Norm|Cat]')
    N = pm.Normal('3-Norm', mu=p_N1, tau=1)  # @UndefinedVariable
    obsN1 = pm.Normal('8-Norm', mu=p_N2, tau=1, observed=True, value=0)  # @UndefinedVariable @UnusedVariable
    obsN2 = pm.Normal('9-Norm', mu=p_N3, tau=1, observed=True, value=150)  # @UndefinedVariable @UnusedVariable
    return pm.Model([D, C1, C2, C3, N, G0_0, N0_1, N0_2, N0_3, obsN1, obsN2])
Example #29
def run_HDP():
    nC = 3  # max no. clusters
    Gam = pm.Uniform('Gamma0', lower=0, upper=15)  # @UndefinedVariable
    aDir = [Gam / nC] * nC
    Dir0 = pm.Dirichlet('Dirichlet0', theta=aDir)  # @UndefinedVariable
    lDir0 = pm.Lambda('p_Dir0',
                      lambda d=Dir0: np.concatenate([d, [1 - sum(d)]]))  # @UndefinedVariable
    aNodes1 = get_DP('1', lDir0, [0, 1, 20, 21])
    aNodes2 = get_DP('2', lDir0, [50, 51, 70, 71, 72])
    return np.concatenate([[Dir0], aNodes1, aNodes2])
Example #30
def run_Bernoulli():
    B = pm.Bernoulli('Bern', 0.4)  # @UndefinedVariable
    p_N = pm.Lambda('p_Norm',
                    lambda B=B: np.where(B, -5, 5),
                    doc='Pr[Norm|Bern]')
    N = pm.Normal('Norm', mu=p_N, tau=1)  # @UndefinedVariable
    model = pm.Model([B, N])
    mcmc = pm.MCMC(model)
    mcmc.sample(1000, progress_bar=True)
    print "B:", B.stats()["mean"], B.value
    print "N:", N.stats()["mean"], N.value
    plot_Samples(mcmc)