Example 1
def test_bnet_exact_sum_product():
    """EXAMPLE: Junction tree sum-product on BNET"""
    """Create all data required to instantiate the bnet object"""
    nodes = 4
    dag = np.zeros((nodes, nodes))
    C = 0
    S = 1
    R = 2
    W = 3
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    ns = 2 * np.ones(nodes, dtype='int')
    """Instantiate the CPD for each node in the network"""
    node_cpds = [[], [], [], []]
    CPT = np.array([0.5, 0.5])
    node_cpds[C] = cpds.TabularCPD(CPT)
    CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
    node_cpds[R] = cpds.TabularCPD(CPT)
    CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
    node_cpds[S] = cpds.TabularCPD(CPT)
    CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
    node_cpds[W] = cpds.TabularCPD(CPT)
    """Instantiate the object"""
    net = models.bnet(dag, ns, node_cpds=node_cpds)
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    results = []
    for evidence in [evidences[0]]:
        net.enter_evidence(evidence)
        net.sum_product()
        result = []
        result.append(np.max(net.marginal_nodes([0]).T))
        result.append(np.max(net.marginal_nodes([1]).T))
        result.append(np.max(net.marginal_nodes([2]).T))
        result.append(np.max(net.marginal_nodes([3]).T))
        results.append(result)

    results = np.array(results)
    """Get the expected results"""
    exp_results = np.array(
        pylab.loadtxt('./Data/bnet_exact_sum_product_res.txt'))
    """Assert that the output matched the expected values"""
    assert_array_equal(results, exp_results)
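The create_all_evidence(4, 2) helper used above is not shown on this page. A rough sketch of what it presumably does (assuming None marks an unobserved node): enumerate every evidence pattern over the given number of nodes and states.

import itertools

def create_all_evidence(num_nodes, node_size):
    """Hypothetical sketch: each node is either unobserved (None) or
    clamped to one of its node_size states."""
    states = [None] + list(range(node_size))
    return [list(combo) for combo in itertools.product(states, repeat=num_nodes)]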
Example 2
 def init_cpds(self):
     self.node_cpds = np.empty(self.node_cpds_N, dtype=object)
     for i in range(0, self.num_nodes):
         """Create a blank CPD for the node"""
         family = graph.family(self.adj_mat, i)
         if i in self.continuous_nodes:
             self.node_cpds[i] = cpds.GaussianCPD()
         else:
             fam = np.hstack([i, graph.parents(self.adj_mat, i)])
             fam_sizes = self.node_sizes[fam]
             self.node_cpds[i] = cpds.TabularCPD(np.ones(fam_sizes))
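For reference, the shape computation above can be reproduced with plain numpy instead of the library's graph helpers. A minimal sketch on a hypothetical 3-node DAG (0 -> 2 and 1 -> 2, all nodes binary): the blank table for a discrete node gets one axis per family member, sized by that member's number of states.

import numpy as np

adj_mat = np.array([[0, 0, 1],
                    [0, 0, 1],
                    [0, 0, 0]])          # hypothetical DAG: 0 -> 2, 1 -> 2
node_sizes = np.array([2, 2, 2])

i = 2
parents = np.nonzero(adj_mat[:, i])[0]   # parents of node 2 -> array([0, 1])
fam = np.hstack([i, parents])            # the node itself first, then its parents
fam_sizes = node_sizes[fam]              # [2, 2, 2]
blank_cpt = np.ones(fam_sizes)           # placeholder table of ones, shape (2, 2, 2)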
Example 3
def test_bnet_approx_max_sum():
    """EXAMPLE: Loopy belief max-sum on BNET"""
    """Create all data required to instantiate the bnet object"""
    nodes = 4
    dag = np.zeros((nodes, nodes))
    C = 0
    S = 1
    R = 2
    W = 3
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    ns = 2 * np.ones((1, nodes))
    """Instantiate the CPD for each node in the network"""
    node_cpds = [[], [], [], []]
    CPT = np.array([0.5, 0.5])
    node_cpds[C] = cpds.TabularCPD(CPT)
    CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
    node_cpds[R] = cpds.TabularCPD(CPT)
    CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
    node_cpds[S] = cpds.TabularCPD(CPT)
    CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
    node_cpds[W] = cpds.TabularCPD(CPT)
    """Instantiate the object"""
    net = models.bnet(dag, ns, node_cpds)
    net.init_inference_engine(exact=False)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([0, 0, 0, 0])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))
    """Read in expected values"""
    exp_mlcs = np.array(pylab.loadtxt('./Data/bnet_approx_max_sum_res.txt'))
    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
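For a network this small, the most likely configuration that max_sum is expected to find can also be obtained by brute force over the joint distribution. A minimal standalone check (plain numpy, not the library API), shown for the single evidence pattern "only W observed as 1"; the W table is assumed to be indexed as (S, R, W), which does not affect the numbers since it is symmetric in its first two axes.

import itertools
import numpy as np

prior_c = np.array([0.5, 0.5])                  # P(C)
cpt_r = np.array([[0.8, 0.2], [0.2, 0.8]])      # P(R | C)
cpt_s = np.array([[0.5, 0.5], [0.9, 0.1]])      # P(S | C)
cpt_w = np.array([[[1, 0], [0.1, 0.9]],
                  [[0.1, 0.9], [0.01, 0.99]]])  # P(W | S, R), axes assumed (S, R, W)

evidence = [None, None, None, 1]                # C, S, R unobserved; W = 1
best_p, best_cfg = -1.0, None
for c, s, r, w in itertools.product(range(2), repeat=4):
    if any(e is not None and e != v for e, v in zip(evidence, [c, s, r, w])):
        continue                                # skip configurations contradicting the evidence
    p = prior_c[c] * cpt_s[c, s] * cpt_r[c, r] * cpt_w[s, r, w]
    if p > best_p:
        best_p, best_cfg = p, [c, s, r, w]
print(best_cfg, best_p)                         # [1, 0, 1, 1] with joint probability 0.324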
Example 4
def test_bnet_sumproduct():
    """
    Testing: SUM-PRODUCT on BNET
    This example is based on the lawn sprinkler example, and the Bayesian
    network has the following structure, with all edges directed downwards:

                            Cloudy - 0
                             /  \
                            /    \
                           /      \
                   Sprinkler - 1  Rainy - 2
                           \      /
                            \    /
                             \  /
                            Wet Grass - 3
    """
    """Assign a unique numerical identifier to each node"""
    C = 0
    S = 1
    R = 2
    W = 3
    """Assign the number of nodes in the graph"""
    nodes = 4
    """
    The graph structure is represented as an adjacency matrix, dag.
    If dag[i, j] = 1, then there exists a directed edge from node
    i to node j.
    """
    dag = np.zeros((nodes, nodes))
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    """
    Define the size of each node, which is the number of different values the
    node can take. For example, if a node is either True or False, it has
    only 2 possible values, so its size is 2. All the nodes in this graph
    have a size of 2.
    """
    node_sizes = 2 * np.ones(nodes)
    """
    We now need to assign a conditional probability distribution to each
    node.
    """
    node_cpds = [[], [], [], []]
    """Define the CPD for node 0"""
    CPT = np.array([0.5, 0.5])
    node_cpds[C] = cpds.TabularCPD(CPT)
    """Define the CPD for node 1"""
    CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
    node_cpds[R] = cpds.TabularCPD(CPT)
    """Define the CPD for node 2"""
    CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
    node_cpds[S] = cpds.TabularCPD(CPT)
    """Define the CPD for node 3"""
    CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
    node_cpds[W] = cpds.TabularCPD(CPT)
    """Create the Bayesian network"""
    net = models.bnet(dag, node_sizes, node_cpds=node_cpds)
    """
    Initialize the BNET's inference engine to use EXACT inference
    by setting exact=True.
    """
    net.init_inference_engine(exact=True)
    """Create and enter evidence ([] means that node is unobserved)"""
    all_ev = sprinkler_evidence()
    all_prob = sprinkler_probs()

    count = 0
    errors = 0
    for evidence in all_ev:
        """Execute the max-sum algorithm"""
        net.sum_product(evidence)

        ans = [1, 1, 1, 1]
        marginal = net.marginal_nodes([C])

        if evidence[C] is None:
            ans[C] = marginal.T[1]

        marginal = net.marginal_nodes([S])
        if evidence[S] is None:
            ans[S] = marginal.T[1]

        marginal = net.marginal_nodes([R])
        if evidence[R] is None:
            ans[R] = marginal.T[1]

        marginal = net.marginal_nodes([W])
        if evidence[W] is None:
            ans[W] = marginal.T[1]

        errors = errors +  \
                 np.round(np.sum(np.array(ans) - np.array(all_prob[count])), 3)
        count = count + 1

    assert errors == 0
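The posteriors that the test compares against (sprinkler_probs() itself is not shown on this page) can be reproduced by brute-force enumeration of the joint distribution, which is cheap for four binary nodes. A minimal sketch in plain numpy for the classic query "the grass is wet; was it the sprinkler or the rain?", with the W table again assumed to be indexed as (S, R, W):

import numpy as np

prior_c = np.array([0.5, 0.5])                  # P(C)
cpt_s = np.array([[0.5, 0.5], [0.9, 0.1]])      # P(S | C)
cpt_r = np.array([[0.8, 0.2], [0.2, 0.8]])      # P(R | C)
cpt_w = np.array([[[1, 0], [0.1, 0.9]],
                  [[0.1, 0.9], [0.01, 0.99]]])  # P(W | S, R)

# Joint P(C, S, R, W) as a 2x2x2x2 array.
joint = np.einsum('c,cs,cr,srw->csrw', prior_c, cpt_s, cpt_r, cpt_w)

p_w1 = joint[:, :, :, 1].sum()                   # P(W=1)       ~= 0.6471
p_s1_given_w1 = joint[:, 1, :, 1].sum() / p_w1   # P(S=1 | W=1) ~= 0.4298
p_r1_given_w1 = joint[:, :, 1, 1].sum() / p_w1   # P(R=1 | W=1) ~= 0.7079
print(p_w1, p_s1_given_w1, p_r1_given_w1)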
Example 7
def test_ghmm():
    """
    Testing: GHMM
    """
    # Handles for readability.
    inf = np.inf

    # HMM parameters
    # prior
    pi = np.array([0.6, 0.4])

    # state transition matrix
    A = np.array([[0.7, 0.3], [0.2, 0.8]])

    # state emission probabilities
    B = np.array([
        Gauss(mean=np.array([1.0, 2.0]), cov=np.eye(2)),
        Gauss(mean=np.array([0.0, -1.0]), cov=np.eye(2))
    ])
    # DBN
    intra = np.array([[0, 1], [0, 0]])  # Intra-slice dependencies
    inter = np.array([[1, 0], [0, 0]])  # Inter-slice dependencies

    node_sizes = np.array([2, inf])

    discrete_nodes = [0]
    continuous_nodes = [1]

    node_cpds = [cpds.TabularCPD(pi), cpds.GaussianCPD(B), cpds.TabularCPD(A)]

    dbn = models.DBN(intra, inter, node_sizes, discrete_nodes,
                     continuous_nodes, node_cpds)

    inference_engine = inference.JTreeUnrolledDBNInferenceEngine()
    inference_engine.model = dbn

    inference_engine.initialize(T=5)
    dbn.inference_engine = inference_engine

    # INFERENCE
    evidence = [[None, [1.0, 2.0]], [None, [3.0, 4.0]], [None, [5.0, 6.0]],
                [None, [7.0, 8.0]], [None, [9.0, 10.0]]]
    dbn.enter_evidence(evidence)
    print "Likelihood of single sample: %f" % dbn.sum_product()

    # LEARNING
    samples = [[[None, [-0.9094, -3.3056]], [None, [2.7887, 2.3908]],
                [None, [1.0203, 1.5940]], [None, [-0.5349, 2.2214]],
                [None, [-0.3745, 1.1607]]],
               [[None, [0.7914, 2.7559]], [None, [0.3757, -2.3454]],
                [None, [2.4819, 2.0327]], [None, [2.8705, 0.7910]],
                [None, [0.2174, 1.2327]]]]

    print "\nEM parameter learning:"
    dbn.learn_params_EM(samples, max_iter=10)
    print "\nPrior (pi):"
    print dbn.node_cpds[0]
    print "\nTransition matrx (A):"
    print dbn.node_cpds[2]
    print "\nEmission probabilities (B):"
    print dbn.node_cpds[1]
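For context, the samples passed to learn_params_EM above contain only the continuous observations; the discrete state is hidden, which is why each time slice carries None in the state slot. A minimal sketch (plain numpy, not part of the library) of how such sequences could be generated from this two-state Gaussian HMM:

import numpy as np

rng = np.random.default_rng(0)

pi = np.array([0.6, 0.4])                 # initial state distribution
A = np.array([[0.7, 0.3], [0.2, 0.8]])    # transition matrix, A[i, j] = P(next=j | current=i)
means = [np.array([1.0, 2.0]), np.array([0.0, -1.0])]
covs = [np.eye(2), np.eye(2)]             # per-state emission parameters

def sample_sequence(T):
    """Draw one length-T sequence in the [state, observation] evidence
    layout used above, leaving the hidden state as None."""
    seq = []
    state = rng.choice(2, p=pi)
    for _ in range(T):
        obs = rng.multivariate_normal(means[state], covs[state])
        seq.append([None, list(obs)])
        state = rng.choice(2, p=A[state])
    return seq

samples = [sample_sequence(5) for _ in range(2)]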