Example #1
def create_model():
    # Init graph.
    G = FactorGraph()

    # Add variable nodes to the model (v is the list of variable names,
    # assumed to be defined in the enclosing scope).
    G.add_nodes_from(v)

    # Add factor nodes to the model (f is the list of factor objects,
    # also assumed to be defined in the enclosing scope).
    G.add_nodes_from(f)

    # Add edges to the model.
    edges = [('X1', f1), ('X2', f2),
             ('X3', f31), ('X1', f31),
             ('X5', f52), ('X2', f52),
             ('X4', f42), ('X2', f42),
             ('X6', f68), ('X8', f68),
             ('X8', f843), ('X4', f843), ('X3', f843),
             ('X7', f758), ('X5', f758), ('X8', f758)]
    G.add_edges_from(edges)

    # Finally add all factors.
    G.add_factors(f1, f2, f31, f52, f42, f68, f843, f758)

    assert G.check_model()
    return G
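For reference, Example #1 assumes that v, f, and the individual factors are defined elsewhere in the module. A minimal sketch of what those definitions could look like; the scopes are inferred from the edge list above, and the binary cardinalities and random values are assumptions:

import numpy as np
from pgmpy.factors.discrete import DiscreteFactor

# Hypothetical definitions; scopes follow the edge list, all variables assumed binary.
f1   = DiscreteFactor(['X1'], [2], np.random.rand(2))
f2   = DiscreteFactor(['X2'], [2], np.random.rand(2))
f31  = DiscreteFactor(['X3', 'X1'], [2, 2], np.random.rand(4))
f52  = DiscreteFactor(['X5', 'X2'], [2, 2], np.random.rand(4))
f42  = DiscreteFactor(['X4', 'X2'], [2, 2], np.random.rand(4))
f68  = DiscreteFactor(['X6', 'X8'], [2, 2], np.random.rand(4))
f843 = DiscreteFactor(['X8', 'X4', 'X3'], [2, 2, 2], np.random.rand(8))
f758 = DiscreteFactor(['X7', 'X5', 'X8'], [2, 2, 2], np.random.rand(8))

v = ['X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8']  # variable nodes
f = [f1, f2, f31, f52, f42, f68, f843, f758]          # factor nodes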
Example #2
    def to_factor_graph(self):
        """
        Converts the Markov model into a factor graph.

        A factor graph contains two types of nodes. One type corresponds to
        random variables whereas the second type corresponds to factors over
        these variables. The graph only contains edges between variables and
        factor nodes. Each factor node is associated with one factor whose
        scope is the set of variables that are its neighbors.

        Examples
        --------
        >>> import numpy as np
        >>> from pgmpy.models import MarkovModel
        >>> from pgmpy.factors import Factor
        >>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles')])
        >>> factor1 = Factor(['Alice', 'Bob'], [3, 2], np.random.rand(6))
        >>> factor2 = Factor(['Bob', 'Charles'], [2, 2], np.random.rand(4))
        >>> student.add_factors(factor1, factor2)
        >>> factor_graph = student.to_factor_graph()
        """
        from pgmpy.models import FactorGraph
        factor_graph = FactorGraph()

        if not self.factors:
            raise ValueError('Factors not associated with the random variables.')

        factor_graph.add_nodes_from(self.nodes())
        for factor in self.factors:
            scope = factor.scope()
            factor_node = 'phi_' + '_'.join(scope)
            factor_graph.add_edges_from(itertools.product(scope, [factor_node]))
            factor_graph.add_factors(factor)

        return factor_graph
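Continuing the docstring example above, the resulting graph contains the original variables plus one factor node per factor, named with the 'phi_' + '_'.join(scope) convention from the method body. A sketch of the expected node set (not verified output):

sorted(factor_graph.nodes())
# ['Alice', 'Bob', 'Charles', 'phi_Alice_Bob', 'phi_Bob_Charles']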
Example #3
    def add_new_model(self):
        if self.max_inference_size > 0 or isinstance(
                self.inference, ApproximateSearchInference):
            model = FactorGraph()
            self.ordered_models.append(model)
            return model
        else:
            return None
Example #4
    def reset(self):
        """ Resets variables which need to be updated for each new scenario
        """
        self.model = FactorGraph()

        self.ordered_models = []

        if self.inference_type == InferenceType.SearchInference:
            self.inference = ApproximateSearchInference(
                self.max_beam_size, self.ordered_models)
            # print("Using approximate search inference")
        else:
            self.inference = PGMPYInference(self.model,
                                            inference_type=self.inference_type,
                                            sampling_type=self.sampling_type)

        self.observed = {}

        self.models = []
        self.model_nodes = set()
Example #5
def reduce_model(model, evidence):
    model = copy.deepcopy(model)
    # continuous_factors = [factor for factor in model.factors if isinstance(factor, ContinuousFactor)]

    for var, val in evidence.items():
        for factor in model.factors:
            # Make sure that we only reduce at this stage for continuous values;
            # let the inference algorithm deal with reducing for binary variables.
            if var in factor.scope():  # and "F(" in var:
                try:
                    factor.reduce([(var, val)])
                except ValueError as e:
                    print(factor)
                    raise e

    new_model = FactorGraph()

    additional_evidence = {}

    for node in model.factors:
        if isinstance(node, ContinuousFactor):
            if len(node.scope()) == 1:
                node = TabularCPD(str(node.scope()[0]), 2,
                                  [[node.assignment(0),
                                    node.assignment(1)]]).to_factor()
        if len(node.scope()) == 0:
            continue

        #         try:
        #             var = node.variable
        #         except:
        #             print(node.scope())
        #         for v in node.scope():
        #             if var != v:
        #                 new_model.add_edge(str(v), str(var))

        #         if "same_reason" in var:
        #             additional_evidence[var] = 1

        new_model.add_nodes_from([str(n) for n in node.scope()])
        new_model.add_factors(node)
    return new_model, additional_evidence
Example #6
    def setUp(self):
        self.graph = FactorGraph()
Example #7
    def test_class_init_data_string(self):
        self.graph = FactorGraph([('a', 'phi1'), ('b', 'phi1')])
        self.assertListEqual(sorted(self.graph.nodes()), ['a', 'b', 'phi1'])
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             [['a', 'phi1'], ['b', 'phi1']])
Example #8
# Cluster Graph

########################################################
#
#              ________   _______   ________
#              |__f1__|---|__A__|---|__f3__|
#                  |                    |     
#                  |                    |
#               ___|___   ________   ___|___
#               |__B__|---|__f2__|---|__C__|
#             
#               
########################################################
# First import factor graph class from pgmpy.models
from pgmpy.models import FactorGraph
factor_graph = FactorGraph()

# Add variable nodes and factor nodes to model
factor_graph.add_nodes_from(['A','B','C','phi1','phi2','phi3'])

# Add edges between all nodes
factor_graph.add_edges_from([('A','phi1'), ('B','phi1'),
                             ('B','phi2'), ('C','phi2'),
                             ('C','phi3'), ('A','phi3')])


# Define the factors phi1, phi2, phi3 and attach them to the graph
from pgmpy.factors import Factor
import numpy as np
phi1 = Factor(['A','B'], [2,2], np.random.rand(4))
phi2 = Factor(['B','C'], [2,2], np.random.rand(4))
phi3 = Factor(['C','A'], [2,2], np.random.rand(4))
factor_graph.add_factors(phi1, phi2, phi3)
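A possible follow-up (an assumption, not part of the original snippet) is to validate the completed graph before handing it to an inference engine:

# Assumed follow-up: check that every factor's scope matches its neighboring variable nodes.
print(factor_graph.check_model())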
Example #9
    def compute_pk(self, type_list, fid):
        assert len(type_list) == 5, "ComputePk Error: number of type_list should be 5"

        constraint_name = ['m', 'r', 's', 'd', 'v']
        '''
        m, r, s, d, v = type_list
        p_m, p_r, p_s, p_d, p_v = self.p_observation
        p_ktox, p_xtok = self.p_implication
        p_ktom, p_ktor, p_ktos, p_ktod, p_ktov = p_ktox
        p_mtok, p_rtok, p_stok, p_dtok, p_vtok = p_xtok
        '''
        fg = FactorGraph()
        fg.add_node('k')

        for i in range(len(type_list)):
            if type_list[i] == 0:
                fg = self.add_constraints_k2x_x2k(fg, self.p_observation[fid][i], self.p_implication[fid][0][i], self.p_implication[fid][1][i], constraint_name[i])
            elif type_list[i] == 1:
                fg = self.add_constraints_k2x(fg, self.p_observation[fid][i], self.p_implication[fid][0][i], constraint_name[i])
            elif type_list[i] == 2:
                fg = self.add_constraints_x2k(fg, self.p_observation[fid][i], self.p_implication[fid][1][i], constraint_name[i])
        '''
        if m == 0:
            fg = add_constraints_kv_vk(fg, p_m, p_ktom, p_mtok, 'm')
        elif m == 1:
            fg = add_constraints_kv(fg, p_m, p_mtok, 'm')
        elif m == 2:
            fg = add_constraints_vk(fg, p_m, p_mtok, 'm')

        if r == 0:
            fg = add_constraints_kv_vk(fg, p_r, p_ktor, p_rtok, 'r')
        elif r == 1:
            fg = add_constraints_kv(fg, p_r, p_ktor, 'r')
        elif r == 2:
            fg = add_constraints_vk(fg, p_r, p_rtok, 'r')

        if s == 0:
            fg = add_constraints_kv_vk(fg, p_s, p_ktos, p_stok, 's')
        elif s == 1:
            fg = add_constraints_kv(fg, p_s, p_ktos, 's')
        elif s == 2:
            fg = add_constraints_vk(fg, p_s, p_stok, 's')

        if d == 0:
            fg = add_constraints_kv_vk(fg, p_d, p_ktod, p_dtok, 'd')
        elif d == 1:
            fg = add_constraints_kv(fg, p_d, p_ktod, 'd')
        elif d == 2:
            fg = add_constraints_vk(fg, p_d, p_dtok, 'd')

        if v == 0:
            fg = add_constraints_kv_vk(fg, p_v, p_ktov, p_vtok, 'v')
        elif v == 1:
            fg = add_constraints_kv(fg, p_v, p_ktov, 'v')
        elif v == 2:
            fg = add_constraints_vk(fg, p_v, p_vtok, 'v')
        '''

        bp = BeliefPropagation(fg)

        #result = bp.query(variables=['k'])['k']
        #result = bp.query(variables=['k'], joint=False)['k']
        result = bp.query(variables=['k'])
        result.normalize()
        #print(result)

        return result.values[1]
Example #10
import numpy as np
from pgmpy.models import FactorGraph
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.inference import BeliefPropagation

G = FactorGraph()
G.add_node(0)
G.add_node(1)
G.add_node(2)

f01 = DiscreteFactor([0, 1], [2, 2], np.random.rand(4))
f02 = DiscreteFactor([0, 2], [2, 2], np.random.rand(4))
f12 = DiscreteFactor([1, 2], [2, 2], np.random.rand(4))
G.add_factors(f01)
G.add_factors(f02)
G.add_factors(f12)

G.add_edges_from([(0, f01), (1, f01), (0, f02), (2, f02), (1, f12), (2, f12)])
bp = BeliefPropagation(G)
bp.calibrate()
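A possible next step, not shown in the original snippet, is to query marginals from the calibrated engine, mirroring the bp.query usage in Example #9:

# Assumed usage: query and normalize a marginal after calibration.
marginal = bp.query(variables=[0])
marginal.normalize()
print(marginal.values)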
Example #11
def build_combined_model(relevant_models):
    new_model = FactorGraph()
    for model in relevant_models:
        combine_models(new_model, model)
    return new_model
Example #12
def create_new_model(nodes, factor):
    new_model = FactorGraph()
    new_model.add_nodes_from([str(n) for n in nodes] + [str(factor)])
    new_model.add_factors(factor)
    return new_model
Example #13
    def test_class_init_data_string(self):
        self.graph = FactorGraph([("a", "phi1"), ("b", "phi1")])
        self.assertListEqual(sorted(self.graph.nodes()), ["a", "b", "phi1"])
        self.assertListEqual(
            hf.recursive_sorted(self.graph.edges()), [["a", "phi1"], ["b", "phi1"]]
        )