Example #1
    def get_independencies(self, latex=False):
        """
        Computes the independencies in the Bayesian network by checking d-separation.

        Parameters
        ----------
        latex: boolean
            If latex=True, a latex string of the independence assertions
            is returned instead.

        Examples
        --------
        >>> from pgm.models import BayesianModel
        >>> chain = BayesianModel([('X', 'Y'), ('Y', 'Z')])
        >>> chain.get_independencies()
        (X _|_ Z | Y)
        (Z _|_ X | Y)
        """
        independencies = Independencies()
        for start in self.nodes():
            rest = set(self.nodes()) - {start}
            for r in range(len(rest)):
                for observed in itertools.combinations(rest, r):
                    d_separated_variables = rest - set(observed) - set(
                        self.active_trail_nodes(start, observed=observed))
                    if d_separated_variables:
                        independencies.add_assertions(
                            [start, d_separated_variables, observed])

        independencies.reduce()

        if not latex:
            return independencies
        else:
            return independencies.latex_string()
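For context, here is a small standalone sketch (mine, not the pgm library's code; the d_separated helper name is hypothetical) of the d-separation test that the loop above relies on, using the standard moralized-ancestral-graph criterion on the same X -> Y -> Z chain:

import itertools
import networkx as nx

def d_separated(dag, x, y, z):
    # x and y are d-separated given z iff they are disconnected in the
    # moralized ancestral graph of {x, y} union z.
    nodes = {x, y} | set(z)
    ancestral = set(nodes)
    for n in nodes:
        ancestral |= nx.ancestors(dag, n)
    sub = dag.subgraph(ancestral)
    moral = nx.Graph(sub)                 # undirected copy of the ancestral subgraph
    for child in sub.nodes():
        # "Marry" parents that share a child.
        for a, b in itertools.combinations(sub.predecessors(child), 2):
            moral.add_edge(a, b)
    moral.remove_nodes_from(z)            # observed nodes block every path through them
    return not nx.has_path(moral, x, y)

chain = nx.DiGraph([('X', 'Y'), ('Y', 'Z')])
print(d_separated(chain, 'X', 'Z', {'Y'}))   # True  -> (X _|_ Z | Y)
print(d_separated(chain, 'X', 'Z', set()))   # False -> X and Z are not marginally independent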
Example #2
    def get_independencies(self, condition=None):
        """
        Returns the independent variables in the joint probability distribution.
        Returns marginally independent variables if condition=None and
        conditionally independent variables otherwise.

        Parameters
        ----------
        condition: array_like
            Random variable(s) on which to condition the joint probability distribution.

        Examples
        --------
        >>> import numpy as np
        >>> from pgm.factors.discrete import JointProbabilityDistribution
        >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)
        >>> prob.get_independencies()
        (x1 _|_ x2)
        (x1 _|_ x3)
        (x2 _|_ x3)
        """
        JPD = self.copy()
        if condition:
            JPD.conditional_distribution(condition)
        independencies = Independencies()
        for variable_pair in itertools.combinations(list(JPD.variables), 2):
            if (JPD.marginal_distribution(variable_pair, inplace=False) ==
                    JPD.marginal_distribution(variable_pair[0], inplace=False) *
                    JPD.marginal_distribution(variable_pair[1], inplace=False)):
                independencies.add_assertions(variable_pair)
        return independencies
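As a quick illustration of the factorization check above (a sketch of mine, not library code; the variable order x1, x2, x3 with cardinalities 2, 3, 2 is assumed as in the docstring), the same test can be written directly with numpy:

import numpy as np

joint = np.ones((2, 3, 2)) / 12            # uniform P(x1, x2, x3)

p_x1_x2 = joint.sum(axis=2)                # marginalize out x3 -> P(x1, x2)
p_x1 = joint.sum(axis=(1, 2))              # P(x1)
p_x2 = joint.sum(axis=(0, 2))              # P(x2)

# P(x1, x2) == P(x1) * P(x2) is exactly the condition that yields (x1 _|_ x2).
print(np.allclose(p_x1_x2, np.outer(p_x1, p_x2)))   # True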
Example #3
    def local_independencies(self, variables):
        """
        Returns an Independencies object containing the local independencies
        of each of the variables.

        Parameters
        ----------
        variables: str or array-like
            Variables whose local independencies are to be found.

        Examples
        --------
        >>> from pgm.models import BayesianModel
        >>> student = BayesianModel()
        >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
        ...                         ('grade', 'letter'), ('intel', 'SAT')])
        >>> ind = student.local_independencies('grade')
        >>> ind.event1
        {'grade'}
        >>> ind.event2
        {'SAT'}
        >>> ind.event3
        {'diff', 'intel'}
        """
        def dfs(node):
            """
            Returns the descendants of node.

            Since Bayesian networks are acyclic, this is a very simple DFS
            that does not need to remember which nodes it has visited.
            """
            descendants = []
            visit = [node]
            while visit:
                n = visit.pop()
                neighbors = self.neighbors(n)
                visit.extend(neighbors)
                descendants.extend(neighbors)
            return descendants

        independencies = Independencies()
        variables = [variables] if isinstance(variables, str) else variables
        for variable in variables:
            non_descendants = set(self.nodes()) - {variable} - set(dfs(variable))
            parents = set(self.get_parents(variable))
            if non_descendants - parents:
                independencies.add_assertions(
                    [variable, non_descendants - parents, parents])
        return independencies
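The loop above encodes the local Markov property for DAGs: every variable is independent of its non-descendants given its parents. A minimal standalone sketch of the same computation (mine, using networkx instead of the dfs helper; the graph mirrors the docstring example):

import networkx as nx

student = nx.DiGraph([('diff', 'grade'), ('intel', 'grade'),
                      ('grade', 'letter'), ('intel', 'SAT')])

node = 'grade'
parents = set(student.predecessors(node))
non_descendants = set(student.nodes()) - {node} - nx.descendants(student, node)

# (grade _|_ SAT | diff, intel)
print(sorted(non_descendants - parents), '|', sorted(parents))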
Example #4
    def get_local_independencies(self, latex=False):
        """
        Returns all the local independencies present in the Markov model.

        Local independencies are the independence assertions of the form

        .. math:: X \perp (W - \{X\} - MB(X)) \mid MB(X)

        where W is the set of all the random variables and MB(X) is the
        Markov blanket of X.

        Parameters
        ----------
        latex: boolean
            If latex=True, a latex string of the independence assertions is
            returned instead.

        Examples
        --------
        >>> from pgm.models import MarkovModel
        >>> mm = MarkovModel()
        >>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
        >>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
        ...                    ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
        ...                    ('x4', 'x7'), ('x5', 'x7')])
        >>> mm.get_local_independencies()
        """
        local_independencies = Independencies()

        all_vars = set(self.nodes())
        for node in self.nodes():
            markov_blanket = set(self.markov_blanket(node))
            rest = all_vars - {node} - markov_blanket
            try:
                local_independencies.add_assertions(
                    [node, list(rest), list(markov_blanket)])
            except ValueError:
                pass

        local_independencies.reduce()

        if latex:
            return local_independencies.latex_string()
        else:
            return local_independencies
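In a Markov network the Markov blanket of a node is simply its set of neighbors, so each assertion built above has the form node _|_ (all variables - node - blanket) | blanket. A brief sketch of that computation for a single node (mine, not library code; the graph matches the docstring example):

import networkx as nx

mm = nx.Graph([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'), ('x2', 'x5'),
               ('x3', 'x6'), ('x4', 'x6'), ('x4', 'x7'), ('x5', 'x7')])

node = 'x4'
blanket = set(mm.neighbors(node))                # {'x1', 'x2', 'x6', 'x7'}
rest = set(mm.nodes()) - {node} - blanket        # {'x3', 'x5'}

# (x4 _|_ x3, x5 | x1, x2, x6, x7)
print(sorted(rest), '|', sorted(blanket))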