def test_issubset():
    """
    FUNCTION: issubset, in general.py.
    """
    """Create arrays small and large, where small IS a subset of large."""
    small = np.array([1, 3])
    large = np.array([0, 1, 2, 3])
    """Assert that small is a subset of large."""
    assert general.issubset(small, large)
    """Assert that large is not a subset of small."""
    assert not (general.issubset(large, small))
    """
    Create array small so that it contains fewer elements than large,
    but is not a subset of large.
    """
    small = np.array([1, 4])
    """Assert that small is not a subset of large."""
    assert not (general.issubset(small, large))
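For orientation, general.issubset itself is not shown in this listing. A minimal sketch of what such a helper might look like, built on NumPy's set routines (the name issubset_sketch is hypothetical, not the library's own), is:

import numpy as np

def issubset_sketch(small, large):
    """Return True if every element of small also appears in large."""
    return bool(np.all(np.isin(small, large)))

print(issubset_sketch(np.array([1, 3]), np.array([0, 1, 2, 3])))  # True
print(issubset_sketch(np.array([1, 4]), np.array([0, 1, 2, 3])))  # False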
Example #2
def triangulate(G, order):
    """
    This function ensures that the input graph is triangulated (chordal),
    i.e., every cycle of length > 3 has a chord. To find the maximal
    cliques, we save each induced cluster (created by adding connecting
    neighbors) that is not a subset of any previously saved cluster. (A
    cluster is a complete, but not necessarily maximal, set of nodes.)
    Parameters
    ----------
    G: Numpy ndarray
        G[i,j] = 1 iff there is an edge between node i and node j.
    order: List
        The order in which to eliminate the nodes.
    """
    MG = G.copy()
    
    """Obtain the the number of nodes in the graph"""
    n = G.shape[0]
    eliminated = np.zeros((1,n))
    cliques = []
    for i in range(0,n):
        """Obtain the index of the next node to be eliminated"""
        u = order[0,i]
        U = find(eliminated == 0)
        nodes = np.intersect1d(neighbours(G, u), U)
        nodes = np.union1d(nodes, np.array([u]))
        """
        Connect all uneliminated neighbours of the node to be eliminated
        together.
        """
        for a in nodes:
            for b in nodes:
                G[int(a), int(b)] = 1
        G = setdiag(G, 0)
        u = int(u)
        """Mark the node as 'eliminated'"""
        eliminated[0, u] = 1

        """
        If the generated clique is a subset of an existing clique, then it is
        not a maximal clique, so it is excluded from the list of cliques.
        """
        exclude = False
        for c in range(0, len(cliques)):
            if issubset(nodes, np.array(cliques[c])):
                exclude = True
                break

        if not exclude:
            cliques.append(nodes)

    return [G, cliques]
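For orientation only (not part of the original listing), a hypothetical call on a 4-node cycle could look like the sketch below. The elimination order is passed as a 1-by-n array because the function indexes order[0, i]; the helpers find, neighbours, setdiag and issubset are assumed to be in scope, so the actual call is left commented out.

import numpy as np

# 4-node cycle 0-1-2-3-0; triangulating it should add exactly one chord.
G = np.array([[0, 1, 0, 1],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [1, 0, 1, 0]])
order = np.array([[0, 1, 2, 3]])  # order[0, i] is the i-th node to eliminate

# G_chordal, cliques = triangulate(G, order)
# Eliminating node 0 first would connect its neighbours 1 and 3 (the chord),
# leaving cliques such as {0, 1, 3} and {1, 2, 3}.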
Example #3
def neighbours(adj_mat, i):
    """
    Returns the indices of the neighbouring nodes of the input node, i, in the
    given adjacency matrix.
    Parameters
    ----------
    adj_mat: Numpy ndarray
        Adjacency matrix. If adj_mat[i, j] = 1, there exists a directed
        edge from node i to node j.
    i: Int
        The index of the node whose neighbours are to be found.
    """
    kids = np.array(children(adj_mat, i))
    folks = np.array(parents(adj_mat, i))
    if issubset(kids, folks) and issubset(folks, kids):
        nbrs = kids
    else:
        nbrs = np.hstack((kids, folks)).tolist()

    return nbrs
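The children and parents helpers that neighbours relies on are not included in this listing. Plausible minimal versions, following the adjacency-matrix convention described in the docstring (the _sketch names mark them as assumptions, not the library's API), might be:

import numpy as np

def parents_sketch(adj_mat, i):
    """Indices j with a directed edge j -> i, i.e. nonzero entries in column i."""
    return np.nonzero(adj_mat[:, i])[0].tolist()

def children_sketch(adj_mat, i):
    """Indices j with a directed edge i -> j, i.e. nonzero entries in row i."""
    return np.nonzero(adj_mat[i, :])[0].tolist()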
Example #4
def triangulate(G, order):
    """
    This function ensures that the input graph is triangulated (chordal),
    i.e., every cycle of length > 3 has a chord. To find the maximal
    cliques, we save each induced cluster (created by adding connecting
    neighbors) that is not a subset of any previously saved cluster. (A
    cluster is a complete, but not necessarily maximal, set of nodes.)

    Parameters
    ----------
    G: Numpy ndarray
        G[i,j] = 1 iff there is an edge between node i and node j.

    order: List
        The order in which to eliminate the nodes.
    """
    MG = G.copy()
    
    """Obtain the the number of nodes in the graph"""
    n = G.shape[0]
    eliminated = np.zeros((1,n))
    cliques = []
    for i in range(0,n):
        """Obtain the index of the next node to be eliminated"""
        u = order[0,i]
        U = find(eliminated == 0)
        nodes = np.intersect1d(neighbours(G, u), U)
        nodes = np.union1d(nodes, np.array([u]))
        """
        Connect all uneliminated neighbours of the node to be eliminated
        together.
        """
        for a in nodes:
            for b in nodes:
                G[a, b] = 1
        G = setdiag(G, 0)

        """Mark the node as 'eliminated'"""
        eliminated[0, u] = 1

        """
        If the generated clique is a subset of an existing clique, then it is
        not a maximal clique, so it is excluded from the list of cliques.
        """
        exclude = False
        for c in range(0, len(cliques)):
            if issubset(nodes, np.array(cliques[c])):
                exclude = True
                break

        if not exclude:
            cliques.append(nodes)

    return [G, cliques]
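setdiag is another helper that triangulate calls but that is not shown here. Since the code uses it as G = setdiag(G, 0), a minimal sketch of such a function (the name setdiag_sketch is hypothetical) could be:

import numpy as np

def setdiag_sketch(M, val):
    """Return a copy of M with every diagonal entry set to val."""
    out = np.array(M, copy=True)
    np.fill_diagonal(out, val)
    return out

# setdiag_sketch(np.ones((3, 3)), 0) -> all-ones matrix with a zeroed diagonal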
Example #5
def neighbours(adj_mat, i):
    """
    Returns the indices of the neighbouring nodes of the input node, i, in the
    given adjacency matrix.

    Parameters
    ----------
    adj_mat: Numpy ndarray
        Adjacency matrix. If adj_mat[i, j] = 1, there exists a directed
        edge from node i to node j.

    i: Int
        The index of the node whose neighbours are to be found.
    """
    kids = np.array(children(adj_mat, i))
    folks = np.array(parents(adj_mat, i))
    if issubset(kids, folks) and issubset(folks, kids):
        nbrs = kids
    else:
        nbrs = np.hstack((kids, folks)).tolist()

    return nbrs
Example #6
def test_issubset():
    """
    FUNCTION: issubset, in general.py.
    """
    """Create arrays small and large, where small IS a subset of large."""
    small = np.array([1, 3])
    large = np.array([0, 1, 2, 3])

    """Assert that small is a subset of large."""
    assert general.issubset(small, large)

    """Assert that large is not a subset of small."""
    assert not (general.issubset(large, small))

    """
    Create array small so that it contains fewer elements than large,
    but is not a subset of large.
    """
    small = np.array([1, 4])

    """Assert that small is not a subset of large."""
    assert not (general.issubset(small, large))
Example #7
    def update_ess(self, sample, expected_vals, node_id, model):
        """
        Update the expected sufficient statistics for this CPD.

        Parameters
        ----------
        sample: List
            A partially observed sample of all the nodes in the model
            this CPD is part of. sample[i] = [] if node i is unobserved.

        expected_vals: marginal
            A marginal object containing the expected values for any unobserved
            nodes in this CPD.

        node_id: Int
            The index of the node this CPD is attached to.

        model: Model
            The model this CPD is part of; the node sizes are obtained
            from model.node_sizes_unobserved.
        """      
        """Determine which nodes were observed in this sample"""
        node_sizes = model.node_sizes_unobserved
        [hidden, observed] = general.determine_observed(sample)

        """If the entire domain of the CPD is hidden"""
        if general.issubset(np.array(expected_vals.domain), np.array(hidden)):
            """
            If the entire domain of the CPD was unobserved in
            the last sample. Then the marginal over the CPD domain will
            be just the CPD's entire CPT. Therefore we can add this
            directly to the CPD's expected sufficient statistics.
            """
            self.ess = self.ess + expected_vals.T.flatten()
        else:
            """
            If any part of the CPD's domain was observed, the expected values
            for the observed domain has been marginalized out. Therefore
            we need to pump the marginal up to its correct dimensions based
            on the observed evidence, and place the observed values where the
            'expected' values were for the observed nodes.
            """
            expected_vals.add_ev_to_dmarginal(sample, node_sizes)

            """
            Add the new values to the CPD's expected sufficient statistics.
            """
            self.ess = self.ess + expected_vals.T.flatten()
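general.determine_observed is also not shown in these examples. Judging only from the docstring convention that sample[i] = [] marks an unobserved node, a minimal sketch (the name determine_observed_sketch is an assumption) might be:

def determine_observed_sketch(sample):
    """Split node indices into hidden ([] entries) and observed ones."""
    hidden = [i for i, value in enumerate(sample) if value == []]
    observed = [i for i, value in enumerate(sample) if value != []]
    return [hidden, observed]

# determine_observed_sketch([[2], [], [1]]) -> [[1], [0, 2]]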
Example #8
    def update_ess(self, sample, expected_vals, node_sizes):
        """
        Update the expected sufficient statistics for this CPD.

        Parameters
        ----------
        sample: List
            A partially observed sample of all the nodes in the model
            this CPD is part of. sample[i] = [] if node i is unobserved.

        expected_vals: marginal
            A marginal object containing the expected values for any unobserved
            nodes in this CPD.

        node_sizes: Array
            A list of the sizes of each node in the model. If sizes[2] = 10,
            then node 2 can assume 1 of 10 different states.
        """      
        """Determine which nodes were observed in this sample"""
        [hidden, observed] = general.determine_observed(sample)

        """If the entire domain of the CPD is hidden"""
        if general.issubset(np.array(expected_vals.domain), np.array(hidden)):
            """
            If the entire domain of the CPD was unobserved in
            the last sample. Then the marginal over the CPD domain will
            be just the CPD's entire CPT. Therefore we can add this
            directly to the CPD's expected sufficient statistics.
            """
            self.ess = self.ess + expected_vals.T.flatten()
        else:
            """
            If any part of the CPD's domain was observed, the expected values
            for the observed domain has been marginalized out. Therefore
            we need to pump the marginal up to its correct dimensions based
            on the observed evidence, and place the observed values where the
            'expected' values were for the observed nodes.
            """
            expected_vals.add_ev_to_dmarginal(sample, node_sizes)

            """
            Add the new values to the CPD's expected sufficient statistics.
            """
            self.ess = self.ess + expected_vals.T.flatten()