Example #1
def enum_maximal_matchings(graph: nx.Graph) -> Iterator[Dict[Any, Any]]:
    # Step 1
    # If all vertices of G have degrees 0 or 1, output the unique maximal matching of G and stop.
    node_degree = next(((node, degree) for node, degree in graph.degree if degree >= 2), None)
    if node_degree is None:
        matching = maximum_matching(graph, top_nodes=top_nodes(graph))
        yield {k: v for k, v in matching.items() if k in top_nodes(graph)}
        return

    # Step 2
    # Choose a vertex v with degree at least 2.
    node, degree = node_degree

    for neighbor in graph.neighbors(node):
        # Step 3: For each edge e in G incident to v, construct G+(e) and enumerate all maximal
        # matchings including e by recursive calls.
        # After the recursive call, reconstruct G from G+(e)

        # Order nodes in the edge according to the matching convention
        edge = (node, neighbor)
        edge = edge if edge[0] in top_nodes(graph) else edge[::-1]
        # Create G+(e)
        graph_plus = graph_without_nodes_of_edge(graph, edge)
        # Recursively get maximal matchings from G+(e) and then add e
        for maximal_matching in enum_maximal_matchings(graph_plus):
            maximal_matching.update([edge])  # add e = (top, bottom) to the matching
            yield maximal_matching

    # Let G' be the subgraph composed of edges incident
    # to vertices adjacent to v, except for edges incident to v
    edges = set()
    for neighbor in graph.neighbors(node):
        for second_neighbor in graph.neighbors(neighbor):
            if second_neighbor is not node:
                edges.add(tuple(sorted((neighbor, second_neighbor))))
    graph_prime = graph.edge_subgraph(edges)
    # Step 4
    # Find a maximum matching M in G'. If |M| = d(v),
    # then enumerate all maximum matchings in G' by ENUM_MAXIMUM_MATCHING_ITER(M,G').
    matching = maximum_matching(graph_prime, top_nodes=top_nodes(graph_prime))
    matching = {k: v for k, v in matching.items() if k in top_nodes(graph_prime)}
    if len(matching) == degree:
        directed_match_graph = create_directed_matching_graph(graph_prime, top_nodes(graph_prime),
                                                              matching)
        for max_matching in _enum_maximum_matchings_iter(graph_prime, matching,
                                                         directed_match_graph):
            # Step 5
            # For each matching, enumerate all maximal matchings including it.
            subgraph = nx.Graph(graph_prime)
            for edge in max_matching.items():
                subgraph = graph_without_nodes_of_edge(subgraph, edge)
            for maximal_matching in enum_maximal_matchings(subgraph):
                maximal_matching.update(max_matching)
                yield maximal_matching
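A minimal usage sketch, assuming enum_maximal_matchings and the top_nodes helper it relies on are importable from the same module (the module name matching_utils below is hypothetical):

import networkx as nx
# from matching_utils import enum_maximal_matchings  # hypothetical module name

# A small bipartite graph with several maximal matchings.
G = nx.Graph()
G.add_nodes_from(["t1", "t2", "t3"], bipartite=0)
G.add_nodes_from(["b1", "b2"], bipartite=1)
G.add_edges_from([("t1", "b1"), ("t1", "b2"), ("t2", "b1"), ("t3", "b2")])

for matching in enum_maximal_matchings(G):
    print(matching)  # each matching maps top nodes to matched bottom nodes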
Example #2
def approx_birkhoff_von_neumann_decomposition(D):
    m, n = D.shape
    if m != n:
        raise ValueError('Input matrix must be square ({} x {})'.format(m, n))
    indices = list(itertools.product(range(m), range(n)))
    # These two lists will store the result as we build it up each iteration.
    coefficients = []
    permutations = []
    # Create a copy of D so that we don't modify it directly. Cast the
    # entries of the matrix to floating point numbers, regardless of
    # whether they were integers.
    S = D.astype('float')
    while not np.all(S == 0):
        # Create an undirected graph whose adjacency matrix contains a 1
        # exactly where the matrix S has a nonzero entry.
        W = to_pattern_matrix(S)
        # Construct the bipartite graph whose left and right vertices both
        # represent the vertex set of the pattern graph (whose adjacency matrix
        # is ``W``).
        X = to_bipartite_matrix(W)
        # Convert the matrix of a bipartite graph into a NetworkX graph object.
        G = from_numpy_matrix(X)
        # Compute a perfect matching for this graph. The dictionary `M` has one
        # entry for each matched vertex (in both the left and the right vertex
        # sets), and the corresponding value is its partner.
        #
        # The bipartite maximum matching algorithm requires specifying
        # the left set of nodes in the bipartite graph. By construction,
        # the left set of nodes is {0, ..., n - 1} and the right set is
        # {n, ..., 2n - 1}; see `to_bipartite_matrix()`.
        left_nodes = range(n)
        M = maximum_matching(G, left_nodes)
        # However, since we have both a left vertex set and a right vertex set,
        # each representing the original vertex set of the pattern graph
        # (``W``), we need to convert any vertex numbered ``n`` or higher to its
        # original vertex number. To do this,
        #
        #   - ignore any keys greater than or equal to ``n``, since they are already
        #     covered by earlier key/value pairs,
        #   - ensure that all values are less than ``n``.
        #
        M = {u: v % n for u, v in M.items() if u < n}
        if len(M) < n:
            break
        # Convert that perfect matching to a permutation matrix.
        P = to_permutation_matrix(M)
        # Get the smallest entry of S corresponding to the 1 entries in the
        # permutation matrix.
        q = min(S[i, j] for (i, j) in indices if P[i, j] == 1)
        # Store the coefficient and the permutation matrix for later.
        coefficients.append(q)
        permutations.append(P)
        # Subtract P scaled by q. After this subtraction, S has a zero entry
        # where the value q used to live.
        S -= q * P
        # PRECISION ISSUE: There seems to be a problem with floating point
        # precision here, so we need to round down to 0 any entry that is very
        # small.
        S[np.abs(S) < TOLERANCE] = 0.0
    return list(zip(coefficients, permutations))
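A quick sanity check on the output (a sketch): the coefficient-weighted sum of the returned permutation matrices should approximately reconstruct the input matrix.

import numpy as np

D = np.array([[0.5, 0.5],
              [0.5, 0.5]])
pairs = approx_birkhoff_von_neumann_decomposition(D)
reconstructed = sum(q * P for q, P in pairs)
assert np.allclose(reconstructed, D)  # holds when the loop ran to completion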
Example #3
def find_largest_t(valid_matches, weights, edge_matrix):
    """Calculates the largest edge threshold using binary search such that
    the found maximum cardinality matching is a perfect matching

    Arguments:

    valid_matches: dict
        the last found perfect matching of largest edge weight threshold
        found using a binary search
    weights: np.ndarray
        an array of candidate thresholds for the edge weights in sorted order,
        where the largest elements are first
    edge_matrix: tf.Tensor
        a matrix of edge weights that correspond to a bipartite graph; used for
        constructing a bipartite graph in networkx

    Returns:

    permutation: tf.Tensor
        a tensor containing the Birkhoff-von Neumann permutation matrix
        found using the Birkhoff-von Neumann decomposition"""

    # calculate the current loc of binary search
    n, loc = edge_matrix.shape[1] // 2, (weights.size - 1) // 2

    # calculate the bipartite graph whose edges all have weight of at
    # least the largest threshold found so far
    threshold = weights[loc]
    bipartite_matrix = np.where(edge_matrix >= threshold, 1, 0)

    # calculate the maximum matching using the hopcroft karp algorithm
    matches = maximum_matching(from_numpy_matrix(bipartite_matrix), range(n))
    matches = {u: v % n for u, v in matches.items() if u < n}

    # calculate if the found matching is a perfect matching
    is_perfect_matching = len(matches) == n
    valid_matches = matches if is_perfect_matching else valid_matches

    # if the result found is a perfect matching
    # then move on to larger thresholds
    if weights.size > 2 and is_perfect_matching:
        return find_largest_t(valid_matches, weights[:loc], edge_matrix)

    # otherwise, if the result found is not a perfect matching,
    # then move on to smaller thresholds
    elif weights.size > 1 and not is_perfect_matching:
        return find_largest_t(valid_matches, weights[loc + 1:], edge_matrix)

    # edge case when no valid permutation is a perfect matching and
    # the decomposition terminates with coefficient zero
    if not valid_matches:
        return np.ones((n, n), dtype=np.float32)

    # at the last iteration of binary search return the best
    # permutation matrix found so far
    permutation = np.zeros((n, n), dtype=np.float32)
    permutation[tuple(zip(*valid_matches.items()))] = 1
    return permutation
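A hypothetical usage sketch. It assumes edge_matrix is the full 2n x 2n bipartite adjacency weight matrix (as the call to from_numpy_matrix suggests) and that weights holds the candidate thresholds sorted largest first, per the docstring:

import numpy as np

n = 3
rng = np.random.default_rng(0)
w = rng.random((n, n))  # edge weights between the two vertex sets

# Embed the n x n weight block into a symmetric 2n x 2n bipartite matrix.
edge_matrix = np.zeros((2 * n, 2 * n))
edge_matrix[:n, n:] = w
edge_matrix[n:, :n] = w.T

weights = np.sort(np.unique(w))[::-1]  # candidate thresholds, largest first
permutation = find_largest_t({}, weights, edge_matrix)  # an n x n permutation matrix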
Example #4
    def test_vertex_cover_issue_3306(self):
        G = nx.Graph()
        edges = [(0, 2), (1, 0), (1, 1), (1, 2), (2, 2)]
        G.add_edges_from([((i, "L"), (j, "R")) for i, j in edges])

        matching = maximum_matching(G)
        vertex_cover = to_vertex_cover(G, matching)
        for u, v in G.edges():
            assert u in vertex_cover or v in vertex_cover
Example #5
def test_create_directed_matching_graph(n_m_k_seed):
    n, m, k, seed = n_m_k_seed
    graph = nx.bipartite.gnmk_random_graph(n, m, k, seed)

    matching = maximum_matching(G=graph, top_nodes=gu.top_nodes(graph))
    digraph = gu.create_directed_matching_graph(graph=graph,
                                                top_nodes=gu.top_nodes(graph),
                                                matching=matching)
    assert graph.nodes == digraph.nodes
    assert len(graph.edges) == len(digraph.edges)
Example #6
def test_eppstein_matching():
    """Test in accordance to issue #1927"""
    G = nx.Graph()
    G.add_nodes_from(['a', 2, 3, 4], bipartite=0)
    G.add_nodes_from([1, 'b', 'c'], bipartite=1)
    G.add_edges_from([('a', 1), ('a', 'b'), (2, 'b'),
                      (2, 'c'), (3, 'c'), (4, 1)])
    matching = eppstein_matching(G)
    assert_true(len(matching) == len(maximum_matching(G)))
    assert all(x in set(matching.keys()) for x in set(matching.values()))
Example #8
def test_eppstein_matching():
    """Test in accordance to issue #1927"""
    G = nx.Graph()
    G.add_nodes_from(["a", 2, 3, 4], bipartite=0)
    G.add_nodes_from([1, "b", "c"], bipartite=1)
    G.add_edges_from([("a", 1), ("a", "b"), (2, "b"), (2, "c"), (3, "c"),
                      (4, 1)])
    matching = eppstein_matching(G)
    assert len(matching) == len(maximum_matching(G))
    assert all(x in set(matching.keys()) for x in set(matching.values()))
Example #9
    def test_unorderable_nodes(self):
        a = object()
        b = object()
        c = object()
        d = object()
        e = object()
        G = nx.Graph([(a, d), (b, d), (b, e), (c, d)])
        matching = maximum_matching(G)
        vertex_cover = to_vertex_cover(G, matching)
        for u, v in G.edges():
            assert_true(u in vertex_cover or v in vertex_cover)
Example #11
def enum_maximum_matchings(graph: nx.Graph) -> Iterator[Dict[Any, Any]]:
    matching = maximum_matching(graph, top_nodes=top_nodes(graph))
    # Express the matching only from a top node to a bottom node
    matching = {k: v for k, v in matching.items() if k in top_nodes(graph)}
    if matching:
        yield matching
        directed_match_graph = create_directed_matching_graph(graph, top_nodes(graph), matching)
        trimmed_directed_match_graph = strongly_connected_components_decomposition(
            directed_match_graph)
        yield from _enum_maximum_matchings_iter(graph=copy.deepcopy(graph),
                                                matching=matching,
                                                directed_match_graph=trimmed_directed_match_graph)
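A usage sketch, assuming the top_nodes helper in this module reads the standard bipartite node attribute:

import networkx as nx

# K_{2,2} has exactly two maximum (here perfect) matchings.
G = nx.Graph()
G.add_nodes_from([0, 1], bipartite=0)
G.add_nodes_from([2, 3], bipartite=1)
G.add_edges_from([(0, 2), (0, 3), (1, 2), (1, 3)])

for matching in enum_maximum_matchings(G):
    print(matching)  # {0: 2, 1: 3} and {0: 3, 1: 2}, in some order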
Example #12
def dulmage_mendelsohn(bg, top_nodes=None, matching=None):
    """
    The Dulmage-Mendelsohn decomposition for bipartite graphs.
    This is the coarse decomposition.
    """
    # TODO: Should top_nodes be required? We can try to infer, but
    # the result is in terms of this partition...
    top, bot = bipartite_sets(bg, top_nodes)
    bot_nodes = [n for n in bg if n not in top]
    if top_nodes is None:
        top_nodes = [n for n in bg if n in top]

    if matching is None:
        # This maps top->bot AND bot->top
        matching = maximum_matching(bg, top_nodes=top_nodes)

    t_unmatched = [t for t in top_nodes if t not in matching]
    b_unmatched = [b for b in bot_nodes if b not in matching]

    # A traversal along these graphs corresponds to an alternating path
    t_digraph = _get_projected_digraph(bg, matching, top_nodes)
    b_digraph = _get_projected_digraph(bg, matching, bot_nodes)

    # Nodes reachable by an alternating path from unmatched nodes
    t_reachable, t_filter = _get_reachable_from(t_digraph, t_unmatched)
    b_reachable, b_filter = _get_reachable_from(b_digraph, b_unmatched)

    # Nodes matched with those reachable from unmatched nodes
    t_matched_with_reachable = [matching[b] for b in b_reachable]
    b_matched_with_reachable = [matching[t] for t in t_reachable]

    _filter = t_filter.union(b_filter)
    _filter.update(t_unmatched)
    _filter.update(t_matched_with_reachable)
    _filter.update(b_unmatched)
    _filter.update(b_matched_with_reachable)
    t_other = [t for t in top_nodes if t not in _filter]
    b_other = [b for b in bot_nodes if b not in _filter]

    return ((
        t_unmatched,
        t_reachable,
        t_matched_with_reachable,
        t_other,
        ), (
        b_unmatched,
        b_reachable,
        b_matched_with_reachable,
        b_other,
        ))
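A usage sketch on a small bipartite graph with one more top node than bottom nodes, so at least one top node must be unmatched (the private helpers this function calls are assumed to come from its own module):

import networkx as nx

bg = nx.Graph()
top = ["t0", "t1", "t2"]
bg.add_nodes_from(top, bipartite=0)
bg.add_nodes_from(["b0", "b1"], bipartite=1)
bg.add_edges_from([("t0", "b0"), ("t1", "b0"), ("t1", "b1"), ("t2", "b1")])

top_part, bot_part = dulmage_mendelsohn(bg, top_nodes=top)
t_unmatched, t_reachable, t_matched_with_reachable, t_other = top_part
# With three top nodes and two bottom nodes, t_unmatched is nonempty.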
Example #13
def enum_perfect_matchings(graph: nx.Graph) -> Iterator[Dict[Any, Any]]:
    n_top = len(list(top_nodes(graph)))
    n_bottom = len(list(bottom_nodes(graph)))
    if n_top != n_bottom:
        return
    size = n_top
    matching = maximum_matching(graph, top_nodes=top_nodes(graph))
    # Express the matching only from a top node to a bottom node
    matching = {k: v for k, v in matching.items() if k in top_nodes(graph)}
    if matching and len(matching) == size:
        yield matching
        directed_match_graph = create_directed_matching_graph(graph, top_nodes(graph), matching)
        trimmed_directed_match_graph = strongly_connected_components_decomposition(
            directed_match_graph)
        graph = trimmed_directed_match_graph.to_undirected()
        assert len(graph.edges) == len(trimmed_directed_match_graph.edges)
        assert len(graph.nodes) == len(trimmed_directed_match_graph.nodes)
        yield from _enum_perfect_matchings_iter(graph=copy.deepcopy(graph), matching=matching)
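A small sketch of the contract: the generator is empty unless both sides have the same size and a perfect matching exists.

import networkx as nx

G = nx.Graph()
G.add_nodes_from([0, 1], bipartite=0)
G.add_nodes_from(["a", "b"], bipartite=1)
G.add_edges_from([(0, "a"), (0, "b"), (1, "b")])

print(list(enum_perfect_matchings(G)))  # [{0: 'a', 1: 'b'}] is the only perfect matching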
Example #14
    def test_vertex_cover_issue_2384(self):
        G = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 3)])
        matching = maximum_matching(G)
        vertex_cover = to_vertex_cover(G, matching)
        for u, v in G.edges():
            assert_true(u in vertex_cover or v in vertex_cover)
Example #16
    def test_to_vertex_cover(self):
        """Test for converting a maximum matching to a minimum vertex cover."""
        matching = maximum_matching(self.graph, self.top_nodes)
        vertex_cover = to_vertex_cover(self.graph, matching, self.top_nodes)
        self.check_vertex_cover(vertex_cover)
Example #17
    def test_to_vertex_cover(self):
        """Test for converting a maximum matching to a minimum vertex cover."""
        matching = maximum_matching(self.graph)
        vertex_cover = to_vertex_cover(self.graph, matching)
        self.check_vertex_cover(vertex_cover)
Example #18
def birkhoff_von_neumann_decomposition(D):
    """Returns the Birkhoff--von Neumann decomposition of the doubly
    stochastic matrix `D`.

    The input `D` must be a square NumPy array representing a doubly
    stochastic matrix (that is, a matrix whose entries are nonnegative
    reals and whose row sums and column sums are all 1). Each doubly
    stochastic matrix is a convex combination of at most ``n ** 2``
    permutation matrices, where ``n`` is the dimension of the input
    array.

    The returned value is a list of pairs whose length is at most ``n **
    2``. In each pair, the first element is a real number in the interval **(0,
    1]** and the second element is a NumPy array representing a permutation
    matrix. This represents the doubly stochastic matrix as a convex
    combination of the permutation matrices.

    The input matrix may also be a scalar multiple of a doubly
    stochastic matrix, in which case the row sums and column sums must
    each be *c*, for some positive real number *c*. This may be useful
    in avoiding precision issues: given a doubly stochastic matrix that
    will have many entries close to one, multiply it by a large positive
    integer. The returned permutation matrices will be the same
    regardless of whether the given matrix is a doubly stochastic matrix
    or a scalar multiple of a doubly stochastic matrix, but in the
    latter case, the coefficients will all be scaled by the appropriate
    scalar multiple, and their sum will be that scalar instead of one.

    For example::

        >>> import numpy as np
        >>> from birkhoff import birkhoff_von_neumann_decomposition as decomp
        >>> D = np.ones((2, 2))
        >>> zipped_pairs = decomp(D)
        >>> coefficients, permutations = zip(*zipped_pairs)
        >>> coefficients
        (1.0, 1.0)
        >>> permutations[0]
        array([[ 1.,  0.],
               [ 0.,  1.]])
        >>> permutations[1]
        array([[ 0.,  1.],
               [ 1.,  0.]])
        >>> zipped_pairs = decomp(D / 2)  # halve each value in the matrix
        >>> coefficients, permutations = zip(*zipped_pairs)
        >>> coefficients  # will be half as large as before
        (0.5, 0.5)
        >>> permutations[0]  # will be the same as before
        array([[ 1.,  0.],
               [ 0.,  1.]])
        >>> permutations[1]
        array([[ 0.,  1.],
               [ 1.,  0.]])

    The returned list of pairs is given in the order computed by the algorithm
    (so in particular they are not sorted in any way).

    """

    m, n = D.shape
    # A full matching dict has one entry per vertex on each side (m + n total).
    full_matching_size = m + n
    if m != n:
        raise ValueError('Input matrix must be square ({} x {})'.format(m, n))
    indices = list(itertools.product(range(m), range(n)))
    # These two lists will store the result as we build it up each iteration.
    coefficients = []
    permutations = []
    # Create a copy of D so that we don't modify it directly. Cast the
    # entries of the matrix to floating point numbers, regardless of
    # whether they were integers.
    S = D.astype('float')
    theta = 0
    end = 0
    begin = np.count_nonzero(S)
    count = 0
    # Only keep computing the decomposition while theta is smaller than 0.95
    # and the number of nonzero entries in S keeps decreasing.
    # This accounts for cases where the algorithm does not converge because
    # the input matrix is not exactly doubly stochastic but only
    # approximately so: if one iteration does not produce more zero entries
    # in S, we can conclude that the next iteration will not either.
    # Furthermore, this algorithm only needs the decomposition matrix with
    # the highest probability, so once theta reaches 0.95 the matrix with
    # the highest probability has most likely already been computed.
    while theta <= 0.95 and begin != end:
        begin = np.count_nonzero(S)
        # Print evaluation progress after every 1000 decompositions.
        if count % 1000 == 0:
            print("Birkhoff von Neumann decomposition is still running.")
            print("Number of decompositions: " + str(count))
            print(str(begin) + " decompositions still possible.")
            
        # Create an undirected graph whose adjacency matrix contains a 1
        # exactly where the matrix S has a nonzero entry.
        W = to_pattern_matrix(S)
        # Construct the bipartite graph whose left and right vertices both
        # represent the vertex set of the pattern graph (whose adjacency matrix
        # is ``W``).
        X = to_bipartite_matrix(W)
        # Convert the matrix of a bipartite graph into a NetworkX graph object.
        G = from_numpy_matrix(X)
        # Compute a perfect matching for this graph. The dictionary `M` has one
        # entry for each matched vertex (in both the left and the right vertex
        # sets), and the corresponding value is its partner.
        #
        # The bipartite maximum matching algorithm requires specifying
        # the left set of nodes in the bipartite graph. By construction,
        # the left set of nodes is {0, ..., n - 1} and the right set is
        # {n, ..., 2n - 1}; see `to_bipartite_matrix()`.
        left_nodes = range(n)
        M = maximum_matching(G, left_nodes)
        
        # Handle the case where maximum_matching does not return a match for
        # every vertex: pair the unmatched left vertices with the unmatched
        # right vertices so that M describes a full permutation.
        if len(M) < full_matching_size:
            M = dict(sorted(M.items(), key=operator.itemgetter(0)))
            inv_M = {v: k for k, v in M.items()}
            sorted_inv_M = dict(sorted(inv_M.items(), key=operator.itemgetter(0)))
            name_iter = 0
            value_iter = 0
            missing_name = []
            missing_value = []
            for name, value in zip(M, sorted_inv_M):
                if name_iter != name:
                    missing_name.append(name_iter)
                    name_iter += 1
                name_iter += 1

                if value_iter != value:
                    missing_value.append(value_iter)
                    value_iter += 1
                value_iter += 1
            for name, value in zip(missing_name, missing_value[::-1]):
                M[name] = value
            M = dict(sorted(M.items(), key=operator.itemgetter(0)))
        
        # However, since we have both a left vertex set and a right vertex set,
        # each representing the original vertex set of the pattern graph
        # (``W``), we need to convert any vertex numbered ``n`` or higher to its
        # original vertex number. To do this,
        #
        #   - ignore any keys greater than or equal to ``n``, since they are already
        #     covered by earlier key/value pairs,
        #   - ensure that all values are less than ``n``.
        #
        M = {u: v % n for u, v in M.items() if u < n}
        # Convert that perfect matching to a permutation matrix.
        P = to_permutation_matrix(M)
        # Get the smallest entry of S corresponding to the 1 entries in the
        # permutation matrix.
        q = min(S[i, j] for (i, j) in indices if P[i, j] == 1)
        # Store the coefficient and the permutation matrix for later.
        coefficients.append(q)
        permutations.append(P)
        # Subtract P scaled by q. After this subtraction, S has a zero entry
        # where the value q used to live.
        S -= q * P
        # PRECISION ISSUE: There seems to be a problem with floating point
        # precision here, so we need to round down to 0 any entry that is very
        # small.        
        S[np.abs(S) < TOLERANCE] = 0.0
        
        # add the coefficient value to theta
        theta += q
        # get non-zero entries for S
        end = np.count_nonzero(S)
        # count the number of decompositions performed
        count += 1
        
    return list(zip(coefficients, permutations))
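Because of the early-stopping criteria above, the returned coefficients may sum to less than one; a sketch of checking how much of the input was decomposed:

import numpy as np

D = np.full((3, 3), 1.0 / 3.0)  # doubly stochastic
pairs = birkhoff_von_neumann_decomposition(D)
covered = sum(q for q, _ in pairs)
residual = np.abs(D - sum(q * P for q, P in pairs)).max()
print(covered, residual)  # covered can stop short of 1.0 for larger matrices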
Example #19
def birkhoff_von_neumann_decomposition(D):
    """Returns the Birkhoff--von Neumann decomposition of the doubly
    stochastic matrix `D`.

    The input `D` must be a square NumPy array representing a doubly
    stochastic matrix (that is, a matrix whose entries are nonnegative
    reals and whose row sums and column sums are all 1). Each doubly
    stochastic matrix is a convex combination of at most ``n ** 2``
    permutation matrices, where ``n`` is the dimension of the input
    array.

    The returned value is a list of pairs whose length is at most ``n **
    2``. In each pair, the first element is a real number in the interval **(0,
    1]** and the second element is a NumPy array representing a permutation
    matrix. This represents the doubly stochastic matrix as a convex
    combination of the permutation matrices.

    The input matrix may also be a scalar multiple of a doubly
    stochastic matrix, in which case the row sums and column sums must
    each be *c*, for some positive real number *c*. This may be useful
    in avoiding precision issues: given a doubly stochastic matrix that
    will have many entries close to one, multiply it by a large positive
    integer. The returned permutation matrices will be the same
    regardless of whether the given matrix is a doubly stochastic matrix
    or a scalar multiple of a doubly stochastic matrix, but in the
    latter case, the coefficients will all be scaled by the appropriate
    scalar multiple, and their sum will be that scalar instead of one.

    For example::

        >>> import numpy as np
        >>> from birkhoff import birkhoff_von_neumann_decomposition as decomp
        >>> D = np.ones((2, 2))
        >>> zipped_pairs = decomp(D)
        >>> coefficients, permutations = zip(*zipped_pairs)
        >>> coefficients
        (1.0, 1.0)
        >>> permutations[0]
        array([[ 1.,  0.],
               [ 0.,  1.]])
        >>> permutations[1]
        array([[ 0.,  1.],
               [ 1.,  0.]])
        >>> zipped_pairs = decomp(D / 2)  # halve each value in the matrix
        >>> coefficients, permutations = zip(*zipped_pairs)
        >>> coefficients  # will be half as large as before
        (0.5, 0.5)
        >>> permutations[0]  # will be the same as before
        array([[ 1.,  0.],
               [ 0.,  1.]])
        >>> permutations[1]
        array([[ 0.,  1.],
               [ 1.,  0.]])

    The returned list of pairs is given in the order computed by the algorithm
    (so in particular they are not sorted in any way).

    """
    m, n = D.shape
    if m != n:
        raise ValueError('Input matrix must be square ({} x {})'.format(m, n))
    indices = list(itertools.product(range(m), range(n)))
    # These two lists will store the result as we build it up each iteration.
    coefficients = []
    permutations = []
    # Create a copy of D so that we don't modify it directly. Cast the
    # entries of the matrix to floating point numbers, regardless of
    # whether they were integers.
    S = D.astype('float')
    while not np.all(S == 0):
        # Create an undirected graph whose adjacency matrix contains a 1
        # exactly where the matrix S has a nonzero entry.
        W = to_pattern_matrix(S)
        # Construct the bipartite graph whose left and right vertices both
        # represent the vertex set of the pattern graph (whose adjacency matrix
        # is ``W``).
        X = to_bipartite_matrix(W)
        # Convert the matrix of a bipartite graph into a NetworkX graph object.
        G = from_numpy_matrix(X)
        # Compute a perfect matching for this graph. The dictionary `M` has one
        # entry for each matched vertex (in both the left and the right vertex
        # sets), and the corresponding value is its partner.
        #
        # The bipartite maximum matching algorithm requires specifying
        # the left set of nodes in the bipartite graph. By construction,
        # the left set of nodes is {0, ..., n - 1} and the right set is
        # {n, ..., 2n - 1}; see `to_bipartite_matrix()`.
        left_nodes = range(n)
        M = maximum_matching(G, left_nodes)
        if len(M) < n * 2:
            break
        # However, since we have both a left vertex set and a right vertex set,
        # each representing the original vertex set of the pattern graph
        # (``W``), we need to convert any vertex numbered ``n`` or higher to its
        # original vertex number. To do this,
        #
        #   - ignore any keys greater than or equal to ``n``, since they are already
        #     covered by earlier key/value pairs,
        #   - ensure that all values are less than ``n``.
        #
        M = {u: v % n for u, v in M.items() if u < n}
        # Convert that perfect matching to a permutation matrix.
        P = to_permutation_matrix(M)
        # Get the smallest entry of S corresponding to the 1 entries in the
        # permutation matrix.
        q = min(S[i, j] for (i, j) in indices if P[i, j] == 1)
        # Store the coefficient and the permutation matrix for later.
        coefficients.append(q)
        permutations.append(P)
        # Subtract P scaled by q. After this subtraction, S has a zero entry
        # where the value q used to live.
        S -= q * P
        # PRECISION ISSUE: There seems to be a problem with floating point
        # precision here, so we need to round down to 0 any entry that is very
        # small.
        S[np.abs(S) < TOLERANCE] = 0.0
    return list(zip(coefficients, permutations))