Example no. 1
def test_empty_graph():
    graph = csr_matrix((0, 0))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    expected_matching = np.array([])
    assert_array_equal(expected_matching, x)
    assert_array_equal(expected_matching, y)
Example no. 2
def test_graph_that_causes_augmentation():
    # In this graph, column 1 is initially assigned to row 1, but it should be
    # reassigned to make room for row 2.
    graph = csr_matrix([[1, 1], [1, 0]])
    x = maximum_bipartite_matching(graph, perm_type='column')
    y = maximum_bipartite_matching(graph, perm_type='row')
    expected_matching = np.array([1, 0])
    assert_array_equal(expected_matching, x)
    assert_array_equal(expected_matching, y)
Example no. 3
def test_explicit_zeros_count_as_edges():
    data = [0, 0]
    indices = [1, 0]
    indptr = [0, 1, 2]
    graph = csr_matrix((data, indices, indptr), shape=(2, 2))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    expected_matching = np.array([1, 0])
    assert_array_equal(expected_matching, x)
    assert_array_equal(expected_matching, y)
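As the test above shows, explicitly stored zeros count as edges. If that behaviour is not wanted, the stored zeros can be dropped first with eliminate_zeros(); a minimal sketch, not part of the original test suite:

from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_bipartite_matching

graph = csr_matrix(([0, 0], [1, 0], [0, 1, 2]), shape=(2, 2))
graph.eliminate_zeros()  # drop the explicitly stored zeros from the structure
print(maximum_bipartite_matching(graph, perm_type='row'))  # [-1 -1]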
Example no. 4
def test_graph_maximum_bipartite_matching():
    A = diags(np.ones(25), offsets=0, format='csc')
    rand_perm = np.random.permutation(25)
    rand_perm2 = np.random.permutation(25)

    Rrow = np.arange(25)
    Rcol = rand_perm
    Rdata = np.ones(25,dtype=int)
    Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()

    Crow = rand_perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25,dtype=int)
    Cmat = coo_matrix((Cdata,(Crow,Ccol))).tocsc()
    # Randomly permute identity matrix
    B = Rmat*A*Cmat
    
    # Row permute
    perm = maximum_bipartite_matching(B,perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25,dtype=int)
    Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()
    C1 = Rmat*B
    
    # Column permute
    perm2 = maximum_bipartite_matching(B,perm_type='column')
    Crow = perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25,dtype=int)
    Cmat = coo_matrix((Cdata,(Crow,Ccol))).tocsc()
    C2 = B*Cmat
    
    # Should get identity matrix back
    assert_equal(any(C1.diagonal() == 0), False)
    assert_equal(any(C2.diagonal() == 0), False)
    
    # Test int64 indices input
    B.indices = B.indices.astype('int64')
    B.indptr = B.indptr.astype('int64')
    perm = maximum_bipartite_matching(B,perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25,dtype=int)
    Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()
    C3 = Rmat*B
    assert_equal(any(C3.diagonal() == 0), False)
    
Example no. 5
def max_bipartile_matching(C, return_order=True):
    rorder = maximum_bipartite_matching(csr_matrix(C), perm_type='column')

    if return_order:
        return C[np.ix_(rorder, rorder)], rorder
    else:
        return C[np.ix_(rorder, rorder)]
Example no. 6
def test_graph_maximum_bipartite_matching():
    A = diags(np.ones(25), offsets=0, format='csc')
    rand_perm = np.random.permutation(25)
    rand_perm2 = np.random.permutation(25)

    Rrow = np.arange(25)
    Rcol = rand_perm
    Rdata = np.ones(25, dtype=int)
    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsc()

    Crow = rand_perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25, dtype=int)
    Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsc()
    # Randomly permute identity matrix
    B = Rmat * A * Cmat

    # Row permute
    perm = maximum_bipartite_matching(B, perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25, dtype=int)
    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsc()
    C1 = Rmat * B

    # Column permute
    perm2 = maximum_bipartite_matching(B, perm_type='column')
    Crow = perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25, dtype=int)
    Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsc()
    C2 = B * Cmat

    # Should get identity matrix back
    assert_equal(any(C1.diagonal() == 0), False)
    assert_equal(any(C2.diagonal() == 0), False)

    # Test int64 indices input
    B.indices = B.indices.astype('int64')
    B.indptr = B.indptr.astype('int64')
    perm = maximum_bipartite_matching(B, perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25, dtype=int)
    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsc()
    C3 = Rmat * B
    assert_equal(any(C3.diagonal() == 0), False)
Example no. 7
def part_two(lines, ranges, nearby, valid):
    valids = [l for l in nearby if all(n in valid for n in l)]
    loc = [[
        all((t1 <= l[j] <= t2) or (t3 <= l[j] <= t4) for l in valids)
        for t1, t2, t3, t4 in ranges
    ] for j in range(20)]
    m = maximum_bipartite_matching(csr_matrix(loc))
    return your[m[:6]].prod()
Example no. 8
def test_feasibility_of_result():
    # This is a regression test for GitHub issue #11458
    data = np.ones(50, dtype=int)
    indices = [11, 12, 19, 22, 23, 5, 22, 3, 8, 10, 5, 6, 11, 12, 13, 5, 13,
               14, 20, 22, 3, 15, 3, 13, 14, 11, 12, 19, 22, 23, 5, 22, 3, 8,
               10, 5, 6, 11, 12, 13, 5, 13, 14, 20, 22, 3, 15, 3, 13, 14]
    indptr = [0, 5, 7, 10, 10, 15, 20, 22, 22, 23, 25, 30, 32, 35, 35, 40, 45,
              47, 47, 48, 50]
    graph = csr_matrix((data, indices, indptr), shape=(20, 25))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    assert (x != -1).sum() == 13
    assert (y != -1).sum() == 13
    # Ensure that each element of the matching is in fact an edge in the graph.
    for u, v in zip(range(graph.shape[0]), y):
        if v != -1:
            assert graph[u, v]
    for u, v in zip(x, range(graph.shape[1])):
        if u != -1:
            assert graph[u, v]
Example no. 9
def test2() -> None:
    ingr, algs = list(ing_al), list(alg_candidates)  # Keys

    adj = np.zeros((len(ingr), len(algs)), dtype=bool)
    for alg, ings in alg_candidates.items():
        for ing in ings:
            adj[ingr.index(ing), algs.index(alg)] = True

    matches = maximum_bipartite_matching(csr_matrix(adj))
    x = {ingr[m]: algs[i] for i, m in enumerate(matches)}

    assert ",".join(sorted(x, key=x.get)) == "tmp,pdpgm,cdslv,zrvtg,ttkn,mkpmkx,vxzpfp,flnhl"  # type: ignore [arg-type]
Example no. 10
def test2() -> None:
    v_tics = nearby[np.all(valid_arr, axis=1)]  # Valid tickets.
    n_cols = len(ranges)

    adj = np.zeros((n_cols, n_cols), dtype=bool)  # "Adjacency matrix"
    for i, vals in enumerate(ranges):
        adj[:, i] = np.all(match(v_tics, vals), axis=0)

    matches = maximum_bipartite_matching(csr_matrix(adj))
    targets = [
        i for i, s in enumerate(dic.split("\n")) if s.startswith("departure")
    ]
    assert np.prod(my[matches[targets]]) == 998358379943
Example no. 11
def test_large_random_graph_with_one_edge_incident_to_each_vertex():
    np.random.seed(42)
    A = diags(np.ones(25), offsets=0, format='csr')
    rand_perm = np.random.permutation(25)
    rand_perm2 = np.random.permutation(25)

    Rrow = np.arange(25)
    Rcol = rand_perm
    Rdata = np.ones(25, dtype=int)
    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsr()

    Crow = rand_perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25, dtype=int)
    Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsr()
    # Randomly permute identity matrix
    B = Rmat * A * Cmat

    # Row permute
    perm = maximum_bipartite_matching(B, perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25, dtype=int)
    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsr()
    C1 = Rmat * B

    # Column permute
    perm2 = maximum_bipartite_matching(B, perm_type='column')
    Crow = perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25, dtype=int)
    Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsr()
    C2 = B * Cmat

    # Should get identity matrix back
    assert_equal(any(C1.diagonal() == 0), False)
    assert_equal(any(C2.diagonal() == 0), False)
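The permutation-matrix construction in the tests above can be written more compactly with fancy indexing; a minimal sketch on a randomly permuted identity matrix (the setup mirrors the test, the names are hypothetical):

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_bipartite_matching

rng = np.random.default_rng(42)
B = csr_matrix(np.eye(25)[rng.permutation(25)])  # randomly permuted identity

row_perm = maximum_bipartite_matching(B, perm_type='row')
col_perm = maximum_bipartite_matching(B, perm_type='column')
# Applying either permutation directly restores a zero-free diagonal.
assert np.all(B[row_perm].diagonal() != 0)
assert np.all(B[:, col_perm].diagonal() != 0)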
Example no. 12
def solve_part2(input_):
    your_ticket, nearby_tickets, ranges = process_input(input_)
    full_range = set()
    for r1, r2, r3, r4 in ranges:
        for v in range(r1, r2 + 1):
            full_range.add(v)
        for v in range(r3, r4 + 1):
            full_range.add(v)

    valid_tickets = [
        t for t in nearby_tickets if all(v in full_range for v in t)
    ]
    valids_r = [[
        all((r1 <= v[i] <= r2) or (r3 <= v[i] <= r4) for v in valid_tickets)
        for r1, r2, r3, r4 in ranges
    ] for i in range(20)]
    perm = maximum_bipartite_matching(csr_matrix(valids_r))
    print(prod([your_ticket[idx] for idx in perm][:6]))
Example no. 13
def bipartite_vertex_cover(bigraph, algo="Hopcroft-Karp"):
    """Bipartite minimum vertex cover by Koenig's theorem

    :param bigraph: adjacency list, index = vertex in U,
                                    value = neighbor list in V
    :comment: U and V can have different cardinalities
    :returns: boolean table for U, boolean table for V
    :comment: selected vertices form a minimum vertex cover,
              i.e. every edge is adjacent to at least one selected vertex
              and number of selected vertices is minimum
    :complexity: `O(\sqrt(|V|)*|E|)`
    """
    if algo == "Hopcroft-Karp":
        coord = [(irow,icol) for irow,cols in enumerate(bigraph) for icol in cols]
        coord = np.array(coord)
        graph = csr_matrix((np.ones(coord.shape[0]),(coord[:,0],coord[:,1])))
        matchV = maximum_bipartite_matching(graph, perm_type='row')
        matchV = [None if x==-1 else x for x in matchV]
        nU, nV = graph.shape
        assert len(matchV) == nV
    elif algo ==  "Hungarian":
        matchV = max_bipartite_matching2(bigraph)
        nU, nV = len(bigraph), len(matchV)
    else:
        assert False

    matchU = [None] * nU
    
    for v in range(nV):       # -- build the mapping from U to V
        if matchV[v] is not None:
            matchU[matchV[v]] = v

    visitU = [False] * nU     # -- build max alternating forest
    visitV = [False] * nV
    for u in range(nU):
        if matchU[u] is None:        # -- starting with free vertices in U
            _alternate(u, bigraph, visitU, visitV, matchV)
    inverse = [not b for b in visitU]
    return (inverse, visitV)
Example no. 14
def test_graph_with_more_columns_than_rows():
    graph = csr_matrix([[1, 1, 0], [0, 0, 1]])
    x = maximum_bipartite_matching(graph, perm_type='column')
    y = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, 2]), x)
    assert_array_equal(np.array([0, -1, 1]), y)
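The two outputs above describe the same matching from opposite sides; a small sketch of how the matched (row, column) pairs can be read off, with -1 marking unmatched vertices:

from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_bipartite_matching

graph = csr_matrix([[1, 1, 0], [0, 0, 1]])
cols = maximum_bipartite_matching(graph, perm_type='column')  # one column per row
edges = [(row, col) for row, col in enumerate(cols) if col != -1]
print(edges)  # [(0, 0), (1, 2)]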
Example no. 15
def calc_bipartite_matching(graph: csr_matrix) -> np.ndarray:
    # graph: csr_matrix containing the edges from V1 to V2
    return maximum_bipartite_matching(graph, perm_type='column')
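A small usage sketch, assuming calc_bipartite_matching from above is in scope and using a hypothetical edge list between the partitions V1 (rows) and V2 (columns):

import numpy as np
from scipy.sparse import csr_matrix

edges = [(0, 1), (1, 0), (1, 1), (2, 2)]  # (u in V1, v in V2)
rows, cols = zip(*edges)
graph = csr_matrix((np.ones(len(edges)), (rows, cols)), shape=(3, 3))
print(calc_bipartite_matching(graph))  # [1 0 2]: matched column for each row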
Example no. 16
def test_graph_with_no_edges():
    graph = csr_matrix((2, 2))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    assert_array_equal(np.array([-1, -1]), x)
    assert_array_equal(np.array([-1, -1]), y)
Example no. 17
def test_empty_right_partition():
    graph = csr_matrix((0, 3))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    assert_array_equal(np.array([-1, -1, -1]), x)
    assert_array_equal(np.array([]), y)
Example no. 18
    print(
        f"The ticket scanning error rate is {sum(invalid_val for _, invalid_val in invalid_values)}."
    )

    # Part two
    invalid_tickets = sorted(set(ticket_id for ticket_id, _ in invalid_values))
    tickets = np.array([
        ticket for id, ticket in enumerate(tickets)
        if id not in invalid_tickets
    ])

    field_validity_matrix = [[
        all(val in range1 or val in range2 for val in tickets[:, field_rank])
        for field_name, (range1, range2) in fields.items()
    ] for field_rank in range(len(fields))]
    bipartite_matching = maximum_bipartite_matching(
        csr_matrix(field_validity_matrix))
    field_ranks = {
        field_name: bipartite_matching[idx]
        for idx, field_name in enumerate(fields.keys())
    }

    product = np.prod([
        tickets[0, field_rank]
        for field_name, field_rank in field_ranks.items()
        if field_name.startswith("departure")
    ])
    print(
        f"The product of the six fields on the ticket that start with the word departure is {product}."
    )

Example no. 19
def bipartite_vertex_cover(bigraph, algo="Hopcroft-Karp"):
    r"""Bipartite minimum vertex cover by Koenig's theorem

    :param bigraph: adjacency list, index = vertex in U,
                                    value = neighbor list in V
    :comment: U and V can have different cardinalities
    :returns: boolean table for U, boolean table for V
    :comment: selected vertices form a minimum vertex cover,
              i.e. every edge is adjacent to at least one selected vertex
              and number of selected vertices is minimum
    :complexity: `O(\sqrt(|V|)*|E|)`
    """
    if algo == "Hopcroft-Karp":
        coord = [(irow,icol) for irow,cols in enumerate(bigraph) for icol in cols]
        coord = np.array(coord)
        graph = csr_matrix((np.ones(coord.shape[0]),(coord[:,0],coord[:,1])))
        matchV = maximum_bipartite_matching(graph, perm_type='row')
        matchV = [None if x==-1 else x for x in matchV]
        nU, nV = graph.shape
        assert len(matchV) == nV
    elif algo ==  "Hungarian":
        matchV = max_bipartite_matching2(bigraph)
        nU, nV = len(bigraph), len(matchV)
    else:
        assert False

    matchU = [None] * nU
    
    for v in range(nV):       # -- build the mapping from U to V
        if matchV[v] is not None:
            matchU[matchV[v]] = v
    
    def old_konig():
        visitU = [False] * nU     # -- build max alternating forest
        visitV = [False] * nV
        for u in range(nU):
            if matchU[u] is None:        # -- starting with free vertices in U
                _alternate(u, bigraph, visitU, visitV, matchV)
        inverse = [not b for b in visitU]
        return (inverse, visitV)
    
    def new_konig():
        # solve the limitation of huge number of recursive calls
        visitU = [False] * nU     # -- build max alternating forest
        visitV = [False] * nV
        wait_u = set(range(nU)) - set(matchV) 
        while len(wait_u) > 0:
            u = wait_u.pop()
            visitU[u] = True
            for v in bigraph[u]:
                if not visitV[v]:
                    visitV[v] = True
                    assert matchV[v] is not None  # otherwise match is not maximum
                    assert matchV[v] not in wait_u
                    wait_u.add(matchV[v])
        inverse = [not b for b in visitU]
        return (inverse, visitV)
    
    #res_old = old_konig()
    res_new = new_konig()
    #assert res_old == res_new
    return res_new
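A minimal usage sketch, assuming bipartite_vertex_cover from above (and its imports) is in scope; the bigraph is an adjacency list as described in the docstring:

bigraph = [[0, 1], [0], [2]]  # index = vertex in U, value = neighbours in V
in_cover_U, in_cover_V = bipartite_vertex_cover(bigraph)
# By Koenig's theorem every edge touches a selected vertex,
# and the cover size equals the size of a maximum matching.
for u, neighbours in enumerate(bigraph):
    for v in neighbours:
        assert in_cover_U[u] or in_cover_V[v]
print(sum(in_cover_U) + sum(in_cover_V))  # 3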
Example no. 20
ranges = set()
m = re.findall(r'(\d+)-(\d+)', inp[0])
for lo, hi in m:
    ranges.update(range(int(lo), int(hi) + 1))


def possibilities(ticket):
    ret = []
    for t in ticket:
        s = [0] * len(ticket)
        for i, r in enumerate(rules):
            l0, h0, l1, h1 = r
            s[i] = l0 <= t <= h0 or l1 <= t <= h1
        ret.append(s)
    return ret


valid = [t for t in tickets if all(i in ranges for i in t)]
p = possibilities(your)
for ticket in valid:
    p1 = possibilities(ticket)
    for i in range(len(p)):
        for j in range(len(p[i])):
            p[i][j] &= p1[i][j]

m = maximum_bipartite_matching(csr_matrix(p), perm_type='column')
sol = 1
for i, j in enumerate(m):
    sol *= your[i] if j < 6 else 1
print(sol)
Example no. 21
                E[r][c] = 1
                row_deg[r] += 1
                col_deg[c] += 1
    print("Excluded", len(excluded_rows), "rows")
    print("Excluded", len(excluded_cols), "columns")
    return karp_sipser(E, row_deg, col_deg)


##################################################
# Read graph data, and start algorithm
##################################################

#Graph Index
graph_index = 7

#Graph Data
G = mmread(GRAPHS[graph_index])
A = np.where(G.toarray() != 0, 1, 0)

print(GRAPHS[graph_index])
print("Dim:", G.shape, "Edges:", G.nnz)
print(A)
start = time.process_time()
M = two_sided(A)
end = time.process_time()
print("Matching Cardinality:", len(M))  #, "Matching:", M)
max_cardinality = len(np.where(maximum_bipartite_matching(G) != -1)[0])
print("Maximum Cardinality:", max_cardinality)
print("Ratio:", len(M) / max_cardinality)
print("Process Time:", end - start, "seconds")
Example no. 22
def compute_counts(preds, gts, iou_thr=0.5, conf_thr=0.5, disp=False):
    '''
    This function takes a pair of dictionaries (with our JSON format; see ex.) 
    corresponding to predicted and ground truth bounding boxes for a collection
    of images and returns the number of true positives, false positives, and
    false negatives. 
    <preds> is a dictionary containing predicted bounding boxes and confidence
    scores for a collection of images.
    <gts> is a dictionary containing ground truth bounding boxes for a
    collection of images.
    '''
    TP = 0  # true positive
    FP = 0  # false positive
    FN = 0  # false negative

    # Loop over images
    for pred_file, pred in preds.items():
        gt = gts[pred_file]

        # Make a matrix for the max flow problem, row is ground truth
        # col is predicted box
        # Row 0 and row 1 are reserved for source and sink respectively
        # The next len(gt) rows are reserved for ground truth boxes
        # Then after that remaining rows are for pred boxes
        # n = len(gt) + len(pred) + 2 # +2 because add source and sink
        # A = np.zeros(n, n)
        A = np.zeros((len(gt), len(pred)))

        # Number of predicted boxes that pass the confidence threshold
        keptBoxes = sum(1 for box in pred if box[-1] > conf_thr)

        for i in range(len(gt)):
            for j in range(len(pred)):
                # Only add edges in the case where this bounding box
                # exceeds the confidence threshold
                if pred[j][-1] > conf_thr:
                    iou = compute_iou(pred[j][:4], gt[i])
                    if iou > iou_thr:
                        # Set capacity of this edge to 1 to allow for a
                        # potential match
                        # Connect from ground truth to predicted box node
                        # A[i+2, len(gt)+2+j] = 1
                        # Connect all ground truth to source
                        # A[0, i+2] = 1
                        # Connect all predictions to sink
                        # A[len(gt)+2+j, 1] = 1
                        A[i, j] = 1
        A = csr_matrix(A)

        # Run Edmond-Karps to find max matching via max flow
        # res = maximum_flow(A, 0, 1)

        # correctPred = res.flow_value

        perm = maximum_bipartite_matching(A, perm_type='column')

        # The correct number of predictions is simply the number of locations
        # which are not -1 in perm
        truePositives = np.sum(np.array(perm) != -1)
        falsePositives = keptBoxes - truePositives
        falseNegatives = len(gt) - truePositives

        assert truePositives >= 0
        assert falsePositives >= 0
        assert falseNegatives >= 0

        TP += truePositives
        FP += falsePositives
        FN += falseNegatives

        if disp:
            im = Image.open(data_path + '/' + pred_file)
            for i, matchInd in enumerate(perm):
                if matchInd == -1: continue
                # i is the ground truth index
                # matchInd is the predicted both index
                draw = ImageDraw.Draw(im)
                color = (int(np.random.rand(1) * 255), \
                         int(np.random.rand(1) * 255), \
                         int(np.random.rand(1) * 255))
                try:
                    for box in [gt[i], pred[matchInd]]:
                        try:
                            (y0, x0, y1, x1) = tuple(box)[:4]
                        except:
                            pdb.set_trace()
                        draw.rectangle([x0, y0, x1, y1], outline=color)
                except:
                    print('hi')
                    pdb.set_trace()

            im.show()
    '''
    END YOUR CODE
    '''

    return TP, FP, FN
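The counting above boils down to a maximum bipartite matching on a 0/1 overlap matrix; a toy sketch with hypothetical values, where rows are ground-truth boxes and columns are the predictions that passed the confidence threshold:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_bipartite_matching

overlap = np.array([[1, 1, 0],
                    [0, 1, 0]])  # 1 where the IoU exceeds the threshold
perm = maximum_bipartite_matching(csr_matrix(overlap), perm_type='column')
tp = int(np.sum(perm != -1))    # matched ground-truth boxes
fp = overlap.shape[1] - tp      # kept predictions left unmatched
fn = overlap.shape[0] - tp      # ground-truth boxes left unmatched
print(tp, fp, fn)  # 2 1 0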
Example no. 23
def test_raises_on_dense_input():
    with pytest.raises(TypeError):
        graph = np.array([[0, 1], [0, 0]])
        maximum_bipartite_matching(graph)
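Dense input is rejected with a TypeError, as the test above checks; converting to CSR first is enough. A minimal sketch:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_bipartite_matching

graph = np.array([[0, 1], [0, 0]])
print(maximum_bipartite_matching(csr_matrix(graph), perm_type='row'))  # [-1  0]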
Example no. 24
import re

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_bipartite_matching

with open("input") as f:
    ls = [line.strip() for line in f.readlines()]

ranges = [list(map(int, re.findall(r'\d+', x))) for x in ls[:20]]
your = np.array([int(x) for x in ls[22].split(',')], dtype=np.int64)
nearby = [list(map(int, re.findall(r'\d+', x))) for x in ls[25:]]

valid = set()
for t1, t2, t3, t4 in ranges:
    valid |= set(range(t1, t2 + 1))
    valid |= set(range(t3, t4 + 1))

# Answer 1
print(sum(n for l in nearby for n in l if n not in valid))

# Answer 2
valids = [l for l in nearby if all(n in valid for n in l)]
loc = [[
    all((t1 <= l[j] <= t2) or (t3 <= l[j] <= t4) for l in valids)
    for t1, t2, t3, t4 in ranges
] for j in range(20)]
m = maximum_bipartite_matching(csr_matrix(loc))
print(your[m[:6]].prod())
Example no. 25
    def time_maximum_bipartite_matching(self, n, density):
        maximum_bipartite_matching(self.graph)
Example no. 26
    def solve(self):
        # https://de.wikipedia.org/wiki/Algorithmus_von_Christofides
        # 1. Get minimal spanning tree (msp)
        logging.debug("Compute minimal spanning tree ...")
        v = list(range(self.num_nodes))
        graph = csgraph_from_dense(self.d)
        msp = minimum_spanning_tree(graph)
        msp_d = csgraph_to_dense(msp)
        # The dense MST comes back in directed form (each edge stored only once), which is bad for the next step; make it symmetric
        for i in range(self.num_nodes):
            for j in range(self.num_nodes):
                if i == j:
                    continue
                if msp_d[i, j] == 0:
                    msp_d[i, j] = msp_d[j, i]

        # 2. Find the set of vertices with odd degree in the msp (T)
        logging.debug("Find vertices with odd degree in minimal spanning tree ...")
        odd_vertices = [i for i in v if sum([msp_d[i, j] != 0 for j in v]) % 2 == 1]

        # 3. Find a minimum-weight perfect matching M in the induced subgraph (isg) given by the set of odd vertices
        # Create adjacency matrix and take the negative values, as scipy only provides maximum weight bipartite matching
        logging.debug("Find maximum weight bipartite matching in subgraph induced by odd nodes induced ...")
        isg_d = np.zeros((self.num_nodes, self.num_nodes))
        for i in range(self.num_nodes):
            for j in range(i+1, self.num_nodes):
                if i in odd_vertices and j in odd_vertices:
                    isg_d[i, j] = -self.d[i, j]
                    isg_d[j, i] = -self.d[i, j]
        isg_graph = csgraph_from_dense(isg_d)
        m = maximum_bipartite_matching(isg_graph)

        # 4. Combine edges of M and T (the msp) to form a connected multigraph H.
        logging.debug("Combine edges from minimal spanning tree"
                      "and the maximum weight bipartite matching to a multigraph")
        msp_m_combined = networkx.MultiGraph()
        # Add edges from the minimum spanning tree
        for i in range(self.num_nodes):
            for j in range(i+1, self.num_nodes):
                if msp_d[i, j] > 0:
                    msp_m_combined.add_edge(i, j, weight=msp_d[i, j])
        # Add edges from the minimum-weight perfect matching M
        m_cp = m.copy()
        for i in m:
            if i >= 0 and m_cp[m[i]] >= 0:
                msp_m_combined.add_edge(i, m[i], weight=self.d[i, m[i]])
                # Do not add the same edge twice
                m_cp[m[i]] = -1
                m_cp[i] = -1

        # 5. Find Eulerian circuit in H, starting at node 0
        logging.debug("Find an eulerian path in the multigraph starting at vertice 0")
        eulerian_path = networkx.eulerian_path(msp_m_combined, source=0)

        # 6. Make the circuit found in previous step into a Hamiltonian circuit
        # Follow the Eulerian path and replace already-visited vertices with an edge to the next not-yet-visited vertex.
        logging.debug("Convert Eulerian path to Hamiltonian circuit")
        first_edge = next(eulerian_path)
        self.solution = [first_edge[0], first_edge[1]]
        for e in eulerian_path:
            if e[1] in self.solution:
                continue
            else:
                self.solution.append(e[1])
        self.dist = self.tour_to_dist(self.solution)
Example no. 27
    with open('inputs/16.txt', 'r') as file:
        lines = file.read().split('\n')

    rules = [list(map(int, re.findall(r'\d+', x))) for x in lines[:20]]
    mine = np.array([int(x) for x in lines[22].split(',')], dtype=np.int64)
    nearby = [list(map(int, re.findall(r'\d+', x))) for x in lines[25:]]

    # Part 1
    valid = get_valid_set(rules)
    total = 0
    for line in nearby:
        for num in line:
            if num not in valid:
                total += num
    print(total)

    # Part 2
    nearby = [line for line in nearby if all(num in valid for num in line)]

    graph = []
    for j in range(len(nearby[0])):
        check = []
        for t1, t2, t3, t4 in rules:
            check.append(
                all((t1 <= l[j] <= t2) or (t3 <= l[j] <= t4) for l in nearby))
        graph.append(check)

    m = maximum_bipartite_matching(csr_matrix(graph))
    print(mine[m[:6]].prod())
Example no. 28
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import (
    shortest_path, floyd_warshall, dijkstra, bellman_ford, johnson,
    NegativeCycleError, maximum_bipartite_matching, maximum_flow,
    minimum_spanning_tree,
)
import numpy as np

n, m = map(int, input().split())
edges = [list(map(int, input().split())) for i in range(m)]


def graph_csr(edges, n, directed=True, indexed_1=True):  # build a CSR sparse matrix from the edge list
    arr = np.array(edges, dtype=np.int64).T
    arr = arr.astype(np.int64)
    index = int(indexed_1)
    if not directed:
        return csr_matrix((np.concatenate([arr[2], arr[2]]), (np.concatenate([arr[0]-index, arr[1]-index]), np.concatenate([arr[1]-index, arr[0]-index]))), shape=(n, n))
    else:
        return csr_matrix((arr[2], (arr[0]-index, arr[1]-index)), shape=(n, n))


csr = graph_csr(edges, n)
try:
    print(floyd_warshall(csr))
except NegativeCycleError:
    print('-1')
dijkstra(csr, indices=0)
bellman_ford(csr, indices=0)
maximum_bipartite_matching(csr, perm_type='column')
maximum_flow(csr, source=0, sink=1).flow_value
maximum_flow(csr, source=0, sink=1).residual
int(sum(minimum_spanning_tree(csr).data))