コード例 #1
0
ファイル: pipeline.py プロジェクト: enj/hivemind
    def update_max_concurrency(self):
        """
        Determine the maximum number of concurrent Tasks that can still run.

        This is based on the maximum independent set of the subgraph
        of the transitive closure of the Tasks that are not done.

        * http://cs.stackexchange.com/a/16829
        * http://cs.stackexchange.com/a/10303/5323
        * http://stackoverflow.com/a/10304235
        * `Hopcroft-Karp algorithm <https://en.wikipedia.org/wiki/Hopcroft%E2%80%93Karp_algorithm>`_
        * `Dilworth's theorem <https://en.wikipedia.org/wiki/Dilworth's_theorem>`_
        * `Konig's theorem <https://en.wikipedia.org/wiki/K%C5%91nig%27s_theorem_(graph_theory)>`_

        Konig's theorem proves an equivalence between a maximum matching and
        a minimum vertex cover in bipartite graphs.  A minimum vertex cover is
        the complement of a maximum independent set for any graph.

        networkx.algorithms.approximation.independent_set has a method called
        maximum_independent_set but it does not seem to work correctly.

        Sets ``self.mc`` (max concurrency, an int) as a side effect; returns None.
        """
        # Cannot get the length of the max matching of an empty graph.
        if len(self.btc) == 0:
            self.mc = 0
            return

        # |max independent set| = |V| - |max matching| (Konig / complement of
        # min vertex cover).  hopcroft_karp_matching returns a dict containing
        # both endpoints of every matched edge, and self.btc holds two copies
        # of each Task (it is a bipartite doubling), so divide by two.
        # Floor division keeps the result an int (true division would yield a
        # float on Python 3 even though the value is always integral).
        self.mc = (len(self.btc) - len(hopcroft_karp_matching(self.btc))) // 2
コード例 #2
0
ファイル: main.py プロジェクト: JoePittsy/AdventOfCode
def task_one_and_two(dataset):
    """
    Solve both parts of the puzzle (AoC 2020 day 21 style).

    :param dataset: iterable of (ingredients, allergens) pairs, where each
        element is itself an iterable of strings.
    :return: tuple ``(count, csv)`` where ``count`` is the number of
        ingredient occurrences that cannot contain any allergen (part 1) and
        ``csv`` is the comma-joined list of allergen-carrying ingredients
        sorted by allergen name (part 2).
    """
    # Collect, for every allergen, each ingredient list it was seen with.
    allergens = defaultdict(list)
    for ingredients, allergens_in_ingredients in dataset:
        for allergen in allergens_in_ingredients:
            allergens[allergen].append(ingredients)

    # An allergen can only hide in ingredients common to ALL of its sightings.
    for name, possibles in allergens.items():
        allergens[name] = set(possibles[0]).intersection(*possibles[1:])

    # Build a bipartite allergen <-> suspect-ingredient graph.
    network = []
    for name, ingredients in allergens.items():
        for ingredient in ingredients:
            network.append((name, ingredient))

    allergen_names = allergens.keys()

    network = nx.Graph(network)
    matching = hopcroft_karp_matching(network)

    # The matching dict contains both directions; keep only allergen -> ingredient.
    # Use a set so the part-1 membership test below is O(1) per ingredient.
    matched_ingredients = {
        ingredient for allergen, ingredient in matching.items()
        if allergen in allergen_names
    }
    allergens_part2 = [
        pair for pair in matching.items() if pair[0] in allergen_names
    ]

    all_ingredients = [i for ingredient in dataset for i in ingredient[0]]
    non_allergens = [i for i in all_ingredients if i not in matched_ingredients]
    task_2 = ",".join(
        ingredient for _, ingredient in sorted(allergens_part2, key=lambda x: x[0])
    )

    return len(non_allergens), task_2
コード例 #3
0
def maximum_matching_all(bipartite_graph):
    """
    Return a maximum matching covering every connected component of the graph.

    ``hopcroft_karp_matching`` is run once per component (weakly connected
    components when the graph is directed), and the per-component matchings
    are merged into a single dict of vertex -> matched vertex.
    """
    if is_directed(bipartite_graph):
        components = weakly_connected_components(bipartite_graph)
    else:
        components = connected_components(bipartite_graph)

    combined = {}
    for component in components:
        subgraph = bipartite_graph.subgraph(component)
        combined.update(hopcroft_karp_matching(subgraph))
    return combined
コード例 #4
0
def get_optimal_edges(sg):
    """
    Greedily decompose a DAG into vertex-disjoint paths, level by level.

    Repeatedly takes the current roots (in-degree 0 nodes) whose node "size"
    (``len(node)``) is maximal, matches each root to at most one of its direct
    successors via bipartite maximum matching, records the chosen edge, and
    removes the roots.  A root with no matched successor is mapped to the
    sentinel ``"$"``, marking the end of its path.

    :param sg: a networkx DiGraph whose nodes support ``len()``
        (presumably sized collections; the largest nodes form the top level).
    :return: dict mapping each node to its successor on its path, or ``"$"``.
    """
    paths = {}
    # Work on a copy: roots are removed destructively each round.
    sg = sg.copy()
    while len(sg.nodes) != 0:
        # First, find the root(s) of the subgraph at the highest level,
        # i.e. the in-degree-0 nodes of maximal size.
        roots = {n for n, d in sg.in_degree() if d == 0}
        max_root_size = max(len(r) for r in roots)
        roots = {r for r in roots if len(r) == max_root_size}

        # Everything within reach of one edge from a root.
        reach_1 = set()
        for root in roots:
            reach_1.update(sg.neighbors(root))

        # Build a bipartite (roots vs. successors) graph and match.
        all_nodes = reach_1 | roots
        bipart_layer = sg.subgraph(all_nodes).to_undirected()
        assert bipartite.is_bipartite(bipart_layer)
        matching = bipartite.hopcroft_karp_matching(bipart_layer, roots)
        # The matching contains both directions; keep root -> successor only.
        matching = {k: v for k, v in matching.items() if k in roots}

        # Sanity check -- every successor appears in exactly one path.
        assert len(set(matching.values())) == len(matching)

        # Unmatched roots terminate their path; map them to the "$" sentinel.
        for unmatched_root in roots - matching.keys():
            matching[unmatched_root] = "$"
        assert len(matching) == len(roots)

        # Sanity check -- nothing was already recorded in our paths.
        for k, v in matching.items():
            assert k not in paths.keys()
            assert v not in paths.keys()
            assert v == "$" or v not in paths.values()

        # Sanity check -- all roots have an edge assigned.
        for root in roots:
            assert root in matching.keys()

        paths.update(matching)

        # Remove the processed roots, exposing the next level.
        sg.remove_nodes_from(roots)
    return paths
コード例 #5
0
ファイル: main.py プロジェクト: JoePittsy/AdventOfCode
def task_two(rules, mine, others):
    """
    Solve part two of the ticket-translation puzzle (AoC 2020 day 16 style).

    :param rules: dict mapping field name -> list of (low, high) ranges.
    :param mine: my ticket, a sequence of ints indexed by column.
    :param others: iterable of other tickets (sequences of ints).
    :return: product of my ticket's values for every field whose name
        contains "departure".
    """
    def matches_rule(rule, number):
        # A number satisfies a rule if it falls in any of its ranges.
        return any(sub_rule[0] <= number <= sub_rule[1] for sub_rule in rule)

    def valid_for_some_field(number):
        return any(matches_rule(rule, number) for rule in rules.values())

    # Discard tickets containing a value valid for no field at all.
    all_valid = np.array([
        ticket for ticket in others
        if all(valid_for_some_field(n) for n in ticket)
    ])

    # Connect each field to every column whose values all satisfy its rule.
    # The rule is passed explicitly rather than captured via a closure over
    # the loop variable, which was fragile (late binding).
    network = []
    for field_name, rule in rules.items():
        for column in range(len(all_valid[0])):
            if all(matches_rule(rule, n) for n in all_valid[:, column]):
                network.append((field_name, column))
    network = nx.Graph(network)
    # Use the Hopcroft-Karp bipartite matching algorithm to assign each
    # field to exactly one column.
    matching = hopcroft_karp_matching(network)

    # The matching dict contains both directions; field names are the
    # string keys, matched columns the int values.
    total = 1
    for key, column in matching.items():
        if isinstance(key, str) and "departure" in key:
            total *= mine[column]

    return total
コード例 #6
0
import os

import networkx as nx
from networkx import read_gpickle
from networkx.algorithms import bipartite, s_metric
from timeit import default_timer as timer
import pandas as pd

from networkx.algorithms.bipartite import hopcroft_karp_matching, density

N = 21
RESULT_PATH = 'output/result_matching.csv'

# Benchmark Hopcroft-Karp maximum matching on pickled graphs of growing size,
# recording wall-clock time and graph dimensions per run.
df = pd.DataFrame(columns=['time', 'nodes left', 'nodes right', 'edges'])
for i in range(4, N):
    print("Initializing graph reading " + str(i) + " ...")
    G = read_gpickle("graphs/graph" + str(i) + ".pickle")
    left, right = nx.bipartite.sets(G)
    print("Initializing matching " + str(i) + " ...")
    start = timer()
    # Pass the left vertex set explicitly: with top_nodes=None, networkx
    # raises AmbiguousSolution on disconnected bipartite graphs.
    hopcroft_karp_matching(G, top_nodes=left)
    end = timer()
    time = end - start
    print("Matching finished " + str(i) + " ...")
    df.loc[i] = [time, len(left), len(right), len(G.edges)]

# BUG FIX: the original tested os.path.isfile('filename.csv') -- a file that
# never exists -- so results were always rewritten with a header instead of
# being appended.  Check the actual output file instead.
if not os.path.isfile(RESULT_PATH):
    df.to_csv(RESULT_PATH,
              header=['time', 'nodes left', 'nodes right', 'edges'])
else:  # it exists, so append without writing the header
    df.to_csv(RESULT_PATH, mode='a', header=False)
コード例 #7
0
ファイル: 1.py プロジェクト: qifanyyy/CLCDSA
         assert (l[i][j] == ".")
         l[i][j] = "x"
     stt += 1
 gg = nx.Graph()
 for i in range(2 * n - 1):
     gg.add_node((i, 0))
 for i in range(-(n - 1), n):
     gg.add_node((i, 1))
 for (i, j) in bis:
     gg.remove_node((i + j, 0))
     gg.remove_node((i - j, 1))
 for i in range(n):
     for j in range(n):
         if i + j not in bisx and i - j not in bisy:
             gg.add_edge((i + j, 0), (i - j, 1))
 match = bipartite.hopcroft_karp_matching(gg)
 for cc in match:
     if cc[1] == 1: continue
     ppp = cc[0]
     mmm = match[cc][0]
     i = (ppp + mmm) // 2
     j = (ppp - mmm) // 2
     if l[i][j] == "x":
         l[i][j] = "o"
     else:
         assert (l[i][j] == ".")
         l[i][j] = "+"
     stt += 1
 chg = []
 for i in range(n):
     for j in range(n):