Example #1
def construct_one_vs_one_cost(**keywords):
    xs, ys, zs   = keywords["xs"], keywords["ys"], keywords["zs"]
    radii, param = keywords["radii"], keywords["param"]
    sekihara_cos = keywords["sekihara_cos"]
    radius_ratio = keywords["radius_ratio"]

    common.assert_same_size(xs=xs, ys=ys, zs=zs)
    links = []
    
    n = len(xs)
    UF = DisjointSet(n)
    max_d = compute_max_distance(xs, ys, zs)

    if sekihara_cos:
        cost_func = lambda cos_theta, abs_dst : param * (1.0 - cos_theta) + abs_dst / max_d
    else:
        cost_func = lambda cos_theta, abs_dst : math.acos(cos_theta) + param * abs_dst / max_d

    # Process points in order, starting with those farthest from the center
    order_by_dist = []
    for i in xrange(1, n):
        order_by_dist.append([i, (xs[i]-xs[0])**2 + (ys[i]-ys[0])**2 + (zs[i] - zs[0])**2])
    order_by_dist.sort(key=lambda x:x[1], reverse=True)

    center = [xs[0], ys[0], zs[0]]
    for t in xrange(len(order_by_dist)):
        i = order_by_dist[t][0]

        cost = float('inf')
        next_index = -1

        src = [xs[i], ys[i], zs[i]]
         
        for j in xrange(0, len(xs)):
            if i == j: continue
            if j != 0 and not (radii[j] / radii[i] >= radius_ratio): continue
            
            # Avoid creating a cycle
            if UF.same(i, j): continue

            dst = [xs[j], ys[j], zs[j]]
            c = sekihara_method(src, dst, center, cost_func)

            if cost > c:
                cost = c
                next_index = j

        if next_index != -1:
            links.append((i, next_index))
            UF.merge(i, next_index)
       
    return links, 0
Example #2
def kruskal(graph: Graph) -> Graph:
    graph = sort_graph(graph)
    ds: DisjointSet = DisjointSet()
    mst = Graph()
    for vertex in graph.vertices:
        ds.find(vertex)
    for edge in graph:
        if not ds.connected(edge.start, edge.end):
            mst += edge
            component_1 = ds.find(edge.start)
            component_2 = ds.find(edge.end)
            ds.union(component_1, component_2)
    return mst
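The snippets on this page use several different DisjointSet implementations (some appear to be scipy.cluster.hierarchy.DisjointSet, others the standalone disjoint_set package, others project-specific classes), but they mostly rely on the same handful of operations. Below is a minimal union-find sketch, assuming only the method names used in the examples (find, union, connected, itersets); it is not any particular library's implementation.

class SimpleDisjointSet:
    """Minimal union-find: find, union, connected, itersets."""

    def __init__(self):
        self._parent = {}
        self._size = {}

    def find(self, x):
        # Unseen elements are implicitly registered as singleton sets,
        # which is why several examples call find() just to add vertices.
        if x not in self._parent:
            self._parent[x] = x
            self._size[x] = 1
            return x
        root = x
        while self._parent[root] != root:
            root = self._parent[root]
        while self._parent[x] != root:          # path compression
            self._parent[x], x = root, self._parent[x]
        return root

    def union(self, a, b):
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            return False
        if self._size[ra] < self._size[rb]:     # union by size
            ra, rb = rb, ra
        self._parent[rb] = ra
        self._size[ra] += self._size[rb]
        return True

    def connected(self, a, b):
        return self.find(a) == self.find(b)

    def itersets(self):
        # Group elements by representative and yield the groups.
        groups = {}
        for x in list(self._parent):
            groups.setdefault(self.find(x), set()).add(x)
        return iter(groups.values())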
Example #3
def Kruskals(self):
    """Kruskal's Algorithm"""
    Dset = DisjointSet(self._numVertices)

    # Generate numbers that will act as a wall between two cells in a row
    rows = set()
    pre = .5
    for i in range(self._columns):
        for j in range(self._rows - 1):
            rows.add(pre)
            pre += 1
        pre += 1

    # Generate numbers that will act as a wall between two cells in a column
    columns = set()
    offset = self._rows / 2
    pre = offset
    for i in range(self._rows):
        for j in range(self._columns - 1):
            columns.add(pre)
            pre += 1

    while Dset.nsets != 1:
        if random() < 0.5:
            """Pick a random row"""
            random_row_edge = sample(rows, 1)[0]
            rows.remove(random_row_edge)

            left_cell = int(random_row_edge - .5)
            right_cell = int(random_row_edge + .5)
            # If the left and right cells are not part of the same set, merge them
            if Dset.find(left_cell) != Dset.find(right_cell):
                # print("Joining two rows: ", left_cell, right_cell)
                Dset.merge(left_cell, right_cell)
                self.add_edge((left_cell, right_cell))
                self.genTile(left_cell)
                self.genTile(right_cell)
        else:
            """Pick a random column"""
            random_column_edge = sample(columns, 1)[0]
            columns.remove(random_column_edge)

            left_cell = int(random_column_edge - offset)
            right_cell = int(random_column_edge + offset)
            # If the top and bottom cells are not part of the same set, merge them
            if Dset.find(left_cell) != Dset.find(right_cell):
                # print("Joining two columns: ", left_cell, right_cell)
                Dset.merge(left_cell, right_cell)
                self.add_edge((left_cell, right_cell))
                self.genTile(left_cell)
                self.genTile(right_cell)
Example #4
def filter_overlaping_bbs(input_csv, output_csv):
    df = pd.read_csv(input_csv, sep=';')
    df['keep'] = False

    iou_threshold = 0.8

    print('filtering overlaping bbs...')
    for img_name, group in tqdm.tqdm(df.groupby('img_name')):

        ds = DisjointSet()

        for start_idx, (idx1, row1) in enumerate(group.iterrows()):
            ds.union(idx1, idx1)

            xywha1 = np.array(
                list([
                    row1['cx'], row1['cy'], row1['w'], row1['h'],
                    row1['bb_angle']
                ]))

            g2 = list(group.iterrows())
            for j in range(start_idx, len(g2)):

                idx2, row2 = g2[j]

                if idx1 != idx2:
                    # if bbs are identical, iou is 0 instead of 1 (probably a bug), so a small number is added
                    e = 0.00001
                    xywha2 = np.array(
                        list([
                            row2['cx'], row2['cy'], row2['w'], row2['h'],
                            row2['bb_angle'] + e
                        ]))
                    iou = rbbox_iou(xywha1, xywha2)
                    # if bbs overlap, join them into one cluster
                    if iou > iou_threshold:
                        ds.union(idx1, idx2)

        # select one final bb with the largest score in every cluster
        for disjointset in ds.itersets():
            idxs = list(disjointset)
            g = group.loc[idxs]
            scores = g.angle_class_degrees_prob.values
            keepidx = idxs[np.argmax(scores)]
            df.loc[keepidx, 'keep'] = True
            group.loc[keepidx, 'keep'] = True

    df = df[df.keep == True]

    df.to_csv(output_csv, sep=';')
Example #5
 def kruskal(self) -> Set[Tuple[int]]:
     edge_queue = []
     for e, cost in self.edges.items():
         heapq.heappush(edge_queue, (cost, e))
     vertex_set = DisjointSet()
     for v in self.vertexes:
         vertex_set.find(v)
     known_edges = set()
     while len(list(vertex_set.itersets())) > 1:
         cost, edge = heapq.heappop(edge_queue)
         if not vertex_set.connected(*edge):
             known_edges.add(tuple(sorted(list(edge))))
             vertex_set.union(*edge)
     return known_edges
Example #6
def kruskal_minimum_spanning_tree(graph: Graph) -> list[Edge]:
    """Return list of edges, representing minimum spanning tree"""
    vertices = get_all_nodes(graph)
    edges = get_all_edges(graph)

    djs = DisjointSet(vertices)
    sorted_edges = sorted(edges, key=lambda edge: edge.weight)

    res: list[Edge] = []
    for edge in sorted_edges:
        if djs.find_set(edge.u) != djs.find_set(edge.v):
            res.append(edge)
            djs.union_sets(edge.u, edge.v)
    return res
Example #7
def kruskal(graphe: Graphe):
    retour = []

    sommets = [sommet for sommet in graphe]

    aretes = graphe.liens(trier=True)

    ds = DisjointSet(sommets)

    for arete in aretes:
        if ds.find(arete[0]) != ds.find(arete[1]):
            retour.append(f'{arete[0]}-{arete[1]}')
            ds.union(arete[0], arete[1])

    return retour
Example #8
def Ellers(self):
    Dset = DisjointSet(self._numTiles[0])
    for i in range(self._numTiles[1]):
        self.genTile(i * self._numTiles[0])
        for j in range(1, self._numTiles[0]):

            if Dset.find(j) == Dset.find(j - 1):
                self.genTile(self.toIndex((j, i)))
                continue

            idx = self.toIndex((j, i))
            shouldMerge = bool(randint(int(i == self._numTiles[1] - 1), 1))
            if shouldMerge:
                Dset.merge(j - 1, j)
                self.add_edge((idx - 1, idx))
            self.genTile(idx)

        if i != self._numTiles[1] - 1:
            remainders = [i for i in range(self._numTiles[0])]

            for idx, s in enumerate(Dset.Sets):
                if s == None:
                    continue

                s = s.copy()
                numDownward = randint(1, len(s))
                for k in range(numDownward):
                    c = randint(0, len(s) - 1)
                    cid = s[c]
                    c1 = self.toIndex((s[c], i))
                    c2 = self.toIndex((s[c], i + 1))
                    self.add_edge((c1, c2))
                    self.genTile(c2)
                    s.pop(c)
                    remainders.remove(cid)

            #recreate the disjoint set with the correct set/cell locations
            for r in remainders:
                n = next((i for i, v in enumerate(Dset.Sets) if v == None))
                Dset.Cells[r] = n
                Dset.Sets[n] = [n]
            Dset.Sets = [None] * self._numTiles[0]
            for i, v in enumerate(Dset.Cells):
                if Dset.Sets[v] == None:
                    Dset.Sets[v] = [i]
                if i not in Dset.Sets[v]:
                    Dset.Sets[v].append(i)
Example #9
def greedy_clusters(graph, homes, homes_to_index, shortest, k):
    """ We will return the solution for all values of k """

    # Create all pairs of homes
    pairs = []
    for i in range(len(homes)):
        for j in range(i + 1, len(homes)):
            pairs.append((i, j))
    
    # Sort the pairs
    pairs = sorted(pairs, key = lambda pair: shortest[homes_to_index[homes[pair[0]]]][homes_to_index[homes[pair[1]]]])

    # Create a WQU for clustering
    quick_union = DisjointSet()
    for i in range(len(homes)):
        quick_union.find(i)

    # print(list(quick_union.itersets()))
    # Greedily combine pairs until at most k clusters of homes remain
    while len(list(quick_union.itersets())) > k:
        curr = pairs.pop(0)
        quick_union.union(curr[0], curr[1])

    
    # map home to the cluster that it is in
    home_to_cluster = {}
    cluster_index_homes = list(quick_union.itersets())
    clusters_answer = []

    # We just want to move from indices back to homes
    for lst in cluster_index_homes:
        new_lst = []
        for i in lst:
            new_lst.append(homes_to_index[homes[i]])
            home_to_cluster[homes[i]] = new_lst
        clusters_answer.append(new_lst)

    # Now with these cluster_homes, we can add to all the surrounding neighbors 
    # We look at each node, and then we add the node to the cluster of its nearest home
    vals = list(homes_to_index.values())
    for node in graph.nodes:
        if node not in vals: #if it is not a home
            closest_home = min(homes, key = lambda home: shortest[homes_to_index[home]][node])
            home_to_cluster[closest_home].append(node)

    return clusters_answer
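As an aside, the clustering step above amounts to merging the closest pairs until at most k clusters remain. A compact, self-contained sketch of that idea (the function name and data are hypothetical; a plain parent list stands in for DisjointSet):

def greedy_k_clusters(n_items, sorted_pairs, k):
    # sorted_pairs is assumed to be pre-sorted by increasing distance,
    # exactly like the `pairs` list built above.
    parent = list(range(n_items))

    def find(a):
        while parent[a] != a:
            parent[a] = parent[parent[a]]   # path halving
            a = parent[a]
        return a

    clusters = n_items
    for i, j in sorted_pairs:
        if clusters <= k:
            break
        ri, rj = find(i), find(j)
        if ri != rj:
            parent[ri] = rj
            clusters -= 1
    roots = {find(x) for x in range(n_items)}
    return [{x for x in range(n_items) if find(x) == r} for r in roots]

# greedy_k_clusters(4, [(0, 1), (2, 3), (0, 2)], 2) -> the clusters {0, 1} and {2, 3}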
Example #10
    def kruskal(self):

        # edge_i: current position in the edge list (assumed sorted by weight);
        # edge_n: number of edges added to the MST so far
        edge_i, edge_n = (0, 0)
        self.ds = DisjointSet()
        self.mst = []
        while edge_n < len(self.vertices) - 1:
            vertex_1, vertex_2, weight = self.edges[edge_i]
            edge_i += 1
            cluster_1 = self.ds.find(vertex_1)
            cluster_2 = self.ds.find(vertex_2)
            if cluster_1 != cluster_2:
                self.ds.union(cluster_1, cluster_2)
                self.mst.append([vertex_1, vertex_2, weight])
                edge_n += 1

        return self.mst
Example #11
    def kruskal(self):
        self.visitados = []
        self.grafoN = dict()
        self.llenarGrafo()
        self.pq = PriorityQueue()
        self.disjointSet = []
        self.grafoKruskal = dict()

        for particula in self.capturador.lista:
            self.pq.put([
                particula.distancia,
                ((particula.origenX, particula.origenY), (particula.destinoX,
                                                          particula.destinoY))
            ])

        self.llenarDisjointSet()

        conjunto = DisjointSet(self.disjointSet)
        print(conjunto.get())

        while not self.pq.empty():
            actual = self.pq.get()

            if actual[1][0] not in conjunto.find(actual[1][1]):
                self.llenarGrafoKrustal(actual[1][0], actual[1][1], actual[0])
                print(actual[1][0], actual[1][1])
                conjunto.union(actual[1][0], actual[1][1])

        self.mostrarGrafoG()

        self.pen.setWidth(3)
        for item in self.grafoKruskal.items():
            print(item)
            for item2 in item[1]:
                self.pen.setColor(QColor(0, 0, 0))
                self.scene.addLine(item[0][0], item[0][1], item2[0][0],
                                   item2[0][1], self.pen)
                print(item[0][0], item[0][1], item2[0][0], item2[0][1])
        self.pen.setWidth(1)
Example #12
    def __init__(self, offset_lst, dst_v_lst, deg_lst, eps, min_pts):
        # parameters
        self.eps = eps
        self.min_pts = min_pts

        self.n = len(deg_lst)

        # offset and vertex properties
        self.offset_lst = offset_lst
        self.inc_degree_lst = list(map(lambda degree_val: degree_val + 1, deg_lst))
        self.similar_degree_lst = [0] * len(self.inc_degree_lst)

        # dst_v and edge properties
        self.dst_v_lst = dst_v_lst
        self.min_cn_lst = [PScan.not_sure] * len(self.dst_v_lst)

        self.src_lst = [0] * len(dst_v_lst)
        for u in range(0, len(offset_lst) - 1):
            for i in range(offset_lst[u], offset_lst[u + 1]):
                self.src_lst[i] = u
        self.el_lst = list(zip(self.src_lst, self.dst_v_lst))

        # disjoint set
        self.disjoint_set = DisjointSet(self.n)

        # non-core clustering
        self.cluster_dict = [self.n] * self.n
        self.non_core_cluster = []

        # 1. statistics for prune
        self.prune0 = 0  # definitely not reachable
        self.prune1 = 0  # definitely reachable

        # 2.1 statistics for check core 1st bsp: set intersection with early stop
        self.intersect = 0
        self.cmp0 = 0
        self.cmp1 = 0
        self.cmp_equ = 0

        # 2.2 statistics for check core 2nd bsp: binary search
        self.binary_search_call = 0

        # 3 statistics for disjoint set

        self.result_lines = []
Example #13
def kruskal(points):
    """finds minimum spanning tree of given points

    :param points: numpy array of shape (n_points, 2)
    :return: adjacency list of the minimum spanning tree (list of neighbour indices per point)
    """
    length = points.shape[0]
    disjoint_set = DisjointSet(length)
    edges = [(np.linalg.norm(points[i] - points[j]), i, j)
             for i in range(length) for j in range(i + 1, length)]
    edges.sort(key=lambda x: x[0])
    adjacency_list = [[] for _ in range(length)]
    for w, u, v in edges:
        if disjoint_set.find(u) != disjoint_set.find(v):
            adjacency_list[u].append(v)
            adjacency_list[v].append(u)
            disjoint_set.union(u, v)
    return adjacency_list
Example #14
    def __init__(self, graph):
        pq = PriorityQueue()
        for edge in graph.get_edges():
            pq.put(edge)

        V = graph.get_num_node()
        disjoint_set = DisjointSet(V)

        minimum_spaning_tree = []
        while not pq.empty() and len(minimum_spaning_tree) < V - 1:
            edge = pq.get()
            v = edge.either()
            w = edge.other(v)

            if not disjoint_set.isConnected(v, w):
                disjoint_set.union(v, w)
                minimum_spaning_tree.append(edge)
        self.mst = minimum_spaning_tree
Example #15
    def MST_kruskal(self):
        mst = Graph(self.vertex_num)
        ds = DisjointSet(self.vertex_num)
        self.edge_list.sort(key=lambda e: e.w)

        mst_edge_num = 0
        edge_idx = 0
        while mst_edge_num < self.vertex_num-1:
            edge = self.edge_list[edge_idx]
            if ds.collapsing_find(edge.u) != ds.collapsing_find(edge.v):
                mst.insert_edge(edge.u, edge.v, edge.w)
                ds.weighted_union(ds.collapsing_find(edge.u),
                                  ds.collapsing_find(edge.v))
                mst_edge_num += 1

            edge_idx += 1

        return mst
Example #16
 def populate_kruskal(self):
     cells = []
     edges = []
     for i in range(self.h):
         for j in range(self.w):
             cells.append((i, j))
             if i + 1 < self.h:
                 edges.append(((i, j), (i + 1, j)))
             if j + 1 < self.w:
                 edges.append(((i, j), (i, j + 1)))
     ds = DisjointSet(cells)
     random.shuffle(edges)
     for edge in edges:
         (a, b), (c, d) = edge
         if ds.union((a, b),  (c, d)):
             nb = (c-a, d-b)
             nd = self.OFF_DIR[nb]
             self.set_dir(a, b, nd)
             self.set_dir(c, d, self.OPOSITE[nd])
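The maze-generation snippets above (Kruskals, Ellers, populate_kruskal) depend on project-specific helpers such as genTile, add_edge and OFF_DIR. For reference, here is a self-contained sketch of the same Kruskal-style idea with hypothetical names: shuffle every wall between adjacent grid cells and knock a wall down whenever it still separates two disconnected cells.

import random

def kruskal_maze(height, width, seed=None):
    rng = random.Random(seed)
    parent = {(i, j): (i, j) for i in range(height) for j in range(width)}

    def find(cell):
        while parent[cell] != cell:
            parent[cell] = parent[parent[cell]]   # path halving
            cell = parent[cell]
        return cell

    # Every wall is a pair of adjacent cells.
    walls = []
    for i in range(height):
        for j in range(width):
            if i + 1 < height:
                walls.append(((i, j), (i + 1, j)))
            if j + 1 < width:
                walls.append(((i, j), (i, j + 1)))
    rng.shuffle(walls)

    passages = []
    for a, b in walls:
        ra, rb = find(a), find(b)
        if ra != rb:                 # removing this wall connects two regions
            parent[ra] = rb
            passages.append((a, b))
    return passages                  # len(passages) == height * width - 1

# e.g. kruskal_maze(4, 4, seed=0) returns the 15 removed walls of a 4x4 maze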
Example #17
def group_similar_images(
        similar_pairs_ll: list,
        groups_ds: DisjointSet = None,
        stat: bool = True,
        **kwargs
) -> DisjointSet:
    if not groups_ds:
        groups_ds = DisjointSet()
    pairs_l = [pair for pairs in similar_pairs_ll for pair in pairs]
    round_cnt, total_cnt = 0, len(pairs_l)
    for pair in pairs_l:
        groups_ds.union(*pair)
        if stat:
            round_cnt += 1
            print('group:', percentage(round_cnt / total_cnt), total_cnt, round_cnt, len(list(groups_ds.itersets())),
                  end='\r')
    if stat:
        print()
    return groups_ds
Example #18
 def compute_must_alias(self):
     pointsto_map = self.table.pointsto_map()
     variables = pointsto_map.keys()
     alias_sets = DisjointSet()
     print(f"\t\t#Variables= {len(variables)}")
     """A O(N logN) algorithm to unify variables. Brute-force approach matches (u,v)  and unifies them if pt(u) = pt(v). 
     This approach maintains a list of visited_heap_objects which maps the integer representation of set of heap 
     objects. If a matching heap objects is found in the visited_heap_objects then the variables are unified,  
     otherwise it updates the visited_heap_objects."""
     visited_heap_objects = defaultdict()
     for v_i in variables:
         heap_objs = int(pointsto_map[v_i])
         if heap_objs in visited_heap_objects.keys():
             v_j = visited_heap_objects[heap_objs]
             if not alias_sets.connected(v_i, v_j):
                 alias_sets.union(v_i, v_j)
         else:
             alias_sets.find(v_i)
         visited_heap_objects[heap_objs] = v_i
     return alias_sets.itersets()
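The idea described in the docstring can be shown in isolation: instead of comparing every pair of variables, hash each variable's points-to set and unify variables that map to the same set, giving a single pass over the variables. The sketch below uses toy data and a hypothetical helper name, and groups on a frozenset key rather than going through a DisjointSet.

def group_by_pointsto(pointsto_map):
    # pointsto_map: variable -> set of heap objects it may point to
    groups = {}
    for var, heap_objs in pointsto_map.items():
        groups.setdefault(frozenset(heap_objs), []).append(var)
    return list(groups.values())

# group_by_pointsto({"a": {1, 2}, "b": {1, 2}, "c": {3}}) -> [["a", "b"], ["c"]]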
Example #19
    def kruskal_pp(self):
        """Retourne un arbre de recouvrement minimal s'il existe,
        avec utilisation du rang et de la compression de chemins"""

        min_tree = Graph('Arbre Minimal')

        disj_sets = {}

        nodes = self.nodes

        # Number of nodes in the graph
        nb_nodes = self.get_nb_nodes()

        # Fill the dictionary of disjoint sets
        for node in nodes:
            disj_sets[node] = DisjointSet(node)

        edges = self.edges

        # The list is sorted according to the comparison implemented in edge
        edges.sort()

        # Build the tree
        for edge in edges:
            (node1, node2) = edge.nodes

            # If the union of the two disjoint_sets succeeds
            if disj_sets[node1].rank_compressed_union(disj_sets[node2]):
                # Extend the minimal tree
                min_tree.add_node(node1)
                logging.debug('Ajout de %s', node1)
                min_tree.add_node(node2)
                logging.debug('Ajout de %s', node2)
                min_tree.add_edge(edge)
                logging.debug('Ajout de %s', edge)

            # If all nodes are in min_tree, the tree is complete
            if min_tree.get_nb_edges() == nb_nodes - 1:
                break

        return min_tree
Example #20
    def build_gene_groups(self,
                          functions: Optional[Dict[str,
                                                   List[str]]] = None) -> None:
        """Builds gene groups based on functions and stored gene-gene links.

        `functions` maps genes to user-assigned functions; keys should correspond
        to the `label` property of Gene objects. If specified, groups will first
        be built from these functional groups and extended using gene-gene links.
        If not, all groups will be built purely from gene-gene links.

        e.g. If genes A and B are both assigned as category Z, and have links to
             genes C and D, respectively, the result will be a single group Z,
             containing genes A, B, C and D. Group Z is first made with genes A and B,
             then extended to include C (because of link to A) and D (link to B).
        """
        self.groups = []
        if functions:
            for function, genes in functions.items():
                uids = self.get_gene_uids(genes)
                group = Group(label=function, genes=set(uids))
                self.groups.append(group)
        if not self._links:
            for group in self.groups:
                group.genes = list(group.genes)
            return
        ds = DisjointSet()
        for link in self._links.values():
            ds.union(link.query.uid, link.target.uid)
        for genes in ds.itersets():
            merged = False
            for group in self.groups:
                if not genes.isdisjoint(group.genes):
                    group.genes.update(genes)
                    merged = True
                    break
            if not merged:
                group = Group(label=f"Group {len(self.groups)}",
                              genes=set(genes))
                self.groups.append(group)
        for group in self.groups:
            group.genes = list(group.genes)
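The docstring's worked example (genes A and B seeded into group Z, then extended with the linked genes C and D) can be reproduced with a small standalone sketch; the helper below is hypothetical and uses a plain parent dict in place of DisjointSet.

def extend_groups_with_links(seed_groups, links):
    # seed_groups: label -> set of genes; links: iterable of (gene, gene) pairs
    parent = {}

    def find(x):
        parent.setdefault(x, x)
        while parent[x] != x:
            parent[x] = parent[parent[x]]   # path halving
            x = parent[x]
        return x

    for a, b in links:
        parent[find(a)] = find(b)

    # Connected components of the link graph.
    components = {}
    for gene in list(parent):
        components.setdefault(find(gene), set()).add(gene)

    groups = {label: set(genes) for label, genes in seed_groups.items()}
    for comp in components.values():
        for genes in groups.values():
            if not comp.isdisjoint(genes):   # extend an existing group
                genes.update(comp)
                break
        else:                                # otherwise start a new group
            groups[f"Group {len(groups)}"] = set(comp)
    return groups

# extend_groups_with_links({"Z": {"A", "B"}}, [("A", "C"), ("B", "D")])
# -> {"Z": {"A", "B", "C", "D"}}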
Example #21
def minimum_distance(n, x, y):
    '''
    Implements Kruskal's algorithm and builds a minimum spanning tree (MST) that spans all n vertices represented by coordinates x, y
    Calculates and returns the length of the MST
    Input: n = number of vertices, a list of x coordinates (x) and y coordinates (y), where each pair (x, y) represents a vertex
    Output: length of the MST
    '''

    result = 0
    v, heap = build_edges(n, x, y)
    ds = DisjointSet()
    while heap:
        min_edge = heappop(heap)
        w = min_edge[0]
        u = min_edge[1]
        v = min_edge[2]
        if not ds.connected(min_edge[1], min_edge[2]):
            ds.union(u, v)
            result += w

    return result
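Since build_edges is not shown here, a fully self-contained sketch of the same computation (total MST length from point coordinates) may be easier to experiment with; the function name below is hypothetical.

import math
from heapq import heappush, heappop

def mst_total_length(n, x, y):
    # Push every edge (weight, u, v) onto a heap, then run Kruskal's algorithm
    # with a small inline union-find until n - 1 edges have been taken.
    edges = []
    for i in range(n):
        for j in range(i + 1, n):
            heappush(edges, (math.hypot(x[i] - x[j], y[i] - y[j]), i, j))

    parent = list(range(n))

    def find(a):
        while parent[a] != a:
            parent[a] = parent[parent[a]]
            a = parent[a]
        return a

    total, taken = 0.0, 0
    while edges and taken < n - 1:
        w, u, v = heappop(edges)
        ru, rv = find(u), find(v)
        if ru != rv:
            parent[ru] = rv
            total += w
            taken += 1
    return total

# mst_total_length(3, [0, 0, 1], [0, 1, 0]) == 2.0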
Example #22
 def __KruChoose(self):
     PartitionSize = dict(zip(range(self.Vcount), [1] * self.Vcount))
     ListofMaxPartitionSizes = []
     Echoose = []
     # Components = self.Vcount
     ds = DisjointSet()
     for I, J, W in self.E:
         if ds.find(I) != ds.find(J):
             MergedSize = PartitionSize[ds.find(I)] + PartitionSize[ds.find(
                 J)]
             ds.union(I, J)
             # PartitionSize[I] = 0
             PartitionSize[ds.find(J)] = MergedSize
             ListofMaxPartitionSizes.append(max(PartitionSize.values()))
             Echoose.append(True)
             # Components -= 1
             # if Components == 1:
             #     break
             continue
         Echoose.append(False)
     return Echoose, ListofMaxPartitionSizes
Example #23
    def prim(self, root="default"):
        "Algorithme de Prim"

        min_tree = Graph('Arbre Minimal')
        disj_sets = {}
        nodes = self.nodes

        # Priority queue
        Q = []

        for node in nodes:
            disj_sets[node] = DisjointSet(node)
            heappush(Q, disj_sets[node])

        # Choose the root
        if root == "default":
            r = heappop(Q)
        else:
            r = disj_sets[root]
            Q.remove(r)

        r.key = 0
        heappush(Q, r)

        while len(Q) > 0:
            u_set = heappop(Q)
            u = u_set.node
            logging.debug("Noeud ajoute a l'arbre minimal : %s", u)
            min_tree.add_node(u)

            if u_set.key != 0:
                p = u_set.parent.node
                min_tree.add_edge(self.__adj[p][u])

            for v in [w for w in self.__adj[u].keys() if disj_sets[w] in Q\
                    and self.__adj[u][w].weight < disj_sets[w].key]:
                disj_sets[v].parent = u_set
                disj_sets[v].key = self.__adj[u][v].weight

        return min_tree
Example #24
    def minimum_formula_one_agent(self, agent_id: int,
                                  winning_states: Set[int]) -> Set[int]:
        result_states = self.prepare_result_states(winning_states)
        current_states = winning_states.copy()
        winning_states_disjoint = DisjointSet(0)
        winning_states_disjoint.subsets = copy.deepcopy(
            self.epistemic_class_disjoint[agent_id].subsets)
        first_winning = winning_states_disjoint.find(next(iter(winning_states)))
        epistemic_class_ids = set()
        for state_id in winning_states:
            epistemic_class_id = self.epistemic_class_membership[agent_id][
                state_id]
            epistemic_class_ids.add(epistemic_class_id)

        for epistemic_class_id in epistemic_class_ids:
            epistemic_states = self.imperfect_information[agent_id][
                epistemic_class_id]
            is_ok = True
            for epistemic_state in epistemic_states:
                state_id = epistemic_state
                if epistemic_state not in winning_states:
                    is_ok = False
                    break
            if is_ok:
                winning_states_disjoint.union(first_winning, state_id)

        custom_can_go_there = self.can_go_there[agent_id][:]

        while True:
            current_states, modified = self.basic_formula_one_agent(
                agent_id, current_states, first_winning,
                winning_states_disjoint, custom_can_go_there)
            result_states.update(current_states)
            if not modified:
                break

        return result_states
Example #25
def kruskal_mst(agraph):
    """
	Return a minimum spanning tree using kruskal's algorithm
	"""
    # minimum spanning tree
    mst = []

    # disjoint set
    disjoint_set = DisjointSet()

    # make set
    for vertex in agraph.Vertices():
        disjoint_set.make_set(vertex)

    # edges of the graph
    edges = agraph.edges()
    edges.sort(key=lambda tup: tup[2])

    for u, v, cost in edges:
        if disjoint_set.find_set(u) != disjoint_set.find_set(v):
            mst.append((u, v, cost))
            disjoint_set.union(u, v)
    return mst
Example #26
def construct_minimum_spanning_tree(xs, ys, zs):
    """
    Parameters
    ----------
    xs : [float]
    ys : [float]
    zs : [float]

    Returns
    -------
    links : [(int, int)]
    biggest_root : int
    """
    common.assert_same_size(xs=xs, ys=ys, zs=zs)
    n = len(xs)
    es = []
    for i in xrange(n):
        for j in xrange(i + 1, n):
            l2norm = (xs[i] - xs[j])**2 + (ys[i] - ys[j])**2 + (zs[i] - zs[j])**2
            es.append((math.sqrt(l2norm), i, j))

    es.sort(key=lambda tup: tup[0])
    disjoint_set = DisjointSet(n)
    links = []
    for _, src, dst in es:
        if not disjoint_set.same(src, dst):
            disjoint_set.merge(src, dst)
            links.append((src, dst))

    biggest_root = -1

    for i in xrange(n):
        if biggest_root == -1 or disjoint_set.size(biggest_root) < disjoint_set.size(i):
            biggest_root = i

    return links, biggest_root
Example #27
def generate_mst(self, verts):
    num_sites = len(verts)

    # Create connected graph
    edges = []
    for i in range(num_sites):
        for j in range(i + 1, num_sites):
            src, dest = verts[i], verts[j]
            dist = abs(self.x_loc[dest] -
                       self.x_loc[src]) + abs(self.y_loc[dest] -
                                              self.y_loc[src])
            edges.append((src, dest, dist))

    # Find MST
    edges.sort(key=lambda x: x[2])
    ds = DisjointSet(301)
    mst = defaultdict(list)
    for src, dest, dist in edges:
        if ds.find(src) != ds.find(dest):
            ds.union(src, dest)
            mst[src].append(dest)
            mst[dest].append(src)

    return mst
Example #28
def compute_partial_mws_prim_segmentation(edge_weight_exp,
                                          valid_edges_exp,
                                          offsets,
                                          number_of_attractive_channels,
                                          image_shape, iterations=None):
    visited = np.zeros(edge_weight_exp.size, dtype=bool)
    node_labeling = np.zeros(image_shape).ravel()
    number_of_nodes = node_labeling.size
    number_of_attractive_edges = number_of_nodes * number_of_attractive_channels
    ndims = len(offsets[0])
    array_stride = np.empty(ndims, dtype=np.int64)
    current_stride = 1
    mutexes = {}
    for i in range(ndims-1, -1, -1):
        array_stride[i] = current_stride
        current_stride *= image_shape[i]

    offset_strides = []
    for offset in offsets:
        stride = 0
        for i in range(len(offset)):
            stride += offset[i] * array_stride[i]
        offset_strides.append(stride)

    offset_strides = np.asarray(offset_strides)
    node_ufd = DisjointSet()
    for lbl in range(number_of_nodes):
        node_ufd.find(lbl)

    # mutexes = np.ndarray(number_of_nodes)
    pq = queue.PriorityQueue()

    # start prim from top left node
    add_neighbours(0, offset_strides, number_of_nodes, edge_weight_exp, valid_edges_exp, node_ufd, visited, pq)
    itr = 0
    # iterate over all edges
    cut_edges = []
    used_mtxs = []
    while not pq.empty():
        # extract next element from the queue
        position_vector = pq.get()
        edge_id = position_vector[1]
        u = position_vector[2]
        v = position_vector[3]

        if visited[edge_id]:
            continue
        visited[edge_id] = 1
        # find the current reps and skip if identical or mtx exists
        ru = node_ufd.find(u)
        rv = node_ufd.find(v)
        if ru == rv:
            continue
        if check_mutex(ru, rv, mutexes):
            if edge_id <= number_of_attractive_edges:
                # this edge is attractive and neighbour has different class
                cut_edges.append(edge_id)
            continue

        # check whether this edge is mutex via the edge offset
        if edge_id >= number_of_attractive_edges:
            used_mtxs.append(edge_id)
            insert_mutex(ru, rv, edge_id, mutexes)
        else:
            node_ufd.union(u,v)
            if node_ufd.find(ru) == rv:
                rv, ru = ru, rv
            merge_mutexes(rv, ru, mutexes)

        # add the next node to pq
        add_neighbours(v, offset_strides, number_of_nodes, edge_weight_exp, valid_edges_exp, node_ufd, visited, pq)
        itr += 1
        if iterations is not None:
            if itr > iterations:
                break

    # recover essential edges and neighbors
    class CutFeatures:
        def __init__(self, cut_edges, mutexes):
            self.cut_edges = cut_edges
            self.mutexes = mutexes
    neighbors_features = {}
    for e_id in cut_edges:
        n1 = e_id % number_of_nodes
        n2 = n1 + offset_strides[e_id//number_of_nodes]
        r1, r2 = node_ufd.find(n1), node_ufd.find(n2)
        sm = min(r1, r2)
        bg = max(r1, r2)
        if (sm, bg) in neighbors_features:
            neighbors_features[(sm, bg)].cut_edges += [e_id]
        else:
            neighbors_features[(sm, bg)] = CutFeatures([e_id],
                                                        get_common_mtxs(node_ufd.find(sm), node_ufd.find(bg), mutexes))

    # create node labeling from disjoint sets
    # 0's indicate no labeling
    for idx, cc in enumerate(node_ufd.itersets()):
        for node in cc:
            node_labeling[node] = idx+1

    return node_labeling, cut_edges, used_mtxs, neighbors_features
Example #29
def compute_mws_prim_segmentation(edge_weight_exp,
                                  valid_edges_exp,
                                  offsets,
                                  number_of_attractive_channels,
                                  image_shape):

    visited = np.zeros(edge_weight_exp.size, dtype=bool)
    node_labeling = np.zeros(image_shape).ravel()
    number_of_nodes = node_labeling.size
    number_of_attractive_edges = number_of_nodes * number_of_attractive_channels
    ndims = len(offsets[0])
    array_stride = np.empty(ndims, dtype=np.int64)
    current_stride = 1
    mutexes = {}
    for i in range(ndims-1, -1, -1):
        array_stride[i] = current_stride
        current_stride *= image_shape[i]

    offset_strides = []
    for offset in offsets:
        stride = 0
        for i in range(len(offset)):
            stride += offset[i] * array_stride[i]
        offset_strides.append(stride)

    offset_strides = np.asarray(offset_strides)
    node_ufd = DisjointSet()
    for lbl in range(number_of_nodes):
        node_ufd.find(lbl)

    # mutexes = np.ndarray(number_of_nodes)
    pq = queue.PriorityQueue()

    # start prim from top left node
    add_neighbours(0, offset_strides, number_of_nodes, edge_weight_exp, valid_edges_exp, node_ufd, visited, pq)
    # iterate over all edges
    while not pq.empty():
        # extract next element from the queue
        position_vector = pq.get()
        edge_id = position_vector[1]
        u = position_vector[2]
        v = position_vector[3]

        if visited[edge_id]:
            continue
        visited[edge_id] = 1
        # find the current reps and skip if identical or mtx exists
        ru = node_ufd.find(u)
        rv = node_ufd.find(v)
        if ru == rv or check_mutex(ru, rv, mutexes):
            continue

        # check whether this edge is mutex via the edge offset
        if edge_id >= number_of_attractive_edges:
            insert_mutex(ru, rv, edge_id, mutexes)
        else:
            node_ufd.union(u,v)
            if node_ufd.find(ru) == rv:
                rv, ru = ru, rv
            merge_mutexes(rv, ru, mutexes)

        # add the next node to pq
        add_neighbours(v, offset_strides, number_of_nodes, edge_weight_exp, valid_edges_exp, node_ufd, visited, pq)

    # create node labeling from disjoint sets
    # 0's indicate no labeling
    for idx, cc in enumerate(node_ufd.itersets()):
        for node in cc:
            node_labeling[node] = idx+1

    return node_labeling
Example #30
    def minimize(self):

        self._remove_unreachable_states()

        def order_tuple(a, b):
            return (a, b) if a < b else (b, a)

        table = {}

        sorted_states = sorted(self.states)

        # initialize the table
        for i, item in enumerate(sorted_states):
            for item_2 in sorted_states[i + 1:]:
                table[(item, item_2)] = ((item in self.final_states)
                                         != (item_2 in self.final_states))

        flag = True

        # table filling method
        while flag:
            flag = False

            for i, item in enumerate(sorted_states):
                for item_2 in sorted_states[i + 1:]:

                    if table[(item, item_2)]:
                        continue

                    # check if the states are distinguishable
                    for w in self.terminals:
                        t1 = self.transitions.get((item, w), None)
                        t2 = self.transitions.get((item_2, w), None)

                        if t1 is not None and t2 is not None and t1 != t2:
                            marked = table[order_tuple(t1, t2)]
                            flag = flag or marked
                            table[(item, item_2)] = marked

                            if marked:
                                break

        d = DisjointSet(self.states)

        # form new states
        for k, v in table.items():
            if not v:
                d.union(k[0], k[1])

        self.states = [str(x) for x in range(1, 1 + len(d.get()))]
        new_final_states = []
        self.start_state = str(d.find_set(self.start_state))

        for s in d.get():
            for item in s:
                if item in self.final_states:
                    new_final_states.append(str(d.find_set(item)))
                    break

        self.transitions = {(str(d.find_set(k[0])), k[1]): str(d.find_set(v))
                            for k, v in self.transitions.items()}

        self.final_states = new_final_states
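The last step of the minimization above (collapsing the state pairs the table left unmarked) can be isolated into a short, self-contained sketch; the function and the toy table below are hypothetical.

def merge_equivalent_states(states, distinguishable):
    # distinguishable: (state_a, state_b) -> True if the table marked the pair
    parent = {s: s for s in states}

    def find(s):
        while parent[s] != s:
            parent[s] = parent[parent[s]]
            s = parent[s]
        return s

    for (a, b), marked in distinguishable.items():
        if not marked:                      # unmarked pairs are equivalent
            parent[find(a)] = find(b)

    classes = {}
    for s in states:
        classes.setdefault(find(s), set()).add(s)
    return list(classes.values())

# merge_equivalent_states(
#     ["q0", "q1", "q2"],
#     {("q0", "q1"): False, ("q0", "q2"): True, ("q1", "q2"): True},
# ) -> [{"q0", "q1"}, {"q2"}]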