Code example #1
def test_csgraph_from_dense():
    np.random.seed(1234)
    G = np.random.random((10, 10))
    some_nulls = (G < 0.4)
    all_nulls = (G < 0.8)

    for null_value in [0, np.nan, np.inf]:
        G[all_nulls] = null_value
        olderr = np.seterr(invalid="ignore")
        try:
            G_csr = csgraph_from_dense(G, null_value=0)
        finally:
            np.seterr(**olderr)

        G[all_nulls] = 0
        assert_array_almost_equal(G, G_csr.toarray())

    for null_value in [np.nan, np.inf]:
        G[all_nulls] = 0
        G[some_nulls] = null_value
        olderr = np.seterr(invalid="ignore")
        try:
            G_csr = csgraph_from_dense(G, null_value=0)
        finally:
            np.seterr(**olderr)

        G[all_nulls] = 0
        assert_array_almost_equal(G, G_csr.toarray())
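For readers unfamiliar with the function these tests exercise, here is a minimal standalone sketch (not taken from any of the listed projects; the 3x3 matrix and variable names are made up) of what csgraph_from_dense does with an explicit null_value:

import numpy as np
from scipy.sparse.csgraph import csgraph_from_dense

# 0 marks "no edge"; csgraph_from_dense drops those entries from the CSR result.
dense = np.array([[0., 1., 2.],
                  [0., 0., 3.],
                  [0., 0., 0.]])
sparse = csgraph_from_dense(dense, null_value=0)
print(sparse.nnz)        # 3 stored edges
print(sparse.toarray())  # round-trips back to the dense form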
Code example #2
def main():
    import heapq
    import sys
    input = sys.stdin.buffer.readline
    N, M, L = map(int, input().split())

    G = [[-1] * N for i in range(N)]

    for i in range(M):
        A, B, C = map(int, input().split())
        A -= 1
        B -= 1
        G[A][B] = C
        G[B][A] = C
    g = csgraph_from_dense(G, null_value=-1)
    D = dijkstra(g)
    G2 = [[-1] * N for i in range(N)]
    for i in range(N):
        for j in range(N):
            if D[i][j] >= 0 and D[i][j] <= L:
                G2[i][j] = 1

    g2 = csgraph_from_dense(G2, null_value=-1)
    D = dijkstra(g2)

    Q = int(input())
    for _ in range(Q):
        s, t = map(int, input().split())
        s -= 1
        t -= 1
        if D[s][t] <= N**2:
            print(int(D[s][t]) - 1)
        else:
            print(-1)
Code example #3
def test_strong_connections():
    X1de = np.array([[0, 1, 0],
                     [0, 0, 0],
                     [0, 0, 0]])
    X2de = X1de + X1de.T

    X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)
    X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)

    for X in X1sp, X1de:
        n_components, labels =\
            csgraph.connected_components(X, directed=True,
                                         connection='strong')

        assert_equal(n_components, 3)
        labels.sort()
        assert_array_almost_equal(labels, [0, 1, 2])

    for X in X2sp, X2de:
        n_components, labels =\
            csgraph.connected_components(X, directed=True,
                                         connection='strong')

        assert_equal(n_components, 2)
        labels.sort()
        assert_array_almost_equal(labels, [0, 0, 1])
Code example #4
def test_csgraph_from_dense():
    np.random.seed(1234)
    G = np.random.random((10, 10))
    some_nulls = (G < 0.4)
    all_nulls = (G < 0.8)

    for null_value in [0, np.nan, np.inf]:
        G[all_nulls] = null_value
        olderr = np.seterr(invalid="ignore")
        try:
            G_csr = csgraph_from_dense(G, null_value=0)
        finally:
            np.seterr(**olderr)

        G[all_nulls] = 0
        assert_array_almost_equal(G, G_csr.toarray())

    for null_value in [np.nan, np.inf]:
        G[all_nulls] = 0
        G[some_nulls] = null_value
        olderr = np.seterr(invalid="ignore")
        try:
            G_csr = csgraph_from_dense(G, null_value=0)
        finally:
            np.seterr(**olderr)

        G[all_nulls] = 0
        assert_array_almost_equal(G, G_csr.toarray())
Code example #5
def test_strong_connections():
    X1de = np.array([[0, 1, 0],
                     [0, 0, 0],
                     [0, 0, 0]])
    X2de = X1de + X1de.T

    X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)
    X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)

    for X in X1sp, X1de:
        n_components, labels =\
            csgraph.connected_components(X, directed=True,
                                         connection='strong')

        assert_equal(n_components, 3)
        labels.sort()
        assert_array_almost_equal(labels, [0, 1, 2])

    for X in X2sp, X2de:
        n_components, labels =\
            csgraph.connected_components(X, directed=True,
                                         connection='strong')

        assert_equal(n_components, 2)
        labels.sort()
        assert_array_almost_equal(labels, [0, 0, 1])
Code example #6
    def dijkstra(self, graph_dense, s_i, s_j, t_i, t_j):
        """
            Performs a graph search for source and target

            graph_dense : dense graph representation of the costmap
            s_i, s_j : source coordinate on the costmap
            t_i, t_j : target coordinate on the costmap
        """
        source_id = self.graph_id(s_i, s_j)
        target_id = self.graph_id(t_i, t_j)
        graph_sparse = csgraph.csgraph_from_dense(graph_dense)
        dist_matrix, predecessors = csgraph.dijkstra(
            graph_sparse,
            directed=not self.average_cost,
            return_predecessors=True,
            indices=source_id,
            limit=np.inf)
        path = []
        path.append((t_i, t_j))
        while True:
            target_id_bkp = target_id
            target_id = predecessors[target_id]
            t_i, t_j = self.costmap_id(target_id)
            s_i, s_j = self.costmap_id(target_id_bkp)
            path.append((t_i, t_j))
            if source_id == target_id:
                break
        return path
Code example #7
    def _compute_adjacency_matrix(self, indexes):
        """
        Construct the adjacency graph over all segments

        :return:
        :rtype: sp.csr_matrix
        """

        if indexes:
            segs = [self.segments[i] for i in indexes]
        else:
            segs = self.segments

        # Generate an empty, N x N sparse graph
        node_count = len(segs)
        g_sparse = np.zeros((node_count, node_count), dtype=float)

        # Generate all pairs of segments
        segment_pairs = itertools.combinations(range(node_count), 2)

        # Fill the graph with the distances between segments
        for src, dst in segment_pairs:
            src_pos = segs[src].location.nd
            dst_pos = segs[dst].location.nd
            distance = np.linalg.norm(src_pos - dst_pos)
            g_sparse[src, dst] = distance

        g_sparse = sp.csgraph_from_dense(g_sparse)
        return g_sparse
Code example #8
def stitch(stack, numpix_threshold=0):
    from scipy.sparse.csgraph import csgraph_from_dense, connected_components
    nonzero_idx = np.any(stack, axis=2)

    # get unique label combinations across stacks
    labels_to_combine = np.unique(stack[nonzero_idx], axis=0)

    conn_mat = np.zeros(
        (labels_to_combine.max() + 1, labels_to_combine.max() + 1),
        dtype='bool')

    for row, label_combo in enumerate(labels_to_combine):
        group = label_combo[np.nonzero(label_combo)]
        for i in range(len(group) - 1):
            for j in range(i + 1, len(group)):
                conn_mat[group[i], group[j]] = True
                conn_mat[group[j], group[i]] = True

    np.fill_diagonal(conn_mat, True)

    graph = csgraph_from_dense(conn_mat)

    n_conncomp, graph_complabels = connected_components(graph, directed=False)

    result = np.zeros_like(stack[:, :, 0])

    for label in np.unique(stack):
        mask = np.any(stack == label, axis=2)
        if mask.sum() > numpix_threshold:
            result[np.any(stack == label, axis=2)] = graph_complabels[label]

    return result
Code example #9
def __find_cc_of_synapses(synapses, dist_threshold, skeleton=None):
    points = np.array([syn.location_post for syn in synapses])
    if skeleton is None:
        dists = np.sqrt(((points.reshape(-1, 1, 3) -
                          points.reshape(1, -1, 3))**2).sum(axis=2))
    else:
        tree_node_ids = get_closest_treenode_ids(points, skeleton)
        dists = np.zeros((len(synapses), len(synapses)))
        for i, node_a in enumerate(tree_node_ids):
            for j, node_b in enumerate(tree_node_ids):
                dist = navis.graph_utils.dist_between(skeleton, node_a, node_b)
                dists[i, j] = dist if dist != 0 else -1

    # it is a symmetric matrix, so keep only the lower triangle to remove redundancy
    dists *= np.tri(*dists.shape)

    dists[dists > dist_threshold] = np.nan
    # the zeroed half of the matrix is set to np.nan to account for redundancy
    dists[dists == 0] = np.nan
    # distances that are actually 0 were marked with -1 above; restore them
    dists[dists == -1] = 0
    sparsematrix = csgraph_from_dense(dists, null_value=np.nan)
    num_cc, labels = connected_components(sparsematrix, directed=False)
    clusters_of_indices = []
    for label in np.unique(labels):
        clusters_of_indices.append(list(np.where(labels == label)[0]))
    clustered_synapses = []
    for cluster in clusters_of_indices:
        if len(cluster) > 1:
            cluster = [synapses[ind] for ind in cluster]
            clustered_synapses.append(cluster)
    return clustered_synapses
Code example #10
def get_comp_freqs(adj, rel_freq):
    sparseMatrix = csgraph_from_dense(adj)
    connected = connected_components(sparseMatrix,
                                     directed=False,
                                     connection='weak',
                                     return_labels=True)
    comp_num = connected[0]
    comp_list = connected[1]

    comps_freqlist = np.zeros(comp_num)
    comps_info = []
    for i in range(comp_num):
        comps_info.append([])
    for i in range(len(rel_freq)):
        freq = rel_freq[i]
        comp = comp_list[i]
        comps_freqlist[comp] += freq
        comps_info[comp].append(i)
        # print(i,freq)
    max_comp = max(comps_freqlist)
    # print(comp_list)
    # print(comps_freqlist)
    # print(comps_info)
    # if list(comps_freqlist).count(max_comp)!=1:
    # print(comps_freqlist)
    # exit('ambiguous as to which component is biggest! Investigate')
    # print('biggest comp percentage='+str(max_comp*100)+'%')
    return comps_freqlist, list(comps_freqlist).index(max_comp), comps_info
Code example #11
File: energy.py Project: nallg00d/wsnsims
    def build_cluster_graph(self):

        cluster_graph = collections.defaultdict(list)
        for cluster in self.sim.clusters:

            other_clusters = list(self.sim.clusters)
            other_clusters.remove(cluster)
            for other_cluster in other_clusters:
                overlaps = set(cluster.tour.objects).intersection(
                    other_cluster.tour.objects)

                if len(overlaps) > 0:
                    cluster_graph[cluster].append(other_cluster)

        node_count = len(self.sim.clusters)
        dense = np.zeros((node_count, node_count), dtype=float)

        for cluster, neighbors in cluster_graph.items():

            cluster_index = self.sim.clusters.index(cluster)
            for neighbor in neighbors:
                neighbor_index = self.sim.clusters.index(neighbor)

                dense[cluster_index, neighbor_index] = 1
                dense[neighbor_index, cluster_index] = 1

        sparse = sp.csgraph_from_dense(dense)
        return sparse
Code example #12
    def join_clusters(self):

        # Generate an empty, N x N sparse graph
        node_count = len(self.clusters)
        dense = np.zeros((node_count, node_count), dtype=float)

        expand = {}

        cluster_pairs = itertools.combinations(self.clusters, 2)
        for cluster_pair in cluster_pairs:
            weights, segs = self.compute_edge_weights(*cluster_pair)
            c1_index = self.clusters.index(cluster_pair[0])
            c2_index = self.clusters.index(cluster_pair[1])

            dense[c1_index, c2_index] = weights[0]
            dense[c2_index, c1_index] = weights[1]

            expand[(c1_index, c2_index)] = segs[1]
            expand[(c2_index, c1_index)] = segs[0]

        sparse = sp.csgraph_from_dense(dense)
        mst = sp.minimum_spanning_tree(sparse)

        edges = mst.nonzero()
        edges = zip(edges[0], edges[1])
        for edge in edges:
            cluster = self.clusters[edge[0]]
            cluster.add(expand[edge])
            cluster.intersections.append(self.clusters[edge[1]])
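join_clusters() above feeds the dense weight matrix through csgraph_from_dense before calling minimum_spanning_tree; a minimal sketch of just that step, using a made-up 3x3 symmetric weight matrix:

import numpy as np
from scipy.sparse.csgraph import csgraph_from_dense, minimum_spanning_tree

weights = np.array([[0., 2., 9.],
                    [2., 0., 3.],
                    [9., 3., 0.]])
sparse = csgraph_from_dense(weights, null_value=0)
mst = minimum_spanning_tree(sparse)
print(mst.toarray())  # keeps the 2.0 and 3.0 edges, drops the 9.0 edge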
Code example #13
def create_data_model():
    """Stores the data for the problem."""

    cities_map = create_cities_map()

    path_DB = cities_map.get_path('D', 'B')

    G2_data = np.array([[np.inf, np.inf, 1, np.inf, np.inf],
                        [np.inf, np.inf, 1, 2, 0],
                        [np.inf, 1, np.inf, np.inf, np.inf],
                        [np.inf, 2, np.inf, np.inf, np.inf],
                        [0, np.inf, np.inf, np.inf, np.inf]])
    G2_data = cities_map.adjacency_matrix

    G2_sparse = csgraph_from_dense(G2_data, null_value=CitiesMap.NAN_VALUE)
    dist_matrix, predecessors = floyd_warshall(csgraph=G2_sparse,
                                               directed=True,
                                               return_predecessors=True)

    data = {}

    data['distance_matrix'] = dist_matrix

    data['num_vehicles'] = 1
    data['depot'] = cities_map.city_name_to_idx(cities_map.route_city_end)
    return data, predecessors, cities_map
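create_data_model() returns the floyd_warshall predecessors without showing how a route is read back out. A hedged sketch of that reconstruction, reusing the illustrative G2_data above but with np.inf as the null value (CitiesMap and its NAN_VALUE are not shown in the snippet, so they are not assumed here; walk_back is a hypothetical helper, not part of the original code):

import numpy as np
from scipy.sparse.csgraph import csgraph_from_dense, floyd_warshall

def walk_back(predecessors, source, target):
    # predecessors[source, j] is the node visited just before j on the path from source
    path = [target]
    while path[-1] != source:
        path.append(int(predecessors[source, path[-1]]))
    return path[::-1]

G2_data = np.array([[np.inf, np.inf, 1, np.inf, np.inf],
                    [np.inf, np.inf, 1, 2, 0],
                    [np.inf, 1, np.inf, np.inf, np.inf],
                    [np.inf, 2, np.inf, np.inf, np.inf],
                    [0, np.inf, np.inf, np.inf, np.inf]])
G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
dist_matrix, predecessors = floyd_warshall(csgraph=G2_sparse, directed=True,
                                           return_predecessors=True)
print(walk_back(predecessors, 0, 1))  # [0, 2, 1]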
Code example #14
def main():
    from scipy.sparse.csgraph import dijkstra, csgraph_from_dense
    n, m = map(int, input().split())
    s, t = map(int, input().split())
    s -= 1
    t -= 1
    g = [[-1] * n for i in range(n)]
    for i in range(m):
        x, y, d = map(int, input().split())
        x -= 1
        y -= 1
        g[x][y] = d
        g[y][x] = d

    G = csgraph_from_dense(g, null_value=-1)

    sd = dijkstra(G, indices=s)
    td = dijkstra(G, indices=t)
    ans = -1

    for i, xy in enumerate(zip(sd, td)):
        x, y = xy
        if x == y and x <= 1000:
            ans = i + 1
            break
    print(ans)
Code example #15
    def test_error_handling(self):
        with np.testing.assert_raises(TypeError):
            sp.seed_competition(self.seeds, image=0)

        with np.testing.assert_raises(TypeError):
            sp.seed_competition(self.seeds.flatten(), graph=0)

        with np.testing.assert_raises(TypeError):
            sp.dynamic_arc_weight(self.seeds, image=0)

        with np.testing.assert_raises(ValueError):
            sp.seed_competition(self.seeds, np.ones(self.seeds.size))

        with np.testing.assert_raises(ValueError):
            sp.dynamic_arc_weight(self.seeds, np.ones(self.seeds.size))

        with np.testing.assert_raises(ValueError):
            sp.seed_competition(self.seeds)

        with np.testing.assert_raises(ValueError):
            sp.seed_competition(self.seeds, image=self.image,
                                graph=csgraph.csgraph_from_dense(self.image))

        with np.testing.assert_raises(ValueError):
            sp.dynamic_arc_weight(self.seeds, self.image, alpha=-1.0)

        with np.testing.assert_raises(ValueError):
            sp.dynamic_arc_weight(self.seeds, self.image, mode='fake')
Code example #16
def process_component(only_seqs, only_freqs, component, comps_info, adj,
                      dists):
    nodes_real_names = comps_info[component]
    adj_comp, comp_size = smaller_adj(adj, nodes_real_names)
    sparseMatrixComp = csgraph_from_dense(adj_comp)
    path_dists = shortest_path(sparseMatrixComp,
                               method='auto',
                               directed=False,
                               return_predecessors=False,
                               unweighted=True,
                               overwrite=False)
    links = []
    for p in range(comp_size - 1):
        for q in range(p + 1, comp_size):
            realp = nodes_real_names[p]
            realq = nodes_real_names[q]
            s = [
                p, q, dists[realp, realq], adj_comp[p][q], path_dists[p][q],
                only_freqs[realp], only_freqs[realq]
            ]
            links.append(s)
    comp_seqs = {}
    for k in nodes_real_names:
        seq = str(only_seqs[k])
        freq = only_freqs[k]
        comp_seqs[seq] = freq
    return links, comp_seqs, adj_comp, comp_size
Code example #17
def graphme(TransNet, fileList, output):  #plots nx graph
    sparse_graph = csgraph_from_dense(TransNet)
    graph_obj = nx.from_scipy_sparse_matrix(sparse_graph,
                                            create_using=nx.DiGraph())
    nodeLabels = {}
    edgeLabels = {}
    for node in graph_obj.node:
        #------------------------------------------------------------------------------------------
        # this part generates names using a regex grab from your file names, for cleaner charts
        # for no regex, use:
        nodeLabels[node] = fileList[node]
        #or match the unique part of your filename and use that instead of the full name (below)
        # name=re.findall('.*_clipped/(.*)_unique.*',fileList[node])
        # nodeLabels[node]=name
        # name=re.findall('(.*).*',fileList[node])
        # nodeLabels[node]=name
    #------------------------------------------------------------------------------------------
    edgeListIterator = 0
    for edge in sparse_graph.data:
        edgeLabels[graph_obj.edges()[edgeListIterator]] = str(edge)
        edgeListIterator += 1
    pos = nx.shell_layout(graph_obj, dim=2)
    nx.draw_networkx_nodes(graph_obj, pos, node_shape="s")
    nx.draw_networkx_edges(graph_obj, pos, arrows=True)
    nx.draw_networkx_labels(graph_obj, pos, labels=nodeLabels)
    nx.draw_networkx_edge_labels(graph_obj, pos, edge_labels=edgeLabels)
    plt.axis('off')
    plt.savefig(output)
Code example #18
def localEfficiencyCalc(GL):
    nodeNum = len(GL)
    localEfficiency = 0
    if nodeNum > 1:
        local1 = []
        for boxName in nx.nodes_iter(GL):
            radiusNodeList = GL.neighbors(boxName)
            boxNet = nx.Graph(GL.subgraph(radiusNodeList))
            boxNodes = len(boxNet)
            boxMat = nx.to_numpy_matrix(boxNet)
            boxSparse = csgraph_from_dense(boxMat)
            boxMatPath = shortest_path(boxSparse, method='auto', directed=False,
                                       return_predecessors=False, unweighted=True,
                                       overwrite=False)
            boxPathList = []
            for i in range(boxNodes-1):
                for j in range(i+1, boxNodes):
                    tempDist = boxMatPath[i][j]
                    if np.isfinite(tempDist):
                        boxPathList.append(np.divide(1, tempDist, dtype = float))
            if len(boxPathList) > 0:
                local1.append(np.mean(boxPathList))
            else:
                local1.append(0)    
        localEfficiency = np.mean(local1)    
    
    return localEfficiency                    
Code example #19
def calcDistances(haploNum, haploSize,
                  ordSeqs):  #Calculate hamming distances between sequences
    compNum = haploSize
    compList = range(haploNum)
    t = 0
    adjMatrix = np.zeros((haploNum, haploNum))
    kStepList = []
    while compNum > 1:
        t = t + 1
        # Check each query sequence
        for r1 in range(haploNum - 1):
            haplotype1 = ordSeqs[r1]
            for r2 in range(r1 + 1, haploNum):
                if compList[r1] != compList[r2]:
                    haplotype2 = ordSeqs[r2]
                    tempDist = 0
                    for a, b in izip(haplotype1, haplotype2):
                        if a != b:
                            tempDist = tempDist + 1
                            if tempDist > t:
                                break
                    if tempDist == t:
                        adjMatrix[r1][r2] = 1
                        kStepList.append([r1, r2, t])
        # Calculate components
        sparseMatrix = csgraph_from_dense(adjMatrix)
        connected = connected_components(sparseMatrix,
                                         directed=False,
                                         connection='weak',
                                         return_labels=True)
        compNum = connected[0]
        compList = connected[1]
    return kStepList
Code example #20
def inferTransNetFromEvolTimes(DSamp, failtime):
    # print(DSamp)
    # raw_input("press enter to continue")
    numFiles = len(DSamp)
    AMSamp = np.eye(numFiles)
    for u in range(numFiles):
        for v in range(u + 1, numFiles):
            if DSamp[u, v] <= DSamp[v, u]:
                if DSamp[u, v] < failtime - 20:
                    AMSamp[u, v] = 1
            else:
                if DSamp[v, u] < failtime - 20:
                    AMSamp[v, u] = 1
    sparseMatrix = csgraph_from_dense(AMSamp)
    connected = connected_components(sparseMatrix,
                                     directed=False,
                                     connection='weak',
                                     return_labels=True)
    S = connected[0]
    C = connected[1]
    transNets = []
    for c in range(S):
        tmp = np.where(C == c)
        comp = tmp[0]
        if len(comp) > 1:
            DSamp_comp = reduceMat(DSamp, comp)
            transNetsComp = findTransNetMCMC(DSamp_comp, failtime)
            try:
                transNets.append(transNetsComp[0])
            except TypeError:
                transNets.append(transNetsComp)
    TransNet = transNets[0]
    return TransNet
Code example #21
File: minds_sim.py Project: forgetfulyoshi/wsnsims
    def _compute_adjacency_matrix(self, indexes):
        """
        Construct the adjacency graph over all segments

        :return:
        :rtype: sp.csr_matrix
        """

        if indexes:
            segs = [self.segments[i] for i in indexes]
        else:
            segs = self.segments

        # Generate an empty, N x N sparse graph
        node_count = len(segs)
        g_sparse = np.zeros((node_count, node_count), dtype=float)

        # Generate all pairs of segments
        segment_pairs = itertools.combinations(range(node_count), 2)

        # Fill the graph with the distances between segments
        for src, dst in segment_pairs:
            src_pos = segs[src].location.nd
            dst_pos = segs[dst].location.nd
            distance = np.linalg.norm(src_pos - dst_pos)
            g_sparse[src, dst] = distance

        g_sparse = sp.csgraph_from_dense(g_sparse)
        return g_sparse
Code example #22
    def merge_cells(self):
        """
        Find connected cells across multiple inferences and merge strongly connected
        cells whose overlap exceeds the threshold.
        """
        self.build_connectivity_matrix()
        print("Connectivity matrix built")        
        #Filter out week connections
        self.conn_matrix[self.conn_matrix < self.threshold] = 0

        #Get connected components 
        np.fill_diagonal(self.conn_matrix, 1)

        from scipy.sparse.csgraph import csgraph_from_dense, connected_components
        graph = csgraph_from_dense(self.conn_matrix)
        n_conn_comp, graph_labels = connected_components(graph, directed=True)

        print(self.conn_matrix.shape)
        print(n_conn_comp)
        print(graph_labels.shape)
        print(type(graph_labels))

        #Convert all labels to their group:
        updated_labels = graph_labels[self.stack]
        print("applied lookup")
        print(updated_labels.shape)
        self.masks = np.max(updated_labels, 2)
        return self.masks
Code example #23
def calcKstep(haploNum, haploSize,
              seqs):  #Calculate hamming distances between sequences
    ordSeqs = seqs.keys()
    colors = []
    compNum = haploSize
    compList = range(haploNum)
    t = 0
    adjMatrix = np.zeros((haploNum, haploNum))
    kStepList = []
    while compNum > 1:
        t = t + 1
        # Check each query sequence
        for r1 in range(haploNum - 1):
            haplotype1 = ordSeqs[r1]
            for r2 in range(r1 + 1, haploNum):
                if compList[r1] != compList[r2]:
                    haplotype2 = ordSeqs[r2]
                    tempDist = 0
                    for a, b in izip(haplotype1, haplotype2):
                        if a != b:
                            tempDist = tempDist + 1
                            if tempDist > t:
                                break
                    if tempDist == t:
                        adjMatrix[r1][r2] = 1
                        # seqs[ordSeqs[r1]]
                        kStepList.append(
                            [seqs[ordSeqs[r1]], seqs[ordSeqs[r2]], t])
                        if seqs[ordSeqs[r1]] not in colors:
                            colors.append(seqs[ordSeqs[r1]])
                        if seqs[ordSeqs[r2]] not in colors:
                            colors.append(seqs[ordSeqs[r2]])
        # Calculate components
        sparseMatrix = csgraph_from_dense(adjMatrix)
        connected = connected_components(sparseMatrix,
                                         directed=False,
                                         connection='weak',
                                         return_labels=True)
        compNum = connected[0]
        compList = connected[1]
        if t / haploSize > .42:
            if sum(compList) == 1:
                offender = ordSeqs[np.argmax(compList)]
                splitname = seqs[offender].split("_")
                if splitname[0] == "1":
                    f1counter -= 1
                elif splitname[0] == "2":
                    f2counter -= 1
                else:
                    both -= 1
                del seqs[offender]
                altwrapper(seqs, colors, haploNum - 1, haploSize, f1counter,
                           f2counter, both, output, drawmode)
                sys.exit()
            else:
                sys.exit(
                    "FATAL ERROR: Your output will contain disconnected components at an unreconcilable distance from one another. Exiting"
                )
    return kStepList, colors, t
Code example #24
File: graphs.py Project: allista/DegenPrimer
 def _subgroups(edges):
     graph = _matrix_from_edges(edges)
     n_comp, comp = connected_components(csgraph_from_dense(graph), directed=False)
     if n_comp < 2: return [edges]
     groups = [[] for _n in xrange(n_comp)]
     for edge in edges: groups[comp[edge[0]]].append(edge)
     groups = [group for group in groups if group]
     return groups
Code example #25
def test_csgraph_from_dense():
    G = np.random.random((10, 10))
    some_nulls = (G < 0.4)
    all_nulls = (G < 0.8)

    for null_value in [0, np.nan, np.inf]:
        G[all_nulls] = null_value
        G_csr = csgraph_from_dense(G, null_value=0)
        G[all_nulls] = 0
        assert_array_almost_equal(G, G_csr.toarray())

    for null_value in [np.nan, np.inf]:
        G[all_nulls] = 0
        G[some_nulls] = null_value
        G_csr = csgraph_from_dense(G, null_value=0)
        G[all_nulls] = 0
        assert_array_almost_equal(G, G_csr.toarray())
Code example #26
File: test_conversions.py Project: MrCreosote/scipy
def test_csgraph_from_dense():
    G = np.random.random((10, 10))
    some_nulls = (G < 0.4)
    all_nulls = (G < 0.8)

    for null_value in [0, np.nan, np.inf]:
        G[all_nulls] = null_value
        G_csr = csgraph_from_dense(G, null_value=0)
        G[all_nulls] = 0
        assert_array_almost_equal(G, G_csr.toarray())

    for null_value in [np.nan, np.inf]:
        G[all_nulls] = 0
        G[some_nulls] = null_value
        G_csr = csgraph_from_dense(G, null_value=0)
        G[all_nulls] = 0
        assert_array_almost_equal(G, G_csr.toarray())
Code example #27
def stitch(stack, numpix_threshold=0):
    '''
    Combine multiple instance segmentations based on overlapping patches into a single
    segmentation
    
    Args
    ----
        stack : np.ndarray
            first two dimensions of stack should be the dimensions of the input image,
            and the third dimension be the number of overlapping patches
        numpix_threshold : int
            a label will be retained in the output only if it has at least 
            numpix_threshold pixels
    
    Returns
    -------
        result : numpy.ndarray
            a 2-D array labels
    '''
    from scipy.sparse.csgraph import csgraph_from_dense, connected_components
    
    # find foreground labels
    nonzero_idx = np.any(stack,axis=2)
    
    # get unique label combinations across patches in stack
    labels_to_combine = np.unique(stack[nonzero_idx],axis=0)
    
    # compute a "connectivity matrix" that indicates which labels overlap across patches
    conn_mat = np.zeros((labels_to_combine.max()+1,labels_to_combine.max()+1), dtype='bool')
    
    for row, label_combo in enumerate(labels_to_combine):
        group = label_combo[np.nonzero(label_combo)]
        for i in range(len(group)-1):
            for j in range(i+1,len(group)):
                conn_mat[group[i], group[j]] = True
                conn_mat[group[j], group[i]] = True

    #np.fill_diagonal(conn_mat, True)
    
    # find connected components using this connectivity matrix
    # each connected component will be a different label in the result (as long as it
    # contains the minimum required number of pixels)
    graph = csgraph_from_dense(conn_mat)
    n_conncomp, graph_complabels = connected_components(graph, directed=False)
    
    result = np.zeros_like(stack[:,:,0])
    
    # reassign labels to the ids of the connected components
    for label in np.unique(stack):
        # get 2-D mask of voxels with a given label
        mask = np.any(stack==label,axis=2)
        
        # make sure that there are enough many pixels
        if mask.sum() > numpix_threshold:
            # if so, reassign this label to its corresponding connected component id
            result[np.any(stack==label,axis=2)] = graph_complabels[label]
    
    return result
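A minimal usage sketch for the stitch() function above (the 2x2 image with two overlapping patches is made up for illustration; patch labels 1 and 2 share the pixel at (0, 0), so they end up in the same connected component):

import numpy as np

# two patches of a 2x2 image, stacked along the third axis
stack = np.zeros((2, 2, 2), dtype=int)
stack[0, 0, 0] = 1           # label 1 appears only in patch 0
stack[:, :, 1] = [[2, 2],
                  [0, 0]]    # label 2 appears in patch 1 and overlaps label 1
print(stitch(stack))         # labels 1 and 2 collapse to a single component id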
Code example #28
File: day7.py Project: rsirefelt/advent_of_code
def create_graph(bag_rules):
    num_bags = len(bag_rules)
    bags = list(bag_rules.keys())
    g_dense = np.zeros((num_bags, num_bags), dtype=np.int64)
    for i, rule in enumerate(bag_rules.values()):
        for bg, num in rule.items():
            g_dense[i, bags.index(bg)] = num

    return csgraph.csgraph_from_dense(g_dense).astype(np.int64)
Code example #29
def test_graph_depth_first_trivial_graph():
    csgraph = np.array([[0]])
    csgraph = csgraph_from_dense(csgraph, null_value=0)

    bfirst = np.array([[0]])

    for directed in [True, False]:
        bfirst_test = depth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst)
Code example #30
def shortest_paths(graph_dense):
    graph_sparse = csgraph.csgraph_from_dense(graph_dense)
    # print graph_sparse
    # print graph_sparse.shape
    dist_matrix, predecessors = csgraph.shortest_path(graph_sparse,
                                                      directed=False,
                                                      return_predecessors=True)
    # print predecessors
    return predecessors
Code example #31
def calcDistances(haploNum, haploSize,
                  asdf):  #Calculate hamming distances between sequences
    ordSeqs = asdf.keys()
    # for seq in ordSeqs:
    # print(seq,asdf[seq])
    colors = []
    compNum = haploSize
    compList = range(haploNum)
    t = 0
    adjMatrix = np.zeros((haploNum, haploNum))
    kStepList = []
    while compNum > 1:
        t = t + 1
        # Check each query sequence
        for r1 in range(haploNum - 1):
            haplotype1 = ordSeqs[r1]
            for r2 in range(r1 + 1, haploNum):
                if compList[r1] != compList[r2]:
                    haplotype2 = ordSeqs[r2]
                    tempDist = 0
                    for a, b in izip(haplotype1, haplotype2):
                        if a != b:
                            tempDist = tempDist + 1
                            if tempDist > t:
                                break
                    if tempDist == t:
                        adjMatrix[r1][r2] = 1
                        # asdf[ordSeqs[r1]]
                        kStepList.append(
                            [asdf[ordSeqs[r1]], asdf[ordSeqs[r2]], t])
                        if asdf[ordSeqs[r1]] not in colors:
                            colors.append(asdf[ordSeqs[r1]])
                        if asdf[ordSeqs[r2]] not in colors:
                            colors.append(asdf[ordSeqs[r2]])
        # Calculate components
        sparseMatrix = csgraph_from_dense(adjMatrix)
        connected = connected_components(sparseMatrix,
                                         directed=False,
                                         connection='weak',
                                         return_labels=True)
        compNum = connected[0]
        compList = connected[1]
        # print(compNum)
        # print(compList)
        # print(t)
        # print("=")
        if t / haploSize > .42:
            if sum(compList) == 1:
                offender = ordSeqs[np.argmax(compList)]
                del asdf[offender]
                altwrapper(asdf, colors, haploNum - 1, haploSize)
                sys.exit()
            else:
                sys.exit(
                    "Your output is going to look really messed up. Exiting")
    return kStepList, colors
Code example #32
File: test_conversions.py Project: MrCreosote/scipy
def test_csgraph_to_dense():
    G = np.random.random((10, 10))
    nulls = (G < 0.8)
    G[nulls] = np.inf
    
    G_csr = csgraph_from_dense(G)

    for null_value in [0, 10, -np.inf, np.inf]:
        G[nulls] = null_value
        assert_array_almost_equal(G, csgraph_to_dense(G_csr, null_value))
Code example #33
def test_weak_connections():
    Xde = np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]])

    Xsp = csgraph.csgraph_from_dense(Xde, null_value=0)

    for X in Xsp, Xde:
        n_components, labels = csgraph.connected_components(X, directed=True, connection="weak")

        assert_equal(n_components, 2)
        assert_array_almost_equal(labels, [0, 0, 1])
Code example #34
def test_csgraph_to_dense():
    G = np.random.random((10, 10))
    nulls = (G < 0.8)
    G[nulls] = np.inf

    G_csr = csgraph_from_dense(G)

    for null_value in [0, 10, -np.inf, np.inf]:
        G[nulls] = null_value
        assert_array_almost_equal(G, csgraph_to_dense(G_csr, null_value))
Code example #35
File: graphs.py Project: npilshchikova/DegenPrimer
 def _subgroups(edges):
     graph = _matrix_from_edges(edges)
     n_comp, comp = connected_components(csgraph_from_dense(graph),
                                         directed=False)
     if n_comp < 2: return [edges]
     groups = [[] for _n in xrange(n_comp)]
     for edge in edges:
         groups[comp[edge[0]]].append(edge)
     groups = [group for group in groups if group]
     return groups
Code example #36
 def graph(self):
     '''Map elements of the kernel related to one another through fluctuations as a graph'''
     k_size = len(self.kernel)
     kernel_sig = map(self.signature, self.kernel)
     adj = np.zeros([k_size, k_size])
     for r, u in enumerate(self.kernel):
         accessible = self.sim_anneal(u)
         for w_sig in map(self.signature, accessible):
             adj[r, kernel_sig.index(w_sig)] = 1
     return csgraph.csgraph_from_dense(adj)
Code example #37
File: test_traversal.py Project: 1641731459/scipy
def test_graph_depth_first_trivial_graph():
    csgraph = np.array([[0]])
    csgraph = csgraph_from_dense(csgraph, null_value=0)

    bfirst = np.array([[0]])

    for directed in [True, False]:
        bfirst_test = depth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(bfirst_test),
                                  bfirst)
Code example #38
def test_graph_breadth_first():
    csgraph = np.array([[0, 1, 2, 0, 0], [1, 0, 0, 0, 3], [2, 0, 0, 7, 0],
                        [0, 0, 7, 0, 1], [0, 3, 0, 1, 0]])
    csgraph = csgraph_from_dense(csgraph, null_value=0)

    bfirst = np.array([[0, 1, 2, 0, 0], [0, 0, 0, 0, 3], [0, 0, 0, 7, 0],
                       [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])

    for directed in [True, False]:
        bfirst_test = breadth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst)
Code example #39
def test_graph_depth_first():
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")
    csgraph = np.array([[0, 1, 2, 0, 0], [1, 0, 0, 0, 3], [2, 0, 0, 7, 0], [0, 0, 7, 0, 1], [0, 3, 0, 1, 0]])
    csgraph = csgraph_from_dense(csgraph, null_value=0)

    dfirst = np.array([[0, 1, 0, 0, 0], [0, 0, 0, 0, 3], [0, 0, 0, 0, 0], [0, 0, 7, 0, 0], [0, 0, 0, 1, 0]])

    for directed in [True, False]:
        dfirst_test = depth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(dfirst_test), dfirst)
Code example #40
File: movement.py Project: forgetfulyoshi/wsnsims
    def _compute_adjacency_matrix(self):
        """
        Build out the adjacency matrix based on the paths created by the
        builder. This takes the cluster paths and builds out a matrix suitable
        for use in Dijkstra's algorithm.

        This routine also populates the self._segment_indexes mapping.

        :return: The adjacency matrix for the simulation
        :rtype: sp.csr_matrix
        """

        i = 0
        for clust in self.sim.clusters:
            for seg_vertex in clust.tour.vertices:
                seg = clust.tour.objects[seg_vertex]
                if seg not in self._segment_indexes:
                    self._segment_indexes[seg] = i
                    i += 1

        # First we need to get the total number of segments and relay nodes so
        # we can create an N x N matrix. This is simply the number of segments
        # plus the number of rendezvous points. As ToCS guarantees a single
        # rendezvous point per cluster, we can just use the number of clusters.

        node_count = len(self.sim.segments)
        g_sparse = np.zeros((node_count, node_count), dtype=float)
        g_sparse[:] = np.inf

        for clust in self.sim.clusters:
            cluster_tour = clust.tour
            i = len(cluster_tour.vertices) - 1
            j = 0
            while j < len(cluster_tour.vertices):
                start_vertex = cluster_tour.vertices[i]
                stop_vertex = cluster_tour.vertices[j]

                start_pt = cluster_tour.collection_points[start_vertex]
                stop_pt = cluster_tour.collection_points[stop_vertex]
                distance = np.linalg.norm(stop_pt - start_pt)

                start_seg = cluster_tour.objects[start_vertex]
                stop_seg = cluster_tour.objects[stop_vertex]

                start_index = self._segment_indexes[start_seg]
                stop_index = self._segment_indexes[stop_seg]

                g_sparse[start_index, stop_index] = distance

                i = j
                j += 1

        g_sparse = sp.csgraph_from_dense(g_sparse, null_value=np.inf)
        return g_sparse
Code example #41
File: movement.py Project: forgetfulyoshi/wsnsims
    def _compute_adjacency_matrix(self):
        """
        Build out the adjacency matrix based on the paths created by the
        builder. This takes the cluster paths and builds out a matrix suitable
        for use in Dijkstra's algorithm.

        :return: The adjacency matrix for the simulation
        :rtype: sp.csr_matrix
        """

        # Set up a quick-reference index to map cells to indexes
        for i, cell in enumerate(self.sim.cells):
            self._cell_indexes[cell] = i

        if all([self.sim.hub.cells == [self.sim.damaged],
                self.sim.damaged not in self.sim.cells]):
            # Add the "damaged" virtual cell to the index if we need it
            self._cell_indexes[self.sim.damaged] = len(self.sim.cells)

        node_count = len(list(self._cell_indexes.keys()))
        g_sparse = np.zeros((node_count, node_count), dtype=float)
        g_sparse[:] = np.inf

        for cluster in self.sim.clusters + [self.sim.hub]:
            cluster_tour = cluster.tour
            i = len(cluster_tour.vertices) - 1
            j = 0
            while j < len(cluster_tour.vertices):
                start_vertex = cluster_tour.vertices[i]
                stop_vertex = cluster_tour.vertices[j]

                start_pt = cluster_tour.points[start_vertex]
                stop_pt = cluster_tour.points[stop_vertex]
                distance = np.linalg.norm(stop_pt - start_pt)

                start_seg = cluster_tour.objects[start_vertex]
                stop_seg = cluster_tour.objects[stop_vertex]

                start_index = self._cell_indexes[start_seg]
                stop_index = self._cell_indexes[stop_seg]

                g_sparse[start_index, stop_index] = distance

                i = j
                j += 1

        g_sparse = sp.csgraph_from_dense(g_sparse, null_value=np.inf)
        return g_sparse
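The two movement.py examples above initialize the dense matrix to np.inf and then declare np.inf as the null value, so only the distances that were explicitly written become edges. A minimal sketch of that pattern in isolation (the 3-node matrix and its weights are made up):

import numpy as np
from scipy.sparse.csgraph import csgraph_from_dense, dijkstra

dense = np.full((3, 3), np.inf)
dense[0, 1] = 4.0   # only two directed edges are defined
dense[1, 2] = 2.5
sparse = csgraph_from_dense(dense, null_value=np.inf)
print(dijkstra(sparse, directed=True, indices=0))  # [0.  4.  6.5]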
Code example #42
File: test_traversal.py Project: 1641731459/scipy
def test_graph_breadth_first():
    csgraph = np.array([[0, 1, 2, 0, 0],
                        [1, 0, 0, 0, 3],
                        [2, 0, 0, 7, 0],
                        [0, 0, 7, 0, 1],
                        [0, 3, 0, 1, 0]])
    csgraph = csgraph_from_dense(csgraph, null_value=0)

    bfirst = np.array([[0, 1, 2, 0, 0],
                       [0, 0, 0, 0, 3],
                       [0, 0, 0, 7, 0],
                       [0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 0]])

    for directed in [True, False]:
        bfirst_test = breadth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(bfirst_test),
                                  bfirst)
Code example #43
    def induce_type1_cascades(tuple):
        entity = tuple[0]
        chains = tuple[1]
        # entity_posts = tuple[1]['posts']

        # Get the entity posts
        entity_citation_posts = entity_posts_map_broadcast.value
        entity_posts = entity_citation_posts[entity]

        # log the connected chains of the entity
        connected_chains = []
        for chain in chains:
            # Filter the chain down to include only posts citing the entity
            new_chain = set()
            for chain_edge in chain:
                source = chain_edge.split("->")[0]
                target = chain_edge.split("->")[1]
                if source in entity_posts and target in entity_posts:
                    new_chain.add(chain_edge)

            # too inefficient; better to convert to matrix form and then run this on the graph
            # 1. Induce maps between node label and ids
            node_to_index = {}
            index_to_node = {}
            index = -1
            for chain_edge in new_chain:
                source = chain_edge.split("->")[0]
                target = chain_edge.split("->")[1]

                if source not in node_to_index:
                    index += 1
                    node_to_index[source] = index
                    index_to_node[index] = source

                if target not in node_to_index:
                    index += 1
                    node_to_index[target] = index
                    index_to_node[index] = target


            # 2. Populate the nd matrix
            dim = len(node_to_index)
            if dim > 1:
                M = np.zeros(shape=(dim, dim))
                for chain_edge in new_chain:
                    source = chain_edge.split("->")[0]
                    source_index = node_to_index[source]
                    target = chain_edge.split("->")[1]
                    target_index = node_to_index[target]
                    M[source_index, target_index] = 1

                # 3. Induce the connected components from the matrix
                # print(str(dim))
                # print(str(M))
                Msp = csgraph.csgraph_from_dense(M, null_value=0)
                n_components, labels = csgraph.connected_components(Msp, directed=True)
                # print("Number of connected components = " + str(n_components))
                # print("Components labels = " + str(labels))
                # get the components and their chains
                for i in range(0, n_components):
                    # print("Component: " + str(i))
                    component_chain = []
                    # get the nodes in that component
                    # print(labels)
                    c_nodes = [j for j in range(len(labels)) if labels.item(j) == i]
                    # print(c_nodes)
                    # Only log the component if more than one node is in it
                    if len(c_nodes) > 1:
                        # build the canonical edges
                        for source_id in c_nodes:
                            for target_id in c_nodes:
                                if int(M[(source_id, target_id)]) == 1:
                                    component_chain.append(str(source_id) + "->" + str(target_id))
                        if len(component_chain) > 0:
                            connected_chains.append(component_chain)

            canonical_chains = connected_chains
            print("Canonical Chains:")
            print(canonical_chains)


        # return back to the function the mapping between the entity and the connected chains
        return (entity, canonical_chains)
Code example #44
File: TREE.py Project: yesimon/rosalind
#!/usr/bin/env python
import sys
import numpy as np
from scipy.sparse.csgraph import csgraph_from_dense, connected_components


if __name__ == '__main__':
    lines = sys.stdin.read().strip().split('\n')
    n = int(lines[0])
    m = np.zeros((n, n))
    adj = [[int(x) for x in line.split(' ')] for line in lines[1:]]
    for edge in adj:
        m[edge[0]-1, edge[1]-1] = 1
    g = csgraph_from_dense(m)
    print(connected_components(g, False)[0] - 1)
Code example #45
File: graphs.py Project: allista/DegenPrimer
    def _subgroups(edges):
        graph = _matrix_from_edges(edges)
        n_comp, comp = connected_components(csgraph_from_dense(graph), directed=False)
        if n_comp < 2: return [edges]
        groups = [[] for _n in xrange(n_comp)]
        for edge in edges: groups[comp[edge[0]]].append(edge)
        groups = [group for group in groups if group]
        return groups
    #end def
    
    edges = [[1,2], [3,4], [5,6], [1,4], [3,4], [5,2], [7,8], [8,9], [7,10]]
    
    graph = _matrix_from_edges(edges)
    print graph
    
    components = connected_components(csgraph_from_dense(graph), directed=False)
    print components
    
    groups = _subgroups(edges)
    print np.array(groups)
    
    
#    from timeit import timeit
#    from tests.violin_plot import violin_plot
#    from matplotlib.pyplot import figure, show
#    
#    lfor = [timeit('''
#A = 0
#for edge in edges:
#    A += edge[0]*edge[1]
#    ''', 'from __main__ import edges') for _i in xrange(100)]