Example #1
def convert_local_tree_topology_to_graph(loc_tree_topo, tree_node_labeling):
    """ Creates a directed, acyclic NetworkX graph from a local tree topology

    Parameters
    ----------
    loc_tree_topo: array-like
        The local tree topology, where the root node element is -1

    tree_node_labeling: array-like
        The integer ids for each tree node

    Returns
    -------
    G : NetworkX graph

    """

    assert( loc_tree_topo[0] == -1 )

    G = Graph()
    G.add_nodes_from( tree_node_labeling )
    # build up graph connectivity
    con = vstack( (loc_tree_topo, range(len(loc_tree_topo))) )
    # prune root node connectivity
    con = con[:,1:]
    # update with correct labels
    con = tree_node_labeling[con]
    G.add_edges_from( zip(con[0,:], con[1,:]) )

    return G
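A quick sanity check of the example above (hypothetical inputs; assumes numpy's vstack and networkx's Graph are imported, as the snippet itself presumes): a four-node tree given as a parent array, relabeled with ids 10-13.

import numpy as np

loc_tree_topo = np.array([-1, 0, 0, 1])          # entry i is the parent of node i; -1 marks the root
tree_node_labeling = np.array([10, 11, 12, 13])  # integer ids for the tree nodes
G = convert_local_tree_topology_to_graph(loc_tree_topo, tree_node_labeling)
print(sorted(G.edges()))  # [(10, 11), (10, 12), (11, 13)]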
Example #2
    def root_tree(self, tree: nx.Graph, root_sample: str,
                  remaining_samples: List[str]):
        """Roots a tree produced by UPGMA.

        Adds the root at the top of the UPGMA reconstructed tree. By the
        ultrametric assumption, the root is placed as the parent to the last
        two unjoined nodes.

        Args:
            tree: Networkx object representing the tree topology
            root_sample: Ignored here; by the ultrametric assumption the root placement is already known
            remaining_samples: The last two unjoined nodes in the tree

        Returns:
            A rooted tree.
        """

        tree.add_node("root")
        tree.add_edges_from([("root", remaining_samples[0]),
                             ("root", remaining_samples[1])])

        rooted_tree = nx.DiGraph()
        for e in nx.dfs_edges(tree, source="root"):
            rooted_tree.add_edge(e[0], e[1])

        return rooted_tree
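Since self and root_sample go unused above, a hedged usage sketch can call the method as a plain function (dedented) on a toy tree whose two subtrees are still unjoined; the data here is hypothetical.

import networkx as nx

t = nx.Graph([('a', 'c'), ('b', 'd')])         # two unjoined subtrees, rooted at 'a' and 'b'
rooted = root_tree(None, t, None, ['a', 'b'])  # None stands in for the unused self/root_sample
print(sorted(rooted.edges()))
# [('a', 'c'), ('b', 'd'), ('root', 'a'), ('root', 'b')]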
Example #3
def part_2(entries):
    def has_doubled_small_cave(path):
        small_caves = [c for c in path if not c.isupper()]
        return len(small_caves) != len(set(small_caves))

    def walk(node):
        path.append(node)
        if node == 'end':
            all_paths.append(path.copy())
        else:
            for neighbor in graph[node].keys():
                if neighbor.isupper():
                    walk(neighbor)
                elif neighbor in ('start', 'end'):
                    if neighbor not in path:
                        walk(neighbor)
                elif not has_doubled_small_cave(path):
                    walk(neighbor)
                elif neighbor not in path:
                    walk(neighbor)
        path.pop()

    graph = Graph()
    graph.add_edges_from(entries)

    path = []
    all_paths = []
    walk('start')
    return len(all_paths)
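For reference, a hedged check against the small AoC 2021 day 12 sample input (assumes networkx's Graph is in scope, as the snippet presumes):

entries = [('start', 'A'), ('start', 'b'), ('A', 'c'),
           ('A', 'b'), ('b', 'd'), ('A', 'end'), ('b', 'end')]
print(part_2(entries))  # 36 paths for this sample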
Example #4
def generate_wMVC_summ(input_path: str, reference_path: str, output_path: str, limit: int):
    for doc in listdir(input_path):
        with open(input_path + doc, 'r') as f:
            txt = f.read()
            if txt == '' or txt == 'None.':
                open(output_path + doc, 'w', encoding='utf8').write('')
                continue

        sent_tokens, factors = get_sentences_with_factors(input_path, reference_path, doc)

        if document_word_count(sent_tokens) < 250:
            open(output_path + doc, 'w', encoding='utf8').write('\n\n'.join(sent_tokens))
            continue

        dist = compute_entailment(sent_tokens)

        threshold = compute_threshold(dist, 0.5)
        large = 2 * max(factors) * len(sent_tokens)

        vertices = [(i, {'weight': large - factors[i] * (sum(dist[i]) - 1)})
                    for i in range(len(sent_tokens))]

        edges = create_edge_set(dist, threshold)

        G = Graph()
        G.add_nodes_from(vertices)
        G.add_edges_from(edges)

        wMVC = min_weighted_vertex_cover(G, 'weight')

        tops = sorted([i for i in wMVC],
                      key=lambda x: vertices[x][1]['weight'])

        write_summary(output_path, doc, limit, sent_tokens, tops)
Example #5
def cache_langs():
    ''' Read in all files and save as pickle
    '''
    langs = {}
    dir_path = Path(LANGS_DIR)
    # Sort by language code
    paths = sorted(dir_path.glob('./*/config.y*ml'),
                   key=lambda x: x.parent.stem)
    mappings_legal_pairs = []
    for path in paths:
        code = path.parent.stem
        with open(path, encoding='utf8') as f:
            data = yaml.safe_load(f)
        # If there is a mappings key, there is more than one mapping
        # TODO: should put in some measure to prioritize non-generated mappings and warn when they override
        if 'mappings' in data:
            for index, mapping in enumerate(data['mappings']):
                mappings_legal_pairs.append(
                    (data['mappings'][index]['in_lang'],
                     data['mappings'][index]['out_lang']))
                data['mappings'][index] = load_mapping_from_path(path, index)
        else:
            data = load_mapping_from_path(path)
        langs = {**langs, **{code: data}}

    lang_network = Graph()
    lang_network.add_edges_from(mappings_legal_pairs)

    with open(LANGS_NWORK_PATH, 'wb') as f:
        write_gpickle(lang_network, f)

    with open(LANGS_PKL, 'wb') as f:
        pickle.dump(langs, f)

    return langs
Example #6
    def __init__(self, y):
        # Pre-cache a sparse LU decomposition of the FL matrix
        from pygfl.utils import get_1d_penalty_matrix
        from scipy.sparse.linalg import factorized
        from scipy.sparse import csc_matrix
        D = get_1d_penalty_matrix(y.shape[0])
        D = np.vstack([D, np.zeros(y.shape[0])])
        D[-1,-1] = 1e-6 # Nugget for full rank matrix
        D = csc_matrix(D)
        self.invD = factorized(D)

        # Setup the fast GFL solver
        from pygfl.solver import TrailSolver
        from pygfl.trails import decompose_graph
        from pygfl.utils import hypercube_edges, chains_to_trails
        from networkx import Graph
        edges = hypercube_edges(y.shape)
        g = Graph()
        g.add_edges_from(edges)
        chains = decompose_graph(g, heuristic='greedy')
        ntrails, trails, breakpoints, edges = chains_to_trails(chains)
        self.solver = TrailSolver()
        self.solver.set_data(y, edges, ntrails, trails, breakpoints)

        from pygfl.easy import solve_gfl
        self.beta = solve_gfl(y)
Example #7
    def set_plot_edges(self, g: nx.Graph, edges=None):
        # map raw edge pairs to node names before inserting them; a None
        # default avoids the shared mutable-default pitfall of edges=[]
        if edges is not None:
            edges = [(self.data_svc.get_node(iEdge[0]).name,
                      self.data_svc.get_node(iEdge[1]).name)
                     for iEdge in edges]
            g.add_edges_from(edges)
Example #8
def wsi(ego, topn=TOPN):
    tic = time()
    ego_network = Graph(name=ego)

    pairs = get_disc_pairs(ego, topn)
    nodes = get_nodes(pairs)   
    
    ego_network.add_nodes_from( [(node, {'size': size}) for node, size in nodes.items()] )
    
    for r_node in ego_network:
        related_related_nodes = list2dict(get_nns(r_node))
        related_related_nodes_ego = sorted(
            [(related_related_nodes[rr_node], rr_node) for rr_node in related_related_nodes if rr_node in ego_network],
            reverse=True)[:topn]
        
        related_edges = []
        for w, rr_node in related_related_nodes_ego:
            if get_pair(r_node, rr_node) not in pairs:
                related_edges.append( (r_node, rr_node, {"weight": w}) )
            else:
                print("Skipping:", r_node, rr_node)
        ego_network.add_edges_from(related_edges)

    chinese_whispers(ego_network, weighting="top", iterations=20)
    if verbose: print("{}\t{:f} sec.".format(ego, time()-tic))

    return {"network": ego_network,  "nodes": nodes}
Example #9
def extract_colored_faces(fname, colors):
    output = {color:[] for color in colors}
    vertices, faces = load_ply(fname)
    
    for color in colors:
        colored_vertices_indices = np.nonzero((vertices['color'] == color).all(axis=1))[0]
        colored_faces = np.nonzero(np.all((np.in1d(faces["indices"][:,0], colored_vertices_indices),
                                           np.in1d(faces["indices"][:,1], colored_vertices_indices),
                                           np.in1d(faces["indices"][:,2], colored_vertices_indices)), axis=0))[0]

        colored_faces_graph = Graph()
        colored_faces_graph.add_edges_from(faces['indices'][colored_faces][:,:2])
        colored_faces_graph.add_edges_from(faces['indices'][colored_faces][:,1:])
        colored_faces_graph.add_edges_from(faces['indices'][colored_faces][:,(0,2)])
        
        planes_vertices_indices = list(connected_components(colored_faces_graph))
        print(len(planes_vertices_indices))
        for plane_vertices_indices in planes_vertices_indices:
            colored_vertices = vertices["position"][list(plane_vertices_indices)]
            dipdir, dip = calc_sphere(*general_axis(colored_vertices, -1))
            X, Y, Z = colored_vertices.mean(axis=0)
            highest_vertex = colored_vertices[np.argmax(colored_vertices[:, 2]), :]
            lowest_vertex = colored_vertices[np.argmin(colored_vertices[:, 2]), :]
            trace = np.linalg.norm(highest_vertex - lowest_vertex)
            output[color].append((dipdir, dip, X, Y, Z, trace))
    return output
Example #10
def _update_unfit_groups_with_crossgroup_dist(dist_metrics, fit_group, fit_pvals, unfit_group, buffer_group,
                                              user_ids, user_profiles, user_connections, ks_alpha=0.05):
    """ update members in unfit_group with cross-group distance. unfit members are kept in buffer_group
    """
    # rebuild the user graph from user_connections to keep the API consistent
    user_graph = Graph()
    user_graph.add_edges_from(user_connections)

    unfit_group_copy = unfit_group.copy()
    for gg, gg_user_ids in unfit_group_copy.items():
        # extract cross-group distance metrics dictionary to avoid duplicate
        # tests with distance metrics associated with user's group
        other_group_keys = [group_key for group_key in dist_metrics.keys() if not group_key == gg]
        cross_group_dist_metrics = {key: dist_metrics[key] for key in other_group_keys}

        for ii, ii_user_id in enumerate(gg_user_ids):
            ii_new_group, ii_new_pval = find_fit_group(ii_user_id, cross_group_dist_metrics,
                                                       user_ids, user_profiles, user_graph, ks_alpha,
                                                       current_group=None, fit_rayleigh=False)
            # redistribute the user based on fit-tests
            if ii_new_group is not None:
                # a fit was found; record the user in fit_group
                if ii_new_group in fit_group:
                    fit_group[ii_new_group].append(ii_user_id)
                    fit_pvals[ii_new_group].append(ii_new_pval)
                else:
                    fit_group[ii_new_group] = [ii_user_id]
                    fit_pvals[ii_new_group] = [ii_new_pval]
            else:
                buffer_group.append(ii_user_id)

    return fit_group, fit_pvals, buffer_group
Example #11
def plot_graph(g):
    s = Graph()
    s.add_edges_from(g.edges_tuples())
    draw(s, with_labels=True)
    plt.savefig("grafo.png")

    s = None
Example #12
def create_render(state):  # Should be done once.
    g = Graph()

    pos = {}
    edges = []
    index = 0

    target = state
    dim = len(target)
    y_start = dim
    x_start = 0
    for i in range(dim):
        row_dim = len(target[i])
        for j in range(row_dim):
            g.add_node(index)

            # Add Edges
            if j > 0 and i + 1 < dim:  # Next column, previous row.
                edges.append((index, index + row_dim - 1))
            if j + 1 < row_dim:  # Next in row.
                edges.append((index, index + 1))
            if i + 1 < dim and j < len(
                    target[i + 1]):  # Next column, next row.
                edges.append((index, index + row_dim))

            pos[index] = (j + x_start, y_start - j)
            index += 1
        x_start -= 1
        y_start -= 1

    g.add_edges_from(edges)

    return g, pos
Example #13
def _update_buffer_group(dist_metrics, fit_group, fit_pvals, buffer_group,
                         user_ids, user_profiles, user_connections, ks_alpha=0.05):
    """ return fit_group, fit_pvals, buffer_group
        redistribute member in buffer group into fit_group if fit had been found
    """
    # rebuild the user graph from user_connections to keep the API consistent
    user_graph = Graph()
    user_graph.add_edges_from(user_connections)

    buffer_group_copy = buffer_group.copy()
    if len(buffer_group_copy) > 0:
        for ii, ii_user_id in enumerate(buffer_group_copy):
            ii_new_group, ii_new_pval = find_fit_group(ii_user_id, dist_metrics,
                                                       user_ids, user_profiles, user_graph, ks_alpha,
                                                       current_group=None, fit_rayleigh=False)
            if ii_new_group is not None:
                # remove member with fit from buffer_group
                buffer_group.remove(ii_user_id)
                if ii_new_group in fit_group:
                    fit_group[ii_new_group].append(ii_user_id)
                    fit_pvals[ii_new_group].append(ii_new_pval)
                else:
                    fit_group[ii_new_group] = [ii_user_id]
                    fit_pvals[ii_new_group] = [ii_new_pval]

    return fit_group, fit_pvals, buffer_group
Example #14
def create_difficult_pattern(size):
    '''The eq ids go from 0..size-1, the column ids from size..2*size-1.
    A pathological pattern, resulting in many ties:
    | x x         | 
    |     x x     |
    |         x x |
    | x x x x     |
    |     x x x x |
    | x x     x x |  '''
    assert size % 2 == 0, size
    rows, cols = list(irange(size)), list(irange(size, 2 * size))
    g = Graph()
    half_size = size // 2
    # build upper half
    for i in irange(half_size):
        g.add_edges_from(((i, size + 2 * i), (i, size + 2 * i + 1)))
    # build lower half
    for i in irange(half_size, size):
        k = 2 * (i - half_size)
        vrs = [size + v % size for v in irange(k, k + 4)]
        g.add_edges_from(izip(repeat(i), vrs))
    assert is_bipartite_node_set(g, rows)
    assert is_bipartite_node_set(g, cols)
    #to_pdf(g, rows, cols, '', str(size))
    #plot_dm_decomp(g, size)
    return g
Example #15
def compute_molecule(universe):
    '''
    Cluster atoms into molecules.

    The algorithm creates a network graph containing every atom (in every
    frame) as nodes and bonds as edges. Using this connectivity information,
    one can perform a (breadth-first) traversal of the network graph to
    cluster all nodes (whose indices correspond to physical atoms).

    Args:
        universe (:class:`~exatomic.universe.Universe`): Atomic universe

    Returns:
        objs (tuple): Molecule indices (for atom dataframe(s)) and molecule dataframe

    Warning:
        This function will modify (in place) a few tables of the universe!
    '''
    if 'bond_count' not in universe.atom:    # The bond count is used to find single atoms;
        universe.compute_bond_count()        # single atoms are treated as molecules.
    b0 = None
    b1 = None
    bonded = universe.two[universe.two['bond'] == True]
    if universe.is_periodic:
        mapper = universe.projected_atom['atom']
        b0 = bonded['prjd_atom0'].map(mapper)
        b1 = bonded['prjd_atom1'].map(mapper)
    else:
        b0 = bonded['atom0']
        b1 = bonded['atom1']
    graph = Graph()
    graph.add_edges_from(zip(b0.values, b1.values))
    mapper = {}
    for i, molecule in enumerate(connected_components(graph)):
        for atom in molecule:
            mapper[atom] = i
    n = 1
    if len(mapper.values()) > 0:
        n += max(mapper.values())
    else:
        n -= 1
    idxs = universe.atom[universe.atom['bond_count'] == 0].index
    for i, index in enumerate(idxs):
        mapper[index] = i + n
    # Set the molecule indices
    universe.atom['molecule'] = universe.atom.index.map(lambda idx: mapper[idx])
    # Now compute molecule table
    universe.atom['mass'] = universe.atom['symbol'].map(symbol_to_element_mass)
    # The coordinates of visual_atom represent grouped molecules for
    # periodic calculations and absolute coordinates for free boundary conditions.
    molecules = universe.atom.groupby('molecule')
    molecule = molecules['symbol'].value_counts().unstack().fillna(0).astype(np.int64)
    molecule.columns.name = None
    molecule['frame'] = universe.atom.drop_duplicates('molecule').set_index('molecule')['frame']
    molecule['mass'] = molecules['mass'].sum()
    del universe.atom['mass']
    frame = universe.atom[['molecule', 'frame']].drop_duplicates('molecule')
    frame = frame.set_index('molecule')['frame'].astype(np.int64)
    molecule['frame'] = frame.astype('category')
    return Molecule(molecule)
Example #16
    def downgrade(self):
        """
        Downgrade hypergraph diagram to :class:`discopy.rigid.Diagram`.

        Examples
        --------
        >>> x = Ty('x')
        >>> v = Box('v', Ty(), x @ x)
        >>> print((v >> Swap(x, x) >> v[::-1]).downgrade())
        v >> Swap(x, x) >> v[::-1]
        >>> print((Id(x) @ Swap(x, x) >> v[::-1] @ Id(x)).downgrade())
        Id(x) @ Swap(x, x) >> v[::-1] @ Id(x)
        """
        diagram = self.make_progressive()
        graph = Graph()
        graph.add_nodes_from(diagram.ports)
        graph.add_edges_from([(diagram.ports[i], diagram.ports[j])
                              for i, j in enumerate(diagram.bijection)])
        graph.add_nodes_from([
            Node("box",
                 depth=depth,
                 box=box if isinstance(box, rigid.Box) else rigid.Box(
                     box.name,
                     box.dom,
                     box.cod,
                     _dagger=box.is_dagger,
                     data=box.data)) for depth, box in enumerate(diagram.boxes)
        ])
        graph.add_nodes_from([
            Node("box",
                 depth=len(diagram.boxes) + i,
                 box=rigid.Spider(0, 0, diagram.spider_types[s]))
            for i, s in enumerate(diagram.scalar_spiders)
        ])
        return drawing.nx2diagram(graph, rigid.Ty, rigid.Id)
Example #17
def tuples_to_graph(tuples):
    G = Graph()
    for node, attribute in tuples:
        print('adding', node, attribute)
        G.add_nodes_from(node, freq=attribute)
        G.add_edges_from(to_edges(node))
    return G
Example #18
def naughty_brute_force():
    for size in irange(1, 6):
        print_timestamp()
        print('Testing (naughty) bipartite graphs of size', size)
        opts = marshal_load('data/all_bips/opt_n'+str(size)+'.bin')
        all_edgelists = marshal_load('data/all_bips/bip_n'+str(size)+'.bin')
        print('Loaded', len(all_edgelists), 'graphs')
        print_timestamp()
        for i, (edgelist, opt) in enumerate(izip(all_edgelists, opts)):
            g = Graph()
            g.add_edges_from(e for e in izip(edgelist[::2], edgelist[1::2]))
            g.graph['name'] = str(i)
            _, _, _, tear_set, _ = bb2_solve(g, set(irange(size)))
            assert opt == len(tear_set)
            #to_pdf(g, rowp,  colp)
        #print([t[0] for t in _worst_cases])
        #print('Len:', len(_worst_cases))
        #_worst_cases.sort(key=sort_patterns)
#         for i, (explored, g, _, rowp, colp, ub) in enumerate(_worst_cases, 1):
#             msg   = 'Index: ' + g.graph['name']
#             fname = '{0:03d}a'.format(i)
#             to_pdf(g, list(irange(size)), irange(size, 2*size), msg, fname)
#             msg   = 'OPT = {}, BT: {}'.format(ub, explored)
#             fname = '{0:03d}b'.format(i)
#             to_pdf(g, rowp, colp, msg, fname)
        #_worst_cases[:] = [ ]
        print_timestamp()
        print()
Example #19
def build_propagation_graph(sequences, cor_threshold, best_fit_threshold):
    '''
    sequence should be a tuple (id, array)
    thresholds should exist in [0, 1]
    '''
    #list to hold tuple associations
    tups_list = []
    #for each sequence in sequences
    for sequence in sequences:
        #find the corrcoef in relation to all other sequences
        for sec in [o_sec for o_sec in sequences if o_sec != sequence]:
            #if correlated within threshold bounds, ensure not already in list
            cor = corrcoef(sequence[1], sec[1])[0, 1]
            ratio = polyfit_ratio(sequence[1], sec[1])
            if (cor <= cor_threshold and abs(1 - ratio) <= best_fit_threshold
                    and not any(sequence[0] in tup and sec[0] in tup
                                for tup in tups_list)):
                #if not already in list, append (_id1, _id2, corrcoef) tuple
                tups_list.append((sequence[0], sec[0], {'weight': cor}))
    #build graph from association list
    graph = Graph()
    graph.add_edges_from(tups_list)
    #return graph
    return graph
Example #20
def get_ego_network(ego):
    tic = time()
    ego_network = Graph(name=ego)

    # Add related and substring nodes
    substring_nodes = []
    for j, node in enumerate(G.index):
        if ego.lower() == node.lower():
            nns_node = G.get_neighbors(node)
            ego_nodes = [(rn, {"weight": w}) for rn, w in nns_node.items()]
            ego_network.add_nodes_from(ego_nodes)
        else:
            if "_" not in node: continue
            if node.startswith(ego + "_") or node.endswith("_" + ego):
                substring_nodes.append((node, {"weight": WEIGHT_COEF}))
    ego_network.add_nodes_from(substring_nodes)

    # Find edges of the ego network
    for r_node in ego_network:
        related_related_nodes = G.get_neighbors(r_node)
        related_related_nodes_ego = sorted(
            [(related_related_nodes[rr_node], rr_node)
             for rr_node in related_related_nodes if rr_node in ego_network],
            reverse=True)[:n]
        related_edges = [(r_node, rr_node, {
            "weight": w
        }) for w, rr_node in related_related_nodes_ego]
        ego_network.add_edges_from(related_edges)

    chinese_whispers(ego_network, weighting="top", iterations=20)
    if verbose: print("{}\t{:f} sec.".format(ego, time() - tic))

    return ego_network
Example #21
def case_on_20(desired_runs, repetitions, k):
    G = Graph()
    G.add_nodes_from([
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
    ])
    G.add_edges_from([(1, 6), (1, 4), (2, 7), (3, 8), (3, 9), (3, 4), (3, 13),
                      (4, 14), (4, 5), (5, 10), (6, 12),
                      (7, 8), (8, 9), (8, 13), (9, 13), (9, 14), (10, 14),
                      (11, 5), (11, 15), (10, 15), (10, 11), (5, 15), (14, 18),
                      (14, 19), (15, 19), (15, 20), (17, 18), (12, 17),
                      (12, 18), (19, 20)])
    mp = [[1, 2, 3, 5, 8, 9, 10, 11, 12, 13, 17, 18, 19, 20],
          [4, 6, 7, 15, 16]]
    v = 14

    average_times = []
    for x in range(0, repetitions):
        start_time = time.time()
        for n in range(0, desired_runs):
            inductive_recognition(G, v, mp, k)  #rm-print
        elapsed_time = time.time() - start_time

        average_time = elapsed_time / desired_runs
        average_times.append(average_time)

    print(inductive_recognition(G, v, mp, k))
    return min(average_times)
Example #22
    def compute_connected_components(self) -> bool:
        graph = Graph()
        graph.add_edges_from(self.joined_pairs)
        self.bridges = list(connected_components(graph))

        return bool(self.bridges)
Example #23
def test_total_cut_size():
    """ Test if cut size of subsets in partition match """
    G = Graph()
    G.add_edges_from([(1, 4), (2, 3)])
    partition = [(1, 2), (3, 4)]
    output = group_graph.total_cut_size(G, partition)
    expected_output = 2
    assert output == expected_output
Example #24
def adj_to_graph(path):
    mat = np.genfromtxt(path, delimiter=',')
    rows, cols = np.where(mat == 1)
    edges = zip(rows, cols)
    graph = Graph()
    graph.add_edges_from(edges)

    return graph
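A small round-trip check (hypothetical file name): write a 3-node path graph as a CSV adjacency matrix and read it back.

import numpy as np

adj = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]])
np.savetxt('adj.csv', adj, fmt='%d', delimiter=',')
g = adj_to_graph('adj.csv')
print(sorted((int(u), int(v)) for u, v in g.edges()))  # [(0, 1), (1, 2)]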
Example #25
def filtered_edge_insert(g: nx.Graph, edges):
    # check that node is already in graph: else networkx will create that node and we don't want that
    def both_nodes_exist(e):
        return g.has_node(e[0]) and g.has_node(e[1])

    # materialise the filter first: iterating it twice (once in add_edges_from,
    # once in len) would exhaust the iterator and always report zero
    edges = list(filter(both_nodes_exist, edges))
    g.add_edges_from(edges)
    return len(edges)
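A quick check of the filtering behaviour on a toy graph: the edge touching the unknown node 9 is dropped instead of silently creating that node.

import networkx as nx

g = nx.Graph()
g.add_nodes_from([1, 2, 3])
inserted = filtered_edge_insert(g, [(1, 2), (2, 9)])
print(inserted, sorted(g.edges()))  # 1 [(1, 2)]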
Example #26
def AlphaShape(atoms, alpha, get_graph=False, write_objfile=None):
    """Get the Alpha Shape of the atoms.

    Given a set of atoms as 'Atom' (packman.molecule.Atom) objects and an alpha value, this function calculates and returns the alpha shape tessellations. It will also write the .obj file if the filename is provided as an input.

    Notes:
        * Tip: To avoid recomputation, calculate once with alpha = float('Inf'), then use the tessellations to compute each circumradius and save them in a dictionary so tessellations for any cutoff can be retrieved.
        * For more information on the alpha shape, read the following paper:
            H. Edelsbrunner and E. P. Mücke. Three-dimensional alpha shapes.
            Manuscript UIUCDCS-R-92-1734, Dept. Comput. Sci., Univ. Illinois, Urbana-Champaign, IL, 1992.
    
    Args:
        atoms (packman.molecule.Atom) : 
        alpha (float)                 : 
        get_graph (networkx.Graph)    : 
        write_objfile (str)           : 
    
    Returns:
        - Alpha Shape Tessellations                ; if 'get_graph' = False
        - Alpha Shape Tessellations, Protein Graph ; if 'get_graph' = True
    """
    def calculate_alphafitness(alpha, circumradius):
        """Alpha Test as per the paper.
        
        Notes:
            * Resides inside 'get_alphashape' function.

        Args:
            alpha (float)                  : Alpha.                            (Read parent method description)
            circumradius ([float])         : Circumradius of the circumsphere. (Read parent method description)
        
        Returns:
            bool: True if alpha test is passed. False otherwise.
        """
        return circumradius < alpha

    #get_alphashape function commands.
    DelaunayTessellations = Delaunay([i.get_location() for i in atoms])
    AlphaShape = []
    ProteinGraph = Graph()

    for a, b, c, d in DelaunayTessellations.simplices:  # '.vertices' in older SciPy
        Tetrahedron = [atoms[a], atoms[b], atoms[c], atoms[d]]
        Centre, Radius = Circumsphere(Tetrahedron)
        #Alpha Test
        if (calculate_alphafitness(alpha, Radius)):
            ProteinGraph.add_nodes_from([a, b, c, d])
            ProteinGraph.add_edges_from([(a, b), (a, c), (a, d), (b, c),
                                         (b, d), (c, d)])
            AlphaShape.append([atoms[a], atoms[b], atoms[c], atoms[d]])

    if (get_graph):
        return AlphaShape, ProteinGraph
    else:
        return AlphaShape
Example #27
    def all_clusters(cls):
        graph = Graph()
        for edges in [[(h, p) for p in h.ha_cluster_peers.all()] for h in ManagedHost.objects.all()]:
            graph.add_edges_from(edges)

        clusters = []
        for cluster_peers in find_cliques(graph):
            clusters.append(cls(cluster_peers))
        return clusters
Example #28
    def setUp(self) -> None:
        """
        Sets up unittest
        """
        graph = Graph()
        graph.add_nodes_from(NODES)
        graph.add_edges_from(EDGES)
        self.individual = TravelPath(0.3, graph)
        self.stable_one = TravelPath(0, graph)
Example #29
def greedyColor(edgesX, node_countX):
    G = Graph()
    G.add_edges_from(edgesX)

    res = algorithms.coloring.greedy_color(G, strategy='independent_set')

    taken = [0] * node_countX
    for key, value in res.items():
        taken[key] = value
    return taken
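A hedged usage sketch (assumes from networkx import Graph, algorithms, as the snippet presumes): a 4-cycle admits a valid colouring with just two colours.

taken = greedyColor([(0, 1), (1, 2), (2, 3), (3, 0)], 4)
print(taken)  # a valid 2-colouring, e.g. [0, 1, 0, 1]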
Example #30
File: p4.py  Project: slsyy/hypergraphs
def P4(graph: nx.Graph, central_hyperedge, image: Image):
    copy = graph.copy()

    try:
        run_P4(graph, central_hyperedge, image)
    except Exception:
        graph.clear()
        graph.add_nodes_from(copy)
        graph.add_edges_from(copy.edges)
        raise
Example #31
def get_bridges_from_pairs(query_database,
                           query_collection,
                           name_collection_outgoing,
                           n=2):
    """
    Function gets pairs from collection generated in get_pairs function. Function
    then takes advantage of NetworkX to find n-bridges using network theory approaches.
    Function then exports to another collection in MongoDB: a bridges collection.

    :param query_database: Database in which Met-aromatic pairs are located.
    :param query_collection: Collection in which Met-aromatic pairs are located.
    :param name_collection_outgoing: The MongoDB collection to export data to.
    :param n: 2-bridge, 3-bridge, 4-bridge, ..., n-bridge
    :return: Nothing. Function performs operation pass-by style.
    """
    if n < 2:
        exit("Incorrect bridge order. A bridge must be of n >= 2!")
    else:
        # ensure duplicates are not being loaded
        client[query_database][name_collection_outgoing].drop()

        bridges = []
        for entry in client[query_database][query_collection].find():
            pairs = []
            for pair in entry.get('pairs'):
                pairs.append(tuple(pair.split(
                    '|')))  # 'TYR123|MET123' -> ('TYR123', 'MET123')

            G = Graph()
            G.add_edges_from(pairs)

            for disconnects in list(connected_components(G)):
                if len(disconnects) == n + 1:
                    # keep only genuine bridges; skip inverse MET-ARO-MET bridges
                    if ''.join(disconnects).count('MET') == 1:
                        bridges.append({
                            'code': entry.get('code'),
                            'EC': entry.get('EC'),
                            'bridge': list(disconnects)
                        })

        try:
            client[query_database][name_collection_outgoing].insert_many(
                bridges)
        except errors.BulkWriteError as pymongo_exception:
            print(pymongo_exception.details['writeErrors'])
        else:
            print(
                ' -- Collected all {}-bridges and exported to collection: {} \n'
                .format(n, name_collection_outgoing))
Example #32
def make_tests():
    '''Performs tests on the provided examples to check the result of the
    computation functions is ok.'''
    graph = Graph()
    graph.add_edges_from(
        parse_input('''London to Dublin = 464
London to Belfast = 518
Dublin to Belfast = 141'''))
    route_lengths = compute_route_lengths(graph)
    assert min(route_lengths) == 605
    assert max(route_lengths) == 982
Example #33
File: p4.py  Project: mrapacz/hypergraphs
def P4(graph: nx.Graph, central_hyperedge, image: Image):
    copy = graph.copy()

    try:
        run_P4(graph, central_hyperedge, image)
    except Exception:
        graph.clear()
        x = [(k, v) for k, v in copy.nodes(data=True)]
        graph.add_nodes_from(x)
        graph.add_edges_from(copy.edges)
        raise
Example #34
def draw_graph(graph):
    list_edges = []
    for no in graph:
        for nei in no.neighbors:
            tup = (no.id, nei)
            list_edges.append(tup)

    g_draw = Graph()
    g_draw.add_edges_from(list_edges, color='red')
    draw(g_draw, with_labels=True, font_weight='bold')
    show()
Example #35
def init_graph(io_obj):
    edges_count = int(io_obj.readline())
    edges = [tuple(io_obj.readline().strip().split(' '))
             for _ in range(edges_count)]
    g = Graph()
    for edge in edges:
        for vertex in edge:
            if vertex not in g:
                g.add_node(vertex)
    g.add_edges_from(edges)
    return g
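Usage sketch with an in-memory stream instead of a file (assumed input format: the edge count on the first line, then one space-separated edge per line; assumes networkx's Graph is imported):

import io

g = init_graph(io.StringIO("2\na b\nb c\n"))
print(sorted(g.edges()))  # [('a', 'b'), ('b', 'c')]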
Example #36
File: minfill.py  Project: darabbt/les
def eliminate_node(G, a):
    fillins = ()
    nb = frozenset(G.neighbors(a))
    for u in nb:
        for v in nb - frozenset((u,)):
            if not G.has_edge(v, u) and frozenset((u, v)) not in fillins:
                fillins += (frozenset((u, v)),)
    kill_edges = frozenset([(u, a) for u in nb] + [(a, u) for u in nb])
    H = Graph()
    H.add_nodes_from(list(frozenset(G.nodes()) - frozenset((a,))))
    H.add_edges_from(list((frozenset(G.edges()) - kill_edges) | frozenset(fillins)))
    return H
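To see the fill-in behaviour, eliminating the centre of a 3-star pairwise-connects its neighbours; a minimal sketch, assuming from networkx import Graph:

G = Graph([('a', 'b'), ('a', 'c'), ('a', 'd')])
H = eliminate_node(G, 'a')
print(sorted(tuple(sorted(e)) for e in H.edges()))
# [('b', 'c'), ('b', 'd'), ('c', 'd')]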
Example #37
    def _build_authors_graph(self):
        """
        Build authors graph with each author name as nodes and the collaboration between them as edges.

        @author 1: CipherHat

        @rtype:   networkx.Graph()
        @return:  the Graph containing nodes and edges
        """
        all_data = self.get_network_data()
        # TODO refactor: revision on this part. whether to move the Graph code to its own class
        graph = Graph()
        # the nodes format will be {"id":int, "name":str}
        graph.add_nodes_from([(i, {"name": all_data[0][i][0]}) for i in range(len(all_data[0]))])
        graph.add_edges_from(all_data[1])
        return graph
Example #38
    def get_coauthor_graph_by_author_name(self, name):
        coauthors = set()
        for p in self.publications:
            for a in p.authors:
                if a == self.author_idx[name]:
                    for a2 in p.authors:
                        if a != a2:
                            coauthors.add(a2)

        graph = Graph()
        # the nodes format will be {"id":int, "name":str}
        graph.add_node(self.author_idx[name], name = name)
        # graph.add_nodes_from([(i, {"name": all_data[0][i][0]}) for i in range(len(all_data[0]))])
        graph.add_nodes_from([(ca, {"name": self.authors[ca].name}) for ca in coauthors])
        graph.add_edges_from([(self.author_idx[name], ca) for ca in coauthors])

        return graph
Example #39
    def post_process(self):
        trackings = self.parents['irit_harmo_tracking'].results['irit_harmo_tracking'].data_object.value

        graph = Graph()

        for t, h in [(track, track.harmo_link(trackings)) for track in trackings]:

            graph.add_node(t)

            if len(h) > 0:

                graph.add_edges_from([(t, o) for o in h])

        res = self.new_result(time_mode='global')
        res.data_object.value = [c2 for c in connected_components(graph) for c2 in Cluster(c).harmo_sub()]
        self.add_result(res)

        return
Example #40
def test_two_communities():
    test = Graph()

    # c1
    c1_edges = [(0, 2), (0, 3), (0, 4), (0, 5), (1, 2), (1, 4), (1, 7), (2, 4), (2, 5),\
                    (2, 6), (3, 7), (4, 10), (5, 7), (5, 11), (6, 7), (6, 11)]

    # c2
    c2_edges = [(8, 9), (8, 10), (8, 11), (8, 14), (8, 15), (9, 12), (9, 14), (10, 11),\
                    (10, 12), (10, 13), (10, 14), (11, 13)]
    test.add_edges_from(c1_edges + c2_edges)

    # ground truth
    ground_truth = set([frozenset([0, 1, 2, 3, 4, 5, 6, 7]),
                        frozenset([8, 9, 10, 11, 12, 13, 14, 15])])

    communities = asyn_lpa.asyn_lpa_communities(test)
    result = {frozenset(c) for c in communities}
    assert_equal(result, ground_truth)
Example #41
def test_two_communities():
    test = Graph()

    # c1
    c1_edges = [(0, 2), (0, 3), (0, 4), (0, 5), (1, 2), (1, 4), (1, 7), (2, 4), (2, 5),\
                    (2, 6), (3, 7), (4, 10), (5, 7), (5, 11), (6, 7), (6, 11)]

    # c2
    c2_edges = [(8, 9), (8, 10), (8, 11), (8, 14), (8, 15), (9, 12), (9, 14), (10, 11),\
                    (10, 12), (10, 13), (10, 14), (11, 13)]
    test.add_edges_from(c1_edges + c2_edges)

    # ground truth
    ground_truth = set([frozenset([0, 1, 2, 3, 4, 5, 6, 7]),
                        frozenset([8, 9, 10, 11, 12, 13, 14, 15])])

    communities = asyn_lpa.asyn_lpa_communities(test)
    result = {frozenset(c) for c in communities}
    # probabilistic: the result could also be all nodes in one community, so accept either
    assert result in [ground_truth, {frozenset(range(16))}]
Example #42
File: graph.py  Project: laegrim/pycef
def build_cluster_graph(sequences, threshold):
    '''
    sequence should be a tuple (id, array)
    threshold should exist in [0, 1]
    '''
    #list to hold tuple associations
    tups_list = []
    #for each sequence in sequences
    for sequence in sequences:
        #find the corrcoef in relation to all other sequences
        for sec in [o_sec for o_sec in sequences if o_sec != sequence]:
            #if correlated within threshold bounds, ensure not already in list
            cor = corrcoef(sequence[1], sec[1])[0,1]
            if cor >= threshold and not any(sequence[0] in tup and sec[0] in tup
                                            for tup in tups_list):
                #if not already in list, append (_id1, _id2, corrcoef) tuple
                tups_list.append((sequence[0], sec[0], {'weight':cor}))
    #build graph from association list
    graph = Graph()
    graph.add_edges_from(tups_list)
    #return graph
    return graph
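A toy run (assumes from numpy import corrcoef and from networkx import Graph are in scope, as the snippet presumes): only the perfectly correlated pair survives a 0.99 threshold.

import numpy as np

seqs = [('s1', np.array([1, 2, 3])),
        ('s2', np.array([2, 4, 6])),   # exactly 2x 's1', so corrcoef == 1.0
        ('s3', np.array([3, 1, 2]))]
g = build_cluster_graph(seqs, 0.99)
print(sorted(g.edges()))  # [('s1', 's2')]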
Example #43
	def merge_slices_to_events(self, current_slices):
		"""
		Merges DBSCAN-generated event slices with previously found events.
		A bimodal network is used to find connections between events and slices;
		slices are then merged into existing events or turned into new ones,
		and merged-away events are deleted.

		Args:
			current_slices (Dict(List[Dict])): output of self.current_datapoints_dbscan method. Every item of dict is a slice cluster: list with dicts of messages from that cluster.
		"""
		slices_ids = set(current_slices.keys())
		events_ids = set(self.events.keys())
		edges = []
		for slice_id, event_slice in current_slices.items():
			slice_ids = {x['id'] for x in event_slice}
			for event in self.events.values():
				if event.is_successor(slice_ids):
					edges.append((slice_id, event.id))
		G = Graph()
		G.add_nodes_from(slices_ids.union(events_ids))
		G.add_edges_from(edges)
		events_to_delete = []
		for cluster in [x for x in connected_components(G) if x.intersection(slices_ids)]:
			unify_slices = cluster.intersection(slices_ids)
			unify_events = list(cluster.intersection(events_ids))
			meta_slice = [msg for i in unify_slices for msg in current_slices[i]]
			if not unify_events:
				new_event = Event(self.mysql, self.redis, self.tokenizer, self.morph, self.classifier, meta_slice)
				self.events[new_event.id] = new_event
			elif len(unify_events) == 1 and len(unify_slices) == 1 and set(self.events[unify_events[0]].messages.keys()) == {x['id'] for x in meta_slice}:
				continue
			else:
				if len(unify_events) > 1:
					for ancestor in unify_events[1:]:
						self.events[unify_events[0]].merge(self.events[ancestor])
						events_to_delete.append(ancestor)
				self.events[unify_events[0]].add_slice(meta_slice)
		for event in events_to_delete:
			del self.events[event]
			self.redis.delete("event:{}".format(event))
Example #44
File: easy.py  Project: tansey/gfl
def solve_gfl(data, edges, weights=None,
              minlam=0.2, maxlam=1000.0, numlam=30,
              alpha=0.2, inflate=2., converge=1e-6,
              maxsteps=1000000, lam=None, verbose=0):
    '''A very easy-to-use version of GFL solver that just requires the data and
    the edges.'''
    if verbose:
        print('Decomposing graph into trails')

    ########### Setup the graph
    g = Graph()
    g.add_edges_from(edges)
    chains = decompose_graph(g, heuristic='greedy')
    ntrails, trails, breakpoints, edges = chains_to_trails(chains)

    if verbose:
        print('Setting up trail solver')

    ########### Setup the solver
    solver = TrailSolver(alpha, inflate, maxsteps, converge)

    # Set the data and pre-cache any necessary structures
    solver.set_data(data, edges, ntrails, trails, breakpoints, weights=weights)

    if verbose:
        print('Solving')

    ########### Run the solver
    if lam:
        # Fixed lambda
        beta = solver.solve(lam)
    else:
        # Grid search to find the best lambda
        beta = solver.solution_path(minlam, maxlam, numlam, verbose=max(0, verbose-1))['best']
    
    return beta
Example #45
def _update_fit_group_with_groupwise_dist(dist_metrics,
                                          fit_group, fit_pvals,
                                          user_ids, user_profiles, user_connections,
                                          ks_alpha=0.05):
    """ return fit_group, fit_pvals, unfit_group by updating members in fit_group
    with distance metrics unfit member will be sent to unfit group.
    (fit_group, fit_pvals, unfit_group)

    Parameters:
    ----------
    dist_metrics: dictionary

    fit_group: dictionary

    fit_pvals: dictionary

    user_profiles: matrix-like (numpy.array)

    user_graph:

    ks_alpha: float, default value = 0.05

    Returns:
    -------
    fit_group, fit_pvals, unfit_group
    """

    fit_group = _convert_array_to_list(fit_group)
    fit_pvals = _convert_array_to_list(fit_pvals)

    user_graph = Graph()
    user_graph.add_edges_from(user_connections)

    # create container
    fit_group_copy = fit_group.copy()
    unfit_group = {}

    for gg, gg_user_ids in fit_group_copy.items():
        gg_dist = dist_metrics[gg]

        for ii, ii_user_id in enumerate(gg_user_ids):
            sim_dist, diff_dist = user_grouped_dist(ii_user_id, gg_dist, user_ids, user_profiles,  user_graph)
            ii_pval = user_dist_kstest(sim_dist, diff_dist)

            if ii_pval < ks_alpha:
                # remove the user from fit group; retrieve [0] to ensure the slice is an integer
                idx = [idx for idx, uid in enumerate(fit_group[gg]) if uid == ii_user_id][0]
                # fit_group[gg].remove(idx)
                del fit_group[gg][idx]
                # fit_pvals[gg].remove(idx)
                del fit_pvals[gg][idx]
                # add the user into unfit group
                if gg in unfit_group:
                    unfit_group[gg].append(ii_user_id)
                else:
                    unfit_group[gg] = [ii_user_id]
            else:
                # update pvalue for user; retrieve [0] to ensure the slice is an integer
                idx = [idx for idx, uid in enumerate(fit_group[gg]) if uid == ii_user_id][0]
                fit_pvals[gg][idx] = ii_pval

    return fit_group, fit_pvals, unfit_group
Example #46
	def getGraph(self):
		"""Return a networkx graph for the trees"""
		graph = Graph()
		graph.add_edges_from(self.root._evalEdges())
		return graph
Example #47
# #add edges
# G.add_edge('A','B')
# G.add_edges_from([('B','C'), ('B','D'), ('D','A')])
# G.add_edges_from([('C','E'), ('A','E')]) #creates node E

###################example2
import networkx as nx
from networkx import Graph
import matplotlib.pyplot as plt

H = Graph()

H.add_edges_from([
   ('Alice',"Bob"), ('Alice','Charles'), 
   ('Bob', 'Gertrude'), ('Charles','Debbie'), 
   ('Charles', 'Gertrude'), ('Debbie','Edward'),
   ('Debbie','Gertrude'),('Edward','Gertrude'),
   ('Edward','Gertrude'),('Gertrude','Herbert'),
   ('Herbert','Fred')
])

# print("Nodes:")
# for node in H.nodes_iter(): #iterable of nodes - iterate directly (no need to convert to a list first)
#    print(node)

# print("\nEdges:")
# for edge in H.edges_iter():
#    print(edge)

nx.draw(H,pos=nx.spring_layout(H),node_color="pink",edge_color='#89cff0',with_labels=True, node_size=2500, node_shape="o", font_family="verdana", font_size=10, font_color='#3F3F3F', width=2.0)
# plt.show()
Example #48
    ips = {}
    # filter all relays in this consensus to those that
    # have a descriptor, are running, and are fast
    for relay in consensus.relays:
        if (relay in descriptors):
            sd = descriptors[relay] # server descriptor
            rse = consensus.relays[relay] # router status entry
            if "Running" in rse.flags and "Fast" in rse.flags:
                if relay not in ips: ips[relay] = []
                ips[relay].append(sd.address)
    # build edges between every relay that could have been
    # selected in a path together
    for r1 in ips:
        for r2 in ips:
            if r1 is r2: continue
            g.add_edges_from(product(ips[r1], ips[r2]))                    
    nsf_i += 1
    # check if we should do a checkpoint and save our progress
    if nsf_i == nsf_len or "01-00-00-00" in fname:
        chkpntstart = fname[0:10]
        with open("relaypairs.{0}--{1}.json".format(chkpntstart, chkpntend), 'wb') as f: json.dump(g.edges(), f)

print ""
print('Num addresses: {0}'.format(g.number_of_nodes()))
print('Num unique pairs: {0}'.format(g.number_of_edges()))

# write final graph to disk
with open(out_file, 'w') as f:
    json.dump(list(g.edges()), f)
Example #49

class FrameworkFeatureAnalyzer(object):
    """ A class to do feature location analyses on a project written in a specific framework

        Project Graph Details:
        -----------------------
        Node Groups:
            1: Android package
            2: -
            3: Android imported identifier
            4: Java class
            5: Java method
            6: XML file Category
            7: XML file

        Edge Groups:
            1: internal/hierarchical links
            2: Java---Android mappings
            3: Java---XML mappings
    """

    def __init__(self, framework, project):
        """
            :param inspector.models.base.Project project: the project to be analyzed
        """
        self.project = project

        self.framework_namespace = str(framework)
        self.graph = Graph()
        self.graph.add_node(self.framework_namespace)
        self.import_usages = []

    def add_source_file(self, source_file):
        """
            :param inspector.models.base.SourceFile source_file: the file
        """
        self.analyze_framework_imports(source_file)
        self.analyze_source(source_file)

    def analyze_framework_imports(self, source_file):
        """
            :param inspector.models.base.SourceFile source_file: the file
        """
        for im in source_file.imports:
            if im.import_str.startswith(self.framework_namespace):
                self.import_usages.append((im, im.find_usages()))

                components = im.import_str.split('.')

                data = {'group': 1}
                if re.match(r'^[A-Z]+(_[A-Z]+)*$', components[-1]):
                    data['group'] = 3

                last = None
                for i in range(len(components)):
                    cn = '.'.join(components[:i + 1])
                    self.graph.add_node(cn, **data)
                    if last:
                        self.graph.add_edge(last, cn, weight=1, group=1)
                    last = cn
                if last:
                    data['group'] = 3
                    self.graph.add_node(last, **data)

    def analyze_source(self, source_file):
        """
            :param inspector.models.base.SourceFile source_file: the file
        """
        for cl in source_file.classes:
            self.graph.add_node(cl.name, group=4)
            for fu in cl.methods:
                # print '[{0}-{1}]'.format(fu.starting_line, fu.ending_line), re.sub('\s*\n\s*', ' ', unicode(fu))
                fn = fu.qualified_name
                self.graph.add_node(fn, group=5)
                self.graph.add_edge(cl.name, fn, weight=1, group=1)
                for im, usages in self.import_usages:
                    w = 0
                    for ln in usages:
                        if fu.starting_line <= ln <= fu.ending_line:
                            w += 1
                    if w:
                        self.graph.add_edge(im.import_str, fn, weight=w, group=2)

    def add_xml_files(self):
        xml_sub_groups = {':layout', ':values', ':drawable', ':menu', ':xml', ':color'}
        self.graph.add_nodes_from([':XML'] + list(xml_sub_groups), group=6)
        self.graph.add_edges_from([(':XML', g) for g in xml_sub_groups], weight=1, group=1)
        for path in self.project.filter_files(extension='xml'):
            xml_file = self.project.get_file(path)

            if path.startswith('app/res/'):
                g = path.split('/')[2]
                name = '/'.join(path.split('/')[2:])
                self.graph.add_node(name, group=7)
            else:
                if not path.split('/')[-1] in ['pom.xml', 'AndroidManifest.xml']:  # is ignored?
                    print('invalid path:', path)
                continue

            valid_group = False
            if g == 'values':
                g = 'values-default'
            if g.startswith('values-'):
                g = g[7:]
                self.graph.add_edge(':values', ':' + g, weight=1, group=1)
                valid_group = True
            g = ':' + g
            if valid_group or g in xml_sub_groups:
                self.graph.add_edge(g, name, weight=1, group=1)
            else:
                print('invalid subgroup:', g)
Example #50
def multigraph_to_graph(g: MultiGraph) -> Graph:
    gx = Graph()
    gt = Graph(g)
    gx.add_nodes_from(gt.nodes())
    gx.add_edges_from(gt.edges())
    return gx
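A one-line check that parallel edges collapse in the conversion (assumes from networkx import Graph, MultiGraph):

mg = MultiGraph([(0, 1), (0, 1), (1, 2)])
print(multigraph_to_graph(mg).number_of_edges())  # 2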
Example #51
def to_graph(l):
    G = Graph()
    for clique in l:
        G.add_nodes_from(clique)
        G.add_edges_from(to_edges(clique))
    return G
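Examples #17 and #51 call a to_edges helper that is not shown; a common implementation (an assumption here, not the original authors' code) treats each clique as a path and yields its consecutive pairs, which is enough to make the clique one connected component:

def to_edges(l):
    """Assumed helper: to_edges(['a', 'b', 'c']) yields ('a', 'b'), ('b', 'c')."""
    it = iter(l)
    last = next(it)
    for current in it:
        yield last, current
        last = current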