Code Example #1
File: main.py Project: qiqiisprincess/python_exam
 def __init__(self, gr):
     if isinstance(gr, ScienceGraph):
         self._gen = nx.enumerate_all_cliques(gr.gr)
     elif isinstance(gr, nx.Graph):
         self._gen = nx.enumerate_all_cliques(gr)
     else:
         self._gen = None
Code Example #2
def clique_percolation(k: int, g: nx.Graph):
    """
    Get the communities of a given graph using clique percolation.
    this is brute-force function. if we add a dictionary that maps any node with its cliques, and then go only
    over cliques with at least one interaction, it could be more efficient
    :param k: number of node of each clique
    :param g: networkx graph
    :return: list of communities, sorted by size (increase order)
    """
    cliques = [
        frozenset(clq) for clq in nx.enumerate_all_cliques(g) if len(clq) == k
    ]

    graph_of_cliques = nx.Graph()
    for clq in cliques:
        is_isolate = True  # this clique is not connected to any other clique
        for other_clq in cliques:
            if clq is other_clq:
                continue
            if len(clq.intersection(
                    other_clq)) >= k - 1 and not graph_of_cliques.has_edge(
                        clq, other_clq):
                is_isolate = False
                graph_of_cliques.add_edge(clq, other_clq)
        if is_isolate:
            graph_of_cliques.add_node(clq)

    # create communities:
    communities = []
    for component in nx.connected_components(graph_of_cliques):
        communities.append(sorted(frozenset.union(*component)))

    return sorted(communities, key=len)
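Note: for k-clique percolation, networkx ships a built-in helper that avoids hand-rolling the clique graph. A minimal sketch, assuming a networkx 2.x install where it lives under networkx.algorithms.community:

import networkx as nx
from networkx.algorithms.community import k_clique_communities

G = nx.complete_graph(5)
G.add_edges_from([(4, 5), (5, 6), (6, 4)])  # attach a triangle sharing node 4
# triangles percolate when they share k - 1 = 2 nodes, splitting the graph
# into the K5 community and the pendant triangle
print([sorted(c) for c in k_clique_communities(G, 3)])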
Code Example #3
def three(G):
    edge_list = []
    node_list = []
    for cycle in nx.enumerate_all_cliques(G):
        if len(cycle) == 3:
            for n in cycle:
                node_list.append(n)
            temp_co_list = combinations(cycle, 2)  # add all author pairs in this triangle
            for temp in temp_co_list:
                edge_list.append(temp)

    print('first step')

    edge_set = set(edge_list)
    with open('b3_edge_result.csv', 'w', newline='') as csvFile:
        csv_writer = csv.writer(csvFile)
        for a in G.edges():
            if a in edge_set:
                w = 1 + float(edge_list.count(a)) / 3
                csv_writer.writerow([a[0], a[1], w])
            else:
                csv_writer.writerow([a[0], a[1], 1])
    print('edge step')

    node_set = set(node_list)
    with open('b3_node_result.csv', 'w', newline='') as csvFile:
        csv_writer = csv.writer(csvFile)
        for a in G.nodes():
            if a in node_set:
                w = 1 + float(node_list.count(a)) / 3
                csv_writer.writerow([a, w])
            else:
                csv_writer.writerow([a, 1])
    print('node step')
Code Example #4
    def generate_largest_cliques_immunization_array(self, Q=1):
        """
        Generates an immunization array consisting of the Q unique individuals
        in the largest cliques.

        Parameters
        ----------
        Q : `int`
            Number of individuals to immunize; defaults to 1

        Returns
        -------
        Im : `numpy.ndarray`
            an (n, 1) array with 1 at indices to be immunized and 0 elsewhere

        Raises
        ------
        ValueError : if no cliques are found.
        """
        Im = np.zeros(self.network.n)
        cliques = list(nx.enumerate_all_cliques(self.network.G))
        if not cliques:
            raise ValueError("No cliques found.")

        Im_cliques = []
        while len(set(Im_cliques)) < Q and cliques:
            # cliques are yielded in nondecreasing size, so the largest
            # cliques sit at the end of the list
            Im_cliques += cliques[-1]
            cliques = cliques[:-1]
        Im_cliques = list(set(Im_cliques))[:Q]

        for i in Im_cliques:
            Im[i] = 1

        return Im.reshape(self.network.n, 1)
Code Example #5
File: gng_neupy_run.py Project: manishsaroya/GNG
def get_important_regions(g, regions):
    nxgraph = nx.Graph()
    nodeid = {}
    for indx, node in enumerate(g.graph.nodes):
        nodeid[node] = indx
        nxgraph.add_node(nodeid[node],
                         pos=(node.weight[0][0], node.weight[0][1]))
    for node_1, node_2 in g.graph.edges:
        nxgraph.add_edge(nodeid[node_1], nodeid[node_2])

    for clique in nx.enumerate_all_cliques(nxgraph):
        print(clique)

    print(regions)
    fig = plt.figure(figsize=(40 / 4, 35 / 4))
    plt.axis("equal")
    #plt.colorbar(fraction= 0.047, pad=0.02)
    position = nx.get_node_attributes(nxgraph, 'pos')
    if nxgraph is not None:
        for clique in nx.find_cliques(nxgraph):
            if len(clique) > 2:
                for i in range(len(clique)):
                    weights = np.concatenate([[position[clique[i]]],
                                              [position[clique[i - 1]]]])
                    line, = plt.plot(*weights.T, color='lightsteelblue')
                    plt.setp(line, linewidth=2, color='lightsteelblue')
    plt.show()
Code Example #6
File: formGraph.py Project: SiddharthChoudhary/cs581
def formGraph(filename):
    graph = networkx.Graph()
    self_loops=0
    trust=0
    distrust=0
    #let's open the csv file and read line by line
    with open(filename,"r") as file:
        for row in file:
            rowelements = row.split(",")
            reviewer = rowelements[0]
            reviewee = rowelements[1]
            weight = int(rowelements[2])
            if int(reviewer) == int(reviewee):
                self_loops += 1
            elif weight == 1:
                trust += 1
            elif weight == -1:
                distrust += 1
            graph.add_edge(reviewer, reviewee, weight=weight)
    # After counting the graph's positive and negative edges, it is time to
    # find the triads in the graph
    edges = networkx.get_edge_attributes(graph,"weight")
    triads=[]
    for triad in networkx.enumerate_all_cliques(graph):
        if len(triad)==3:
            triads.append(triad)
    # at this point we have all the triads in the data
    triads_with_weights = calculatetriadsWithWeight(triads,edges)
    return triads_with_weights,self_loops,trust,distrust,graph,triads
Code Example #7
def ComposeSegSets(BigSet2, segmentpool, n):
    candidates = [
        segment.id for segment in segmentpool if segment.status == False
    ]
    bs2 = copy.deepcopy(BigSet2)
    for pair in BigSet2:
        seg1id = pair['seg1id']
        seg2id = pair['seg2id']
        if seg1id not in candidates or seg2id not in candidates:
            bs2.remove(pair)
    edges = [(pair['seg1id'], pair['seg2id'], pair['WAS']) for pair in bs2]
    G = nx.Graph()
    G.add_weighted_edges_from(edges)
    completed_graphs = [s for s in nx.enumerate_all_cliques(G) if len(s) == n]
    graphs = [G.subgraph(g) for g in completed_graphs]
    seg_ids = ['seg%did' % (p + 1) for p in range(n)]
    results = []
    for graph in graphs:
        nodes = list(graph.nodes())  # NodeView is not indexable; materialize it
        res = {}
        for idx, seg_id in enumerate(seg_ids):
            res[seg_id] = nodes[idx]
        if graph.size() != 0:
            res['WAS'] = graph.size(weight='weight') / 2
            results.append(res)
    return results
Code Example #8
    def _create_cliques_dict(self):

        """
        Collect all cliques of a graph and create dictionary with all simplices
        """

        # Index for edges and nodes, arbitrary id to edges
        sim_complex = dict()

        sim_complex[1] = dict(zip(np.arange(
            nx.number_of_edges(self.graph)), self.graph.edges))
        sim_complex[0] = dict(zip(self.graph.nodes, self.graph.nodes))

        # Dictionary keyed by simplex order; each value maps an arbitrary id
        # to the tuple of node ids forming the simplex
        cliques = list(nx.enumerate_all_cliques(self.graph))

        # cliques are yielded in nondecreasing size, so the nodes and edges
        # indexed above occupy the first n + m positions of the list
        for clq in cliques[nx.number_of_nodes(self.graph) +
                           nx.number_of_edges(self.graph):]:
            order = len(clq) - 1
            simplices = sim_complex.setdefault(order, dict())
            simplices[len(simplices)] = tuple(clq)

        self.complex_dict = sim_complex
Code Example #9
File: epinion_analyzer.py Project: Ajohnson18/CS581
def findTriangles(G):
    count = 0
    cliques = nx.enumerate_all_cliques(G)  # find all the cliques in graph G
    for c in cliques:
        if len(c) == 3:  # if the clique is of size three, increase the counter
            count += 1
    return count
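As an aside, when only the triangle count is needed, nx.triangles avoids enumerating every clique; a minimal equivalent sketch:

import networkx as nx

def count_triangles(G):
    # nx.triangles returns, per node, the number of triangles through it;
    # each triangle is counted once at each of its three vertices
    return sum(nx.triangles(G).values()) // 3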
Code Example #10
def from_noasig_to_largest_clique_1(noasig_data_input, distance_input):
    "NOASIG_DATA -> LARGEST_CLIQUE | FINDS THE LARGEST CLIQUE IN NOSASIG DATA"
    noasig_data = noasig_data_input
    clique_data = noasig_data[noasig_data['value'] < distance_input].dropna()
    G = nx.from_pandas_edgelist(clique_data, 'Source', 'Target')
    largest_clique = max(nx.enumerate_all_cliques(G), key=len)
    return largest_clique, clique_data
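Since the largest clique is always a maximal clique, the same result can be had from nx.find_cliques, which yields only maximal cliques and is typically much cheaper than a full enumeration; a minimal sketch:

import networkx as nx

def largest_clique(G):
    # every largest clique is maximal, so scanning maximal cliques suffices
    return max(nx.find_cliques(G), key=len)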
Code Example #11
def make_seating(agreement_graph, chaotic=True):

    num_people = len(agreement_graph.nodes)
    if not chaotic:
        agreement_graph = nx.complement(agreement_graph)
    # Make the agreement graph from people and edges (agreements)
    # Make the table graph with adjacency for neighboring seats
    # Also generate the positions of seats for plotting
    table, seat_positions = make_longtable_graph(num_people)
    # For every person, list their cliques
    clique_dict = {}
    #clique_list = list(nx.find_cliques(agreement_graph))
    # List every clique for every person
    clique_list = list(nx.enumerate_all_cliques(agreement_graph))
    for person in agreement_graph.nodes():
        clique_dict[person] = []
        for clique in [
                c for c in clique_list if str(person) in c and len(c) > 1
        ]:
            clique_dict[person].append(clique)
    # print(clique_dict)
    mapping = {}  # Dictionary of seat number to person sitting in it
    # For every seat
    for seat in table.nodes():
        # Give people scores
        scores = {}
        # For every neighbor
        for person in [
                p for p in agreement_graph.nodes()
                if p not in mapping.values()
        ]:
            for neighbor in nx.neighbors(table, seat):
                for clique in clique_dict[person]:
                    # "Punish" candidates who are in cliques with neighbors
                    if mapping.get(neighbor, None) in clique:
                        scores[person] = scores.get(person, 0) - 1
                    scores[person] = scores.get(person, 0)
        # Seat unseated person with best score here
        print("{}: {}\n".format(
            seat, sorted(scores.items(), key=lambda x: x[1], reverse=True)))
        for person in sorted(scores.items(), key=lambda x: x[1], reverse=True):
            if person[0] not in mapping.values():
                mapping[seat] = person[0]
                break

    print(mapping)
    # Apply the calculated seating arrangement
    T = nx.relabel_nodes(table, mapping)
    pos = {mapping[k]: v for k, v in seat_positions.items()}
    # Plot the seating arrangement
    fig = Figure()
    output = io.BytesIO()
    axis = fig.add_subplot(1, 1, 1)
    axis.set_xlim(-0.1, num_people / 2 + 1)
    axis.set_ylim(-0.1, 0.6)
    nx.draw_networkx(T, pos=pos, with_labels=True, ax=axis)

    # Return an image to the flask application
    FigureCanvas(fig).print_png(output)
    return b64encode(output.getvalue()).decode("utf-8")
Code Example #12
def from_clique_dist_to_largest_clique_3(genome_i, asig_dist_matrix_3,
                                         distance3):
    # the value could be passed in directly instead of re-reading the CSV
    tabla_asig = pd.read_csv("tabla_asig.csv")
    value_prev_clique_T1 = tabla_asig.loc[tabla_asig['Genome'] ==
                                          genome_i].values[0, 1]
    value_prev_clique_T2 = tabla_asig.loc[tabla_asig['Genome'] ==
                                          genome_i].values[0, 2]
    #value_prev_clique = tabla_asig.loc[tabla_asig['Genome'] == genome_i].values[0, 2]
    clique_prev = tabla_asig.loc[(tabla_asig['T1'] == value_prev_clique_T1)
                                 & (tabla_asig['T2'] == value_prev_clique_T2)]
    #clique_prev = tabla_asig.loc[(tabla_asig['T2'] == value_prev_clique)]
    clique_prev_list = clique_prev['Genome'].tolist()

    #distance dataframe of clique T1 elements
    asig_1 = asig_dist_matrix_3[asig_dist_matrix_3['Target'].isin(
        clique_prev_list)]
    asig_2 = asig_1[asig_1['Source'].isin(clique_prev_list)]
    asig_4 = asig_2.drop_duplicates()

    clique_dataframe = asig_4[asig_4['value'] < distance3].dropna()
    G = nx.from_pandas_edgelist(clique_dataframe, 'Source', 'Target')
    largest_clique = max(nx.enumerate_all_cliques(G), key=len)

    return largest_clique, clique_dataframe
Code Example #13
def find_special_motif(G, modelNumOfNeighbors, modelNeighborsLists):
    i = 0
    for clique in nx.enumerate_all_cliques(G):
        if len(clique) == 3:
            i += len([
                neigh for neigh in modelNeighborsLists[clique[0]]
                if ((neigh not in clique) and (
                    neigh in modelNeighborsLists[clique[1]]) and (
                        neigh not in modelNeighborsLists[clique[2]]))
            ])
            i += len([
                neigh for neigh in modelNeighborsLists[clique[0]]
                if ((neigh not in clique) and (
                    neigh in modelNeighborsLists[clique[2]]) and (
                        neigh not in modelNeighborsLists[clique[1]]))
            ])
            i += len([
                neigh for neigh in modelNeighborsLists[clique[1]]
                if ((neigh not in clique) and (
                    neigh in modelNeighborsLists[clique[2]]) and (
                        neigh not in modelNeighborsLists[clique[0]]))
            ])
        if len(clique) > 3:
            break
    return round(i / 2)
Code Example #14
File: test_clique.py Project: ArtShp/DataScience
    def test_paper_figure_4(self):
        # Same graph as given in Fig. 4 of paper enumerate_all_cliques is
        # based on.
        # http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129
        G = nx.Graph()
        edges_fig_4 = [('a', 'b'), ('a', 'c'), ('a', 'd'), ('a', 'e'),
                       ('b', 'c'), ('b', 'd'), ('b', 'e'), ('c', 'd'),
                       ('c', 'e'), ('d', 'e'), ('f', 'b'), ('f', 'c'),
                       ('f', 'g'), ('g', 'f'), ('g', 'c'), ('g', 'd'),
                       ('g', 'e')]
        G.add_edges_from(edges_fig_4)

        cliques = list(nx.enumerate_all_cliques(G))
        clique_sizes = list(map(len, cliques))
        assert sorted(clique_sizes) == clique_sizes

        expected_cliques = [['a'], ['b'], ['c'], ['d'], ['e'], ['f'], ['g'],
                            ['a', 'b'], ['a', 'b', 'd'], ['a', 'b', 'd', 'e'],
                            ['a', 'b', 'e'], ['a', 'c'], ['a', 'c', 'd'],
                            ['a', 'c', 'd', 'e'], ['a', 'c', 'e'], ['a', 'd'],
                            ['a', 'd', 'e'], ['a', 'e'], ['b', 'c'],
                            ['b', 'c', 'd'], ['b', 'c', 'd', 'e'],
                            ['b', 'c', 'e'], ['b', 'c', 'f'], ['b', 'd'],
                            ['b', 'd', 'e'], ['b', 'e'],
                            ['b', 'f'], ['c', 'd'], ['c', 'd', 'e'],
                            ['c', 'd', 'e', 'g'], ['c', 'd', 'g'], ['c', 'e'],
                            ['c', 'e', 'g'], ['c', 'f'], ['c', 'f', 'g'],
                            ['c', 'g'], ['d', 'e'], ['d', 'e',
                                                     'g'], ['d', 'g'],
                            ['e', 'g'], ['f', 'g'], ['a', 'b', 'c'],
                            ['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd', 'e'],
                            ['a', 'b', 'c', 'e']]

        assert (sorted(map(sorted,
                           cliques)) == sorted(map(sorted, expected_cliques)))
Code Example #15
def find_cliques_size_k(G, k):
    i = 0
    for clique in nx.enumerate_all_cliques(G):
        if len(clique) == k:
            i += 1
        if len(clique) > k:
            # cliques are yielded in nondecreasing size, so stop early
            break
    return i
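A quick usage check, using the fact that the complete graph K5 contains C(5, 3) = 10 triangles:

import networkx as nx

G = nx.complete_graph(5)
print(find_cliques_size_k(G, 3))  # 10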
Code Example #16
def dong_matching(graph):
    matched = []
    vertices = list(graph.nodes())
    all_completes = list(nx.enumerate_all_cliques(graph))
    all_completes.append([])
    all_completes = set([frozenset(c) for c in all_completes])
    for vertex in vertices:
        neigh = open_neighborhood(graph, vertex)
        completes = list(nx.enumerate_all_cliques(neigh))
        completes.append([])
        for complete in completes:
            complete = frozenset(complete)
            if (complete not in matched) \
               and ((complete | {vertex}) not in matched):
                matched.append(complete)
                matched.append(complete | {vertex})
    return all_completes - set(matched)
Code Example #17
def enumerate_all_cliques_size_k(G, k): #counting all the cliques in G of size k
    i = 0
    for clique in nx.enumerate_all_cliques(G):
        if len(clique) == k:
            i += 1
        elif len(clique) > k:
            return i
    return i
Code Example #18
 def valid_temporal_relations(self, new_edge, model):
     # NOTE: this early return disables the triangle check below,
     # leaving the rest of the function unreachable
     return True
     edges = list(model.edges()) + [new_edge]
     triangles = [
         tri for tri in nx.enumerate_all_cliques(nx.Graph(edges))
         if len(tri) == 3
     ]
     if len(triangles) < 1:
         return True
     return False
Code Example #19
 def process(self, net, date):
     u = net.to_undirected()
     cliques = list(nx.enumerate_all_cliques(u))
     if not cliques:
         max_c = 0
     else:
         max_c = len(max(cliques, key=len))
     print('number of cliques:', len(cliques))
     print('max clique size: ', max_c)
     self.cl_list.append([date, max_c])
Code Example #20
    def test_enumerate_all_cliques(self):
        # G = nx.Graph()
        # elist = [(1, 2), (2, 3), (1, 4), (4, 2)]
        # G.add_edges_from(elist)

        G = nx.Graph()
        G.add_edges_from(combinations(range(0, 5), 2))  # Add a five clique
        G.add_edges_from(combinations(range(5, 10), 2))  # Add another five clique

        res = nx.enumerate_all_cliques(G)
Code Example #21
File: main.py Project: CodLiver/Miscellaneous
def clique(G, fulRes):
    print()
    print("solving clique problem")
    starting = time.time()
    cliques = [set(s) for s in nx.enumerate_all_cliques(G)]
    totsubset = set(
        frozenset(list(G.neighbors(each)) + [each])
        for each in range(0, len(G)))

    initVarList = LpVariable.dicts("cliques", range(len(cliques)), lowBound=0)

    assignment = {}
    ptr = 0
    varSet = set([])
    for each in totsubset:
        subassignment = []
        subset = set([])
        for eacher in range(len(cliques) - 1, -1, -1):
            if cliques[eacher].issubset(
                    each) and not cliques[eacher].issubset(subset):
                subassignment.append(initVarList[eacher])
                varSet.add(initVarList[eacher])
                subset |= cliques[eacher]
                if subset == each:
                    break
        if not subassignment == []:
            assignment[ptr] = subassignment
            ptr += 1

    prob = LpProblem("minimum clique cover number", LpMinimize)
    prob += lpSum(varSet), "objective func"
    prob += lpSum(varSet) >= 1, "objective const"
    for eachSet in varSet:
        prob += eachSet >= 0
    for eachNode in assignment:
        prob += lpSum(assignment[eachNode]) >= 1

    status = prob.solve()
    print()
    print("status:", LpStatus[prob.status])

    resultLP = lpSum(varSet).value()

    if fulRes == "Y":
        for variable in prob.variables():
            print("{} = {}".format(
                list(cliques[int(variable.name[8:])]),
                Fraction(variable.varValue).limit_denominator(100)))

    print()
    print("result: ", Fraction(resultLP).limit_denominator(100))
    print()
    print("finished in", round(time.time() - starting, 3), "secs")

    return resultLP
Code Example #22
File: ppiNetwork.py Project: aidy80/ppiNetwork
    def cliqueFinding(self):
        with open('triangles.dat', 'w') as file:
            numTri = 0
            for clique in nx.enumerate_all_cliques(self.ppi):
                if (len(clique) == 3):
                    numTri += 1
                    file.write(str(clique) + "\n")

                if (len(clique) == 4):
                    break

            print "numTri = " + str(numTri)
Code Example #23
def create_simplex_from_graph(G):
    st = SimplexTree()
    node_values = nx.get_node_attributes(G, "intensity")
    print("node intensities", node_values)
    for clique in nx.enumerate_all_cliques(G):
        clique_value = node_values[clique[0]]
        for n in clique:
            # take max values
            if clique_value < node_values[n]:
                clique_value = node_values[n]
        st.insert(clique, clique_value)
    return st
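A hypothetical driver for the function above, assuming SimplexTree comes from the gudhi package and that every node carries an "intensity" attribute:

import networkx as nx
from gudhi import SimplexTree  # assumed source of SimplexTree

G = nx.complete_graph(3)
nx.set_node_attributes(G, {0: 0.1, 1: 0.5, 2: 0.3}, "intensity")
st = create_simplex_from_graph(G)
# each simplex takes the max intensity of its vertices as filtration value
print(st.num_simplices())  # 7: three vertices, three edges, one triangle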
Code Example #24
def get_cliques_by_length(G, length_clique):
    """ Return the list of all cliques in an undirected graph G with length
    equal to length_clique. """
    cliques = []
    for c in nx.enumerate_all_cliques(G):
        if len(c) <= length_clique:
            if len(c) == length_clique:
                cliques.append(c)
        else:
            return cliques
    # return empty list if nothing is found
    return cliques
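A quick usage example: in the complete graph K4 every pair of nodes is a 2-clique, so asking for length 2 returns all six edges:

import networkx as nx

G = nx.complete_graph(4)
print(get_cliques_by_length(G, 2))  # the 6 node pairs of K4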
Code Example #25
def get_gt_atnn_triangles(args):
    G, N = args
    node_ids = []
    if G is not None:
        for clq in nx.enumerate_all_cliques(G):
            if len(clq) == 3:
                node_ids.extend(clq)
    node_ids = np.array(node_ids)
    gt_attn = np.zeros((N, 1), np.int32)
    for i in np.unique(node_ids):
        gt_attn[i] = int(np.sum(node_ids == i))
    return gt_attn  # unnormalized (do not sum to 1, i.e. use int32 for storage efficiency)
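For a quick check of the (G, N) calling convention above: in K4 every node lies on three of the four triangles:

import networkx as nx
import numpy as np

G = nx.complete_graph(4)
gt = get_gt_atnn_triangles((G, G.number_of_nodes()))
print(gt.ravel())  # [3 3 3 3]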
Code Example #26
def find_triads(graph):
    trust_graph = graph.to_undirected()
    list_cliques = nx.enumerate_all_cliques(trust_graph)



    triad_cliques = [triad for triad in list_cliques if len(triad) == 3]

    small_graph = nx.DiGraph()

    for triads in triad_cliques:
        # Find all the triads that violate status theory

        if not trust_graph.has_edge(triads[0], triads[1]):
            print("True")

        if not trust_graph.has_edge(triads[1], triads[2]):
            print("False")

        if not trust_graph.has_edge(triads[2], triads[0]):
            print("False")

        # Change the direction of the edges and add in another graph

        edge01 = trust_graph.get_edge_data(triads[0], triads[1])
        edge12 = trust_graph.get_edge_data(triads[1], triads[2])
        edge20 = trust_graph.get_edge_data(triads[2], triads[0])



        if edge01["weight"] == -1:
            small_graph.add_edge(triads[1], triads[0], weight=1)
        else:
            small_graph.add_edge(triads[0], triads[1], weight=1)

        if edge12["weight"] == -1:
            small_graph.add_edge(triads[2], triads[1], weight=1)
        else:
            small_graph.add_edge(triads[1], triads[2], weight=1)

        if edge20["weight"] == -1:
            small_graph.add_edge(triads[0], triads[2], weight=1)
        else:
            small_graph.add_edge(triads[2], triads[0], weight=1)

        try:
            cycle = nx.find_cycle(small_graph)
        except nx.NetworkXNoCycle:
            # no directed cycle in this triad
            print(triads)

        small_graph.clear()
Code Example #27
 def add_children(self, node):
     st_add = time.time()
     # build a graph containing all of this node's edges
     tempGraph = nx.Graph()
     tempedges = self.PG.G.edges(node.name)
     tempGraph.add_edges_from(tempedges)
     #print(tempGraph.edges)
     enum_cliques = [x for x in nx.enumerate_all_cliques(tempGraph)
                     if len(node.name) + 1 == len(x)]
     #print("Nodes : {0} List :{1}\n".format(node.name,list(enum_cliques)))
     for clique in enum_cliques:
         Node(clique, parent=node)
     dur_ch = time.time() - st_add
     self.add_ch_ph1 += dur_ch
Code Example #28
File: arc.py Project: MarkCBell/curver
 def all_disjoint_multiarcs(self):
     ''' Yield all multiarcs that are disjoint from this one.
     
     Assumes that this multiarc is filling. '''
     
     arcs = list(self.all_disjoint_arcs())  # Checks is filling.
     
     G = networkx.Graph()
     G.add_nodes_from(arcs)
     G.add_edges_from([(a_1, a_2) for a_1, a_2 in combinations(arcs, r=2) if a_1.intersection(a_2) == 0])
     
     for clique in networkx.enumerate_all_cliques(G):
         yield self.triangulation.disjoint_sum(clique)
Code Example #29
def find_k_cliques(G, k):
    # collect the k-cliques directly; the original detour through a ragged
    # np.array fails on modern numpy and the output is the same
    cliques = []
    for c in nx.enumerate_all_cliques(G):
        if len(c) > k:
            # cliques are yielded in nondecreasing size, so stop early
            break
        if len(c) == k:
            cliques.append(tuple(c))
    return tuple(cliques)
Code Example #30
def fractional_clique_cover(G):
    # Create xs (init to 0) for all subsets of vertices
    xs = {}
    for L in range(0, len(G.nodes) + 1):
        for subset in itertools.combinations(G.nodes, L):
            xs[subset] = 0

    # A clique is a complete subgraph; K(G) denotes all cliques in G
    # (enumerate_all_cliques yields singleton nodes but not the empty set)
    K = list(nx.enumerate_all_cliques(G))

    # Formulate as LP problem
    prob = LpProblem("Fractional Clique Cover", LpMinimize)

    # Problem Vars
    lp_xs = LpVariable.dicts("Xs",
                             xs,
                             lowBound=0,
                             upBound=None,
                             cat='Continuous')

    # Add objective function first
    prob += lpSum(lp_xs), "Sum of Xs"

    # Add constraints to problem
    # 1. Xs = 0 iff S not a clique -> redundant (as explained in report)

    # 2. For any vertex v, the sum of the weights of the cliques containing v is >= 1
    for vertex in G.nodes:
        cliques_containing_v = []
        for clique in K:
            if vertex in clique:
                cliques_containing_v.append(clique)
        # cliques_containing_v now holds every clique that
        # contains the current vertex
        prob += lpSum(lp_xs[tuple(x)] for x in cliques_containing_v) >= 1  # 2.

    # Solve
    prob.solve()

    print("Fractional Clique Cover:")
    print("Status -", LpStatus[prob.status])
    total = 0
    for item in lp_xs:
        val = lp_xs[item].varValue
        total += val

    total = Fraction.from_float(total).limit_denominator(100)

    print("Sum of Optimal Xs Vals -", total)
    return total, lp_xs
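A small, hypothetical driver for the function above; the 5-cycle is a handy sanity check because its fractional clique cover number is 5/2 (put weight 1/2 on each edge):

import itertools
from fractions import Fraction

import networkx as nx
from pulp import LpProblem, LpMinimize, LpVariable, lpSum, LpStatus

G = nx.cycle_graph(5)
total, lp_xs = fractional_clique_cover(G)  # expect total == Fraction(5, 2)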
Code Example #31
def using_graph():
    """
    reads files that start with analyse. the number analyse_clique_#.txt will be the number of structs in the clique.
    chooses designs that create as many cliques as possible
    :return:
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-weight_cutoff', type=int)
    parser.add_argument('-maximal', default=False)
    args = vars(parser.parse_args())
    weight_cutoff = args['weight_cutoff']
    if not os.path.isfile('graph_%i.obj' % weight_cutoff):
        # analyse_files = [a for a in os.listdir('./') if 'analyse' in a and '.obj' not in a]
        analyse_files = ['analyse_clique_6.txt']
        analyses, num = {}, 1
        for f in analyse_files:
            parsed = parse_anlyse(f)
            for l in parsed:
                analyses[num] = l
                num += 1
        G = nx.Graph()
        G.add_nodes_from(analyses.keys())

        for n1 in G.nodes():  # nodes_iter() was removed in networkx 2.x
            for n2 in G.nodes():
                if n1 != n2:
                    wt = len(set(analyses[n1]) & set(analyses[n2]))
                    if wt > weight_cutoff:
                        G.add_edge(n1, n2, weight=wt)
        print('finished building graph with %i nodes and %i edges' % (G.number_of_nodes(), G.number_of_edges()))
        with open('graph_%i.obj' % weight_cutoff, 'wb') as out:
            pickle.dump(G, out)
        with open('analyses_%i.obj' % weight_cutoff, 'wb') as out:
            pickle.dump(analyses, out)
    else:
        print('reading graph')
        with open('graph_%i.obj' % weight_cutoff, 'rb') as fin:
            G = pickle.load(fin)
        with open('analyses_%i.obj' % weight_cutoff, 'rb') as fin:
            analyses = pickle.load(fin)
        print('finished reading graph with %i nodes and %i edges' % (G.number_of_nodes(), G.number_of_edges()))

    if args['maximal']:
        if not os.path.isfile('clq_size_maximal_%i.obj' % weight_cutoff):
            clq_size, clq_grade = [], []
            for clq in nx.find_cliques(G):
                clq_size.append(len(clq))
                clq_grade.append(len(set([a for b in clq for a in analyses[b]])))
            with open('clq_size_maximal_%i.obj' % weight_cutoff, 'wb') as fout:
                pickle.dump(clq_size, fout)
            with open('clq_grade_maximal_%i.obj' % weight_cutoff, 'wb') as fout:
                pickle.dump(clq_grade, fout)
        else:
            with open('clq_size_maximal_%i.obj' % weight_cutoff, 'rb') as fin:
                clq_size = pickle.load(fin)
            with open('clq_grade_maximal_%i.obj' % weight_cutoff, 'rb') as fin:
                clq_grade = pickle.load(fin)

    if not os.path.isfile('clq_size_%i.obj' % weight_cutoff):
        clq_size, clq_grade = [], []
        for clq in nx.enumerate_all_cliques(G):
            clq_size.append(len(clq))
            clq_grade.append(len(set([a for b in clq for a in analyses[b]])))
        with open('clq_size_%i.obj' % weight_cutoff, 'wb') as fout:
            pickle.dump(clq_size, fout)
        with open('clq_grade_%i.obj' % weight_cutoff, 'wb') as fout:
            pickle.dump(clq_grade, fout)
    else:
        with open('clq_size_%i.obj' % weight_cutoff, 'rb') as fin:
            clq_size = pickle.load(fin)
        with open('clq_grade_%i.obj' % weight_cutoff, 'rb') as fin:
            clq_grade = pickle.load(fin)
    plt.scatter(clq_grade, clq_size)
    plt.show()
Code Example #32
File: test_clique.py Project: 4c656554/networkx
    def test_paper_figure_4(self):
        # Same graph as given in Fig. 4 of paper enumerate_all_cliques is
        # based on.
        # http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129
        G = nx.Graph()
        edges_fig_4 = [('a', 'b'), ('a', 'c'), ('a', 'd'), ('a', 'e'),
                       ('b', 'c'), ('b', 'd'), ('b', 'e'),
                       ('c', 'd'), ('c', 'e'),
                       ('d', 'e'),
                       ('f', 'b'), ('f', 'c'), ('f', 'g'),
                       ('g', 'f'), ('g', 'c'), ('g', 'd'), ('g', 'e')]
        G.add_edges_from(edges_fig_4)

        cliques = list(nx.enumerate_all_cliques(G))
        clique_sizes = list(map(len, cliques))
        assert_equal(sorted(clique_sizes), clique_sizes)

        expected_cliques = [['a'],
                            ['b'],
                            ['c'],
                            ['d'],
                            ['e'],
                            ['f'],
                            ['g'],
                            ['a', 'b'],
                            ['a', 'b', 'd'],
                            ['a', 'b', 'd', 'e'],
                            ['a', 'b', 'e'],
                            ['a', 'c'],
                            ['a', 'c', 'd'],
                            ['a', 'c', 'd', 'e'],
                            ['a', 'c', 'e'],
                            ['a', 'd'],
                            ['a', 'd', 'e'],
                            ['a', 'e'],
                            ['b', 'c'],
                            ['b', 'c', 'd'],
                            ['b', 'c', 'd', 'e'],
                            ['b', 'c', 'e'],
                            ['b', 'c', 'f'],
                            ['b', 'd'],
                            ['b', 'd', 'e'],
                            ['b', 'e'],
                            ['b', 'f'],
                            ['c', 'd'],
                            ['c', 'd', 'e'],
                            ['c', 'd', 'e', 'g'],
                            ['c', 'd', 'g'],
                            ['c', 'e'],
                            ['c', 'e', 'g'],
                            ['c', 'f'],
                            ['c', 'f', 'g'],
                            ['c', 'g'],
                            ['d', 'e'],
                            ['d', 'e', 'g'],
                            ['d', 'g'],
                            ['e', 'g'],
                            ['f', 'g'],
                            ['a', 'b', 'c'],
                            ['a', 'b', 'c', 'd'],
                            ['a', 'b', 'c', 'd', 'e'],
                            ['a', 'b', 'c', 'e']]

        assert_equal(sorted(map(sorted, cliques)),
                     sorted(map(sorted, expected_cliques)))
Code Example #33
def find_solution_numerical(G, n_elements, n_unused=None, results=None):
    """
    Sort nodes in G into groups of n_elements members such that
    the total sum of weights is maximized.
    If the graph includes hard constraints on the relationship between
    nodes (i.e. missing edges), it is possible that no solution is found.

    In the case of a fully connected graph, the solution will be that
    which maximizes the weights. The weights are inherent attributes of
    the Graph and must be calculated beforehand (see `add_edges` for details).

    Parameters
    ----------
    G : networkx.Graph() instance
        Undirected graph with nodes and edges. The edges must have weights
        between 0 and 1, but edges can be missing if no relationship exists
        between nodes.

    n_elements : integer
        The number of elements per group. Must be an integer divisor of the
        total number of nodes in the graph.

    n_unused : integer
        The number of unused nodes in the graph at every recursion step.
        If None, then it will be initialized as the total number of nodes
        in the graph.

    results : Results instance
        Accumulator holding the groups found so far and their summed
        weights. If None, a fresh Results object is created on the first
        call.
        Note: DO NOT SET THIS AT THE BEGINNING OF THE RUN!

    Returns
    -------
    success : bool
        Flag indicating success or failure of the algorithm

    groups: iterable
        A list of lists containing all groups of n_elements members fulfilling
        the connectivity constraints that maximize the sum of weights of all
        groups being used.

    weights_total_sum : float
        The total sum of all weights of the output groups

    """

    if G.number_of_nodes() % n_elements != 0:
        print("Caution! Number of sessions is not an integer multiple of the number of parallel slots!")

    ## initialize results object
    if results is None:
        results = Results(n_elements)

    if n_unused is None:
        n_unused = G.number_of_nodes()


    ## base case
    if n_unused == 0:
        results.success = True
        return results

    ## recursion
    else:
        ## find all cliques in the graph G
        cliques = list(nx.enumerate_all_cliques(G))

        ## find all cliques that have the required number of elements
        cliques = np.array([c for c in cliques if len(c)==n_elements])

        ## sort cliques by weights
        cliques, summed_weights = _sort_cliques_by_weights(G, cliques, n_elements)

        ## find the total number of cliques with n_elements members
        ncliques = len(cliques)

        ## loop over all cliques:
        for g, (cl, ww) in enumerate(zip(cliques, summed_weights)):
            cl_topics = [G.nodes[c] for c in cl]  # G.node was removed in networkx 2.4

            ## add the new clique to the list of output groups
            results.update_groups(zip(cl, cl_topics))

            ## add total weight of the clique:
            results.update_weights(ww)

            ## make a new deep copy for the next recursion step
            G_new = copy.deepcopy(G)

            ## remove clique from graph
            for n in cl:
                G_new.remove_node(n)

            ## compute new unused number of nodes
            n_unused = G_new.number_of_nodes()

            ## if no unused nodes are left, return the selected groups,
            ## otherwise recurse
            results = find_solution_numerical(G_new, n_elements, n_unused, results)
            if results is not None:
                if results.success:
                    return results

            ## backtrack
            else:
                results.success = False
                results.groups.pop(-1)
                results.all_weights.pop(-1)
                continue

    # TODO: Need to add something here to figure out which sessions
    # have potentially been left out because the number of sessions wasn't
    # an integer multiple of the number of slots

    if len(results.groups) == 0:
        print("No solution found!")
        results.success = False
        return results

    else:
        results.groups.pop(-1)
        results.all_weights.pop(-1)

        results.success = False
        return results
Code Example #34
def getCliques(UDG):
    clique = nx.enumerate_all_cliques(UDG)
    clique = list(clique)
    return clique
Code Example #35
File: extractTrimerTM.py Project: mmravic314/bin
def pairwise_contacts(storeDict):
    prvPDB = ""
    trimersByPDB = {}
    for pdb in storeDict.keys():

        pdbPath = os.path.join(tmPDB_dir, pdb + ".pdb")
        inPDBFull = parsePDB(pdbPath)
        inPDB = inPDBFull.select("ca").copy()
        pairList = []

        print(pdbPath)
        segments = []
        segNames = []
        for k in inPDB.iterSegments():
            if len(k.getSegnames()[0]) < 2:
                continue
            segments.append(k)
            segNames.append(k.getSegnames()[0])

        linkMat = np.zeros((len(segments), len(segments)))

        # find all pairwise chain interactions, log in matrix
        seg = 0
        for h in segments:

            mates = inPDB.select("exwithin 9 of hel", hel=h)
            if mates:
                mates = set(mates.getSegnames())
            else:
                continue

            for p in mates:

                if len(p) != 2:
                    continue  ## avoiding undocumented helices

                name = tuple(sorted([p, h.getSegnames()[0]]))
                dMat = buildDistMatrix(h, inPDB.select("segment  %s" % p))
                cnt = 0

                for d in dMat:
                    if min(d) < 8:
                        cnt += 1
                        if cnt == 5:
                            linkMat[seg][segNames.index(p)] += 1
                            break
            seg += 1

            ## Analyze matrix for 3 body contacts
            # Grab all k=3 cliques
        net = nx.Graph(linkMat)
        trimers = [x for x in nx.enumerate_all_cliques(net) if len(x) == 3]

        if len(trimers) == 0:
            continue

        ## Extract and print these preliminary trimers

        saved = []

        # remove redundants via sequence,
        # input c-alpha prody objects

        for tri in trimers:

            segs = [segNames[k] for k in tri]
            call = "%s_%s.pdb" % (pdb, "-".join(segs))
            newPath = os.path.join(outDir, call)
            atoms = ""
            for hel in segs:

                if len(atoms) == 0:
                    atoms = inPDBFull.select("segment %s" % hel).copy()

                else:
                    atoms += inPDBFull.select("segment %s" % hel).copy()
            atoms.setTitle("-".join(segs))

            ## Make comparison to all trimers in this PDB, to remove exact duplicates
            # accept the first trimer
            if len(saved) == 0:
                saved.append(atoms.select("ca").copy())
                writePDB(newPath, atoms)
                # remember each trimer made, and compare all new ones with those saved
            else:
                fail = 0

                for k in saved:
                    if compare_Trimers(k, atoms.select("ca").copy()) > 0:
                        fail += 1
                        break
                        # only accept the new trimer if its sequences don't match >95% with an existing trimer
                if fail == 0:
                    saved.append(atoms.select("ca").copy())
                    writePDB(newPath, atoms)

    return