Code example #1
import os

import networkx as nx
from hac import GreedyAgglomerativeClusterer


def main():
    graph = nx.karate_club_graph()
    dendrogram = GreedyAgglomerativeClusterer().cluster(graph)
    print(dendrogram.quality_history)
    print(dendrogram.clusters())
    try:
        dendrogram.plot(os.path.join(os.path.dirname(__file__), '..', 'pics', 'karate_dend.png'), show=False)
        dendrogram.plot_quality_history('Karate', os.path.join(os.path.dirname(__file__), '..', 'pics', 'karate'), show=False)
    except Exception:
        # Plotting is best-effort; skip it if e.g. matplotlib is unavailable.
        pass
Code example #2
import os

import networkx as nx
from hac import GreedyAgglomerativeClusterer


def main():
    graph = nx.karate_club_graph()
    dendrogram = GreedyAgglomerativeClusterer().cluster(graph)
    print(dendrogram.quality_history)
    print(dendrogram.clusters())
    try:
        dendrogram.plot(os.path.join(os.path.dirname(__file__), '..', 'pics', 'karate_dend.png'), show=False)
        dendrogram.plot_quality_history('Karate', os.path.join(os.path.dirname(__file__), '..', 'pics', 'karate'), show=False)
    except Exception:
        # Plotting is best-effort; skip it if e.g. matplotlib is unavailable.
        pass
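
The two examples above rely on a pair of dendrogram accessors: quality_history and clusters(). A minimal sketch tying them together; the reading of quality_history as one modularity score per merge step, and of clusters() with no argument as the best-quality partition, is an assumption based on how these examples use them:

import networkx as nx
from hac import GreedyAgglomerativeClusterer

dendrogram = GreedyAgglomerativeClusterer().cluster(nx.karate_club_graph())

# Assumption: quality_history[i] is the modularity after the i-th merge.
best_step = max(range(len(dendrogram.quality_history)),
                key=dendrogram.quality_history.__getitem__)
print('best modularity:', dendrogram.quality_history[best_step])

# clusters() with no argument returns the highest-quality cut;
# clusters(n) requests exactly n communities.
print(dendrogram.clusters())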
Code example #3
from hac import GreedyAgglomerativeClusterer
from pandas import DataFrame
import dipha_utils
import community
import networkx as nx
import os
import helpers
import read_matrix

import matplotlib.pyplot as plt

read_matrix.read()

clusterer = GreedyAgglomerativeClusterer()

# f = open(r"C:\Users\jdbba\OneDrive\Documents\School\Research\DataSets\Neural network\celegansneural\celegansneural.gml")

G = nx.karate_club_graph()

size = G.number_of_nodes()
Matrix = helpers.create_shortest_path_matrix(G, size)

dFrame = DataFrame(Matrix)

dFrame.to_csv("Matrix_Files/M1.csv")

# Use a raw string so the backslashes are not treated as escape sequences.
file_path = r"C:\Users\jdbba\OneDrive\Documents\School\Research\Tests\Python Modularity Testing\dipha\Test"
infile1 = os.path.join(file_path, "distances.bin")
infile2 = os.path.join(file_path, "distances2.bin")

# save matrix to file for output to dipha
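
The snippet stops at the comment above, before the matrix is actually written out. A placeholder sketch of a continuation, assuming a raw dump of float64 values; the real DIPHA input format (with its magic number and headers) would come from the project's dipha_utils module, whose API is not shown here:

import numpy as np

# Placeholder only: write the distance matrix as contiguous float64 values.
# DIPHA's actual distance-matrix file format is more involved.
np.asarray(Matrix, dtype=np.float64).tofile(infile1)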
Code example #4
import sys

import networkx as nx
from hac import GreedyAgglomerativeClusterer


def cluster(PARequest):
    """Executes the clustering of the PARequest

    :PARequest: REST API PARequest
    :returns: PARequest dictionary with the clustering decisions

    """

    scenario = PARequest

    vnf_graph = nx.Graph()
    for e in scenario['nsd']['VNFLinks']:
        vnf_graph.add_edge(e['source'],
                           e['destination'],
                           weight=e['required_capacity'])

    host_graph = nx.Graph()
    for e in scenario['nfvi']['LLs']:
        host_graph.add_edge(e['source']['id'],
                            e['destination']['id'],
                            weight=e['capacity']['total'])

    max_n = min(len(host_graph), len(vnf_graph))

    # We now have the two graphs (note that the library does not support directed graphs!) and the maximum number ``max_n`` of clusters to try. We can now compute the two dendrograms (VNF and host).

    clusterer = GreedyAgglomerativeClusterer()
    dendo_vnf = clusterer.cluster(vnf_graph)
    dendo_host = clusterer.cluster(host_graph)

    # Now, for every value of $n$, we know which cluster each VNF and host belongs to. We write this information in the ``scenario`` data structure.

    scenario['clustering_decisions'] = []
    for n in range(1, max_n + 1):
        this_decision = {
            'no_clusters': n,
            'assignment_hosts': {},
            'assignment_vnfs': {}
        }
        print('number of clusters is', n, file=sys.stderr)
        for h in host_graph:
            c_id = [i for i in range(n) if h in dendo_host.clusters(n)[i]][0]
            print('  host', h, 'belongs to cluster no.', c_id + 1, file=sys.stderr)
            this_decision['assignment_hosts'][h] = 'host_cluster_' + str(c_id + 1)
        for v in vnf_graph:
            c_id = [i for i in range(n) if v in dendo_vnf.clusters(n)[i]][0]
            print('  VNF', v, 'belongs to cluster no.', c_id + 1, file=sys.stderr)
            this_decision['assignment_vnfs'][v] = 'vnf_cluster_' + str(c_id + 1)
        # We do not check feasibility
        # this_decision['prima_facie_feasible']=prima_facie_feasible(dendo_host.clusters(n),dendo_vnf.clusters(n))
        scenario['clustering_decisions'].append(this_decision)

    return scenario
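
For context, a hedged usage sketch of the cluster() function above. The field names (nsd.VNFLinks with source/destination/required_capacity, and nfvi.LLs with source.id/destination.id/capacity.total) are exactly the ones the function reads; the concrete values are invented for illustration:

request = {
    'nsd': {
        'VNFLinks': [
            {'source': 'vnfA', 'destination': 'vnfB', 'required_capacity': 10},
            {'source': 'vnfB', 'destination': 'vnfC', 'required_capacity': 5},
        ]
    },
    'nfvi': {
        'LLs': [
            {'source': {'id': 'h1'}, 'destination': {'id': 'h2'},
             'capacity': {'total': 100}},
        ]
    },
}

result = cluster(request)
for decision in result['clustering_decisions']:
    print(decision['no_clusters'], decision['assignment_vnfs'])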
Code example #5
import sys
from itertools import combinations

import networkx as nx
from hac import GreedyAgglomerativeClusterer


def cluster(PARequest):
    """Executes the clustering of the PARequest

    :PARequest: REST API PARequest
    :returns: PARequest dictionary with the clustering decisions

    """

    scenario = PARequest
    i = 0
    # Gather (SAP, index) pairs; a request may carry no SAPs at all.
    vlSaps = [(s, sIdx)
              for sIdx, s in enumerate(scenario['nsd'].get('SAP', []))]
    # Keep only the SAPs that reference a non-empty VNFLink.
    vlSaps = [(s, sIdx) for (s, sIdx) in vlSaps
              if 'VNFLink' in s and s['VNFLink'] != '']

    for (sap, sapIdx) in vlSaps:
        # Find the associated VNFLink
        vnfLink = [
            vl for vl in scenario['nsd']['VNFLinks']
            if vl['id'] == sap['VNFLink']
        ][0]
        sapVNF = {
            'VNFid': 'sap' + str(i),
            'SAPidx': sapIdx,
            'instances': 1,
            'requirements': {
                'cpu': 0,
                'ram': 0,
                'storage': 0
            },
            'failure_rate': 0,
            'processing_latency': 0,
            'CP': [{
                'cpId': 'sap' + str(i),
                'VNFLink': vnfLink
            }]
        }
        scenario['nsd']['VNFs'].append(sapVNF)
        i += 1

    # Now, we build two graphs, one for VNFs and one for hosts. Weights are:
    # * for the VNF graph, the traffic between VNFs;
    # * for the host graph, the capacity of links.

    vnf_graph = nx.Graph()
    for vl in scenario['nsd']['VNFLinks']:
        # Search VNFs connected to vl
        connected_vnfs = []
        for vnf in scenario['nsd']['VNFs']:
            linked_cps = [
                cp for cp in vnf['CP']
                if 'VNFLink' in cp and cp['VNFLink']['id'] == vl['id']
            ]
            if len(linked_cps) > 0:
                connected_vnfs.append(vnf['VNFid'])
        # Add all pairs VNF1---vl---VNF2
        for VNFe in list(combinations(connected_vnfs, 2)):
            vnf_graph.add_edge(VNFe[0],
                               VNFe[1],
                               weight=vl['required_capacity'])

    host_graph = nx.Graph()
    for e in scenario['nfvi']['LLs']:
        host_graph.add_edge(e['source']['id'],
                            e['destination']['id'],
                            weight=e['capacity']['total'])

    max_n = min(len(host_graph), len(vnf_graph))

    # We now have the two graphs (note that the library does not support directed graphs!) and the maximum number ``max_n`` of clusters to try. We can now compute the two dendrograms (VNF and host).

    clusterer = GreedyAgglomerativeClusterer()
    dendo_vnf = clusterer.cluster(vnf_graph)
    dendo_host = clusterer.cluster(host_graph)

    # Now, for every value of $n$, we know which cluster each VNF and host belongs to. We write this information in the ``scenario`` data structure.

    scenario['clustering_decisions'] = []
    for n in range(1, max_n + 1):
        this_decision = {
            'no_clusters': n,
            'assignment_hosts': {},
            'assignment_vnfs': {}
        }
        print('number of clusters is', n, file=sys.stderr)
        for h in host_graph:
            c_id = [i for i in range(n) if h in dendo_host.clusters(n)[i]][0]
            print('  host', h, 'belongs to cluster no.', c_id + 1, file=sys.stderr)
            this_decision['assignment_hosts'][h] = 'host_cluster_' + str(c_id + 1)
        for v in vnf_graph:
            c_id = [i for i in range(n) if v in dendo_vnf.clusters(n)[i]][0]
            print('  VNF', v, 'belongs to cluster no.', c_id + 1, file=sys.stderr)
            this_decision['assignment_vnfs'][v] = 'vnf_cluster_' + str(c_id + 1)
        # We do not check feasibility
        # this_decision['prima_facie_feasible']=prima_facie_feasible(dendo_host.clusters(n),dendo_vnf.clusters(n))
        scenario['clustering_decisions'].append(this_decision)

    # Now we can save our output file, i.e., a version of the scenario including the ``clustering_decisions`` data structure.

    # with open(output_path,'w') as fp:
    #     json.dump(scenario,fp,indent=2)
    return scenario
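
Both versions of cluster() locate each node's community with a linear scan, [i for i in range(n) if h in dendo.clusters(n)[i]][0], which re-queries dendo.clusters(n) for every node. A small sketch of an equivalent precomputed lookup (cluster_map is a made-up helper name; dendo.clusters(n) is the hac call already used above):

def cluster_map(dendo, n):
    """Invert dendo.clusters(n) into a node -> cluster-index dict."""
    return {node: i
            for i, members in enumerate(dendo.clusters(n))
            for node in members}

# Hypothetical refactor of the loop bodies above:
#   host_of = cluster_map(dendo_host, n)
#   c_id = host_of[h]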
Code example #6
File: classify_users.py Project: ehsanfar/TwiNet
import json
import os
import re
from os.path import abspath, dirname

import networkx as nx


def userClusterMethod():
    user_id_dict = {}
    with open('user_id_dict.json', 'r') as jsonfile:
        user_id_dict = json.load(jsonfile)

    user_id_dict['BrianMBendis'] = 16395449
    user_id_dict['robdelaney'] = 22084427
    user_id_dict['stephenfry'] = 15439395

    # Add reverse (id -> name) entries; iterate over a copy because mutating
    # a dict while iterating it raises a RuntimeError in Python 3.
    for name, uid in list(user_id_dict.items()):
        user_id_dict[uid] = name

    all_ids = user_id_dict.keys()
    print(all_ids)

    def create_graph():
        # user_id_dict is read from the enclosing function's scope,
        # so no global declaration is needed.
        data_dir = dirname(abspath(__file__))
        G = nx.Graph()

        def updateGraph(source_id, retweeters_dict):
            G.add_node(source_id)
            edges_weight = []
            # retweeters_dict maps retweeter id -> retweet count; skip
            # retweeters with fewer than 2 retweets.
            for r, w in retweeters_dict.items():
                if w < 2:
                    continue
                retid = int(r)
                edges_weight.append((retid, source_id, 1))

            G.add_weighted_edges_from(edges_weight)

        for file in os.listdir(data_dir):
            reusername = re.search(r'user_(.+)_retweeters\.json', file)
            if file.endswith(".json") and reusername:
                print(file)
                screen_name = reusername.group(1)
                source_id = user_id_dict[screen_name]
                print(screen_name, source_id)
                # listdir() returns bare names, so join with the directory.
                with open(os.path.join(data_dir, file), 'r') as infile:
                    retweeters = json.load(infile)

                updateGraph(source_id, retweeters)
                print(file[5:-16], retweeters)

        nx.write_gpickle(G, 'retweeter_graph')

    create_graph()
    G = nx.read_gpickle('retweeter_graph')

    print(len(G.nodes()))
    print(len(G.edges()))

    from hac import GreedyAgglomerativeClusterer
    clusterer = GreedyAgglomerativeClusterer()

    dendrogram = clusterer.cluster(G)
    cluster2 = dendrogram.clusters(2)
    # print([len(a) for a in cluster2])

    user_set = set(user_id_dict.values())

    u1 = cluster2[0]
    u2 = cluster2[1]

    print([user_id_dict[e] for e in list(user_set.intersection(u1))])
    print([user_id_dict[e] for e in list(user_set.intersection(u2))])
Code example #7
from hac import GreedyAgglomerativeClusterer


def agglomgreedytk(graph, forced_cl=None):
    clusterer = GreedyAgglomerativeClusterer()
    dendrogram = clusterer.cluster(graph, forced_cl)
    return dendrogram.labels()
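
A hedged usage sketch for the wrapper above: in the hac library, the second argument to cluster() is a collection of node sets that are forced to stay in the same community, so forced_cl here presumably follows the same convention; the node ids below are illustrative:

import networkx as nx

graph = nx.karate_club_graph()
# Assumption: forced_cl is a list of node sets to keep together.
labels = agglomgreedytk(graph, forced_cl=[{0, 33}, {1, 32}])
print(labels)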
Code example #8
File: agg.py Project: srinath116/BIGDATA-
import networkx as nx
from hac import GreedyAgglomerativeClusterer
clusterer = GreedyAgglomerativeClusterer()
# This cluster call is where most of the heavy lifting happens
karate_dendrogram = clusterer.cluster(nx.karate_club_graph())
karate_dendrogram.clusters(1)
karate_dendrogram.plot(karate_dendrogram.clusters(3),
                       karate_dendrogram.labels())
Code example #9
# This import fixes sys.path issues
from . import parentpath

import networkx as nx
import random
import time
import sys
from hac import GreedyAgglomerativeClusterer

if __name__ == '__main__':
    size = 1000
    edge_size = 6  # roughly 2*log10(size)
    graph = nx.Graph()
    print("Adding nodes...")
    graph.add_nodes_from(range(size))
    print("Adding edges...")
    edges = []
    for node in range(size):
        # sample() may pick node itself; nx.Graph keeps self-loops and
        # collapses duplicate edges, which is fine for this benchmark.
        for rand_node in random.sample(range(size), edge_size):
            edges.append((node, rand_node))
    graph.add_edges_from(edges)

    print("Starting Clustering on ({} nodes, {} edges) ....".format(
        graph.number_of_nodes(), graph.number_of_edges()))
    sys.stdout.flush()
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    GreedyAgglomerativeClusterer().cluster(graph).clusters()
    print("Finished Clustering in {} seconds".format(time.perf_counter() - start))
Code example #10
    args = parser.parse_args()

    # Get the NS number
    m = re.search(r'vls-(\d+)\.csv', args.vlCSV)
    nsNum = m.group(1)
    
    # Create the NS and set weights to be traffic
    readNS = NS.readCSV(vnfCSV=args.vnfCSV, vlCSV=args.vlCSV)
    nsG = readNS.getChain()
    weights = {}
    for (vnfA, vnfB, data) in nsG.edges(data=True):
        weights[vnfA, vnfB] = float(data['traffic'])
    nx.set_edge_attributes(nsG, weights, 'weight')  # networkx >= 2.0 argument order

    # Perform the clustering
    clusterer = GreedyAgglomerativeClusterer()
    dendoVnf = clusterer.cluster(nsG)
    clustering = {}
    for n in range(2, len(nsG) + 1):
        clustering[n] = {}
        for vnf in nsG.nodes():
            cId = [i for i in range(n) if vnf in dendoVnf.clusters(n)[i]][0]
            if cId not in clustering[n]:
                clustering[n][cId] = []
            clustering[n][cId].append(vnf)

    # Split the network service graph
    for numCls in clustering:
        print "Splitting in " + str(numCls) + " clusters"
        clusters = clustering[numCls].keys()
        clusters.sort()