Example No. 1
def main():
    if len(sys.argv) == 2 and os.path.exists(sys.argv[1]):
        g = graph.load_graph(sys.argv[1])
        interpretor = SixDegreesCmd(g)
        interpretor.cmdloop()
    else:
        print("Please provide valid input database")
Example No. 2
def main():
    g = graph.load_graph(sys.argv[1])
    s = g.get_info()
    logging.info('#actors: %s, #movies: %s' % (s.actors_count, s.movies_count))

    degree = degree_finder.DegreeFinder(g)
    test(degree)
Example No. 3
def plot_citation_graph():
    """
    Function for plotting the in-degree distribution of 
    a specific citation graph.
    """
    # Load the external graph of citation information
    citation_graph = g.load_graph(CITATION_URL)

    # Calculate the in-degree distribution of the citation graph
    in_degree_dist = g.in_degree_distribution(citation_graph)

    # Normalize the in-degree distribution to sum up to 1
    normalized_distribution = g.normalize_distribution(in_degree_dist)

    # Plot the normalized distribution on a log/log graph
    plt.loglog(normalized_distribution.keys(),
               normalized_distribution.values(),
               'ro',
               basex=10,
               basey=10)
    plt.title(
        'Log/log plot of the normalized distribution of a citation graph')
    plt.ylabel('Fraction of papers - base 10')
    plt.xlabel('Number of citations - base 10')
    plt.show()
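
The call above uses the pre-3.3 Matplotlib keywords basex/basey and feeds dict views straight to loglog, relying on Python 2 behaviour where .keys() returns a list. On Python 3 with Matplotlib 3.3 or newer, a minimal adaptation of the plotting step (same helper module g, same CITATION_URL) might look like this sketch; basex/basey become a single base keyword and the dict views are materialised as lists:

def plot_citation_graph_py3():
    # Sketch only: assumes the same g helper module and CITATION_URL constant.
    citation_graph = g.load_graph(CITATION_URL)
    distribution = g.normalize_distribution(g.in_degree_distribution(citation_graph))
    plt.loglog(list(distribution.keys()), list(distribution.values()), 'ro', base=10)
    plt.title('Log/log plot of the normalized distribution of a citation graph')
    plt.show()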
Example No. 4
def plot_targeted_attack_res():
    """
    Function plots the computed resilience of three networks 
    represented as undirected graphs as they undergo an attack
    sequence that eliminates nodes in descending order of 
    connectivity (most highly connected nodes are eliminated first).

    1. A real computer network.
    2. A simulated network built using the ER algorithm.
    3. A simulated network built using the UPA algorithm.
    """

    # Load three different graphs
    network_graph = g.load_graph(NETWORK_URL)
    er_graph = g.random_undirected_graph(1347, .0034)
    upa_graph = g.random_UPA_undirected_graph(1347, 2)

    # Compute resilience for the three graphs
    network_res = g.compute_resilience(network_graph,
                                       g.fast_targeted_order(network_graph))
    er_res = g.compute_resilience(er_graph, g.fast_targeted_order(er_graph))
    upa_res = g.compute_resilience(upa_graph, g.fast_targeted_order(upa_graph))

    plt.plot(network_res, '-b', label='Computer network')
    plt.plot(er_res, '-r', label='ER graph, p = .0034')
    plt.plot(upa_res, '-y', label='UPA graph, m = 2')
    plt.legend(loc='upper right')
    plt.title('Network resilience in a targeted attack sequence')
    plt.ylabel('Largest connected component')
    plt.xlabel('Number of nodes removed')
    plt.show()
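
The docstring above describes an attack sequence that removes the most highly connected node first and recomputes degrees after every removal. As a rough sketch of what such an ordering can look like, assuming the graph is a plain dict mapping each node to a set of its neighbours (this is not necessarily how g.fast_targeted_order is implemented):

def targeted_order(ugraph):
    # Work on a copy so the input graph is left untouched.
    graph = {node: set(neighbors) for node, neighbors in ugraph.items()}
    order = []
    while graph:
        # Pick the node with the highest remaining degree.
        max_node = max(graph, key=lambda node: len(graph[node]))
        # Remove it together with all edges touching it.
        for neighbor in graph[max_node]:
            graph[neighbor].discard(max_node)
        del graph[max_node]
        order.append(max_node)
    return order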
Example No. 5
def inference(config):
    graph = load_graph(config['dataset'], labels_is_onehot=False)

    y = graph.labels
    X = graph.features
    A = graph.adjcency_matrix(is_sparse=False)
    C = np.concatenate([A, config['lambda'] * X], axis=1)

    config['struct'][0] = C.shape[1]
    model = NAIE(config)
    model.restore("./Log/" + config['dataset'] + "/")
    macro, micro = model.evaluate(C, y)
    print("macro={:.4f}, micro={:.4f}".format(macro, micro))
Example No. 6
    def process(self):
        # Convert the npz files into PyTorch tensors and save them
        path = self.processed_dir
        for idx, raw_path in enumerate(tqdm(self.raw_paths)):

            g = load_graph(raw_path)

            Ro = g.Ro[0].T.astype(np.int64)
            Ri = g.Ri[0].T.astype(np.int64)

            i_out = Ro[Ro[:, 1].argsort(kind='stable')][:, 0]
            i_in = Ri[Ri[:, 1].argsort(kind='stable')][:, 0]

            x = g.X.astype(np.float32)
            edge_index = np.stack((i_out, i_in))
            y = g.y.astype(np.int64)

            y_nodes = np.zeros(x.shape[0])
            categories = np.unique(y)

            if not self._categorical:
                y = g.y.astype(np.float32)
            #print('y type',y.dtype)

            for i_category in categories:
                # Get all the edges belonging to this category
                indices_edges_this_category = (y == i_category)
                # Get all the nodes belonging to this category
                # (Use both ingoing and outgoing)
                node_indices_this_category = np.unique(
                    np.concatenate(
                        (edge_index[0][indices_edges_this_category],
                         edge_index[1][indices_edges_this_category])))
                # Set the y value to the category
                y_nodes[node_indices_this_category] = i_category

            outdata = Data(x=torch.from_numpy(x),
                           edge_index=torch.from_numpy(edge_index),
                           y=torch.from_numpy(y))
            outdata.y_nodes = torch.from_numpy(y_nodes.astype(np.int64))

            if not self._directed and not outdata.is_undirected():
                rows, cols = outdata.edge_index
                temp = torch.stack((cols, rows))
                outdata.edge_index = torch.cat([outdata.edge_index, temp],
                                               dim=-1)
                outdata.y = torch.cat([outdata.y, outdata.y])

            torch.save(outdata,
                       osp.join(self.processed_dir, 'data_{}.pt'.format(idx)))
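
The snippet only shows process; reading a saved sample back typically happens in the Dataset's get method. A minimal sketch, assuming the same torch / osp imports and a standard torch_geometric.data.Dataset subclass (the class definition is not shown above):

    def len(self):
        # One processed file per raw graph.
        return len(self.raw_paths)

    def get(self, idx):
        # Load a single preprocessed Data object from disk.
        return torch.load(osp.join(self.processed_dir, 'data_{}.pt'.format(idx)))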
Example No. 7
def run_NAIE(config):
    graph = load_graph(config['dataset'], labels_is_onehot=False)

    if config['task'] == 'lp':
        graph.G = remove_edges(
            graph.G,
            config['lp_test_path'] + config['dataset'] + "_lp_test.edgelist")
        print("Left edges in G: {}".format(graph.G.number_of_edges()))
        test_pairs, test_labels = read_test_links(config['lp_test_path'] +
                                                  config['dataset'] +
                                                  "_lp_test.edgelist")
        config['link_test_pairs'] = [
            (edges[0], edges[1], label)
            for edges, label in zip(test_pairs, test_labels)
        ]

    y = graph.labels
    X = graph.features
    A = graph.adjcency_matrix(is_sparse=False)
    C = np.concatenate([A, config['lambda'] * X], axis=1)

    smooth_X = smooth(A, X, 1.0)
    smooth_A = smooth(A, A, 1.0)
    if config['strategy'] == 'nc':
        gamma_adj = 1 - get_balance_coefficient(graph.G, smooth_A)
        gamma_attr = 1 - get_balance_coefficient(graph.G, smooth_X)
    elif config['strategy'] == 'sw':
        omega = get_omega(graph.G)
        omega = abs(omega)
        if omega > 1:
            omega = 1.0
        gamma_adj = omega
        gamma_attr = omega
    print("gamma_adj={:4f}, gamma_attr={:.4f}".format(gamma_adj, gamma_attr))
    ada_smooth_A = smooth(A, A, gamma_adj)
    ada_smooth_X = smooth(A, X, gamma_attr)
    target = np.concatenate([ada_smooth_A, config['lambda'] * ada_smooth_X],
                            axis=1)

    config['struct'][0] = C.shape[1]
    data = {'C': C, 'target': target, 'adj': A, 'y': y}
    model = NAIE(config)
    model.train(data)
Example No. 8
def classifily(intput_feature):  # name is the path + file name, e.g. "D:/feature"
    graph = gh.load_graph('D:\\work\\face-rec-api\\save_train\\my_model.pb')
    #    im = cb.Crop(im)
    #    intput_feature = scipy.io.loadmat('data/'+str(2)+'.mat')
    x = graph.get_tensor_by_name('prefix/Inputx_Placeholder1:0')
    y = graph.get_tensor_by_name('prefix/y_prob:0')

    # We launch a Session
    with tf.Session(graph=graph) as sess:
        # Note: we don't need to initialize/restore anything.
        # There are no Variables in this graph, only hardcoded constants.

        #         y_out = sess.run(y, feed_dict={
        #                 x: intput_feature['y'][0].reshape((1,2622))
        #         })
        y_out = sess.run(y, feed_dict={x: intput_feature})
#         scipy.io.savemat(name+".mat",dict(y_out=y_out))
    return y_out
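
tf.Session is the TensorFlow 1.x API; under TensorFlow 2.x a frozen graph like this one can still be run through the compatibility layer. A minimal sketch of the same inference call, assuming the identical graph object and tensor names:

import tensorflow as tf

def classifily_tf2(intput_feature, graph):
    x = graph.get_tensor_by_name('prefix/Inputx_Placeholder1:0')
    y = graph.get_tensor_by_name('prefix/y_prob:0')
    # Run the frozen graph via the TF1 compatibility session.
    with tf.compat.v1.Session(graph=graph) as sess:
        return sess.run(y, feed_dict={x: intput_feature})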
Example No. 9
def plot_citation_graph():
    """
    Function for plotting the in-degree distribution of 
    a specific citation graph.
    """
    # Load the external graph of citation information
    citation_graph = g.load_graph(CITATION_URL)

    # Calculate the in-degree distribution of the citation graph
    in_degree_dist = g.in_degree_distribution(citation_graph)

    # Normalize the in-degree distribution to sum up to 1
    normalized_distribution = g.normalize_distribution(in_degree_dist)

    # Plot the normalized distribution on a log/log graph
    plt.loglog(normalized_distribution.keys(), normalized_distribution.values(), 'ro', basex=10, basey=10)
    plt.title('Log/log plot of the normalized distribution of a citation graph')
    plt.ylabel('Fraction of papers - base 10')
    plt.xlabel('Number of citations - base 10')
    plt.show()
Example No. 10
def new_graph_experiment_1(token, user, repo, max_commit):
    if is_graph_saved(GRAPH_NAME):
        g = load_graph(GRAPH_NAME)
    else:
        client = Github(token)
        user = client.get_user(user)
        repo = user.get_repo(repo)

        g = new_graph()
        add_repo_node_with_languages(g, repo)
        user_commit = user_commit_dictionary(repo, max_commit=max_commit)
        add_users(g, repo, user_commit, user)
        add_user_repos_with_commits_and_languages(g,
                                                  user_commit,
                                                  max_commit=max_commit)
        add_user_language_edges(g)
        save_graph(g, GRAPH_NAME)

    if not is_visual_graph_saved(GRAPH_NAME):
        export_visual_graph(g, GRAPH_NAME)

    return g
Example No. 11
        

    def get_status(self):
        return "%02d active (T=%02d,S=%d,F=%d,Bi=%d,Bu=%d,V=%d)" % (len(self._state_nodes), self._target, self._speed, self._flipped, self._bidirectional, self.burnbridges, self._version)

if __name__=='__main__':
    import sys
    import numm

    USAGE = 'python player.py EDGES SOUND [SOUND [SOUND ...]]'

    if len(sys.argv) < 3:
        print(USAGE)
        sys.exit(1)

    g = graph.load_graph(sys.argv[1])
    graph.connect_to_samples(g, sys.argv[2:])

    p = Player(g, speed=50)

    # # Trigger everything
    # for n in g.get_nodes():
    #     if n.frames is not None:
    #         p.trigger(n, 1.0)

    # Trigger *something*
    p.trigger(g.get_nodes()[50], 1.0)

    out = []
    v_out = cv2.VideoWriter()
    fr = p._get_base_frame()
Example No. 12
import json
from graph import load_graph, shortest_path
from flask import Flask, request, render_template, jsonify

stations = load_graph('static/stations_sg.json')
app = Flask(__name__)


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/api/v1/')
def api():
    req = request.args
    route = shortest_path(stations, req['start'], req['end'])
    if route is None:
        return jsonify({'route': 'null'}), 400
    else:
        return jsonify({'route': route}), 200


app.run(debug=True)
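
With the app running locally (Flask's development server defaults to http://127.0.0.1:5000), the endpoint can be exercised with a short client script; the station names below are placeholders and must exist in static/stations_sg.json:

import requests

resp = requests.get('http://127.0.0.1:5000/api/v1/',
                    params={'start': 'STATION_A', 'end': 'STATION_B'})
print(resp.status_code, resp.json())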
Example No. 13
"""
To run everything. I am adding comments so you can see what does what, at least
according to me, and then make changes in the main file along with any subsequent
changes you need to make.
"""
from shortest_path_calculator import gets_original_gives_full_path, only_2_points
from mapping import mapping_on_maps_multiple, mapping_on_maps_singular
from visualization import visualise
from graph import load_graph, Graph
# Graph = __import__("Graph & Node").Graph
# load_graph = __import__("Graph & Node").load_graph

g = load_graph('data/chicago_dataset_2.csv')
end = 'Kinzie'
visitor = [
    '26th', '18TH', 'Indianapolis', 'Indiana', 'Michigan', 'Peterson', '75th',
    '96th'
]
start = '1550 West'
# Gives the full shortest path between start and end which goes through the
# locations mentioned in visitor.
full_path = gets_original_gives_full_path(g, start, end, visitor)
# Gives the full shortest path between start and end; here we don't have a
# visitor parameter.
path = only_2_points(g, start, end)
# Two map functions so that the web browser opens two files.
mapping_on_maps_multiple(g, full_path)
# In the actual main file we would need to add Kaartik's plotly function and
# we'd be good, I think.
mapping_on_maps_singular(g, path)
Example No. 14
louvain_res = [
    "louvain_n400_p0.3_q0.3.txt", "louvain_n400_p0.6_q0.3.txt",
    "louvain_n400_p0.8_q0.1.txt", "louvain_n400_p0.8_q0.2.txt",
    "louvain_benchmark_n128_e1024.txt", "louvain_benchmark_n1000_e8000.txt"
]

if __name__ == '__main__':
    print("Loading Louvain Algorithm's results")
    for i in range(len(louvain_res)):

        louvain_filename = louvain_res[i]
        graph_filename = graphs[i]

        print("---")
        print("Louvain Algorithm's result of graph " + graph_filename)

        cluster = load_community("results/" + louvain_filename, delimiter=" ")
        graph, _, _ = load_graph("data/" + graph_filename)

        labels_unique, partition = np.unique(cluster, return_counts=True)
        number_communities = len(labels_unique)

        community_filename = os.path.splitext(louvain_filename)[0]
        draw_graph(graph, cluster, community_filename + ".png")

        print("Picture of communities exported to graph/" +
              community_filename + ".png")
        print("Number of communities:", number_communities)
        print("Partition of nodes in different clusters/labels:",
              [item for item in partition])
Example No. 15
    for i in range(len(partitions)):
        for node in partitions[i]:
            labels[node] = i
    time_end = time.time()
    print("Calculation time:", time_end - time_start, "seconds")
    return labels


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("usage: <input-graph-filename.txt>")
        sys.exit()

    graph_filename = sys.argv[1]
    print("Divisive Approach Algorithm")
    G, _, _ = load_graph(graph_filename)
    New_Labels = divisive_approach(G)
    labels_unique, partition = np.unique(New_Labels, return_counts=True)
    number_communities = len(labels_unique)

    # save results
    filename = os.path.split(graph_filename)[1]
    graph_name = os.path.splitext(filename)[0]

    community_filename = "div_" + graph_name
    with open("results/" + community_filename + ".txt", "w") as f:
        for i in range(len(New_Labels)):
            label = New_Labels[i]
            f.write(str(i) + "\t" + str(label) + "\n")
    # draw the community result
    draw_graph(G, New_Labels, community_filename + ".png")
Example No. 16
from delta import save_delta
from delta import make_bidlinks
from capacity import generate_capacity
from capacity import save_capacity

# global settings
data_dir = 'data/'
cplex_dir = 'cplex/'
kmax = 5

if __name__ == "__main__":
    print('[LOG] Preparing Data ...')
    # generate base.dat
    txt2dat(data_dir, kmax)
    # generate paths.dat, num_nodes_in_paths.dat, num_paths.dat
    G = load_graph(data_dir)
    candidate_paths = generate_candidate_paths(G, kmax)
    save_candidate_paths(candidate_paths,
                         filename=os.path.join(data_dir, 'paths.dat'))
    save_candidate_paths(candidate_paths,
                         filename=os.path.join(data_dir, 'paths.pickle'))
    # generate demand.dat
    nodes = G.nodes.keys()
    demand_mat, meta = generate_demand_mat(nodes, dist='norm')
    save_demand_mat(demand_mat, data_dir, fmt='.dat')
    save_demand_mat(demand_mat, data_dir, fmt='.pickle')
    # generate delta.dat
    links = G.edges.keys()
    make_bidlinks(links)
    delta = generate_delta(links, candidate_paths)
    save_delta(delta, data_dir)
Example No. 17
from graph import load_graph, generate_graph, draw_graph
from sknetwork.clustering import PropagationClustering
from sknetwork.utils import edgelist2adjacency
import numpy as np
from networkx.algorithms.community import asyn_lpa_communities, label_propagation_communities

if __name__ == '__main__':
    print("Label Propagation Algorithm of Scikit NetWork & Network X")

    # use a simple graph with 400 nodes and about 20,000 edges
    G, Edges, _ = load_graph('data/n400_p0.8_q0.1.txt')
    print("--Scikit Network--")
    propagation = PropagationClustering()
    adjacency = edgelist2adjacency(list(G.edges))
    New_Labels = propagation.fit_transform(adjacency)
    labels_unique, count = np.unique(New_Labels, return_counts=True)
    print("Number of clusters/labels:", len(labels_unique))
    print("Partition of nodes in different clusters/labels:",
          [item for item in count])

    print("--NetWorkX--")
    nx_labels = label_propagation_communities(G)
    nx_labels = list(nx_labels)
    print("Number of clusters/labels:", len(nx_labels))
    print("Partition of nodes in different clusters/labels:",
          [len(item) for item in nx_labels])
Example No. 18
        return self.cost

    def set_previous(self, previous):
        self.previous = previous

    def get_previous(self):
        return self.previous

    def is_locked(self):
        return self.locked

    def set_locked(self, locked):
        self.locked = locked

    def print_content(self):
        text = "name: " + self.name + " cost: " + str(self.cost) + " previous: " + self.previous
        print(text)

import sys
import graph
if __name__ == "__main__":
    if len(sys.argv) == 4:
        g = graph.load_graph(sys.argv[1])
        dijkstra = Dijkstra(g)
        dijkstra.set_start(sys.argv[2])
        dijkstra.the_best_way_to(sys.argv[3])
    else:
        print("usage: python dijkstra.py [graph path] [start point name] [goal point name]")
    
    
Example No. 19
    dataset = args.dataset
    bl_feat = [
        '1_0_deg_min', '1_0_deg_max', '1_0_deg_mean', '1_0_deg_std', 'deg'
    ]

    # hyperparameters
    n_bin = args.n_bin  # number of bins for histogram
    norm_flag = args.norm_flag  # normalize before calling function_basis versus normalize after
    nonlinear_kernel = args.nonlinear_flag  # linear kernel versus nonlinear kernel

    # less important hyperparameters, used for fine-tuning
    uniform_flag = args.uniform_flag  # uniform versus log scale. True for imdb, False for reddit.
    cdf_flag = True  # cdf versus pdf. True for most datasets.
    his_norm_flag = 'yes'

    graphs, labels = load_graph(dataset)
    n = len(graphs)
    graphs_ = []
    direct = os.path.join('../data/cache/', dataset,
                          'norm_flag_' + str(norm_flag), '')

    try:
        with open(direct + 'graphs_', 'rb') as f:
            t0 = time.time()
            graphs_ = pickle.load(f)
            print('Finish loading existing graphs. Takes %s' %
                  (time.time() - t0))
    except IOError:
        for i in range(n):
            if i % 50 == 0: print('#', end='')
            gi = convert2nx(graphs[i], i)
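
The args object used at the top of this snippet presumably comes from argparse, which is not shown. The following is only a sketch, with flag names inferred from the attribute accesses above and purely illustrative defaults:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--n_bin', type=int, default=30)           # number of histogram bins
parser.add_argument('--norm_flag', type=str, default='yes')    # normalize before function_basis or after
parser.add_argument('--nonlinear_flag', action='store_true')   # nonlinear kernel instead of linear
parser.add_argument('--uniform_flag', action='store_true')     # uniform bins (imdb) vs. log scale (reddit)
args = parser.parse_args()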
Example No. 20
import sys

sys.path.append('../')

from delta import make_bidlinks
from delta import generate_delta
from delta import save_delta
from graph import load_graph
from candidate import generate_candidate_paths

if __name__ == "__main__":
    G = load_graph('../data')
    links = G.edges.keys()
    make_bidlinks(links)
    paths = generate_candidate_paths(G, kmax=10)
    delta = generate_delta(links, paths)
    save_delta(delta, data_dir='../data')
Example No. 21
import sys
sys.path.append('../')
import matplotlib.pyplot as plt

from graph import load_graph
from graph import draw_graph


if __name__ == "__main__":
    G = load_graph(data_dir='../data')
    draw_graph(G)
    plt.show()
Example No. 22
File: energy.py  Project: bishk/q
import numpy as np
from graph import load_graph, SparseGraph
from scipy.optimize import basinhopping
from GA import GA
from skopt.space import Real, Integer
from copy import deepcopy
import random
import multiprocessing as mp
from dwave_sapi2.util import qubo_to_ising
from collections import Counter
from sklearn.metrics import accuracy_score
import neal
from scipy.sparse import dok_matrix
from contextlib import closing

graph = load_graph("./graphs/graph000000.npz", SparseGraph)


def dist(vec):
    x, y, z = vec
    return np.sqrt(x**2.0 + y**2.0 + z**2.0)


def dist2d(vec):
    x, y, z = vec
    return np.sqrt(x**2.0 + y**2.0)


def geometricweight(graph, k, l, m):
    x1, y1, z1 = graph.X[k]
    x2, y2, z2 = graph.X[l]
Example No. 23
import json
import glob
import os

def graph2json(g, outfile):
    out = {}
    all_nodes = list(g.get_all_nodes())
    out['nodes'] = [{'nedges': len(g.all_node_edges(X)),
                     'pt': X.pt} for X in all_nodes]
    out['edges'] = [{'a': all_nodes.index(E.a),
                     'b': all_nodes.index(E.b)} for E in g.edges]
    with open(outfile, 'w') as f:
        json.dump(out, f)

def snd2json(d, outfile):
    out = {}
    for path in sorted(glob.glob(os.path.join(d, '*.aiff'))):
        n = int(os.path.basename(path).split('_')[0])
        out.setdefault(n, []).append(path)
    with open(outfile, 'w') as f:
        json.dump(out, f)

if __name__=='__main__':
    from graph import load_graph
    g1 = load_graph()
    graph2json(g1, 'graph.json')
    g2 = load_graph(bidirectional=True)
    graph2json(g2, 'graph_bi.json')

    for idx,d in enumerate(sorted(glob.glob('snd/*'))):
        snd2json(d, 's%d.json' % (idx))
Example No. 24
clusters_div = ["cluster_c4_benchmark_n128_e1024.txt"]

da_res = ["div_benchmark_n128_e1024.txt"]

if __name__ == '__main__':
    for i in range(len(graphs)):
        graph_file = graphs[i]
        cluster_file = clusters[i]
        lpa_file = lpa_res[i]
        louvain_file = louvain_res[i]

        print("---")
        print("Measure for graph " + graph_file)

        graph, edges, nodes = load_graph("data/" + graph_file)
        cluster = load_community("data/" + cluster_file)
        lpa = load_community("results/" + lpa_file)
        louvain = load_community("results/" + louvain_file, " ")

        # modularity
        communities = []  # communities: a partition, e.g. [{1, 2, 3}, {4, 5, 6}, ...]
        for l in set(lpa):
            communities.append(set([i for (i, j) in enumerate(lpa) if j == l]))
        print("LPA Modularity:",
              nx.algorithms.community.modularity(graph, communities))

        communities = []
        for l in set(louvain):
            communities.append(
                set([i for (i, j) in enumerate(louvain) if j == l]))