Example #1
File: main.py Project: glaand/transitcat
def upload(name, networks, onestop_ids, directoryPath):
    G, node_dict = create_node_network(networks, directoryPath)

    # Verify that nodes were created properly.
    # for NI in G.Nodes():
    # 	nodeID = NI.GetId()
    # 	print 'Node ID %d, stop_id %s, stop_name %s, stop_code %s, stop_lat %f, stop_lon %f, network_name %s' % (nodeID, G.GetStrAttrDatN(NI, 'stop_id'), G.GetStrAttrDatN(NI, 'stop_name'), G.GetStrAttrDatN(NI, 'stop_code'), G.GetFltAttrDatN(NI, 'stop_lat'), G.GetFltAttrDatN(NI, 'stop_lon'), G.GetStrAttrDatN(NI, 'network_name'))
    # print 'Node ID %d, stop_name %s, routes %s' % (nodeID, G.GetStrAttrDatN(NI, 'stop_name'), G.GetStrAttrDatN(NI, 'routes'))

    G = create_l_space_graph(G, node_dict, networks, directoryPath)
    G, internetwork_edges = get_internetwork_edges(G, 100)

    snapFileName = "snapData/uploads/" + name.replace(" ", "") + '.graph'
    pickleFileName = "snapData/uploads/" + name.replace(" ", "") + '.pkl'
    print(snapFileName)
    print(pickleFileName)
    try:
        print('Saving graph...')
        save_graph(G, snapFileName)
        with open(pickleFileName, 'wb+') as output:
            pickle.dump(node_dict, output, pickle.HIGHEST_PROTOCOL)
            pickle.dump(internetwork_edges, output, pickle.HIGHEST_PROTOCOL)
        print('Done!')

        snapPickle = SnapPickle(name=name,
                                networks=networks,
                                onestop_ids=onestop_ids,
                                pub_date=timezone.now(),
                                snapFileName=snapFileName,
                                pickleFileName=pickleFileName)
        snapPickle.save()
        return snapPickle
    except IOError:
        print('Could not save node network!')
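Since the two pickle.dump calls above write both objects into a single file, a reader must pickle.load them back in the same order they were dumped. A minimal loading counterpart (sketch only; the helper name is hypothetical and not part of the project above):

import pickle

def load_pickled_metadata(pickleFileName):
    # Objects pickled sequentially come back in the order they were dumped.
    with open(pickleFileName, 'rb') as f:
        node_dict = pickle.load(f)
        internetwork_edges = pickle.load(f)
    return node_dict, internetwork_edges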
Example #2
 def proceed(self):
     config = self.pipeline.version.config
     graph = utils.load_graph('labeled-import.pickle', config)
     artists = graph.get_nodes()['Artist'].values()
     # Artists without a category
     graph.prune([a for a in artists if len(a.categories) < 1])
     utils.save_graph(graph, 'labeled-import-prune', config)
     self.pipeline.update()
Example #3
 def proceed(self):
     print(f"Proceeding to execute import foo")
     moma = Moma(constants.MOMA_EXHIBITIONS_CSV)
     dome = Dome(constants.DOME_ARTISTS_CSV, constants.DOME_EXHIBITIONS_CSV)
     importer = Importer(moma, dome)
     importer.run()
     utils.save_graph(importer.graph, 'import.pickle',
                      self.pipeline.version.config)
     self.pipeline.update()
Example #4
def create_data_for_Graphsage(G, num_nodes_label):
    num_nodes = len(G.nodes())
    features = np.zeros((num_nodes, num_nodes_label))
    for node in G.nodes:
        features[node][G.nodes[node]['label']] = 1
    graphsage_G = nx.Graph()
    graphsage_G.add_nodes_from([str(node) for node in G.nodes])
    graphsage_G.add_edges_from([(str(edge[0]), str(edge[1]))
                                for edge in list(G.edges)])

    features = np.ones((num_nodes, args.feat_dim), dtype=float)
    id2idx = {node: int(node) for node in list(graphsage_G.nodes)}
    save_graph(features, graphsage_G, id2idx, args.data_name, args.dir)
    graph_data = load_data(args.prefix)
    return graph_data
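The save_graph(features, graphsage_G, id2idx, args.data_name, args.dir) call presumably writes the inputs in the standard GraphSAGE on-disk layout (prefix-G.json, prefix-id_map.json, prefix-feats.npy). A rough sketch of such a writer, with the file naming assumed rather than taken from the project:

import json
import os
import numpy as np
from networkx.readwrite import json_graph

def save_graphsage_inputs(features, graph, id2idx, data_name, out_dir):
    # Assumed layout: <prefix>-G.json (node-link graph), <prefix>-id_map.json, <prefix>-feats.npy
    prefix = os.path.join(out_dir, data_name)
    with open(prefix + '-G.json', 'w') as f:
        json.dump(json_graph.node_link_data(graph), f)
    with open(prefix + '-id_map.json', 'w') as f:
        json.dump(id2idx, f)
    np.save(prefix + '-feats.npy', features)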
Example #5
    def proceed(self):
        config = self.pipeline.version.config
        graph = utils.load_graph('labeled-import.pickle', config)

        nodes_by_type = graph.get_nodes()
        exhibitions = nodes_by_type['Exhibition'].values()
        artists = nodes_by_type['Artist'].values()

        # Artists with fewer than 3 exhibitions
        graph.prune([a for a in artists if a.degrees < 3])
        # Exhibitions with fewer than 2 artists
        graph.prune([e for e in exhibitions if e.degrees < 2])

        utils.save_graph(graph, 'labeled-import-prune', config)

        self.pipeline.update()
Example #6
def run(g, save=None, show_wd=False):
    cpg = cp(g)
    print(f'The original graph has a clock period of {cpg}')
    print('Running algorithm OPT1')
    g1 = opt1(g, show_wd=show_wd)
    if save is not None:
        path = save+'_opt1.dot'
        save_graph(g1, path)
        print(f'Output graph saved to {path}')
    cpr1 = cp(g1)
    print(f'The graph returned by OPT1 has a clock period of {cpr1}')
    print('Running algorithm OPT2')
    g2 = opt2(g, show_wd=show_wd)
    if save is not None:
        path = save+'_opt2.dot'
        save_graph(g2, path)
        print(f'Output graph saved to {path}')
    cpr2 = cp(g2)
    print(f'The graph returned by OPT2 has a clock period of {cpr2}')
Example #7
def gen_random_circuit(V=8, E=11, save=False):
    """
    Generate a random synchronous circuit.

    :param V: The number of nodes.
    :param E: The number of edges.
    :param save: If different from ``None`` or ``False``, the path where to save the generated graph.
    :return: The generated graph.
    """
    while True:
        g = nx.gnm_random_graph(V, E, directed=True)
        for v in g.nodes:
            g.nodes[v]['weight'] = np.random.randint(1, 10)
        g.nodes[0]['weight'] = 0
        for e in g.edges:
            g.edges[e]['weight'] = np.random.randint(10)
        if check_if_synchronous_circuit(g):
            if save:
                save_graph(g, save)
            return g
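check_if_synchronous_circuit is not shown in this example; under the usual Leiserson–Saxe retiming conditions it would verify that all register counts are non-negative and that every directed cycle carries at least one register. A plausible sketch under that assumption (not the project's actual implementation):

import networkx as nx

def check_if_synchronous_circuit(g):
    # Assumed conditions: non-negative edge weights, and every cycle has at least one register.
    if any(g.edges[e]['weight'] < 0 for e in g.edges):
        return False
    for cycle in nx.simple_cycles(g):
        pairs = zip(cycle, cycle[1:] + cycle[:1])
        if sum(g.edges[u, v]['weight'] for u, v in pairs) < 1:
            return False
    return True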
Example #8
    def test_timing_formulation(self, num):
        runs = 1000

        X = [0, 1]
        Y = [0, 1, 2]
        # for i in range(2):
        #     X.append(random.randInt(0,2))
        # Y = deep_copy(X)
        # for in range(2):
        #     X.append(random.randInt(4,))
        v = [6]

        inf_R_x = 0
        inf_R_y = 0
        inf_x = 0
        inf_y = 0
        for i in range(runs):
            T_1, T_a, T_b = runIC_fair_timings(
                (self.G, X + v, self.gamma_a, self.gamma_a))
            T_2, T_a, T_b = runIC_fair_timings(
                (self.G, X, self.gamma_a, self.gamma_a))
            inf_x += T_2
            inf_R_x += (T_1 - T_2)
            T_1, T_a, T_b = runIC_fair_timings(
                (self.G, Y + v, self.gamma_a, self.gamma_a))

            T_2, T_a, T_b = runIC_fair_timings(
                (self.G, Y, self.gamma_a, self.gamma_a))
            inf_y += T_2
            inf_R_y += (T_1 - T_2)

        print(
            f' X : {inf_x / runs}, diff: {inf_R_x / runs}  Y: {inf_y / runs} diff: {inf_R_y / runs}'
        )

        if inf_R_x / runs < inf_R_y / runs:
            ut.save_graph(self.filename + f'_violates_{num}_.txt', self.G)
            nx.draw(self.G)
            plt.savefig("graph" + f'_violates_{num}_.png')
            print(" \n \n ********** VIOLATED *********** \n\n\n")
Example #9
def gen_provided_correlator(n, save=False):
    """
    Generate a correlator circuit like the ones described in the paper.

    :param n: The values 1 or 2, depending on which correlator you want to generate.
    :param save: Whether to save the generated graph or not.
    :return: The generated graph.
    """

    g = nx.MultiDiGraph()
    add_weighted_node(g, 'h', 0)
    add_weighted_node(g, 'd0', 3)
    add_weighted_node(g, 'd1', 3)
    add_weighted_node(g, 'd2', 3)
    add_weighted_node(g, 'd3', 3)
    add_weighted_node(g, 'p0', 7)
    add_weighted_node(g, 'p1', 7)
    add_weighted_node(g, 'p2', 7)
    if n == 1:
        g.add_weighted_edges_from([('h', 'd0', 1), ('d0', 'd1', 1),
                                   ('d0', 'p0', 0), ('d1', 'd2', 1),
                                   ('d1', 'p1', 0), ('d2', 'd3', 1),
                                   ('d2', 'p2', 0), ('d3', 'p2', 0),
                                   ('p2', 'p1', 0), ('p1', 'p0', 0),
                                   ('p0', 'h', 0)])
        if save:
            save_graph(g, '../graphs/correlator1.dot')
    elif n == 2:
        g.add_weighted_edges_from([('h', 'd0', 1), ('d0', 'd1', 1),
                                   ('d0', 'p0', 0), ('d1', 'd2', 0),
                                   ('d1', 'p1', 0), ('d2', 'd3', 1),
                                   ('d2', 'p2', 0), ('d3', 'p2', 0),
                                   ('p2', 'p1', 1), ('p1', 'p0', 0),
                                   ('p0', 'h', 0)])
        if save:
            save_graph(g, '../graphs/correlator2.dot')
    else:
        raise NotImplementedError()
    return g
Example #10
    def proceed(self):
        config = self.pipeline.version.config
        report_path = os.path.join(config['version_dir'], 'fetch-report.json')

        graph = utils.load_graph('import.pickle', config)

        nodes = [value for value in graph
                 if value.type == 'Artist'
                 and value.wikidataID is not None
                 and value.degrees > 2]

        # For rerunning the process without restarting, use
        # the nodes from the report, using `report_path` above.
        # nodes = [graph[tk] for tk in report['unupdated']]

        fetch = FetchLabels(nodes, graph, config)
        (new_graph, report) = fetch.run()

        utils.save_graph(new_graph, 'labeled-import.pickle', config)

        with open(report_path, 'w') as f:
            f.write(json.dumps(report))
        self.pipeline.update() 
Example #11
def gen_correlator(k, save=False):
    """
    Generate a correlator of order :math:`k`.

    :param k: The order of the correlator.
    :param save: Whether to save the generated graph or not.
    :return: The generated graph.
    """
    assert k >= 1, 'k should be greater than or equal to 1'
    g = nx.DiGraph()
    add_weighted_node(g, 'h', 0)
    for i in range(k + 1):
        add_weighted_node(g, f'd{i}', 3)
        if i < k:
            add_weighted_node(g, f'p{i}', 7)
    g.add_weighted_edges_from([('h', 'd0', 1)])
    g.add_weighted_edges_from([(f'd{i}', f'd{i+1}', 1) for i in range(k)])
    g.add_weighted_edges_from([(f'd{i}', f'p{i}', 0) for i in range(k)])
    g.add_weighted_edges_from([(f'd{k}', f'p{k-1}', 0)])
    g.add_weighted_edges_from([(f'p{i+1}', f'p{i}', 0) for i in range(k - 1)])
    g.add_weighted_edges_from([('p0', 'h', 0)])
    if save:
        save_graph(g, f'../graphs/correlator_k{k}.dot')
    return g
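As a quick sanity check (usage sketch based only on the code above), gen_correlator(3) should reproduce the same 8-node, 11-edge structure as the first provided correlator in Example #9:

g = gen_correlator(3)
print(g.number_of_nodes(), g.number_of_edges())  # 8 11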
Example #12
def create_and_save_graph(xnodes,
                          ynodes,
                          x_weights_idmat_nodeff,
                          y_weights_idmat_nodeff,
                          color,
                          dir_path,
                          filename,
                          model_dat,
                          colortrans=None):
    """create graph as dot string, save it as png and return it as svg"""

    form = ("         node [style=rounded]\n"
            "         node [shape=box]\n"
            '         ratio="compress"\n')

    if color is True:
        base = compute_color_base([  # absolute max over all values
            x_weights_idmat_nodeff[0], x_weights_idmat_nodeff[2],
            y_weights_idmat_nodeff[0], y_weights_idmat_nodeff[2]
        ])
    elif color is None:
        base = None
    else:
        base = abs(color)  # e.g. color = 2 for t-values

    x_dot = dot(xnodes, ynodes, *x_weights_idmat_nodeff, color, base,
                colortrans, filename, model_dat)
    y_dot = dot(ynodes, ynodes, *y_weights_idmat_nodeff, color, base,
                colortrans, filename, model_dat)
    dot_str = "digraph { \n" + form + x_dot + y_dot + "        }"

    utils.save_graph(dir_path, filename, dot_str)

    graph_svg = utils.render_dot(dot_str, out_type="svg")

    return graph_svg
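Note that utils.save_graph(dir_path, filename, dot_str) here takes a dot source string rather than a graph object. A hedged guess at such a helper (file extension assumed, not confirmed by the project):

import os

def save_graph(dir_path, filename, dot_str):
    # Write the generated dot source to <dir_path>/<filename>.dot (extension assumed).
    with open(os.path.join(dir_path, filename + '.dot'), 'w') as f:
        f.write(dot_str)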
Example #13
    def __init__(self, args, num_classes=1000):
        super(CNN, self).__init__()
        self.conv1 = depthwise_separable_conv_3x3(3, args.channels // 2, 2)
        self.bn1 = nn.BatchNorm2d(args.channels // 2)
        if args.net_type == 'small':
            self.conv2 = Triplet_unit(args.channels // 2, args.channels, 2)
            if args.resume:
                graph = load_graph(os.path.join(args.model_dir, 'conv3.yaml'))
            else:
                graph = build_graph(args.nodes, args)
                save_graph(graph, os.path.join(args.model_dir, 'conv3.yaml'))
            self.conv3 = StageBlock(graph, args.channels, args.channels)
            if args.resume:
                graph = load_graph(os.path.join(args.model_dir, 'conv4.yaml'))
            else:
                graph = build_graph(args.nodes, args)
                save_graph(graph, os.path.join(args.model_dir, 'conv4.yaml'))
            self.conv4 = StageBlock(graph, args.channels, args.channels * 2)
            if args.resume:
                graph = load_graph(os.path.join(args.model_dir, 'conv5.yaml'))
            else:
                graph = build_graph(args.nodes, args)
                save_graph(graph, os.path.join(args.model_dir, 'conv5.yaml'))
            self.conv5 = StageBlock(graph, args.channels * 2,
                                    args.channels * 4)
            self.relu = nn.ReLU()
            self.conv = nn.Conv2d(args.channels * 4, 1280, kernel_size=1)
            self.bn2 = nn.BatchNorm2d(1280)
        elif args.net_type == 'regular':
            if args.resume:
                graph = load_graph(os.path.join(args.model_dir, 'conv2.yaml'))
            else:
                graph = build_graph(args.nodes // 2, args)
                save_graph(graph, os.path.join(args.model_dir, 'conv2.yaml'))
            self.conv2 = StageBlock(graph, args.channels // 2, args.channels)
            if args.resume:
                graph = load_graph(os.path.join(args.model_dir, 'conv3.yaml'))
            else:
                graph = build_graph(args.nodes, args)
                save_graph(graph, os.path.join(args.model_dir, 'conv3.yaml'))
            self.conv3 = StageBlock(graph, args.channels, args.channels * 2)
            if args.resume:
                graph = load_graph(os.path.join(args.model_dir, 'conv4.yaml'))
            else:
                graph = build_graph(args.nodes, args)
                save_graph(graph, os.path.join(args.model_dir, 'conv4.yaml'))
            self.conv4 = StageBlock(graph, args.channels * 2,
                                    args.channels * 4)
            if args.resume:
                graph = load_graph(os.path.join(args.model_dir, 'conv5.yaml'))
            else:
                graph = build_graph(args.nodes, args)
                save_graph(graph, os.path.join(args.model_dir, 'conv5.yaml'))
            self.conv5 = StageBlock(graph, args.channels * 4,
                                    args.channels * 8)
            self.relu = nn.ReLU()
            self.conv = nn.Conv2d(args.channels * 8, 1280, kernel_size=1)
            self.bn2 = nn.BatchNorm2d(1280)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(1280, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
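Here the randomly wired stages are cached as .yaml files so that --resume rebuilds exactly the same wiring. The project's serializers are not shown; a minimal sketch, assuming the graph is a networkx graph and that storing node and edge lists is sufficient (field names are assumptions):

import yaml
import networkx as nx

def save_graph(graph, path):
    # Persist only the wiring: node ids and edge list.
    with open(path, 'w') as f:
        yaml.safe_dump({'nodes': list(graph.nodes),
                        'edges': [list(e) for e in graph.edges]}, f)

def load_graph(path):
    with open(path) as f:
        data = yaml.safe_load(f)
    g = nx.Graph()
    g.add_nodes_from(data['nodes'])
    g.add_edges_from(data['edges'])
    return g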
Example #14
from networks import Exp1, Exp2, Exp3, Exp4, Exp5, Exp6
from utils import save_graph

net1 = Exp1(n_layers=3)
save_graph('results/exp1.dot', (8, 16), net1)

net2 = Exp2(n_layers=3)
save_graph('results/exp2.dot', (8, 16), net2)

net3 = Exp3(n_layers=3)
save_graph('results/exp3.dot', (8, 16), net3)

net4 = Exp4(n_layers=5)
save_graph('results/exp4.dot', (8, 16), net4)

net5 = Exp5(n_layers=3)
save_graph('results/exp5.dot', (8, 3, 64, 64), net5)

net6 = Exp6(n_layers=3)
save_graph('results/exp6.dot', (8, 3, 64, 64), net6)
Example #15
def generate_reports(organization_name, project, base_dir, weeks, issues_per_week, comments_per_week, comment_avg_len_per_week, comments_len_per_week, formatted_issues_per_user_per_week, issues_per_user):

    f = open(base_dir + '{}.txt'.format(project), 'w')

    print('{0}/{1} Report'.format(organization_name, project), file=f)
    print('-'*50, end='\n\n', file=f)
    for i in range(len(weeks)):
        print('Sprint %d: ' %(i), file=f)
        print('\tIssues: %d' %(issues_per_week[i]), file=f)
        print('\tComments: %d' %(comments_per_week[i]), file=f)
        print('\tComment avg size: %d chars' %(comment_avg_len_per_week[i]), file=f)
        print('\tTotal comment size: %d chars' %(comments_len_per_week[i]), file=f)
    print('-'*50, end='\n\n', file=f)
    print('Total issues: %d' %(sum(issues_per_week)), file=f)
    print('Total comments: %d' %(sum(comments_per_week)), file=f)

    f.close()

    u.save_graph(
        x_values=weeks,
        x_label="Weeks",
        y_values=issues_per_week,
        y_label="Number of issues",
        title="Issues open per week",
        line_label="Number of issues",
        line_color="b",
        save_path=base_dir + '{}-issues_per_week.pdf'.format(project)
    )

    u.save_graph(
        x_values=weeks,
        x_label="Weeks",
        y_values=comments_per_week,
        y_label="Number of comments",
        title="Comments made per week",
        line_label="Number of comments",
        line_color="r",
        save_path=base_dir + '{}-comments_per_week.pdf'.format(project)
    )

    u.save_graph(
        x_values=weeks,
        x_label="Weeks",
        y_values=comment_avg_len_per_week,
        y_label="Average comment size",
        title="Average comment size per week",
        line_label="Total comments size",
        line_color="g",
        save_path=base_dir + '{}-comment_avg_len_per_week.pdf'.format(project)
    )

    u.save_graph(
        x_values=weeks,
        x_label="Weeks",
        y_values=comments_len_per_week,
        y_label="Total comments size",
        title="Total comments size per week",
        line_label="Total comments size",
        line_color="k",
        save_path=base_dir + '{}-total_commented_chars_per_week.pdf'.format(project)
    )

    u.save_box_graph(
        x_values=np.arange(len(issues_per_user)),
        bar_values=[issues_per_user[author] for author in issues_per_user],
        bar_labels=issues_per_user, 
        y_label='Number of issues', 
        title='Issues opened per user', 
        save_path=base_dir + '{}-issues_opened_per_user.pdf'.format(project)
    )

    colorset = set()

    lastint = 0
    while len(colorset) < len(formatted_issues_per_user_per_week):
        rand = random.randint(0, 0xFFFFFF)
        if abs(rand - lastint) < 10:
            rand += 10
        lastint = rand
        colorset.add("#" + "%06x" % rand)

    colorset = list(colorset)

    u.save_multiline_graph(
        x_values_list=[weeks] * len(formatted_issues_per_user_per_week),
        x_label="Weeks",
        y_values_list=[formatted_issues_per_user_per_week[author]['x'] for author in formatted_issues_per_user_per_week],
        y_label="Number of issues",
        title="Issues opened per user over time",
        line_label_list=list(formatted_issues_per_user_per_week),
        line_color_list=colorset,
        save_path=base_dir + '{}-issues_opened_per_user_over_time.pdf'.format(project)
    )
Example #16
    def run(
        self,
        n_episodes,
        level=0,
        load=False,
        save_frequency=10,
        threshold=70,
        test=True,
        verbose=False,
        visualize=True,
        save_video=True,
        visualize_directory=None,
    ):
        """
        ### NO WORK NEEDED ###
        You can look at the structure but you do not need to modify it.
        You can print whatever you feel necessary.
        ######################

        Train agent for n_episodes

        if test == True the agent will not explore and will only exploit.
        if verbose == True the function will print more information during training (this will mess up the progress bar)
        if visualize == True the function will display an animation of the rocket landing for every episode.
        This is at the expense of speed though. If false it will only show it every n episodes.
        """
        self.level_number = level
        self.env = tensorforce.environments.OpenAIGym("RocketLander-v0",
                                                      level_number=level)

        if n_episodes < save_frequency:
            str_error = f"n_episodes < save_frequency, the model won't be able to save; set n_episodes to a value >= {save_frequency}"
            raise ValueError(str_error)

        agent = self.create_agent(self.env, n_episodes, save_frequency, load)

        # Loop over episodes
        reward_list_episodes = []
        reward_list = []
        score_list = []
        landing_fraction_list = []
        tqdm_bar = tqdm(range(1, n_episodes + 1))
        self.number_of_landings = 0

        for i in tqdm_bar:
            self.fraction_good_landings = self.number_of_landings * 100 / i
            if i > 1:
                tqdm_bar.set_description(
                    "Episode %d/%d reward: %d (max:%d, min:%d, mean:%d, std:%d), successful landings:%d(%d%%)"
                    % (
                        i,
                        n_episodes,
                        np.round(np.sum(reward_list), 3),
                        np.max(reward_list_episodes),
                        np.min(reward_list_episodes),
                        np.round(np.mean(reward_list_episodes), 3),
                        np.round(np.std(reward_list_episodes), 3),
                        self.number_of_landings,
                        np.round(self.fraction_good_landings, 3),
                    ))

            if i % save_frequency == 0:
                if save_video:
                    self.env = tensorforce.environments.OpenAIGym(
                        "RocketLander-v0",
                        visualize=True,
                        visualize_directory=visualize_directory,
                        level_number=level,
                    )
                else:
                    self.env = tensorforce.environments.OpenAIGym(
                        "RocketLander-v0", visualize=True, level_number=level)

                reward_list, score = self.episode(self.env,
                                                  i,
                                                  agent,
                                                  test=True,
                                                  verbose=False)
            else:
                self.env = tensorforce.environments.OpenAIGym(
                    "RocketLander-v0", level_number=level)
                reward_list, score = self.episode(self.env,
                                                  i,
                                                  agent,
                                                  test=False,
                                                  verbose=False)
            score_list.append(score)
            reward_list_episodes.append(np.sum(reward_list))
            landing_fraction_list.append(self.fraction_good_landings)
            if self.env.environment.landed_ticks > 59:
                self.number_of_landings += 1
            if (self.fraction_good_landings > threshold) and (i > 50):
                print("level cracked")
                self.cracked = True
                break

        # Show Sum of reward over 1 episode vs number of episodes graph
        reward_list_episodes = save_progress(load,
                                             reward_list_episodes,
                                             "reward_list_episodes.txt",
                                             level=level)
        score_list = save_progress(load,
                                   score_list,
                                   "score_list.txt",
                                   level=level)
        landing_fraction_list = save_progress(load,
                                              landing_fraction_list,
                                              "landing_fraction.txt",
                                              level=level)
        save_graph(
            reward_list_episodes,
            "Sum of reward over 1 episode vs number of episodes",
            "rewards_vs_episodes.png",
            rolling=True,
            level=level,
        )
        save_graph(
            score_list,
            "Score vs number of episodes",
            "score_vs_episodes.png",
            rolling=True,
            level=level,
        )
        save_graph(
            landing_fraction_list,
            "Landing fraction vs number of episodes",
            "landing_fraction_vs_episodes.png",
            level=level,
        )

        # SENDING INFO TO DATABASE
        DATE = str(datetime.datetime.today())
        inputs = [np.mean(score_list), n_episodes]
        result_data = prep_data_to_send(inputs, GROUP_NAME, DATE)
        send_result(result_data)
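The save_graph(values, title, filename, rolling=..., level=...) helper used above only needs to plot a series and, optionally, a rolling mean. One possible matplotlib sketch (window size and output path handling are guesses, not the project's actual code):

import numpy as np
import matplotlib.pyplot as plt

def save_graph(values, title, filename, rolling=False, level=0, window=50):
    plt.figure()
    plt.plot(values, label='per episode')
    if rolling and len(values) >= window:
        kernel = np.ones(window) / window
        plt.plot(np.convolve(values, kernel, mode='valid'),
                 label=f'rolling mean ({window} episodes)')
    plt.title(f'{title} (level {level})')
    plt.xlabel('Episode')
    plt.legend()
    plt.savefig(filename)
    plt.close()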
Example #17
import os
import sys
import networkx as nx
import matplotlib.pyplot as plt

lib = os.path.join(os.path.abspath('.'), 'lib')
sys.path.insert(0, lib)

import utils

# USER:saicologic
user_id = 1502

G = nx.Graph()

# Fetch the events the user attended.
events = utils.get_event_by_user(user_id)

# Build the people network from the events the user attended.
c = 0
for event in events:
  if c < len(events):
    utils.add_network(G, event.event_id)
    c += 1
    print('----------------------------------------------------------------------------')

# Export the graph in Pajek format.
utils.save_graph(G, user_id)

# Draw the graph.
nx.draw(G)
plt.show()
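The comment above says the graph is exported for Pajek; networkx can do this directly, so a minimal sketch of such a helper (file naming scheme assumed, not confirmed by the project) could be:

import networkx as nx

def save_graph(G, user_id):
    # Export the network in Pajek .net format.
    nx.write_pajek(G, 'user_%d.net' % user_id)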