def post(self, method):
        logging.info('method: %s' % method)
        if method == 'generate_graph':
            generate_graph()
            res = {
                "status": "OK"
            }
            self.write(res)
        elif method in ['get_all_stops', 'get_all_terminals', 'get_all_crossings']:
            przystanki = Przystanki()  # Przystanki (Polish) = stops; petle = terminal loops; skrzyzowania = crossings
            methods_data = {
                'get_all_stops': przystanki.przystanki,
                'get_all_terminals': przystanki.petle,
                'get_all_crossings': przystanki.skrzyzowania
            }
            res = {
                "data": methods_data[method],
                "status": "OK"
            }
            self.write(res)
        elif method == 'get_graph_edges':
            przystanki = Przystanki()
            params = json.loads(self.request.body.decode('utf-8'))
            logging.info(params)
            res = {
                "data": przystanki.get_edges(line=params['line']),
                "status": "OK"
            }
            self.write(res)
        elif method == 'get_trams':
            params = json.loads(self.request.body.decode('utf-8'))
            trams = self.spawn_worker.get_json_trams()
            logging.info(params)
            res = {
                "data": trams,
                "status": "OK"
            }
            self.write(res)

        elif method == 'get_new_trams':
            params = json.loads(self.request.body.decode('utf-8'))
            trams = self.spawn_worker.get_new_json_trams()
            logging.info(params)
            res = {
                "data": trams,
                "status": "OK"
            }
            self.write(res)
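For reference, this handler appears to be a tornado.web.RequestHandler (self.write, self.request.body), with `method` captured from the URL. A minimal routing sketch, assuming that setup; the URL pattern and factory name below are assumptions, not from the source:

import tornado.web

def make_app(handler_cls):
    # The capture group is passed to post() as the `method` argument,
    # e.g. POST /api/get_graph_edges -> post(method='get_graph_edges')
    return tornado.web.Application([
        (r"/api/([a-z_]+)", handler_cls),
    ])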
def main():
    generated_graph = generate_graph(NUM_NODES, NUM_EDGES, MAX_WEIGHT)
    graph2 = generated_graph.get_graph()

    duration1 = calculate_duration(graph2, prims, START)
    duration2 = calculate_duration(graph2, prims_fh, START)

    print("Prims (Binary Heap): ", duration1)
    print("Prims (Fibonacci Heap): ", duration2)
def main():
    generated_graph = generate_graph(NUM_NODES, NUM_EDGES, MAX_WEIGHT)
    graph2 = generated_graph.get_graph()
    edges = list(generated_graph.get_edges())

    duration1 = calculate_duration(graph2, kruskal_ts, edges)
    duration2 = calculate_duration(graph2, kruskal_ms, edges)

    print("Kruskal (Tim Sort):", duration1)
    print("Kruskal (Merge Sort):", duration2)
Example #4
    def get(self, request):
        networkType = request.GET.get('networkType', 'random')
        numOfNodes = int(request.GET.get('numOfNodes', '10'))
        settings = request.GET.get('settings', None)
        if settings:
            settings = json.loads(settings)
        else:
            settings = {
                'width': 600,
                'height': 600,
                'defaultTheta': 0,
                'defaultCommRange': 100,
                'useCommRange': False
            }

        graph = generate_graph(networkType, numOfNodes)
        pos = nx.spring_layout(graph)
        nx.set_node_attributes(graph, pos, 'pos')  # networkx >= 2.0 argument order; older releases used (G, name, values)

        nodes = []
        for n in graph.nodes(data=True):

            x = floor(settings['width'] * n[1]['pos'][0])
            y = floor(settings['height'] * n[1]['pos'][1])
            # keep the node strictly inside the canvas bounds
            if x <= 0.: x = 1.
            if x >= settings['width']: x -= 1.
            if y <= 0.: y = 1.
            if y >= settings['height']: y -= 1.

            nodes.append({
                'id': n[0],
                'x': x,
                'y': y,
                'theta': settings['defaultTheta'],
                'commRange': settings['defaultCommRange'],
                'memory': {}
            })

        edges = []
        for e in graph.edges():
            edges.append([{'id': e[0]}, {'id': e[1]}])
        network = generate_network(settings, nodes, edges)
        return JsonResponse(get_network_dict(network))
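A hypothetical client call against this view; only the query parameters come from the code above, while the host and route are assumptions:

import json
import requests

resp = requests.get(
    "http://localhost:8000/api/network",  # hypothetical route
    params={
        "networkType": "random",
        "numOfNodes": 25,
        "settings": json.dumps({"width": 800, "height": 800, "defaultTheta": 0,
                                "defaultCommRange": 100, "useCommRange": False}),
    },
)
print(resp.json())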
def generate_times(node_list, edge_list):
    times_prim = list()
    times_kruskal = list()

    for num_node in node_list:
        for num_edge in edge_list:
            print(num_node)
            generated_graph = generate_graph(num_node, num_edge, MAX_WEIGHT)
            graph = generated_graph.graph
            edges = list(generated_graph.get_edges())

            duration1 = calculate_duration(graph, prims, 0)
            duration2 = calculate_duration(graph, kruskal_ms, edges)

            times_prim.append(duration1)
            times_kruskal.append(duration2)

            print("Adding {} n, {} m".format(num_node, num_edge))
            print("\tPrim: ", duration1)
            print("\tKruskal: ", duration2)

    return times_prim, times_kruskal
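A hypothetical driver for generate_times; the node and edge counts below are illustrative, not from the source:

NODE_LIST = [100, 200, 400, 800]
EDGE_LIST = [500, 1000, 2000]

times_prim, times_kruskal = generate_times(NODE_LIST, EDGE_LIST)
print("Prim timings:   ", times_prim)
print("Kruskal timings:", times_kruskal)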
Example #6
        'fft_20_high': df.fft_20_high.values.tolist(),
        'fft_100_close': df.fft_100_close.values.tolist(),
        'fft_100_low': df.fft_100_low.values.tolist(),
        'fft_100_high': df.fft_100_high.values.tolist(),
        'fft_100_open': df.fft_100_open.values.tolist(),
        'var_ema': df['var_ema'].values.tolist(),
        'var_bollinger': df.var_bollinger.values.tolist(),
        'rsi_indicator': rsi_indicator,
        'stoch_indicator': stoch_indicator,
        '%K': df['%K'].values.tolist(),
        '%D': df['%D'].values.tolist(),
        'RSI': rsi_list,
        'ma20': df['ma20'].values.tolist(),
        'ma50': df['ma50'].values.tolist(),
        '26ema': df['26ema'].values.tolist(),
        '12ema': df['12ema'].values.tolist(),
        'upper_band': df['upper_band'].values.tolist(),
        'lower_band': df['lower_band'].values.tolist(),
        'ema_indicator': ema_indicator,
        'bollinger_indicator': bollinger_indicator
    }
    df = pd.DataFrame(df_dict)

    utils.generate_graph(df, x1, y1, x2, y2, x3, y3)
    print(df.tail())
    time.sleep(60)
Example #7
def main():
    ################################
    ## Module 1: data preparation
    data_ = data.Data(args.data_dir, args.vocab_size)

    # Process the ICD tree
    parient_children, level2_parients, leafNodes, adj, node2id, hier_dicts = utils.build_tree(
        os.path.join(args.data_dir, 'note_labeled.csv'))
    graph = utils.generate_graph(parient_children, node2id)
    args.node2id = node2id
    args.adj = torch.Tensor(adj).long().to(args.device)
    args.leafNodes = leafNodes
    args.hier_dicts = hier_dicts

    # TODO: details of the batcher object
    g_batcher = GenBatcher(data_, args)

    #################################
    ## Module 2: build the generator (G model) and pre-train it
    # TODO: details of the Generator object
    gen_model = Generator(args, data_, graph, level2_parients)

    gen_model.to(args.device)
    # TODO: details of the generated object
    generated = Generated_example(gen_model, data_, g_batcher)
    # Pre-train the G model
    pre_train_generator(gen_model, g_batcher, 10)

    # Use G to generate some negative samples
    generated.generator_train_negative_samples()
    generated.generator_test_negative_samples()

    #####################################
    ## Module 3: build the discriminator (D model) and pre-train it
    d_model = Discriminator(args, data_)

    d_batcher = DisBatcher(data_, args)

    # Pre-train the D model
    pre_train_discriminator(d_model, d_batcher, 25)

    ########################################
    ## Module 4: alternately train the G and D models
    for epoch in range(args.num_epochs):
        batches = g_batcher.get_batches(mode='train')
        for step in range(int(len(batches) / 1000)):

            # Train the G model
            train_generator(gen_model, d_model, g_batcher, d_batcher,
                            batches[step * 1000:(step + 1) * 1000], generated)

            # Generate negative samples for training D
            generated.generator_samples(
                "train_sample_generated/" + str(epoch) + "epoch_step" +
                str(step) + "_temp_positive", "train_sample_generated/" +
                str(epoch) + "epoch_step" + str(step) + "_temp_negative", 1000)

            # Generate test samples
            generated.generator_test_samples()

            # TODO: evaluate the performance of the G model

            # Create the training batches for D (containing both negative and positive samples)
            d_batcher.train_batch = d_batcher.create_batches(mode='train',
                                                             shuffleis=True)

            # Train the D network
            train_discriminator(d_model, 5, d_batcher,
                                d_batcher.get_batches(mode="train"))
Example #8
def main():
    ################################
    ## Module 1: data preparation
    data_ = data.Data(args.data_dir, args.vocab_size)

    # Process the ICD tree
    parient_children, level2_parients, leafNodes, adj, node2id, hier_dicts = utils.build_tree(
        os.path.join(args.data_dir, 'note_labeled_v2.csv'))
    graph = utils.generate_graph(parient_children, node2id)
    args.node2id = node2id
    args.id2node = {id: node for node, id in node2id.items()}
    args.adj = torch.Tensor(adj).long().to(args.device)
    # args.leafNodes=leafNodes
    args.hier_dicts = hier_dicts
    # args.level2_parients=level2_parients
    #print('836:',args.id2node.get(836),args.id2node.get(0))

    # TODO: details of the batcher object
    g_batcher = GenBatcher(data_, args)

    #################################
    ## Module 2: build the generator (G model) and pre-train it
    # TODO: details of the Generator object
    gen_model_eval = Generator(args, data_, graph, level2_parients)
    gen_model_target = Generator(args, data_, graph, level2_parients)
    gen_model_target.eval()
    print(gen_model_eval)

    # for name,param in gen_model_eval.named_parameters():
    #     print(name,param.size(),type(param))
    buffer = ReplayBuffer(capacity=100000)
    gen_model_eval.to(args.device)
    gen_model_target.to(args.device)

    # TODO: details of the generated object

    # Pre-train the G model
    #pre_train_generator(gen_model,g_batcher,10)

    #####################################
    ## Module 3: build the discriminator (D model) and pre-train it
    d_model = Discriminator(args)
    d_model.to(args.device)

    # Pre-train the D model
    #pre_train_discriminator(d_model,d_batcher,25)

    ########################################
    ## Module 4: alternately train the G and D models

    # Write the evaluation results to a file
    f = open('valid_result.csv', 'w')
    writer = csv.writer(f)
    writer.writerow([
        'avg_micro_p', 'avg_macro_p', 'avg_micro_r', 'avg_macro_r',
        'avg_micro_f1', 'avg_macro_f1', 'avg_micro_auc_roc',
        'avg_macro_auc_roc'
    ])
    epoch_f = []
    for epoch in range(args.num_epochs):
        batches = g_batcher.get_batches(mode='train')
        print('number of batches:', len(batches))
        for step in range(len(batches)):
            #print('step:',step)
            current_batch = batches[step]
            ehrs = [example.ehr for example in current_batch]
            ehrs = torch.Tensor(ehrs).long().to(args.device)

            hier_labels = [example.hier_labels for example in current_batch]

            true_labels = []

            # Pad hier_labels so that every path has length 4
            for i in range(len(hier_labels)):  # i indexes the samples
                for j in range(len(hier_labels[i])):  # j indexes the paths of sample i
                    if len(hier_labels[i][j]) < 4:
                        hier_labels[i][j] = hier_labels[i][j] + [0] * (
                            4 - len(hier_labels[i][j]))
                # if len(hier_labels[i]) < args.k:
                #     for time in range(args.k - len(hier_labels[i])):
                #         hier_labels[i].append([0] * args.hops)

            for sample in hier_labels:
                #print('sample:',sample)
                true_labels.append([row[1] for row in sample])

            predHierLabels, batchStates_n, batchHiddens_n = generator.generated_negative_samples(
                gen_model_eval, d_model, ehrs, hier_labels, buffer)

            #true_labels = [example.labels for example in current_batch]

            _, _, avgJaccard = full_eval.process_labels(
                predHierLabels, true_labels, args)

            # G generates positive samples for training D
            batchStates_p, batchHiddens_p = generator.generated_positive_samples(
                gen_model_eval, ehrs, hier_labels, buffer)

            # Train the D network
            #d_loss=train_discriminator(d_model,batchStates_n,batchHiddens_n,batchStates_p,batchHiddens_p,mode=args.mode)

            # Train the G model
            #for g_epoch in range(10):
            g_loss = train_generator(gen_model_eval,
                                     gen_model_target,
                                     d_model,
                                     batchStates_n,
                                     batchHiddens_n,
                                     buffer,
                                     mode=args.mode)

            print('batch_number:{}, avgJaccard:{:.4f}, g_loss:{:.4f}'.format(
                step, avgJaccard, g_loss))

        # After each epoch, evaluate the G model (and the D model) on the validation set
        avg_micro_f1 = evaluate(g_batcher,
                                gen_model_eval,
                                d_model,
                                buffer,
                                writer,
                                flag='valid')
        epoch_f.append(avg_micro_f1)

    # Plot results
    window = int(args.num_epochs / 20)
    print('window:', window)
    fig, ((ax1), (ax2)) = plt.subplots(2, 1, sharey=True, figsize=[9, 9])
    rolling_mean = pd.Series(epoch_f).rolling(window).mean()
    std = pd.Series(epoch_f).rolling(window).std()
    ax1.plot(rolling_mean)
    ax1.fill_between(range(len(epoch_f)),
                     rolling_mean - std,
                     rolling_mean + std,
                     color='orange',
                     alpha=0.2)
    ax1.set_title(
        'Moving average F1 ({}-epoch window)'.format(window))
    ax1.set_xlabel('Epoch Number')
    ax1.set_ylabel('F1')

    ax2.plot(epoch_f)
    ax2.set_title('Performance on valid set')
    ax2.set_xlabel('Epoch Number')
    ax2.set_ylabel('F1')

    fig.tight_layout(pad=2)
    plt.show()
    fig.savefig('results.png')

    f.close()
Example #9
    async def graph(self, ctx):
        """Sends the graph"""
        utils.generate_graph()
        await ctx.send("",
                       file=discord.File("img/image.svg",
                                         filename="img/da_graph.svg"))
Example #10
def get_edges(args):
    if args.model == 'synthetic':
        return utils.generate_graph(args.N, args.completeness)
    else:
        raise NotImplementedError()
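A hypothetical invocation of get_edges; the argparse wiring and default values are assumptions, not from the source:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', default='synthetic')
parser.add_argument('--N', type=int, default=50)
parser.add_argument('--completeness', type=float, default=0.3)
args = parser.parse_args()

edges = get_edges(args)  # delegates to utils.generate_graph(args.N, args.completeness)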