Example #1
def main():
    model_dir = './log/model'
    device_name = "/cpu:0"
    log_dir = './log/'
    data_dir = 'MNIST_data'

    data_set = input_data.read_data_sets(data_dir, one_hot=True)

    # build graph
    with tf.Graph().as_default() as g:
        with tf.device(device_name):
            (x, y_), loss, accuracy, saver = \
                    build_graph(is_learning=False, enable_bn=False)

    # run session
    with tf.Session(graph=g) as sess:
        # restore model
        saver.restore(sess, model_dir)

        writer = tf.summary.FileWriter(log_dir)
        writer.add_graph(sess.graph)

        merged_summary = tf.summary.merge(
            [tf.summary.merge_all('all'),
             tf.summary.merge_all('layers')])

        s = sess.run(fetches=merged_summary,
                     feed_dict={
                         x: [data_set.test.images[0]],
                         y_: [data_set.test.labels[0]]
                     })
        # write log
        writer.add_summary(s, 0)
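
build_graph itself is not shown in this example. A minimal sketch of the interface the code above relies on, assuming a small MNIST classifier; the layer sizes and summary tags are illustrative guesses, not the original:

def build_graph(is_learning, enable_bn):
    # placeholders for flattened MNIST images and one-hot labels
    x = tf.placeholder(tf.float32, [None, 784], name='x')
    y_ = tf.placeholder(tf.float32, [None, 10], name='y_')
    h = tf.layers.dense(x, 128, activation=tf.nn.relu)
    if enable_bn:
        h = tf.layers.batch_normalization(h, training=is_learning)
    logits = tf.layers.dense(h, 10)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits))
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    # summaries go into the named collections merged above
    tf.summary.scalar('loss', loss, collections=['all'])
    tf.summary.scalar('accuracy', accuracy, collections=['all'])
    tf.summary.histogram('hidden', h, collections=['layers'])
    saver = tf.train.Saver()
    return (x, y_), loss, accuracy, saver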
Example #2
def detect_bridge_cells(year):

    G = build_graph(year)

    # read annual data
    with open('../resources/' + year + '_cell_data.json', 'r') as f:
        cells = json.load(f)

    border_cells = {k for k in cells if cells[k]['border']}
    live_cells = {k for k in cells if cells[k]['live']}
    all_cells = border_cells.union(live_cells)

    bridge_cells = set()

    for cell in live_cells:
        neighborhood = cells_within([cell], cell, set(), RADIUS)
        for neighbor in neighborhood:
            if neighbor in live_cells:
                path = networkx.shortest_path(G, cell, neighbor)[1:-1]
                for path_cell in path:
                    if path_cell in border_cells:
                        bridge_cells.add(path_cell)

    for cell in bridge_cells:
        cells[cell]['bridge'] = True

    # write annual data
    with open('../resources/' + year + '_cell_data.json', 'w') as f:
        f.write(
            json.dumps(cells, indent=4, sort_keys=True, separators=(',', ':')))
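
For reference, networkx.shortest_path returns the full node list including both endpoints, which is why the [1:-1] slice above keeps only the interior cells. A small self-contained illustration:

import networkx

demo = networkx.Graph()
demo.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd')])
assert networkx.shortest_path(demo, 'a', 'd') == ['a', 'b', 'c', 'd']
assert networkx.shortest_path(demo, 'a', 'd')[1:-1] == ['b', 'c']  # interior only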
Example #3
def main():
    parser = argparse.ArgumentParser(description="graph_stats provides some basic statistics about directed graphs")

    parser.add_argument(dest="fname", help="the name of the graph file")
    parser.add_argument(
        "-n",
        action="store",
        dest="num_display",
        default=10,
        type=int,
        help="Number of users to display in the statistics",
    )
    parser.add_argument(
        "--prune", action="store_true", default=False, help="Do NOT include leaf nodes in the graph calculations"
    )
    parser.add_argument("--verbose", action="store_true", default=False, help="Enable verbose output")

    results = parser.parse_args()

    graph = build_graph.build_graph(results.fname, results.prune, results.verbose)

    print "Graph Statistics:"
    print "Total Nodes " + str(graph.get_num_users())
    print "Total Edges " + str(graph.get_num_edges())
    graph.sort_by_links()
    for i in xrange(0, min(results.num_display, graph.get_num_users())):
        most_linked = graph.get_user_linked(i)
        print str(i + 1) + "th most linked " + most_linked.name + " with " + str(len(most_linked.followers))
    graph.calculate_friends()
    for i in xrange(0, min(results.num_display, graph.get_num_users())):
        most_friends = graph.get_user_friends(i)
        print str(i + 1) + "th most friends " + most_friends.name + " with " + str(len(most_friends.friends))
    print "Average Friends " + str(graph.avg_friends)
Example #4
def build(constants):
    if constants.ACTIVATION == 'leaky_relu':
        activation = tf.nn.leaky_relu
    elif constants.ACTIVATION == 'relu':
        activation = tf.nn.relu
    elif constants.ACTIVATION == 'tanh':
        activation = tf.nn.tanh
    else:
        activation = tf.nn.relu

    # make networks
    encoder = make_encoder(constants.CONVS, constants.FCS, activation)
    decoder = make_decoder(constants.CONVS, constants.FCS, activation)
    sample_latent = make_latent(constants.LATENT_SIZE)

    # build graphs
    reconstruct, generate, train = build_graph(
        encoder=encoder,
        decoder=decoder,
        sample_latent=sample_latent,
        image_size=constants.IMAGE_SIZE,
        latent_size=constants.LATENT_SIZE,
        lr=constants.LR
    )
    return reconstruct, generate, train
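
The if/elif chain that picks the activation could equally be a dict lookup with relu as the fallback; an equivalent sketch reusing this example's tf import:

ACTIVATIONS = {
    'leaky_relu': tf.nn.leaky_relu,
    'relu': tf.nn.relu,
    'tanh': tf.nn.tanh,
}
activation = ACTIVATIONS.get(constants.ACTIVATION, tf.nn.relu)  # relu as default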
Example #5
def main():
    filename = sys.argv[1]
    num_runs = int(sys.argv[2])
    G = build_graph.build_graph(filename)
    out_prefix = filename.split(".")[0]

    mccabe = open(out_prefix + "_mccabe", "a")
    algo2 = open(out_prefix + "_algo2", "a")
    algo3 = open(out_prefix + "_algo3", "a")
    paper = open(out_prefix + "_paper", "a")

    total_mccabe = 0
    total_algo2 = 0
    total_algo3 = 0
    total_paper = 0
    for i in range(num_runs):
        total_mccabe += time_mccabe(G, mccabe)
        total_algo2 += time_algo2(G, algo2)
        total_algo3 += time_algo3(G, algo3)
        total_paper += time_paper_algo(G, paper)

    mccabe.write(str(total_mccabe / num_runs))
    algo2.write(str(total_algo2 / num_runs))
    algo3.write(str(total_algo3 / num_runs))
    paper.write(str(total_paper / num_runs))

    mccabe.close()
    algo2.close()
    algo3.close()
    paper.close()
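
The time_* helpers are not shown. A plausible sketch of one, assuming each runs its algorithm once and logs the wall-clock time; mccabe_complexity is a hypothetical stand-in for the routine being benchmarked:

import time

def time_mccabe(G, out_file):
    start = time.time()
    mccabe_complexity(G)  # hypothetical: the algorithm under test
    elapsed = time.time() - start
    out_file.write(str(elapsed) + "\n")
    return elapsed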
Example #6
def main():
    # get data
    # (graph 1)
    # build graph
    # start training
    #   init global variables
    #   merge all summaries
    #   iterate training set
    #       if i % summary_period == 0 or i+1 == max_epochs
    #           run summary tensor
    #
    #       run train tensor
    #
    #   save model
    #
    # (graph 2)
    # build graph
    # start testing
    #   restore model
    #   eval accuracy of testing set

    data_set = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    with tf.Graph().as_default() as g_train:
        with tf.device(FLAGS.device_name):
            (x, y_), loss, accuracy, saver = \
                        build_graph(is_learning=True, enable_bn=True) 

    with tf.Session(graph=g_train) as sess:
        train(x, y_, loss, FLAGS.lr, accuracy,
                data_set, FLAGS.batch_size, FLAGS.max_steps, 
                FLAGS.summary_period, FLAGS.print_period,
                sess, saver, FLAGS.model_dir, FLAGS.training_log_dir)

    with tf.Graph().as_default() as g_test:
        with tf.device(FLAGS.device_name):
            (x, y_), loss, accuracy, saver = \
                        build_graph(is_learning=False, enable_bn=True)

    with tf.Session(graph=g_test) as sess:
        test(x, y_, accuracy, data_set, 
                saver, FLAGS.model_dir, sess)
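
train itself is not part of this example. A condensed sketch that follows the commented outline above (initialize variables, periodic summaries, final save), assuming the summaries live in the 'all' collection as in Example #1; the real implementation may differ, and batch-norm update ops are omitted for brevity:

def train(x, y_, loss, lr, accuracy, data_set, batch_size, max_steps,
          summary_period, print_period, sess, saver, model_dir, log_dir):
    with sess.graph.as_default():
        train_op = tf.train.GradientDescentOptimizer(lr).minimize(loss)
        merged = tf.summary.merge_all('all')
        writer = tf.summary.FileWriter(log_dir, sess.graph)
        sess.run(tf.global_variables_initializer())
        for i in range(max_steps):
            xs, ys = data_set.train.next_batch(batch_size)
            if i % summary_period == 0 or i + 1 == max_steps:
                writer.add_summary(sess.run(merged, feed_dict={x: xs, y_: ys}), i)
            if i % print_period == 0:
                print('step %d: accuracy %.3f' %
                      (i, sess.run(accuracy, feed_dict={x: xs, y_: ys})))
            sess.run(train_op, feed_dict={x: xs, y_: ys})
        saver.save(sess, model_dir)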
Example #7
def get_data_from_provider_update_db(connection, control):
    # Fetch the data from the provider
    p = Provider()
    data = p.get_data()
    data = build_graph(data)

    # Write the data to the database
    cur = connection.cursor()
    r = control.upsert_request(data)
    cur.execute(r)
    connection.commit()
    cur.close()
    return data
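
One caution on this pattern: if upsert_request builds the SQL string by interpolating the data itself, a parameterized query lets the driver handle escaping. A sketch, with a DB-API driver that uses %s placeholders (e.g. psycopg2); the table and column names are hypothetical:

import json

cur = connection.cursor()
cur.execute(
    "INSERT INTO provider_data (payload) VALUES (%s)",  # hypothetical table/column
    (json.dumps(data),),
)
connection.commit()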
Example #8
    def __init__(self,
                 actor,
                 critic,
                 value,
                 obs_dim,
                 num_actions,
                 replay_buffer,
                 batch_size=4,
                 action_scale=2.0,
                 gamma=0.9,
                 tau=0.01,
                 actor_lr=3 * 1e-3,
                 critic_lr=3 * 1e-3,
                 value_lr=3 * 1e-3,
                 reg_factor=1e-3):
        self.batch_size = batch_size
        self.num_actions = num_actions
        self.gamma = gamma
        self.obs_dim = obs_dim
        self.action_scale = action_scale
        self.last_obs = None
        self.t = 0
        self.replay_buffer = replay_buffer

        (self._act,
         self._train_actor,
         self._train_critic,
         self._train_value,
         self._update_target) = build_graph(
            actor=actor,
            critic=critic,
            value=value,
            obs_dim=obs_dim,
            num_actions=num_actions,
            batch_size=batch_size,
            gamma=gamma,
            tau=tau,
            actor_lr=actor_lr,
            critic_lr=critic_lr,
            value_lr=value_lr,
            reg_factor=reg_factor
        )

        self.actor_errors = []
        self.critic_errors = []
        self.value_errors = []
Example #9
def main(path, mode):
    adjacency, offsets, test_df, user2uid, item2iid = build_graph(
        path, test_size=32, split="time", out_dir="model"
    )
    n_users, n_items = len(user2uid), len(item2iid)
    latest_prefs = test_df.groupby("user_id").item_id.apply(list).to_dict()

    rw_class = {
        "vanilla": RW,
        "cache": RWcache,
        "parallel": RWparallel,
        "cy": RWcy,
        "cython": RWcython,
    }

    rw = rw_class[mode](alpha=0.1, n_total_steps=100000).load_graph(
        adjacency, offsets, n_users, n_items
    )
    run_evaluate(rw, latest_prefs, 32)
Example #10
def main():
    parser = argparse.ArgumentParser(
        description=
        'graph_stats provides some basic statistics about directed graphs')

    parser.add_argument(dest="fname", help="the name of the graph file")
    parser.add_argument('-n',
                        action="store",
                        dest="num_display",
                        default=10,
                        type=int,
                        help="Number of users to display in the statistics")
    parser.add_argument(
        '--prune',
        action="store_true",
        default=False,
        help='Do NOT include leaf nodes in the graph calculations')
    parser.add_argument('--verbose',
                        action="store_true",
                        default=False,
                        help='Enable verbose output')

    results = parser.parse_args()

    graph = build_graph.build_graph(results.fname, results.prune,
                                    results.verbose)

    print "Graph Statistics:"
    print "Total Nodes " + str(graph.get_num_users())
    print "Total Edges " + str(graph.get_num_edges())
    graph.sort_by_links()
    for i in xrange(0, min(results.num_display, graph.get_num_users())):
        most_linked = graph.get_user_linked(i)
        print str(i + 1) + "th most linked " + most_linked.name + \
            " with " + str(len(most_linked.followers))
    graph.calculate_friends()
    for i in xrange(0, min(results.num_display, graph.get_num_users())):
        most_friends = graph.get_user_friends(i)
        print str(i + 1) + "th most friends " + most_friends.name + \
            " with " + str(len(most_friends.friends))
    print "Average Friends " + str(graph.avg_friends)
Example #11
def init_kmeans(k, data):
    print(data)

    path = "data/" + data
    S, S_ori, A, true_clus, flag, A_ori = build_graph.build_graph(path)
    clus_list = list(set(true_clus))
    print(clus_list)
    clus_dic = {}
    for i in range(len(clus_list)):
        clus_dic[clus_list[i]] = i
    for i in range(len(true_clus)):
        true_clus[i] = clus_dic[true_clus[i]]

    pred_l = []
    cent_l = []
    km_l = []
    mod = []
    ent = []
    nmi = []
    ari = []
    for j in range(5):
        pred, centroids, kmeans_clus = clustering(A_ori, k)
        pred_l.append(pred)
        cent_l.append(centroids)
        km_l.append(kmeans_clus)
        ari.append(evaluate.ARI(true_clus, pred))
    ind = ari.index(sorted(ari)[2])  # use the run with the median ARI of the five
    pred = pred_l[ind]
    centroids = cent_l[ind]
    kmeans_clus = km_l[ind]
    U = initialize_U(A_ori, centroids)
    with open('initialize/' + data + '_U_' + str(k) + '.csv', 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerows(U)
    with open('initialize/' + data + '_V_' + str(k) + '.csv', 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerows(centroids)
Example #12
            flag=1
            break
        parent_id = child_ids(nn1_ind)
        depth = depth+1
    if flag == 1:
        nn_id = parent_id
        nn_dist  = parent_dist
    else:
        nn_id = - 1
        nn_dist = -1

    return nn_id, nn_dist, visited

if __name__ == '__main__':
    dt = numpy.dtype('f8')
    X = [range(10)]
    print X
    for i in range(10):
        Vect = [random.randint(0,100) for r in range(10)]
        X = numpy.vstack([X,Vect])
    X = numpy.delete(X,(0),axis = 0)
    X = MA(X,dt)
    K = 2
    print X
    nodes = build_graph(X,3)
    Q = [random.randint(0,100) for r in range(10)]
    print Q
    Q = MA(Q,dt)
    print(search_graph(Q, nodes, X, 1))
Example #13
def create_graph(args):
    ls_adj, feature_list, word_freq_list, y, y_hot, train_size, word_vectors, positions_list = build_graph.build_graph(
        config=args.configfile)
    vocab_size = len(word_vectors)
    word_vectors = torch.from_numpy(word_vectors).float()

    g_list = []
    max_words = 0
    for i, adj in enumerate(ls_adj):
        adj = normalize_adj(adj)
        g = nx.from_scipy_sparse_matrix(adj)
        lb = y[i]
        feat = feature_list[i]
        recon = [0] * vocab_size
        for j, f in enumerate(feat):
            recon[f] = word_freq_list[i][0][j] * word_freq_list[i][1][j]
        m_recon = max(recon)
        for f in range(len(recon)):
            recon[f] /= m_recon
        # if frequency_as_feature:
        #     feat = np.concatenate((feat, word_freq_list[i].toarray()), axis=1)
        #     # feat = feat * word_freq_list[i].toarray()
        if i == 10:
            print(word_freq_list[i])
        # s = sum(word_freq_list[i])
        # # s = 1
        # wf = [el / s for el in word_freq_list[i]]
        s = sum(word_freq_list[i][0])
        # s = 1
        wf1 = [el / s for el in word_freq_list[i][0]]
        s = sum(word_freq_list[i][1])
        # s = 1
        wf2 = [el / s for el in word_freq_list[i][1]]
        wf = (wf1, wf2)

        g_list.append(
            S2VGraph(g,
                     lb,
                     node_features=feat,
                     word_freq=wf,
                     positions=positions_list[i],
                     adj=adj.todense(),
                     recon=recon))
        for ar in positions_list[i]:
            max_words = max(max_words, max(ar))

    max_words += 1

    zero_edges = 0
    for g in g_list:
        # edges = [list(pair) for pair in g.g.edges()]
        # edges_w = [w['weight'] for i, j, w in g.g.edges(data=True)]
        # edges.extend([[i, j] for j, i in edges])
        # edges_w.extend([w for w in edges_w])
        edges = []
        edges_w = []
        for i, j, wt in g.g.edges(data=True):
            w = wt['weight']
            edges.append([i, j])
            edges_w.append(w)
            if i != j:
                edges.append([j, i])
                edges_w.append(w)

        if len(edges) == 0:
            print('zero edge : ', len(g.g))
            zero_edges += 1
            edges = [[0, 0]]
            edges_w = [0]
        g.edge_mat = torch.tensor(edges).long().transpose(0, 1)
        g.edges_weights = torch.tensor(edges_w).float()
    print('total zero edge graphs : ', zero_edges)
    return g_list, len(set(y)), train_size, word_vectors, max_words
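
normalize_adj is not defined in this listing. A common choice, and only an assumption here, is the symmetric GCN-style normalization D^(-1/2) (A + I) D^(-1/2):

import numpy as np
import scipy.sparse as sp

def normalize_adj(adj):
    # add self-loops, then scale both sides by the inverse square root of the degree
    adj = sp.coo_matrix(adj) + sp.eye(adj.shape[0])
    deg = np.asarray(adj.sum(axis=1)).flatten()
    d_inv_sqrt = np.zeros_like(deg, dtype=float)
    d_inv_sqrt[deg > 0] = deg[deg > 0] ** -0.5
    d_mat = sp.diags(d_inv_sqrt)
    return (d_mat @ adj @ d_mat).tocoo()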
Example #14
    FLAGS = flags.FLAGS

    #args=getopt()
    cv = 3
    k = 6
    fasta_name = 'first.txt'

    data_name = fasta_name.split('.')[0]
    split2cv(cv, fasta_name, data_name)
    test_acc = []
    test_pred = []
    test_labels = []
    for i in range(cv):
        temp_data_name = data_name + '_cv' + str(i + 1)
        print(temp_data_name)
        prepare_data(temp_data_name, k)
        build_graph(temp_data_name)
        acc, pred, labels = train(temp_data_name)
        test_acc.extend([acc])
        test_labels.extend(labels)
        test_pred.extend(pred)
    print('cv_acc:', np.mean(np.array(test_acc)))
    np.savetxt(data_name + '_cv_acc_result.csv',
               np.array(test_acc),
               delimiter=',',
               fmt='%5f')
    np.savetxt(data_name + 'cv_pred.csv',
               np.array([test_labels, test_pred]).T,
               delimiter=',',
               fmt='%d')
Example #15
    test_data = 'PDB_test.txt'
    test_label = 'test_label.txt'

    # train_data = 'trainnew.txt'
    # train_label = 'train_labelnew.txt'
    # test_data = 'testnew.txt'
    # test_label = 'test_labelnew.txt'

    
    test_acc = []
    test_pred = []
    test_labels = []

    data_name = train_data.split('.')[0]
    prepare_data_trian_test(train_data, train_label, test_data, test_label, data_name, k)
    build_graph(data_name, word_embeddings_dim, slide_size)
    acc, pred, labels = train(data_name)
    test_acc.extend([acc])
    test_labels.extend(labels)
    test_pred.extend(pred)

    print('cv_acc:', np.mean(np.array(test_acc)))
    np.savetxt(data_name + '_cv_acc_result_test_' + str(k) + '.csv',
               np.array(test_acc), delimiter=',', fmt='%5f')
    np.savetxt(data_name + 'cv_pred_test_' + str(k) + '.csv',
               np.array([test_labels, test_pred]).T, delimiter=',', fmt='%d')
Example #16
    def build(self, reuse=False):
        self.reuse = reuse
        with tf.variable_scope(self.name, reuse=self.reuse):
            # train set
            self.mix = self.loader.mix_queue # mixture batch
            self.source = self.loader.source_queue # source batch
            self.other = self.mix - self.source
            
            # test samples (placeholder)
            self.mix_test = tf.placeholder(tf.float32, shape=(1,16384), name="mixture_sample")
            self.source_test = tf.placeholder(tf.float32, shape=(1,16384), name="source_sample")

            # define network
            self.FE = FrontEnd()
            if config.network == "unet":
                self.SEP = Unet(model=config.model)
            elif config.network == "lstm":
                self.SEP = LSTM(model=config.model)
            elif config.network == "bilstm":
                self.SEP = BiLSTM(model=config.model)
            elif config.network == "dilated":
                self.SEP = Dilated(model=config.model)
            else:
                raise ValueError("No model chosen")
            self.BE = BackEnd()
            functions = [self.FE, self.SEP, self.BE]

            if config.network2 == "unet":
                self.SEP2 = Unet(model=config.model2)
                self.CON = Confidence(model=config.confidence)
                functions = functions + [self.SEP2, self.CON]
            elif config.network2 == "lstm":
                self.SEP2 = LSTM(model=config.model2)
                self.CON = Confidence(model=config.confidence)
                functions = functions + [self.SEP2, self.CON]
            elif config.network2 == "bilstm":
                self.SEP2 = BiLSTM(model=config.model2)
                self.CON = Confidence(model=config.confidence)
                functions = functions + [self.SEP2, self.CON]
            elif config.network2 == "dilated":
                self.SEP2 = Dilated(model=config.model2)
                self.CON = Confidence(model=config.confidence)
                functions = functions + [self.SEP2, self.CON]
            else:
                print("No model chosen")

            input = {"mix": self.mix, "source": self.source}
            input_test = {"mix": self.mix_test, "source": self.source_test}

            # draw graph
            self.graph = build_graph()
            self.graph_test = build_graph(train=False)

            self.graph(input, functions)
            self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.graph_test(input_test, functions)

            # variable count
            self.variable_size = np.sum(
                np.array([np.prod(np.array(v.get_shape().as_list())) for v in tf.trainable_variables()]))
            print("\n total varialbes : {}".format(self.variable_size), "\n")
            for v in tf.trainable_variables():
                print(v, "{}".format(np.prod(np.array(v.get_shape().as_list()))))

            # Define loss and summarize
            # self.loss_pre = (loss_fn(self.graph.masked_spec1, self.graph.estimated1, self.graph.source_spec,
            #                     self.source, self.other, loss_type=config.loss) + \
            #                 loss_fn(self.graph.masked_spec2, self.graph.estimated2, self.graph.source_spec,
            #                         self.source, self.other, loss_type=config.loss))/2

            if config.hybrid:
                if config.loss_seq == 'mid':
                    self.loss = loss_fn(self.graph.masked_spec1_mid, self.graph.estimated1_mid, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss) + \
                                loss_fn(self.graph.masked_spec2_mid, self.graph.estimated2_mid, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss)
                elif config.loss_seq == 'end':
                    self.loss = loss_fn(self.graph.masked_spec1, self.graph.estimated1, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss) + \
                                loss_fn(self.graph.masked_spec2, self.graph.estimated2, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss)
                elif config.loss_seq == 'both':
                    self.loss = (loss_fn(self.graph.masked_spec1, self.graph.estimated1, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss) +
                                loss_fn(self.graph.masked_spec2, self.graph.estimated2, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss) +
                                loss_fn(self.graph.masked_spec1_mid, self.graph.estimated1_mid, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss) +
                                loss_fn(self.graph.masked_spec2_mid, self.graph.estimated2_mid, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss))/2
                elif config.loss_seq == 'two':
                    self.loss_pre = loss_fn(self.graph.masked_spec1_mid, self.graph.estimated1_mid, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss) + \
                                loss_fn(self.graph.masked_spec2_mid, self.graph.estimated2_mid, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss)

                    self.loss = (loss_fn(self.graph.masked_spec1, self.graph.estimated1, self.graph.source_spec,
                                         self.source, self.other, loss_type=config.loss) +
                                 loss_fn(self.graph.masked_spec2, self.graph.estimated2, self.graph.source_spec,
                                         self.source, self.other, loss_type=config.loss) +
                                 loss_fn(self.graph.masked_spec1_mid, self.graph.estimated1_mid, self.graph.source_spec,
                                         self.source, self.other, loss_type=config.loss) +
                                 loss_fn(self.graph.masked_spec2_mid, self.graph.estimated2_mid, self.graph.source_spec,
                                         self.source, self.other, loss_type=config.loss)) / 2
                elif config.loss_seq == 'first':
                    self.loss_pre = loss_fn(self.graph.masked_spec1_mid, self.graph.estimated1_mid, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss)

                    self.loss = loss_fn(self.graph.masked_spec1, self.graph.estimated1, self.graph.source_spec,
                                         self.source, self.other, loss_type=config.loss) + \
                                 loss_fn(self.graph.masked_spec1_mid, self.graph.estimated1_mid, self.graph.source_spec,
                                         self.source, self.other, loss_type=config.loss)
                    self.graph.estimated = self.graph.estimated1
                    self.graph_test.estimated = self.graph_test.estimated1
                elif config.loss_seq == 'second':
                    self.loss_pre = loss_fn(self.graph.masked_spec2_mid, self.graph.estimated2_mid, self.graph.source_spec,
                                        self.source, self.other, loss_type=config.loss)

                    self.loss = loss_fn(self.graph.masked_spec2, self.graph.estimated2, self.graph.source_spec,
                                         self.source, self.other, loss_type=config.loss) + \
                                 loss_fn(self.graph.masked_spec2_mid, self.graph.estimated2_mid, self.graph.source_spec,
                                         self.source, self.other, loss_type=config.loss)
                    self.graph.estimated = self.graph.estimated2
                    self.graph_test.estimated = self.graph_test.estimated2
                else:
                    raise AssertionError("wrong config.loss_seq !!")
            else:
                self.loss = loss_fn(self.graph.masked_spec, self.graph.estimated, self.graph.source_spec,
                                    self.source, self.other, loss_type=config.loss)

            self.loss_summary = tf.summary.scalar("loss", self.loss)
            self.alpha_summary = tf.summary.scalar("alpha", tf.reduce_mean(self.graph.alpha))
            self.summary = tf.summary.merge_all()
            print("\n loss type : %s \n" % config.loss)
Example #17
            assert "visited_authors.pkl" in progress_dir, "visited_authors.pkl not found!" + prompt

        assert args.max_book > 0, "max_book must be a positive integer."
        assert args.max_book <= 2000, "max_book must not exceed 2000."
        assert args.max_author > 0, "max_author must be a positive integer."
        assert args.max_author <= 2000, "max_author must not exceed 2000."


if __name__ == "__main__":
    parser = construct_parser()
    validate_scrape_args(parser)
    args = parser.parse_args()
    if args.which == "scrape":  # run command "scrape"
        start_url = args.start_url
        max_book = args.max_book
        max_author = args.max_author
        new_scrape = args.new
        scrape_start(new_scrape, start_url, max_book, max_author, PROGRESS_DIR)
    elif args.which == "update":  # run command "update"
        type_json = args.type
        src_json = args.srcJSON
        insert_into_db(src_json, type_json)
    elif args.which == "export":  # run command "export"
        db_choice = args.db
        dump_db(db_choice)
    elif args.which == "draw":  # run command "draw"
        build_graph()
    else:  # invalid input
        print("error : invalid input.")
        parser.print_help()
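
construct_parser is defined elsewhere. A sketch of the shape it would need, using argparse subparsers whose dest matches the args.which dispatch above; argument names beyond the attributes actually read above are guesses:

import argparse

def construct_parser():
    parser = argparse.ArgumentParser(description="book-graph scraper toolkit")
    sub = parser.add_subparsers(dest="which")

    scrape = sub.add_parser("scrape", help="crawl books and authors")
    scrape.add_argument("start_url")
    scrape.add_argument("--max-book", dest="max_book", type=int, default=200)
    scrape.add_argument("--max-author", dest="max_author", type=int, default=50)
    scrape.add_argument("--new", action="store_true")

    update = sub.add_parser("update", help="insert a JSON file into the DB")
    update.add_argument("type")
    update.add_argument("srcJSON")

    export = sub.add_parser("export", help="dump a database")
    export.add_argument("db")

    sub.add_parser("draw", help="build and draw the graph")
    return parser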
Example #18
import sys
from pprint import pprint
from build_graph import build_graph
from map_coloring_utils import mrv, degree_heuristic, lcv, get_allowed_colors, coloring

if __name__ == "__main__":

    map = 'map.txt'

    colors = ['RED', 'GREEN']

    # Build graph from file
    graph = build_graph(map)

    for _ in range(len(graph)):
        cities_with_max_degree = degree_heuristic(graph)
        cities_with_minimum_remaining_colors = mrv(graph, colors)
        much_used_colors = lcv(graph, colors)

        selected_city = set(cities_with_max_degree).intersection(
            set(cities_with_minimum_remaining_colors)).pop()

        # Get allowed color for selected city
        colors_of_selected_city = get_allowed_colors(graph, selected_city,
                                                     colors)

        # Final chosen color
        common_color = set(much_used_colors).intersection(
            set(colors_of_selected_city))

        try:
Example #19
        tp = 0
        fp = 0
        tn = 0
        fn = 0
        (graphs, test_edge_list) = self.graph.k_folds(k)
        for graph, test_edges in zip(graphs, test_edge_list):
            tc = TriadClassifier(graph)
            tc.train()
            for test in test_edges:
                node1 = test[0][0]
                node2 = test[0][1]
                weight = test[1]
                pred = tc.classify_pair(node1, node2)
                if 1 * weight >= 0:
                    if pred[0] > pred[1]:
                        tp += 1
                    else:
                        fn += 1
                else:
                    if pred[0] < pred[1]:
                        tn += 1
                    else:
                        fp += 1
        return (tp, tn, fp, fn)


if __name__ == '__main__':
    tc = TriadClassifier(build_graph.build_graph('data/mlb/2015'))
    (tp, tn, fp, fn) = tc.k_folds()
    print tp, tn, fp, fn
    print "Accuracy: ", float(tp + tn) / (tp + fp + tn + fn)
Example #20
def construct_spring_approximation(fname, verbose, run_tests, prune, load_dist,
                                   iterations_to_save=-1, solver="BFGS", k=1000, q=500):
    #Create the Graph
    graph = build_graph.build_graph(fname, prune, verbose)

    i = 0
    j = 0
    #unit distance
    e = 1
    #repulsion distance scaler
    scale = 1

    position_matrix = e*graph.get_num_users()*np.random.random((graph.get_num_users(), 2))
    length_matrix = np.zeros((graph.get_num_users(),graph.get_num_users()))
    un_length_matrix = np.zeros((graph.get_num_users(),graph.get_num_users()))

    if load_dist:
        length_matrix = np.load("directed_length_matrix")
        un_length_matrix = np.load("undirected_length_matrix")
    else:
        if verbose:
            print "About to construct all pair distance matrices. This could take a while."
        length_matrix = graph.compute_all_path_matrix(True)
        un_length_matrix = graph.compute_all_path_matrix(False)
        if verbose:
            print "Done computing distance matrices"
        print "Saving all pair distance matrices"
        np.save("directed_length_matrix",length_matrix)
        np.save("undirected_length_matrix",un_length_matrix)

    connectivity_matrix = np.min(length_matrix,1)
    connectivity_matrix[connectivity_matrix == 0] = 1
    connectivity_matrix[connectivity_matrix == -1] = 0
    repulsion_matrix = (1-connectivity_matrix)

    print connectivity_matrix
    print repulsion_matrix
    print length_matrix
    print un_length_matrix

    #a quick vectorized distance function
    dist = lambda P: np.sqrt((P[:,0]-P[:,0][:, np.newaxis])**2 + 
                             (P[:,1]-P[:,1][:, np.newaxis])**2)

    #general broadcasting function
    def build_matrix(func, args):
        return func(*args) 

    def diff(A,B):
        return A[:,np.newaxis] - B

    def energy(P):
        epsilon = 0.000001
        P = P.reshape((-1, 2))
        D = dist(P)
        # The potential energy is the sum of the elastic potential energies plus the repel energies.
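        # In symbols: E(P) = sum_ij k * C_ij * (D_ij - L_ij)**2
        #                  + sum_ij q * U_ij * R_ij / (D_ij / scale + eps)
        # with D the pairwise distances, L the target graph distances,
        # C marking connected pairs and R = 1 - C marking pairs that repel.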
        return (k * connectivity_matrix * (D - length_matrix)**2).sum() + (q*(un_length_matrix)*(repulsion_matrix)*(1/(D/scale+epsilon))).sum()

    def grad(P):
        epsilon = 0.000001
        P = P.reshape((-1, 2))
        # We compute the distance matrix.
        D = dist(P)
        x_diff = build_matrix(diff,(P[:,0],P[:,0]))
        y_diff = build_matrix(diff,(P[:,1],P[:,1]))
        num_users = P.shape[0]
        grad = np.zeros((num_users,2))
        #Sorry there has to be a little math involved
        grad[:,0]=((2*k*connectivity_matrix*(D-length_matrix)*(1/(D+epsilon)) - q*un_length_matrix*repulsion_matrix*1/((D/scale)**3+epsilon))*x_diff).sum(axis = 1)
        grad[:,0]+=((2*k*np.transpose(connectivity_matrix)*(D-np.transpose(length_matrix))*(1/(D+epsilon)) - q*np.transpose(un_length_matrix)*np.transpose(repulsion_matrix)*1/((D/scale)**3+epsilon))*x_diff).sum(axis = 1)
        grad[:,1]=((2*k*connectivity_matrix*(D-length_matrix)*(1/(D+epsilon)) - q*un_length_matrix*repulsion_matrix*1/((D/scale)**3+epsilon))*y_diff).sum(axis = 1)
        grad[:,1]+=((2*k*np.transpose(connectivity_matrix)*(D-np.transpose(length_matrix))*(1/(D+epsilon)) - q*np.transpose(un_length_matrix)*np.transpose(repulsion_matrix)*1/((D/scale)**3+epsilon))*y_diff).sum(axis = 1)
        return grad.ravel()

    #Q:Why are there arrays here?
    #A:Python's closures do not let you modify the variables outside of local scope but they do let you modify objects like arrays
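    #   (In Python 3 the nonlocal statement makes this one-element-list
    #   workaround unnecessary.)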
    num_iterations = [0]
    best_soln = [None]
    last_time = [datetime.now()]

    def solver_callback(xk):
        num_iterations[0] += 1
        best_soln[0] = xk
        print str(num_iterations[0]) + " iterations with energy " + str(energy(xk))
        if iterations_to_save > 0 and num_iterations[0] % iterations_to_save == 0:
            print str(iterations_to_save) + " iterations in:" + str((datetime.now() - last_time[0]).seconds) + " seconds"
            last_time[0] = datetime.now()
            print "Saving Current Solution..."
            save_solution(graph, xk, "iteration_" + str(num_iterations[0]))
            print "Solution Saved as " + "iteration_" + str(num_iterations[0]) + ".js"

    def leave_handler(signal, frame):
        if best_soln[0] is not None:
            print "Saving best solution on exit as exit_solution.js"
            save_solution(graph, best_soln[0], "exit_solution")

    signal.signal(signal.SIGINT, leave_handler)

    if run_tests:
        from scipy.optimize import check_grad
        from scipy.optimize import approx_fprime
        print "Checking Gradient:"
        print check_grad(energy, grad, position_matrix.ravel())

        print "Speed Tests:"
        print "Energy Function Speed:"
        print datetime.now()
        print energy(position_matrix.ravel())
        print datetime.now()
        print "Gradient Speed:"
        print datetime.now()
        print grad(position_matrix.ravel())
        print datetime.now()

    print "Minimizing with " + str(solver)

    solution = opt.minimize(energy, position_matrix.ravel(),
                            method=solver, jac=grad, callback=solver_callback)

    print "Solved to optimiality"
    if verbose:
        print energy(position_matrix.ravel())
        print energy(solution.x)

    save_solution(graph,solution.x,"results")
Example #21
            flag = 1
            break
        parent_id = child_ids(nn1_ind)
        depth = depth + 1
    if flag == 1:
        nn_id = parent_id
        nn_dist = parent_dist
    else:
        nn_id = -1
        nn_dist = -1

    return nn_id, nn_dist, visited


if __name__ == '__main__':
    dt = numpy.dtype('f8')
    X = [range(10)]
    print X
    for i in range(10):
        Vect = [random.randint(0, 100) for r in range(10)]
        X = numpy.vstack([X, Vect])
    X = numpy.delete(X, (0), axis=0)
    X = MA(X, dt)
    K = 2
    print X
    nodes = build_graph(X, 3)
    Q = [random.randint(0, 100) for r in range(10)]
    print Q
    Q = MA(Q, dt)
    print(search_graph(Q, nodes, X, 1))
Example #22
# https://www.reddit.com/r/dailyprogrammer/comments/4j65ls/20160513_challenge_266_hard_finding_friends_in/

import build_graph as bg

def maximum_clique(graph, vertices, clique=set()):
	if not vertices:
		return clique

	return max((maximum_clique(graph, vertices & graph[v], clique | {v}) for v in vertices), key=len)

file_location = 'maximum_clique_input.txt'
print(maximum_clique(*bg.build_graph(file_location)))
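
Here bg.build_graph evidently returns a pair (adjacency, vertex set), with adjacency mapping each vertex to the set of its neighbours; the recursion keeps only vertices adjacent to everything already chosen, so each candidate is a clique and max(..., key=len) picks the largest. A tiny hand-built check that skips the file input:

# triangle {1, 2, 3} plus a pendant vertex 4
graph = {1: {2, 3}, 2: {1, 3}, 3: {1, 2, 4}, 4: {3}}
print(maximum_clique(graph, set(graph)))  # -> {1, 2, 3}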
Example #23
def construct_spring_approximation(fname,
                                   verbose,
                                   run_tests,
                                   prune,
                                   load_dist,
                                   iterations_to_save=-1,
                                   solver="BFGS",
                                   k=1000,
                                   q=500):
    #Create the Graph
    graph = build_graph.build_graph(fname, prune, verbose)

    i = 0
    j = 0
    #unit distance
    e = 1
    #repulsion distance scaler
    scale = 1

    position_matrix = e * graph.get_num_users() * np.random.random(
        (graph.get_num_users(), 2))
    length_matrix = np.zeros((graph.get_num_users(), graph.get_num_users()))
    un_length_matrix = np.zeros((graph.get_num_users(), graph.get_num_users()))

    if load_dist:
        length_matrix = np.load("directed_length_matrix")
        un_length_matrix = np.load("undirected_length_matrix")
    else:
        if verbose:
            print "About to construct all pair distance matrices. This could take a while."
        length_matrix = graph.compute_all_path_matrix(True)
        un_length_matrix = graph.compute_all_path_matrix(False)
        if verbose:
            print "Done computing distance matrices"
        print "Saving all pair distance matrices"
        np.save("directed_length_matrix", length_matrix)
        np.save("undirected_length_matrix", un_length_matrix)

    connectivity_matrix = np.min(length_matrix, 1)
    connectivity_matrix[connectivity_matrix == 0] = 1
    connectivity_matrix[connectivity_matrix == -1] = 0
    repulsion_matrix = (1 - connectivity_matrix)

    print connectivity_matrix
    print repulsion_matrix
    print length_matrix
    print un_length_matrix

    #a quick vectorized distance function
    dist = lambda P: np.sqrt((P[:, 0] - P[:, 0][:, np.newaxis])**2 +
                             (P[:, 1] - P[:, 1][:, np.newaxis])**2)

    #general broadcasting function
    def build_matrix(func, args):
        return func(*args)

    def diff(A, B):
        return A[:, np.newaxis] - B

    def energy(P):
        epsilon = 0.000001
        P = P.reshape((-1, 2))
        D = dist(P)
        # The potential energy is the sum of the elastic potential energies plus the repel energies.
        return (k * connectivity_matrix * (D - length_matrix)**2).sum() + (
            q * (un_length_matrix) * (repulsion_matrix) *
            (1 / (D / scale + epsilon))).sum()

    def grad(P):
        epsilon = 0.000001
        P = P.reshape((-1, 2))
        # We compute the distance matrix.
        D = dist(P)
        x_diff = build_matrix(diff, (P[:, 0], P[:, 0]))
        y_diff = build_matrix(diff, (P[:, 1], P[:, 1]))
        num_users = P.shape[0]
        grad = np.zeros((num_users, 2))
        #Sorry there has to be a little math involved
        grad[:, 0] = ((2 * k * connectivity_matrix * (D - length_matrix) *
                       (1 / (D + epsilon)) -
                       q * un_length_matrix * repulsion_matrix * 1 /
                       ((D / scale)**3 + epsilon)) * x_diff).sum(axis=1)
        grad[:, 0] += ((2 * k * np.transpose(connectivity_matrix) *
                        (D - np.transpose(length_matrix)) *
                        (1 / (D + epsilon)) -
                        q * np.transpose(un_length_matrix) *
                        np.transpose(repulsion_matrix) * 1 /
                        ((D / scale)**3 + epsilon)) * x_diff).sum(axis=1)
        grad[:, 1] = ((2 * k * connectivity_matrix * (D - length_matrix) *
                       (1 / (D + epsilon)) -
                       q * un_length_matrix * repulsion_matrix * 1 /
                       ((D / scale)**3 + epsilon)) * y_diff).sum(axis=1)
        grad[:, 1] += ((2 * k * np.transpose(connectivity_matrix) *
                        (D - np.transpose(length_matrix)) *
                        (1 / (D + epsilon)) -
                        q * np.transpose(un_length_matrix) *
                        np.transpose(repulsion_matrix) * 1 /
                        ((D / scale)**3 + epsilon)) * y_diff).sum(axis=1)
        return grad.ravel()

    #Q:Why are there arrays here?
    #A:Python's closures do not let you modify the variables outside of local scope but they do let you modify objects like arrays
    num_iterations = [0]
    best_soln = [None]
    last_time = [datetime.now()]

    def solver_callback(xk):
        num_iterations[0] += 1
        best_soln[0] = xk
        print str(num_iterations[0]) + " iterations with energy " + str(
            energy(xk))
        if iterations_to_save > 0 and num_iterations[0] % iterations_to_save == 0:
            print str(iterations_to_save) + " iterations in:" + str(
                (datetime.now() - last_time[0]).seconds) + " seconds"
            last_time[0] = datetime.now()
            print "Saving Current Solution..."
            save_solution(graph, xk, "iteration_" + str(num_iterations[0]))
            print "Solution Saved as " + "iteration_" + str(
                num_iterations[0]) + ".js"

    def leave_handler(signal, frame):
        if best_soln[0] is not None:
            print "Saving best solution on exit as exit_solution.js"
            save_solution(graph, best_soln[0], "exit_solution")

    signal.signal(signal.SIGINT, leave_handler)

    if run_tests:
        from scipy.optimize import check_grad
        from scipy.optimize import approx_fprime
        print "Checking Gradient:"
        print check_grad(energy, grad, position_matrix.ravel())

        print "Speed Tests:"
        print "Energy Function Speed:"
        print datetime.now()
        print energy(position_matrix.ravel())
        print datetime.now()
        print "Gradient Speed:"
        print datetime.now()
        print grad(position_matrix.ravel())
        print datetime.now()

    print "Minimizing with " + str(solver)

    solution = opt.minimize(energy,
                            position_matrix.ravel(),
                            method=solver,
                            jac=grad,
                            callback=solver_callback)

    print "Solved to optimiality"
    if verbose:
        print energy(position_matrix.ravel())
        print energy(solution.x)

    save_solution(graph, solution.x, "results")
Example #24
def main(start=None, end=None):
    # list of strings containing every street with an
    # intersection in the grid
    exp_node_num = 31
    exp_edge_num = 47
    streets_in_grid = ["SACRAMENTO ST", "BAKER ST", "SUTTER ST", "STEINER ST", "PERINE PL", "WILMOT ST", "BRODERICK ST", "DIVISADERO ST", "SCOTT ST", "PIERCE ST", "CALIFORNIA ST", "PINE ST", "BUSH ST"]
    int_dic = loadIntersections(toPrint=False)
    ints_in_grid = {}
    #if both streets for intersection are in grid, add the intersection to ints_in_grid
    for key in int_dic.keys():
        if int_dic[key].get_streets()[0] in streets_in_grid and int_dic[key].get_streets()[1] in streets_in_grid:
            ints_in_grid[key] = int_dic[key]
    
    # for key in ints_in_grid.keys():
    #     print(ints_in_grid[key].get_streets())
        
    # print(len(ints_in_grid))
    
    #get every street where start int and end int are in test Grid
    all_streets = load_streets(toPrint=False)
    streets_in_grid = {}
    for key in all_streets.keys():
        if all_streets[key].get_start() in ints_in_grid.keys() and all_streets[key].get_end() in ints_in_grid.keys():
            streets_in_grid[key] = all_streets[key]
    
#     for key in streets_in_grid.keys():
#         sne = streets_in_grid[key].get_start_name_end()
#         print("{}->{}->{}".format(ints_in_grid[sne[0]].get_streets(), sne[1], ints_in_grid[sne[2]].get_streets()))
#     print(len(streets_in_grid))
    digraph = build_graph(ints_in_grid, streets_in_grid)
#     print("Args: {}".format(sys.argv))
#     print("Arg length: {}".format(len(sys.argv)))
    if len(sys.argv) == 3:
        if sys.argv[1] in ints_in_grid.keys() and sys.argv[2] in ints_in_grid.keys():
            start = sys.argv[1]
            end = sys.argv[2]
        else:
            print("invalid start or end node")
            assert False
    else:
        start = random.choice(list(ints_in_grid.keys()))
        end = random.choice(list(ints_in_grid.keys()))
        while end == start:
            end = random.choice(list(ints_in_grid.keys()))
    start_int = ints_in_grid[start]
    end_int = ints_in_grid[end]
    start_node = SF_intersection(start_int.get_cnn(), start_int.get_streets(), start_int.get_loc(), start_int.get_elev())
    end_node = SF_intersection(end_int.get_cnn(), end_int.get_streets(), end_int.get_loc(), end_int.get_elev())
    bf_path = search(digraph, start_node, end_node)
    print("start:{} - {}".format(start_int.get_streets(), start_int.get_cnn()))
    print("end:{} - {}".format(end_int.get_streets(), end_int.get_cnn()))
    print("brute force: " + str(bf_path))
    print(bf_path.get_weight())
    print()
    dp_path = dsearch(digraph, start_node, end_node)
    print()
    print("dynamic p:   " + str(dp_path))
    print(dp_path.get_node_list())
    print(dp_path.get_weight())
    print("\nDystra:")
    dpath = dijkstra_search(digraph, start_node, end_node, toPrint=False)
    dyspath = makePathFromDijk(digraph, dpath)
    print(dyspath)
    print(dpath)
    print(dyspath.get_weight())
Example #25
	dist = bfs(graph, vertices, u)
	dist = [dist[v] for v in dist.keys() if v != u and dist[v] != INFINITY]
	if dist: return max(dist)
	else:    return None
	
def bfs(graph, vertices, s):
	dist = {}
	for u in vertices: 
		dist[u] = INFINITY
	dist[s] = 0
	
	q = [s]
	while q:
		u = q.pop(0)
	
		if u not in graph:
			continue
			
		for v in graph[u]:
			if dist[v] > dist[u] + 1:
				dist[v] = dist[u] + 1
				q.append(v)

	return dist
	
file_location = 'graph_size_input.txt'	
ecc = eccentricities(*bg.build_graph(file_location, True))
radius = min(ecc)
diameter = max(ecc)
print('radius: %i\ndiameter: %i' % (radius, diameter))
Example #26
    def __init__(self,
                 hparams,
                 mode,
                 iterator,
                 handle,
                 vocab_table,
                 reverse_vocab_table=None,
                 scope=None,
                 extra_args=None):
        assert isinstance(iterator, iterator_utils.BatchedInput)
        self.iterator = iterator
        self.handle = handle
        self.mode = mode
        self.vocab_table = vocab_table
        self.vocab_size = hparams.vocab_size
        self.num_layers = hparams.num_layers
        self.num_gpus = hparams.num_gpus
        self.hparams = hparams
        self.single_cell_fn = None
        self.global_gpu_num = 0
        if extra_args:
            self.single_cell_fn = extra_args.single_cell_fn

        # Initializer
        initializer = model_helper.get_initializer(hparams.init_op,
                                                   hparams.random_seed,
                                                   hparams.init_weight)
        tf.get_variable_scope().set_initializer(initializer)

        # Embeddings
        self.init_embeddings(hparams, scope)
        self.batch_size = tf.shape(self.iterator.source)[0]

        # Projection
        with tf.variable_scope(scope or "build_network"):
            with tf.variable_scope("decoder/output_projection"):
                self.output_layer1 = layers_core.Dense(
                    hparams.vocab_size,
                    use_bias=False,
                    name="output_projection_1")
                self.output_layer2 = layers_core.Dense(
                    hparams.vocab_size,
                    use_bias=False,
                    name="output_projection_2")
                self.output_layer_action = layers_core.Dense(
                    hparams.vocab_size,
                    use_bias=False,
                    name="output_projection_action")
                self.vn_project11 = layers_core.Dense(
                    hparams.unit_value_network,
                    use_bias=False,
                    name="vn_project_11")
                self.vn_project12 = layers_core.Dense(
                    hparams.unit_value_network,
                    use_bias=False,
                    name="vn_project_12")
                self.vn_project21 = layers_core.Dense(
                    hparams.unit_value_network,
                    use_bias=False,
                    name="vn_project_21")
                self.vn_project22 = layers_core.Dense(
                    hparams.unit_value_network,
                    use_bias=False,
                    name="vn_project_22")

        ## Train graph
        sl_loss, sl_loss_arr, rl_loss_arr, sample_id_arr_train, sample_id_arr_infer = build_graph(
            self, hparams, scope=scope)

        if self.mode == tf.contrib.learn.ModeKeys.TRAIN:
            self.train_loss = sl_loss
            self.all_train_loss = sl_loss_arr
            self.word_count = tf.reduce_sum(self.iterator.dialogue_len)
            self.sample_ids_arr = sample_id_arr_train
            self.sample_words_arr1 = []
            self.sample_words_arr2 = []
            source = self.iterator.source
            for i in range(len(self.sample_ids_arr)):
                element_infer = self.sample_ids_arr[i]
                element_src = source[0]
                # element_src=0
                src = reverse_vocab_table.lookup(tf.to_int64(element_src))
                infer = reverse_vocab_table.lookup(
                    tf.to_int64(element_infer)
                )[0]  # src covers only the first element, so take only the first inference
                if i == 0:
                    self.sample_words_arr1.append((tf.constant(i), src, infer))
                elif i == 1:
                    self.sample_words_arr2.append((tf.constant(i), src, infer))
            self.vl1, self.vl2, self.pl1, self.pl2, self.eq11, self.eq12, self.eq2 = rl_loss_arr  # reinforcement updates

        elif self.mode == tf.contrib.learn.ModeKeys.EVAL:
            self.eval_loss = sl_loss
            self.all_eval_loss = sl_loss_arr

        elif self.mode == tf.contrib.learn.ModeKeys.INFER:
            self.sample_ids_arr = sample_id_arr_infer
            self.sample_words_arr = []
            self.source = reverse_vocab_table.lookup(
                tf.to_int64(iterator.source))
            for element in self.sample_ids_arr:
                self.sample_words_arr.append(
                    reverse_vocab_table.lookup(tf.to_int64(element)))
        elif self.mode in dialogue_utils.self_play_modes:
            #### self play
            self.train_loss = sl_loss
            self.all_train_loss = sl_loss_arr
            self.selfplay_agent_1_utt = reverse_vocab_table.lookup(
                tf.to_int64(sample_id_arr_infer[0]))
            self.selfplay_agent_2_utt = reverse_vocab_table.lookup(
                tf.to_int64(sample_id_arr_infer[1]))
            self.selfplay_action = reverse_vocab_table.lookup(
                tf.to_int64(sample_id_arr_infer[2]))
            if self.mode == dialogue_utils.mode_self_play_mutable:
                self.vl1, self.vl2, self.pl1, self.pl2, self.eq11, self.eq12, self.eq2 = rl_loss_arr  # reinforcement updates

        if self.mode != tf.contrib.learn.ModeKeys.INFER:
            ## Count the number of predicted words to compute perplexity.
            self.predict_count = tf.reduce_sum(self.iterator.dialogue_len)

        ## Learning rate
        warmup_steps = hparams.learning_rate_warmup_steps
        warmup_factor = hparams.learning_rate_warmup_factor
        print("  start_decay_step=%d, learning_rate=%g, decay_steps %d, "
              "decay_factor %g, learning_rate_warmup_steps=%d, "
              "learning_rate_warmup_factor=%g, starting_learning_rate=%g" %
              (hparams.start_decay_step, hparams.learning_rate,
               hparams.decay_steps, hparams.decay_factor, warmup_steps,
               warmup_factor,
               (hparams.learning_rate * warmup_factor**warmup_steps)))
        self.global_step = tf.Variable(0, trainable=False)

        params = tf.trainable_variables()

        # Gradients and SGD update operation for training the model.
        # Arrange for the embedding vars to appear at the beginning.
        if self.mode == tf.contrib.learn.ModeKeys.TRAIN or self.mode == dialogue_utils.mode_self_play_mutable:
            self.learning_rate = tf.constant(hparams.learning_rate)

            inv_decay = warmup_factor**(tf.to_float(warmup_steps -
                                                    self.global_step))
            self.learning_rate = tf.cond(
                self.global_step < hparams.learning_rate_warmup_steps,
                lambda: inv_decay * self.learning_rate,
                lambda: self.learning_rate,
                name="learning_rate_decay_warump_cond")

            if hparams.optimizer == "sgd":
                self.learning_rate = tf.cond(
                    self.global_step < hparams.start_decay_step,
                    lambda: self.learning_rate,
                    lambda: tf.train.exponential_decay(self.learning_rate, (
                        self.global_step - hparams.start_decay_step),
                                                       hparams.decay_steps,
                                                       hparams.decay_factor,
                                                       staircase=True),
                    name="sgd_learning_rate_supervised")
                opt = tf.train.GradientDescentOptimizer(self.learning_rate,
                                                        name="SGD_supervised")
                tf.summary.scalar("lr", self.learning_rate)
            elif hparams.optimizer == "adam":
                assert float(
                    hparams.learning_rate
                ) <= 0.001, "! High Adam learning rate %g" % hparams.learning_rate
                opt = tf.train.AdamOptimizer(self.learning_rate,
                                             name="Adam_supervised")

            gradients = tf.gradients(self.train_loss,
                                     params,
                                     colocate_gradients_with_ops=hparams.
                                     colocate_gradients_with_ops,
                                     name="gradients_adam")

            clipped_gradients, gradient_norm_summary = model_helper.gradient_clip(
                gradients, max_gradient_norm=hparams.max_gradient_norm)

            self.update = opt.apply_gradients(zip(clipped_gradients, params),
                                              global_step=self.global_step,
                                              name="adam_apply_gradients")

            # Summary
            self.train_summary = tf.summary.merge([
                tf.summary.scalar("lr", self.learning_rate),
                tf.summary.scalar("train_loss", self.train_loss),
            ] + gradient_norm_summary)

        # second part of the learning rate
        if self.mode == tf.contrib.learn.ModeKeys.TRAIN or self.mode == dialogue_utils.mode_self_play_mutable:
            self.learning_rate2 = tf.constant(hparams.learning_rate2)
            self.learning_rate3 = tf.constant(hparams.learning_rate3)
            if hparams.optimizer == "sgd":
                self.learning_rate2 = tf.cond(
                    self.global_step < hparams.start_decay_step,
                    lambda: self.learning_rate2,
                    lambda: tf.train.exponential_decay(self.learning_rate2, (
                        self.global_step - hparams.start_decay_step),
                                                       hparams.decay_steps,
                                                       hparams.decay_factor,
                                                       staircase=True),
                    name="sgd_learning_rate_supervised2")
                self.learning_rate3 = tf.cond(
                    self.global_step < hparams.start_decay_step,
                    lambda: self.learning_rate3,
                    lambda: tf.train.exponential_decay(self.learning_rate3, (
                        self.global_step - hparams.start_decay_step),
                                                       hparams.decay_steps,
                                                       hparams.decay_factor,
                                                       staircase=True),
                    name="sgd_learning_rate_supervised3")
                tf.summary.scalar("self_play_lr", self.learning_rate)
            elif hparams.optimizer == "adam":
                assert float(
                    hparams.learning_rate2
                ) <= 0.001, "! High Adam learning rate2 %g" % hparams.learning_rate2
                assert float(
                    hparams.learning_rate3
                ) <= 0.001, "! High Adam learning rate3 %g" % hparams.learning_rate3

            # params=[]

            print("params=")
            for element in params:
                print(element.name)
            val1_params = self.patial_params(
                params, ["dynamic_seq2seq/value_network1"])
            val2_params = self.patial_params(
                params, ["dynamic_seq2seq/value_network2"])
            embedding_params = self.patial_params(params, ["embeddings"])
            main_dec_enc_params1 = self.patial_params(
                params,
                ["dynamic_seq2seq/encoder1/", "dynamic_seq2seq/decoder1/"])
            main_dec_enc_params2 = self.patial_params(
                params,
                ["dynamic_seq2seq/encoder2/", "dynamic_seq2seq/decoder2/"])
            action_params = self.patial_params(
                params, ["dynamic_seq2seq/decoder_action"])
            encoder_kb_params = self.patial_params(
                params, ["dynamic_seq2seq/encoder2_kb"])
            encoder_intent_params = self.patial_params(
                params, ["dynamic_seq2seq/encoder1_intent"])
            print("val1_params", "\n".join(map(lambda a: a.name, val1_params)))
            print("val2_params", "\n".join(map(lambda a: a.name, val2_params)))
            print("embedding_params",
                  "\n".join(map(lambda a: a.name, embedding_params)))
            print("main_dec_enc_params1",
                  "\n".join(map(lambda a: a.name, main_dec_enc_params1)))
            print("main_dec_enc_params2",
                  "\n".join(map(lambda a: a.name, main_dec_enc_params2)))
            print("action_params",
                  "\n".join(map(lambda a: a.name, action_params)))
            print("encoder_kb_params",
                  "\n".join(map(lambda a: a.name, encoder_kb_params)))
            print("encoder_intent_params",
                  "\n".join(map(lambda a: a.name, encoder_intent_params)))
            self.optimizer_vl1, self.v1_sum = self.generate_optimizer(
                self.vl1, params, "vl1", self.learning_rate2,
                self.hparams.max_gradient_norm2)
            self.optimizer_vl2, self.v2_sum = self.generate_optimizer(
                self.vl2, params, "vl2", self.learning_rate2,
                self.hparams.max_gradient_norm2)
            if hparams.self_play_variable_method == 0:
                rl_param1 = encoder_intent_params
                rl_param2 = encoder_kb_params + action_params
            elif hparams.self_play_variable_method == 1:
                rl_param1 = main_dec_enc_params1
                rl_param2 = main_dec_enc_params2
            elif hparams.self_play_variable_method == 2:
                rl_param1 = main_dec_enc_params1 + encoder_intent_params
                rl_param2 = main_dec_enc_params2 + encoder_kb_params + action_params
            elif hparams.self_play_variable_method == 3:
                rl_param1 = [main_dec_enc_params1[0]] + encoder_intent_params
                rl_param2 = [main_dec_enc_params2[0]] + encoder_kb_params
            elif hparams.self_play_variable_method == 4:
                rl_param1 = [main_dec_enc_params1[0]]
                rl_param2 = [main_dec_enc_params2[0]]
            elif hparams.self_play_variable_method == 5:
                rl_param1, rl_param2 = params, params
            self.optimizer_pl1, self.p1_sum = self.generate_optimizer(
                self.pl1, params, "pl1", self.learning_rate3,
                self.hparams.max_gradient_norm3)
            self.optimizer_pl2, self.p2_sum = self.generate_optimizer(
                self.pl2, params, "pl2", self.learning_rate3,
                self.hparams.max_gradient_norm3)
            print("self.learning", self.learning_rate, self.learning_rate2,
                  self.learning_rate3)
            ###############################
            ###   supervised learning   ###
            ###############################
        # Saver
        self.saver = tf.train.Saver(tf.global_variables())

        # Print trainable variables
        utils.print_out("# Trainable variables")
        for param in params:
            utils.print_out(
                "  %s, %s, %s" %
                (param.name, str(param.get_shape()), param.op.device))
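
Example #26 calls a generate_optimizer helper that is not shown. A minimal sketch, assuming only what the call sites imply (a loss, the parameter list, a scope name, a learning rate, and a clip norm go in; an update op and a merged summary come out) — not the original implementation:

# Hedged sketch of generate_optimizer, reconstructed from its call sites.
def generate_optimizer(self, loss, params, name, learning_rate,
                       max_gradient_norm):
    opt = tf.train.AdamOptimizer(learning_rate, name="Adam_" + name)
    gradients = tf.gradients(loss, params)
    clipped, grad_norm = tf.clip_by_global_norm(gradients, max_gradient_norm)
    # Skip variables the loss does not depend on.
    grads_and_vars = [(g, v) for g, v in zip(clipped, params) if g is not None]
    update = opt.apply_gradients(grads_and_vars,
                                 global_step=self.global_step,
                                 name=name + "_apply_gradients")
    summary = tf.summary.merge([
        tf.summary.scalar(name + "_grad_norm", grad_norm),
        tf.summary.scalar(name + "_loss", loss),
    ])
    return update, summary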
Example #27
0
"""
from bigquery import user_input_query_helper
from build_graph import build_graph
from cycles import transaction_cycle
from high_balance import find_avg_balance, high_balance_transactions
from subnetworks import biggest_subnetwork, future_partners
from regression import balance_correlation_and_plot
from visualize_graph import plot_graph

# Prompt user for input, run a query on BigQuery, and write the results to csv files.
# If the query fails due to exceeding the quota, replace
# 'credentials.json' with 'backup-credentials.json'
# user_input_query_helper('credentials.json')

# Create a graph of the ethereum network using the csv files available.
ethereum_graph = build_graph('balances.csv', 'transactions.csv')

# Visualize the graph
plot_graph(ethereum_graph)

# Run the linear regression and output the result.
r2, rmse = balance_correlation_and_plot(ethereum_graph)
print("Coefficient of determination (r^2): " + str(r2) + "\nRMSE: " +
      str(rmse) + "\n")

# Prompt the user if they are ready to run high_balance, run it if they are.
print("Enter 'y' when you wish to run the high_balance.py.")

user_input = input("Are you ready?: ")
if user_input.lower() == 'y':
    avg = find_avg_balance(ethereum_graph)
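
find_avg_balance is not shown; a plausible sketch, assuming build_graph returns a networkx graph that stores each account's balance as a node attribute (an assumption, not the project's actual code):

# Hedged sketch of find_avg_balance: average the 'balance' node attribute
# over all accounts that carry one.
def find_avg_balance(graph):
    balances = [data['balance']
                for _, data in graph.nodes(data=True) if 'balance' in data]
    return sum(balances) / len(balances) if balances else 0.0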
Example #28
0
if __name__ == "__main__":
    flags = tf.app.flags
    FLAGS = flags.FLAGS

    # args = getopt()
    k = 9

    train_data = 'train.data'
    train_label = 'train.label'
    test_data = 'test.data'
    test_label = 'test.label'

    data_name = train_data.split('.')[0]
    prepare_data_trian_test(train_data, train_label, test_data, test_label,
                            data_name, k)
    build_graph(data_name)
    train(data_name)
Example #29
0
def mention_before(x, y):
    # Head reconstructed (hypothetical name; the snippet was truncated above).
    # Orders mentions by sentence, then span, matching the sort key below.
    if x['sentNum'] != y['sentNum']:
        return x['sentNum'] < y['sentNum']
    if x['startIndex'] != y['startIndex']:
        return x['startIndex'] < y['startIndex']
    if x['endIndex'] != y['endIndex']:
        return x['endIndex'] < y['endIndex']
    return False


if not os.path.exists('results'):
    os.mkdir('results')

# filenames = ['Anatole France.json']
# filenames = ['Barack Obama.json']
vocab = ['his', 'her']
filenames = os.listdir('cluster')
filenames.remove('Canada.json')
for filename in filenames:
    print('File: {}'.format(filename))
    graph = build_graph(filename)

    with open(os.path.join('cluster', filename)) as f:
        clusters = json.load(f)

    clusters_not_assigned = []
    for cluster in clusters:
        cluster.sort(
            key=lambda x: (x['sentNum'], x['startIndex'], x['endIndex']))
        node = None
        for mention_idx, mention in enumerate(cluster):
            node_, max_len = search_node(graph, mention)
            if node_ is not None:
                if node is None:
                    node_.addMentions(cluster[:mention_idx])
                node = node_
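
search_node is not shown. Judging only from the call site — it takes the graph and a coreference mention and returns a matching node plus a match length, or None on a miss — a rough sketch might be (the node attributes and the matching rule are assumptions):

# Hedged sketch of search_node: pick the node with the longest recorded span
# covering the mention; return (None, 0) when nothing matches.
def search_node(graph, mention):
    best_node, best_len = None, 0
    for node in graph.nodes():
        for span in getattr(node, 'spans', []):  # assumed node attribute
            if (span['sentNum'] == mention['sentNum']
                    and span['startIndex'] <= mention['startIndex']
                    and mention['endIndex'] <= span['endIndex']):
                length = span['endIndex'] - span['startIndex']
                if length > best_len:
                    best_node, best_len = node, length
    return best_node, best_len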
Example #30
0
def train(num_epochs, beta, lr, evaluate_test_dset=False):
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.reset_default_graph()

  inputs, outputs, savers, tf_summaries = build_graph(beta=beta)
  learning_rate, rgb_input, flow_input, is_training, y = inputs
  scores, loss, loss_minimize = outputs
  rgb_saver, flow_saver, training_saver = savers

  # Load the training and test data
  X_train_initial, X_test, y_train_initial, y_test = load_exercise_dataset(_VIDEO_DIR, _LABEL_MAP_PATH)
  X_train_initial, y_train_initial = np.array(X_train_initial), np.array(y_train_initial)
  dset_size = X_train_initial.shape[0]
  validation_cutoff = int(dset_size * 0.2)
  test_dset = list(zip(X_test, y_test))

  print('\ntrain_dset len={}; val_dset len={}; test_dset len={}\n'.format(
    dset_size - validation_cutoff, validation_cutoff, len(test_dset)
  ))

  try:
    train_accuracies = np.load(_STATS['train_acc']).tolist()
    val_accuracies = np.load(_STATS['val_acc']).tolist()
    losses = np.load(_STATS['loss']).tolist()
    tf.logging.info('Statistics restored')
  except IOError:
    # No saved statistics yet; start with empty histories.
    train_accuracies, val_accuracies, losses = [], [], []


  def _check_acc(msg, dset, sess):
    num_correct, num_samples = 0, 0

    for x_video, y_class in dset:
      feed_dict = {
        rgb_input: rgb_data(x_video, IMAGE_SIZE, nframes=NUM_FRAMES),
        flow_input: flow_data(x_video, IMAGE_SIZE, nframes=NUM_FRAMES),
        is_training: 0
      }

      scores_np = sess.run(scores, feed_dict=feed_dict)
      y_pred = scores_np.argmax(axis=1)
      num_samples += 1
      num_correct += (y_pred == y_class).sum()

    acc = float(num_correct) / num_samples
    print('%s: %d / %d correct (%.2f%%)' % (msg, num_correct, num_samples, 100 * acc))
    return acc


  if not os.path.exists('summaries'):
    os.mkdir('summaries')
  path = os.path.join('summaries', 'first')
  if not os.path.exists(path):
    os.mkdir(path)

  # Now we can run the computational graph many times to train the model.
  # When we call sess.run we ask it to evaluate train_op, which causes the
  # model to update.
  with tf.Session() as sess:
    writer = tf.summary.FileWriter(path, sess.graph)
    sess.run(tf.global_variables_initializer())

    rgb_saver.restore(sess, _CHECKPOINT_PATHS['rgb_imagenet'])
    tf.logging.info('RGB checkpoint restored')
    flow_saver.restore(sess, _CHECKPOINT_PATHS['flow_imagenet'])
    tf.logging.info('Flow checkpoint restored')
    try:
      training_saver.restore(sess, _CHECKPOINT_PATHS['training'])
      tf.logging.info('Training checkpoint restored')
    except Exception:
      # No training checkpoint yet; start from the pretrained backbones.
      pass

    if evaluate_test_dset:
      _ = _check_acc('Test', test_dset, sess)
      exit()

    t = 0

    for epoch in range(num_epochs):
      print('Starting epoch %d' % epoch)

      # Re-sample train and validation datasets for each epoch
      nums = list(range(dset_size))
      indices = random.sample(nums, validation_cutoff)
      mask = np.ones(dset_size, bool)
      mask[indices] = 0
      X_train, y_train = X_train_initial[mask], y_train_initial[mask]
      train_dset = list(zip(X_train, y_train))
      X_val, y_val = X_train_initial[indices], y_train_initial[indices]
      val_dset = list(zip(X_val, y_val))

      if epoch != 0:
        # Check training and validation accuracies, and save the model
        save_path = training_saver.save(sess, _CHECKPOINT_PATHS['training'])
        print('\nTraining model saved in path: %s' % save_path)

        train_acc = _check_acc('Train', train_dset, sess)
        val_acc = _check_acc('Val', val_dset, sess)

        train_accuracies.append(train_acc)
        val_accuracies.append(val_acc)

        np.save(_STATS['train_acc'], np.array(train_accuracies))
        np.save(_STATS['val_acc'], np.array(val_accuracies))
        np.save(_STATS['loss'], np.array(losses))

      for x_video, y_class in train_dset:
        feed_dict = {
          learning_rate: lr,
          rgb_input: rgb_data(x_video, IMAGE_SIZE, nframes=NUM_FRAMES),
          flow_input: flow_data(x_video, IMAGE_SIZE, nframes=NUM_FRAMES),
          y: np.array([y_class]),
          is_training: 1
        }

        if t % _CHECK_EVERY == 0:
          ops = [loss, loss_minimize, tf_summaries]
          loss_np, _, summary = sess.run(ops, feed_dict=feed_dict)
          writer.add_summary(summary, epoch)
        else:
          ops = [loss, loss_minimize]
          loss_np, _ = sess.run(ops, feed_dict=feed_dict)

        losses.append(loss_np)

        print('Iteration %d, loss = %.4f' % (t, loss_np))
        t += 1
Example #31
0
            border_cells = {
                cell
                for cell in adj_cells if cell not in live_cells
            }
            new_border_cells = new_border_cells.union(border_cells)

    # set flag
    for cell in new_border_cells:
        cells[str(cell)]['border'] = True

    # write annual data
    with open('../resources/' + year + '_cell_data.json', 'w') as f:
        f.write(
            json.dumps(cells, indent=4, sort_keys=True, separators=(',', ':')))


# for all years add layers until the graph is connected

for y in range(1985, 2017):
    year = str(y)
    print('Connecting graph for %s...' % year)
    G = build_graph(year)
    layer = 1
    while not networkx.is_connected(G):
        print('\tAdding border layer #%d...' % layer)
        add_layer(year)
        G = build_graph(year)
        layer += 1

# EOF
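
build_graph(year), used throughout these cell snippets, is not shown. A minimal sketch consistent with this loop — nodes are the live and border cells from the annual JSON, edges join grid-adjacent cells; the "(x, y)" key format and the adjacency rule are assumptions:

# Hedged sketch of build_graph(year), not the original implementation.
import ast
import json
import networkx

def build_graph(year):
    with open('../resources/' + year + '_cell_data.json', 'r') as f:
        cells = json.load(f)
    active = {k for k in cells if cells[k]['live'] or cells[k].get('border')}
    G = networkx.Graph()
    G.add_nodes_from(active)
    for key in active:
        x, y = ast.literal_eval(key)  # assumes keys look like "(x, y)"
        for nbr in (str((x + 1, y)), str((x, y + 1))):
            if nbr in active:
                G.add_edge(key, nbr)
    return G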
Example #32
0
    # Head reconstructed; variable names are hypothetical, inferred from
    # the feature list below (the snippet was truncated above).
    y = [[
        analyst_value,
        partner_value,
        persona_value,
        growing_market,
        organic_search_volume,
        seo_value,
    ]]

    df = pd.read_csv('data.csv')
    scaler = StandardScaler()
    features = [
        'Analyst value (0 - 5)', 'Partner value (0 - 5)',
        'Persona value (0 - 5)', 'Growing market', 'Organic Search Volume',
        'SEO Value (0 - 3)'
    ]
    scaler.fit(df[features])
    y_scaled = scaler.transform(y)
    loaded_model = pickle.load(open('model/gm_for_predicton_6dims.sav', 'rb'))
    result = loaded_model.predict(y)[0]
    build_graph(y_scaled)
    new_graph = True

responses = [
    "0>>> Not so sure about this one 😐️",
    "1>>> It’s a 🦄! This api has a good chance of increasing traffic.",
    "2>>> This one is probably not going to do so well🥶",
]

if result in [0, 1, 2]:
    st.markdown(f"<p style='text-align: center'>Result:<br>\
        {responses[result]}</p>",
                unsafe_allow_html=True)
    if new_graph:
        st.image('pic.png', use_column_width=True, output_format='PNG')
else:
Example #33
0
    #args=getopt()
    cv = 5
    k = 3
    fasta_name = 'PDB14120.txt'
    #fasta_name='train.txt'

    data_name = fasta_name.split('.')[0]
    split2cv(cv, fasta_name, data_name)
    test_acc = []
    test_pred = []
    test_labels = []
    for i in range(cv):
        temp_data_name = data_name + '_cv' + str(i + 1)
        print(temp_data_name)
        prepare_data(temp_data_name, k)
        build_graph(temp_data_name, 20, 20)
        acc, pred, labels = train(temp_data_name)
        test_acc.append(acc)
        test_labels.extend(labels)
        test_pred.extend(pred)
    print('cv_acc:', np.mean(np.array(test_acc)))
    np.savetxt(data_name + '_cv_acc_result.csv',
               np.array(test_acc),
               delimiter=',',
               fmt='%5f')
    np.savetxt(data_name + '_cv_pred.csv',
               np.array([test_labels, test_pred]).T,
               delimiter=',',
               fmt='%d')
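
split2cv is not shown. A rough sketch of the cross-validation split this loop expects — fold naming follows the data_name + '_cv' + str(i + 1) pattern used above; the FASTA layout (a header line followed by one sequence line) and the file suffixes are assumptions:

# Hedged sketch of split2cv: deal FASTA records round-robin into `cv` folds
# and write '<data_name>_cv<i>.train' / '.test' files per fold.
def split2cv(cv, fasta_name, data_name):
    with open(fasta_name) as f:
        lines = f.read().splitlines()
    # Pair each '>header' line with its sequence line.
    records = [(lines[i], lines[i + 1]) for i in range(0, len(lines) - 1, 2)]
    for i in range(cv):
        test = records[i::cv]
        train = [r for j, r in enumerate(records) if j % cv != i]
        prefix = '%s_cv%d' % (data_name, i + 1)
        for suffix, recs in (('train', train), ('test', test)):
            with open(prefix + '.' + suffix, 'w') as out:
                for header, seq in recs:
                    out.write(header + '\n' + seq + '\n')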