Example #1
def test_nn(test_set, max_len, U):
    lstm = SiameseLSTM(max_length=max_len,
                       init_embeddings=U,
                       num_classes=config.neg_sample,
                       hidden_units=config.hidden_units,
                       embeddings_trainable=config.embeddings_trainable,
                       l2_reg_lambda=config.l2_reg_lambda)
    # saver = tf.train.import_meta_graph("save/model.ckpt.meta")
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, tf.train.latest_checkpoint("checkpoints/"))
        test_loss, test_correct_num = 0, 0
        start = time.time()
        for test_step in tqdm(range(
                int(math.ceil(test_set.size * 1.0 / config.batch_size))),
                              desc='Testing'):
            ques_batch, rela_batch, label_batch, ques_lens_batch, rela_lens_batch = pull_batch(
                test_set.ques_idx, test_set.rela_idx, test_set.label,
                test_set.ques_lens, test_set.rela_lens, test_step)
            feed_dict = {
                lstm.input_x_u: ques_batch,
                lstm.input_x_r: rela_batch,
                lstm.input_y: label_batch,
                lstm.u_lens: ques_lens_batch,
                lstm.v_lens: rela_lens_batch,
                lstm.dropout_keep_prob: 1.0  # disable dropout at test time; config value is for training
            }
            loss, accuracy, correct_num, score = sess.run(
                [lstm.loss, lstm.accuracy, lstm.correct_num, lstm.score],
                feed_dict)
            test_loss += loss
            test_correct_num += correct_num
            if test_step == 0:
                test_score = score
            else:
                test_score = np.concatenate((test_score, score), axis=0)
        end = time.time()
        util.save_data(config.test_file, config.test_output_file,
                       test_score.tolist(), test_set.rela)
        print("time {}, test loss {:g}, train acc {:g}".format(
            end - start, test_loss / test_set.size,
            test_correct_num / test_set.size))
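The pull_batch helper used above is defined elsewhere; a minimal sketch of the five-array variant this example assumes, slicing array-like inputs by config.batch_size (slicing past the end is safe in Python):

# Hypothetical reconstruction; the real helper is not shown.
def pull_batch(ques_idx, rela_idx, label, ques_lens, rela_lens, step):
    lo = step * config.batch_size
    hi = lo + config.batch_size
    return (ques_idx[lo:hi], rela_idx[lo:hi], label[lo:hi],
            ques_lens[lo:hi], rela_lens[lo:hi])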
Example #2
def test(test_set, max_len, U):
    net = QaCNN(sequence_length=max_len,
                num_classes=config.num_classes,
                filter_sizes=config.filter_sizes,
                num_filters=config.num_filters,
                init_embeddings=U,
                embeddings_trainable=config.embeddings_trainable,
                l2_reg_lambda=config.l2_reg_lambda)
    # saver = tf.train.import_meta_graph("save/model.ckpt.meta")
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        saver.restore(sess, tf.train.latest_checkpoint("checkpoints/"))
        test_loss, test_correct_num = 0, 0
        start = time.time()
        for test_step in tqdm(range(
                int(math.ceil(test_set.size * 1.0 / config.batch_size))),
                              desc='Testing'):
            ques_batch, rela_batch, label_batch = pull_batch(
                test_set.ques_idx, test_set.rela_idx, test_set.label,
                test_step)
            feed_dict = {
                net.input_x_u: ques_batch,
                net.input_x_r: rela_batch,
                net.input_y: label_batch,
            }
            loss, accuracy, correct_num, score = sess.run(
                [net.loss, net.accuracy, net.correct_num, net.score],
                feed_dict)
            test_loss += loss
            test_correct_num += correct_num
            if test_step == 0:
                test_score = score
            else:
                test_score = np.concatenate((test_score, score), axis=0)
        util.save_data(config.test_file, config.test_output_file,
                       test_score.tolist(), test_set.rela)
        end = time.time()
        print("time {}, test loss {:g}, train acc {:g}".format(
            end - start, test_loss / test_set.size,
            test_correct_num / test_set.size))
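The commented-out import_meta_graph line in both test functions hints at an alternative restore path that does not rebuild the model in Python. A sketch, assuming the checkpoint was written as checkpoints/model.ckpt (as in the training example below):

# Load the graph structure from the .meta file, then the weights from the checkpoint.
saver = tf.train.import_meta_graph("checkpoints/model.ckpt.meta")
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint("checkpoints/"))
    # Tensors are then fetched by name rather than via the model object, e.g.:
    # loss = tf.get_default_graph().get_tensor_by_name("loss:0")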
Example #3
    # parser.add_argument("--max_gen", type=int)
    parser.add_argument("--tolerance", type=float, default=0.2)
    parser.add_argument("--pop", type=int, default=200)
    parser.add_argument("--gen", type=int, default=200)

    args, leftovers = parser.parse_known_args()
    # Using full map
    refmap = RefMap("../data/combined.csv", tolerance=args.tolerance).points

    # Using error
    # refmap = Scan(scanName)
    # refmap = applytuple(refmap.scan_points, refmap.posx, refmap.posy, refmap.rot)
    errorscan = Scan("../" + scanName, tolerance=args.tolerance)

    print "Aiming for"
    print errorscan.posx, errorscan.posy, errorscan.rot
    for NGEN in tqdm(np.arange(450, 500, 50)):
        for x in trange(args.iterations):
            best_fitness, record, log, expr = main(
                multicore=args.multicore,
                verb=args.v,
                POP=args.pop,
                NGEN=NGEN,  # use the swept value; args.gen would ignore the NGEN loop
                scan=copy.deepcopy(errorscan),
                map=refmap,
                CXPB=CXPB,
                MUTPB=MUTPB)
            if args.savefile is not None:
                row = [NGEN, best_fitness, expr[0], expr[1], expr[2], "\r"]
                save_data(row, "../results/" + args.savefile)
Example #4
def evaluate_pose(ind):
    fitness = evaluate(ind, errorscan, refmap)[0]
    row = [ind[0], ind[1], ind[2], str(fitness), "\r"]
    print(row)
    input()  # pause so the row can be inspected before it is saved
    save_data(row, "../results/" + filename)
Example #5
def train_nn(train_set, dev_set, max_len, U):
    nn = QaNN(sequence_length=max_len,
              init_embeddings=U,
              num_classes=config.neg_sample,
              embeddings_trainable=config.embeddings_trainable,
              l2_reg_lambda=config.l2_reg_lambda)
    global_step = tf.Variable(0, name="global_step", trainable=False)  # step counter must not be trained
    optimizer = tf.train.AdamOptimizer()
    grads_and_vars = optimizer.compute_gradients(nn.loss)
    train_op = optimizer.apply_gradients(grads_and_vars,
                                         global_step=global_step)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Output directory for models and summaries
        # timestamp = str(int(time.time()))
        # out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
        # print("Writing to {}\n".format(out_dir))

        # Summaries for loss and accuracy
        loss_summary = tf.summary.scalar("loss", nn.loss)
        acc_summary = tf.summary.scalar("accuracy", nn.accuracy)

        # Train Summaries
        train_summary_op = tf.summary.merge([loss_summary, acc_summary])
        # train_summary_dir = os.path.join(out_dir, "summaries", "train")
        # train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

        # Dev summaries
        dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
        # dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
        # dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)

        # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
        checkpoint_dir = os.path.abspath(
            os.path.join(os.path.curdir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        saver = tf.train.Saver(tf.global_variables())

        best_dev_loss, stop_step = sys.maxsize, 0
        for epoch_idx in range(config.epoch_num):
            start = time.time()
            train_loss, dev_loss, train_accuracy, dev_accuracy, train_correct_num, dev_correct_num = 0, 0, 0, 0, 0, 0

            for train_step in tqdm(range(
                    int(math.ceil(train_set.size * 1.0 / config.batch_size))),
                                   desc='Training epoch ' + str(epoch_idx + 1)):
                ques_batch, rela_batch, label_batch = pull_batch(
                    train_set.ques_idx, train_set.rela_idx, train_set.label,
                    train_step)
                feed_dict = {
                    nn.input_x_u: ques_batch,
                    nn.input_x_r: rela_batch,
                    nn.input_y: label_batch,
                }
                _, summaries, score, loss, accuracy, correct_num = sess.run([
                    train_op, train_summary_op, nn.score, nn.loss, nn.accuracy,
                    nn.correct_num
                ], feed_dict)
                train_loss += loss
                train_accuracy += accuracy
                train_correct_num += correct_num
                if train_step == 0:
                    train_score = score  # score:[0, 2]
                else:
                    train_score = np.concatenate((train_score, score), axis=0)

            for dev_step in tqdm(range(
                    int(math.ceil(dev_set.size * 1.0 / config.batch_size))),
                                 desc='Dev epoch ' + str(epoch_idx + 1)):
                ques_batch, rela_batch, label_batch = pull_batch(
                    dev_set.ques_idx, dev_set.rela_idx, dev_set.label,
                    dev_step)
                feed_dict = {
                    nn.input_x_u: ques_batch,
                    nn.input_x_r: rela_batch,
                    nn.input_y: label_batch,
                }
                loss, accuracy, correct_num, score, summaries = sess.run([
                    nn.loss, nn.accuracy, nn.correct_num, nn.score,
                    dev_summary_op
                ], feed_dict)
                dev_loss += loss
                dev_accuracy += accuracy
                dev_correct_num += correct_num
                if dev_step == 0:
                    dev_score = score
                else:
                    dev_score = np.concatenate((dev_score, score), axis=0)
            end = time.time()
            print(
                "epoch {}, time {}, train loss {:g}, train acc {:g},  dev loss {:g}, dev acc {:g}"
                .format(epoch_idx, end - start, train_loss / train_set.size,
                        train_correct_num / train_set.size,
                        dev_loss / dev_set.size,
                        dev_correct_num / dev_set.size))

            if dev_loss < best_dev_loss:
                stop_step = 0
                best_dev_loss = dev_loss
                print('saving new best result...')
                # print np.array(dev_set.rela).shape, dev_score.shape

                saver_path = saver.save(sess, "%s.ckpt" % checkpoint_prefix)
                print(saver_path)

                util.save_data(config.train_file, config.train_output_file,
                               train_score.tolist(), train_set.rela)
                util.save_data(config.dev_file, config.dev_output_file,
                               dev_score.tolist(), dev_set.rela)
            else:
                stop_step += 1
            if stop_step >= config.early_step:
                print('early stopping')
                break
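The summary ops above are created but never written anywhere, because the FileWriter lines are commented out. A minimal sketch of wiring them back in, with assumed log directories:

# Assumed paths; create the writers once, right after the session starts.
train_summary_writer = tf.summary.FileWriter("runs/summaries/train", sess.graph)
dev_summary_writer = tf.summary.FileWriter("runs/summaries/dev")
# ...then inside each loop, after sess.run(...) returns `summaries`:
train_summary_writer.add_summary(summaries, tf.train.global_step(sess, global_step))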
Example #6
    parser.add_argument("--pop", type=int, default=200)
    parser.add_argument("--gen", type=int, default=50)
    parser.add_argument("--grid", action='store_true', default=False)
    parser.add_argument("--graph", action='store_true', default=False)

    args, leftovers = parser.parse_known_args()
    # Using full map
    refmap = RefMap("../data/combined.csv", tolerance=args.tolerance).points

    # Using error
    # refmap = Scan(scanName)
    # refmap = applytuple(refmap.scan_points, refmap.posx, refmap.posy, refmap.rot)
    errorscan = Scan("../" + scanName, tolerance=args.tolerance)
    target = (errorscan.posx, errorscan.posy, errorscan.rot)
    if args.savefile is not None:
        save_data([
            __file__, "elite:" + str(elite), "pop:" + str(args.pop),
            "gen:" + str(args.gen), "grid:" + str(args.grid), "\r"
        ], "../results/" + args.savefile)

    for x in trange(args.iterations):
        best_fitness, record, log, expr = main(multicore=args.multicore,
                                               verb=args.v,
                                               POP=args.pop,
                                               NGEN=args.gen,
                                               refmap=refmap,
                                               CXPB=CXPB,
                                               MUTPB=MUTPB,
                                               grid=args.grid,
                                               graph=args.graph)
        if args.savefile is not None:
            row = [best_fitness, expr[0], expr[1], expr[2], "\r"]
            save_data(row, "../results/" + args.savefile)
Example #7
    pickle_file_name = pickleFolder + "tol=" + str(
        args.tolerance) + "cells=" + str(args.numcells)

    if pickledMapExists(pickle_file_name):
        print "Found and loading pickled map"
        refmap = pickle.load(open(pickle_file_name, "rb"))
    else:
        print "Map not found, pickling map"
        refmap = LookupRefMap("../data/combined.csv",
                              args.numcells,
                              tolerance=args.tolerance)
        pickle.dump(refmap, open(pickle_file_name, "wb"))
        print "Loaded and pickled map for further use"

    if args.savefile is not None:
        save_data([
            __file__, "pop:" + str(args.pop), "gen:" + str(args.gen),
            "grid:" + str(args.grid), "numcells:" + str(args.numcells), "\r"
        ], "../results/" + args.savefile)

    errorscan = Scan(scanName, tolerance=args.tolerance)
    target = (errorscan.posx, errorscan.posy, errorscan.rot)
    for x in trange(args.iterations):
        best_fitness, record, log, expr = main(multicore=args.multicore,
                                               verb=args.v,
                                               POP=args.pop,
                                               NGEN=args.gen,
                                               scan=copy.deepcopy(errorscan),
                                               map=refmap,
                                               CXPB=CXPB,
                                           MUTPB=MUTPB)
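pickledMapExists is not shown; under the obvious assumption that it just checks for the pickle file on disk:

import os

# Hypothetical reconstruction of pickledMapExists.
def pickledMapExists(path):
    return os.path.isfile(path)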