Code example #1
File: main.py Project: zhilangtaosha/Mobike-1
def test(**kwargs):
    # Assumed context: gc, datetime and pandas (pd) are imported at module
    # level, and DefaultConfig, get_test_data, get_feat, load_model and
    # predict come from this project's own modules.

    # ---------------------- Update parameters ----------------------
    opt = DefaultConfig()
    opt.update(**kwargs)
    opt.printf()

    # ---------------------- Data processing ------------------------

    # Load the data
    train, test = get_test_data(opt)
    gc.collect()
    # # Build the candidate samples
    # test_sample = get_sample(train, test, load=True)
    # gc.collect()
    # # Build the features
    # test_feat = get_feat(train, test_sample)
    # gc.collect()

    # Save the features to file
    # test_feat.to_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_{}.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)
    test_feat = pd.read_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_{}.hdf'.format(test.shape[0]))
    test_feat = get_feat(train, test_feat)
    gc.collect()
    test_feat.to_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_{}_filter.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)

    # ---------------------- Load the model -------------------------

    # opt['model_name'] = 'lgb_1_90_all.pkl'
    # gbm0, use_feat0 = load_model(opt)
    opt['model_name'] = 'lgb_2017-09-23#20:14:52_0.58893.pkl'
    gbm1, use_feat1 = load_model(opt)
    # opt['model_name'] = 'lgb_2_300_top15.pkl'
    # gbm2, use_feat2 = load_model(opt)
    # opt['model_name'] = 'lgb_3_300_top10.pkl'
    # gbm3, use_feat3 = load_model(opt)
    # opt['model_name'] = 'lgb_4_300_top5.pkl'
    # gbm4, use_feat4 = load_model(opt)

    # ---------------------- Save the predictions -------------------

    # test_feat.loc[:, 'pred'] = gbm0.predict(test_feat[use_feat0])
    # gc.collect()
    # res = test_feat[['orderid', 'geohashed_end_loc', 'pred']].sort_values(by=['orderid', 'pred'], ascending=False).groupby('orderid').head(25)
    # res[['orderid', 'geohashed_end_loc']].to_hdf('/home/xuwenchao/dyj-storage/sample_25_{}_filter_leak_sample.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)
    # gc.collect()

    # test_feat.loc[:, 'pred'] = gbm1.predict(test_feat[use_feat1])
    # test_feat[['orderid', 'geohashed_end_loc', 'pred']].to_hdf('/home/xuwenchao/dyj-storage/pred/pred_{}_0.58820.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)

    res = predict(test_feat, use_feat1, gbm1)
    test_feat[['orderid', 'geohashed_end_loc', 'pred']].to_hdf('/home/xuwenchao/dyj-storage/pred/pred_{}_0.58893.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)
    gc.collect()
    cur_time = datetime.datetime.now().strftime('%Y-%m-%d#%H:%M:%S')
    res_path = '{}/day{}_{}_wc_sample_0.58893.csv'.format(opt['result_dir'], opt['test_startday'], cur_time)
    res.to_csv(res_path, index=False)
    print('Saved test results to:', res_path)
Code example #2
File: classify.py Project: wonderwrj/MIREX-2019
def classify(out_dir, inp_txt, out_file, num_threads, task, batch_size):
    melspec_dir = os.path.normpath(out_dir) + '/melspec'
    model_dir = os.path.normpath(out_dir) + '/' + f'{task}_model'
    best_model_path = model_dir + '/best_model.pth'
    mean_std_path = model_dir + '/mean_std.pkl'
    labels_path = model_dir + '/label_ids.pkl'

    with open(mean_std_path, 'rb') as f:
        mean, std = pickle.load(f)

    test_fnames = get_test_data(inp_txt)
    test_dataset = TestDataset(test_fnames, melspec_dir, mean, std)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)

    cuda = False
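    # Hard-coded to CPU in this example; torch.cuda.is_available() is the usual runtime check.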
    device = torch.device('cuda:0' if cuda else 'cpu')
    print('Device: ', device)

    num_classes = CONFIG[task]['num_classes']
    model = MirexModel(num_classes)
    model = model.to(device)
    model.load_state_dict(
        torch.load(best_model_path, map_location=device))  # Loading the best model

    model.eval()  # inference mode; set once, outside the batch loop
    test_preds = []

    for inputs in tqdm(test_loader):
        inputs = inputs.to(device)
        with torch.no_grad():
            outputs = model(inputs)
            _, predicted = torch.max(outputs, 1)
        test_preds.extend(list(predicted.cpu().numpy()))  # .cpu() keeps this safe if a GPU is used

    with open(labels_path, 'rb') as f:
        ref_labels_dict = pickle.load(f)
    ids_to_labels = {i: x for x, i in ref_labels_dict.items()}

    with open(out_file, 'w') as f:
        for i in range(len(test_fnames)):
            this_file = test_fnames[i]
            this_pred = ids_to_labels[test_preds[i]]
            f.write(f'{this_file}\t{this_pred}\n')
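A minimal sketch of how the function above might be invoked; every argument value here is an assumption, not taken from the project:

# Hypothetical call; 'genre' must be a key in the project's CONFIG dict.
classify(out_dir='outputs',
         inp_txt='test_files.txt',    # one test filename per line
         out_file='predictions.tsv',  # rows written as "<file>\t<label>"
         num_threads=4,
         task='genre',
         batch_size=16)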
Code example #3
def test(weights_path, batch_size):
    """Tests a model."""

    try:
        # Loads or creates test data.
        input_shape, test, test_targets, \
            test_coords, orig_test_shape = get_test_data()
    except FileNotFoundError as e:
        print(e)
        print("Could not find test files in data_dir. "
              "Did you specify the correct orig_test_data_dir?")
        return

    # Loads or creates model.
    model, checkpoint_path, _ = get_model(input_shape,
                                          scale_factor=len(test)/batch_size,
                                          weights_path=weights_path)

    # Predicts on test data and saves results.
    predict(model, test, test_targets, test_coords,
            orig_test_shape, input_shape)
    plots()
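A minimal sketch of how the function above might be invoked; the weights path is a placeholder assumption:

# Hypothetical call; get_test_data() locates the test files on its own.
test(weights_path='checkpoints/best_weights.h5', batch_size=32)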
Code example #4
import os
import numpy as np
import torch
import dataset
import model
from torch import nn, optim

if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model_test = model.Model()
    model_test.double()
    state_dict = torch.load('checkpoints/checkpoint7.0.pth', map_location=device)
    print(state_dict.keys())
    model_test.load_state_dict(state_dict)
    print(model_test)

    test_iter = dataset.get_test_data()
    len_test = len(test_iter)

    Error = 0
    model_test.eval()
    with torch.no_grad():
        for batch_num, batch in enumerate(test_iter):
            data = batch[0]
            target = batch[1]
            target = target.long()

            ps = model_test(data)

            top_p, top_class = ps.topk(1, dim=1)
            print("predict:", top_class.view(-1))
            print("target:", target)
Code example #5
File: ranknet.py Project: akovski/d
def feed_dict(train):
    if train:
        x1s, x2s, ys, _ = dataset.get_train_data()
    else:
        x1s, x2s, ys, _ = dataset.get_test_data()
    return {x1: x1s, x2: x2s, y_: ys}
Code example #6
File: ranknet.py Project: akovski/d
    tf.global_variables_initializer().run()

    # fout = open('out.txt', 'w')
    for i in range(20):
        if i % 10 == 0 or i < 50:
            train_acc, res = sess.run([accuracy, y], feed_dict=feed_dict(True))
            test_acc, res = sess.run([accuracy, y1],
                                     feed_dict=feed_dict(False))
            ys = [sum(x) / len(x) for x in res]
            print('Accuracy at step {}: {}, {}'.format(
                i, train_acc, test_acc))
            # print('{}, {}'.format(train_acc, test_acc), file=fout)
        sess.run([train_step], feed_dict=feed_dict(True))
    print('Linear Accuracy: ', dataset.linear_acc(
        *(dataset.get_test_data()[:3])))

    model_name = dataset.token
    if model_name == '':
        return
    tops, ys = sess.run([top, yy], feed_dict=model_data(model_name))
    names = dataset.load_model_data(model_name)[1]
    print(tops)
    # n = len(names)
    # print('\n'.join([names[x] for x in tops]))
    # print(names[tops[0]], ys[tops[0]])
    # print(dataset.linear_best(*dataset.load_model_data(model_name)))
    # print(names[tops[n // 4]], ys[tops[n // 4]])
    # print(names[tops[n // 2]], ys[tops[n // 2]])
    # print(names[tops[-n // 4]], ys[tops[-n // 4]])
    # print(names[tops[-1]], ys[tops[-1]])
Code example #7
sess = tf.Session()
sess.run(tf.global_variables_initializer())

#model.pretrain_load(sess)
saver = tf.train.Saver()
saver.restore(sess, save_dir + 'a.ckpt')

avg = Avg(['loss'])
for i in range(1, 1+max_iter):
#    x, y = get_train_pair(8)
#    rnd_bern = np.random.randint(100, size=[8,512,512])
#    rnd_bern = rnd_bern < 2
#    fd = {img_x: x, img_y: y, b_ph: rnd_bern}
#    _, _, l = sess.run([train_op, update_op, loss], fd)
#    p = sess.run(recon, fd)
#    print (p.shape)
#    avg.add(l, 0)
#    if i % 30 == 0:
#        avg.show(i)
    if i % 10 == 0:
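        # Every 10 iterations, fetch a test batch of 8 and dump the reconstructions.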
        x, y = get_test_data(8)
        fd = {img_x: x, img_y: y}
        rc, rx, ry = sess.run([recon, img_x, img_y], fd)
        for k in range(rc.shape[0]):
            np.save('sample_imgs/a_'+str(k)+'.npy', rc[k])
            np.save('sample_imgs/x_'+str(k)+'.npy', rx[k])
            np.save('sample_imgs/y_'+str(k)+'.npy', ry[k])
        avg.description()
        print(np.mean(rc), np.mean(ry), np.mean(rx))
Code example #8
File: main.py Project: nguyenlethanhkhoa/inria
import numpy as np
from sklearn import svm

import dataset
import util

win_stride = (8, 8)
padding = (8, 8)
locations = ((10, 20), )
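# These stride/padding/locations tuples are presumably forwarded to OpenCV's
# HOGDescriptor.compute() by the project's util helpers.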

hog = util.histogram_of_gradient()
hog_params = (win_stride, padding, locations)

train_data = dataset.get_train_data()
features, labels = dataset.get_imgs_feature(train_data, hog, hog_params)

linear_svm_model = svm.LinearSVC()
linear_svm_model.fit(features, labels)

test_data = dataset.get_test_data()
features, labels = dataset.get_imgs_feature(test_data, hog, hog_params)
results = linear_svm_model.predict(features)

util.precision_and_recall_evaluate(results, labels)

pos, neg = dataset.get_predict_data()
for i in range(5):
    rects = util.detect_person(pos[i], hog, hog_params, linear_svm_model)
    rects = util.non_max_suppression_fast(np.array(rects), 0.5)
    print(pos[i])
    print(rects)
    dataset.visualize(pos[i], rects)

Code example #9
def run_training(batch_size, learning_rate, epochs, run_number):
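    # Assumed context: sys, tensorflow (tf) and the project's dataset module
    # are imported, along with placeholder_inputs, define_model, define_loss,
    # training, evaluation and do_evaluation from this project.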
    with tf.Graph().as_default():
        images_placeholder, labels_placeholder = placeholder_inputs(batch_size)

        logits = define_model(images_placeholder)
        lossFunction = define_loss(logits, labels_placeholder)
        train_op = training(lossFunction, learning_rate)

        eval_correct = evaluation(logits, labels_placeholder)

        # summary = tf.summary.merge_all()
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        with tf.Session() as session:
            #session = tf_debug.LocalCLIDebugWrapperSession(session)

            logdir = "log/" + str(run_number)
            # summary_writer = tf.summary.FileWriter(logdir, session.graph)
            session.run(init)

            for step in range(epochs):
                training_data, training_labels = dataset.get_training_data()
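                # Slice this epoch's data into consecutive mini-batches.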
                batches = [(training_data[i:i + batch_size],
                            training_labels[i:i + batch_size])
                           for i in range(0, len(training_data), batch_size)]
                epochLoss = 0
                for batch in batches:
                    image_data = batch[0]
                    label_data = batch[1]

                    feed_dict = {
                        images_placeholder: image_data,
                        labels_placeholder: label_data
                    }
                    _, loss_value = session.run(
                        [train_op, lossFunction], feed_dict=feed_dict)
                    epochLoss += loss_value
                if step % 2 == 0:
                    #print('Step %d: loss = %.2f' % (step, epochLoss))
                    print('Step %d: loss = %.2f' % (step, loss_value))
                    sys.stdout.flush()
                    # summary_str = session.run(summary, feed_dict=feed_dict)
                    # summary_writer.add_summary(summary_str, step)
                    # summary_writer.flush()

                early_stop = False
                if (step + 1) % 5 == 0 or (step + 1) == epochs:
                    validation_data = dataset.get_validation_data(batch_size)
                    print("Doing evaluation on validation Set")
                    sys.stdout.flush()
                    early_stop = do_evaluation(session, eval_correct,
                                               validation_data, batch_size,
                                               images_placeholder,
                                               labels_placeholder)

                if (step + 1) == epochs or early_stop:
                    print("Doing evaluation on training set")
                    sys.stdout.flush()
                    do_evaluation(session, eval_correct,
                                  (training_data, training_labels), batch_size,
                                  images_placeholder, labels_placeholder)

                    print("Doing evaluation on the test set")
                    sys.stdout.flush()
                    test_data = dataset.get_test_data(batch_size)
                    do_evaluation(session, eval_correct, test_data, batch_size,
                                  images_placeholder, labels_placeholder)

                    saver.save(session, "model.ckpt")

                    if early_stop:
                        print("Achieved desired precision at step %d" % step)
                        return
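A minimal sketch of how the function above might be invoked; all hyperparameter values are assumptions:

# Hypothetical call; run_number only selects the "log/<run_number>" directory.
run_training(batch_size=64, learning_rate=0.001, epochs=50, run_number=1)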