Example #1
File: spam.py Project: cs50Mu/wonderland
def train_point(features, label, weights):
    """ Checks whether a single point is correctly classified.
    If it is not correctly classified, the weights vector is updated.
    Returns the new weights vector.
    """
    prediction = predict(features, weights)
    if prediction != label:
        weights = update_weights(features, label, weights)
    return weights
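
The snippet above assumes external predict and update_weights helpers that are not shown. A minimal sketch of what they might look like for a perceptron-style update (an assumption for illustration, not the project's code):

import numpy as np

def predict(features, weights):
    # Classify by the sign of the dot product (+1 / -1).
    return 1 if np.dot(features, weights) >= 0 else -1

def update_weights(features, label, weights, lr=1.0):
    # Perceptron rule: nudge the weights toward the misclassified point.
    return weights + lr * label * np.asarray(features)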
Example #2
File: main.py Project: lsp140510/Mobike
def test(**kwargs):

    # ---------------------- update the parameters ----------------------
    opt = DefaultConfig()
    opt.update(**kwargs)
    opt.printf()

    # ---------------------- data processing ----------------------

    # get the data
    train, test = get_test_data(opt)
    gc.collect()
    # # get the sample set
    # test_sample = get_sample(train, test, load=True)
    # gc.collect()
    # # get the features
    # test_feat = get_feat(train, test_sample)
    # gc.collect()

    # save the features to file
    # test_feat.to_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_{}.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)
    test_feat = pd.read_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_{}.hdf'.format(test.shape[0]))
    test_feat = get_feat(train, test_feat)
    gc.collect()
    test_feat.to_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_{}_filter.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)

    # ---------------------- load the model ----------------------

    # opt['model_name'] = 'lgb_1_90_all.pkl'
    # gbm0, use_feat0 = load_model(opt)
    opt['model_name'] = 'lgb_2017-09-23#20:14:52_0.58893.pkl'
    gbm1, use_feat1 = load_model(opt)
    # opt['model_name'] = 'lgb_2_300_top15.pkl'
    # gbm2, use_feat2 = load_model(opt)
    # opt['model_name'] = 'lgb_3_300_top10.pkl'
    # gbm3, use_feat3 = load_model(opt)
    # opt['model_name'] = 'lgb_4_300_top5.pkl'
    # gbm4, use_feat4 = load_model(opt)

    # ---------------------- save the predictions -------------------

    # test_feat.loc[:, 'pred'] = gbm0.predict(test_feat[use_feat0])
    # gc.collect()
    # res = test_feat[['orderid', 'geohashed_end_loc', 'pred']].sort_values(by=['orderid', 'pred'], ascending=False).groupby('orderid').head(25)
    # res[['orderid', 'geohashed_end_loc']].to_hdf('/home/xuwenchao/dyj-storage/sample_25_{}_filter_leak_sample.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)
    # gc.collect()

    # test_feat.loc[:, 'pred'] = gbm1.predict(test_feat[use_feat1])
    # test_feat[['orderid', 'geohashed_end_loc', 'pred']].to_hdf('/home/xuwenchao/dyj-storage/pred/pred_{}_0.58820.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)

    res = predict(test_feat, use_feat1, gbm1)
    test_feat[['orderid', 'geohashed_end_loc', 'pred']].to_hdf('/home/xuwenchao/dyj-storage/pred/pred_{}_0.58893.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)
    gc.collect()
    cur_time = datetime.datetime.now().strftime('%Y-%m-%d#%H:%M:%S')
    res_path = '{}/day{}_{}_wc_sample_0.58893.csv'.format(opt['result_dir'], opt['test_startday'], cur_time)
    res.to_csv(res_path, index=False)
    print('Saved the test results to:', res_path)
Example #3
def evaluate(X, y, itrain, theta):
    ntot = 0
    nhit = 0
    nnul = 0
    for i in range(0, M):
        if i not in itrain:
            ntot = ntot + 1
            p = predict(X[i,:], theta, N, L, S, K)
            if (y[i] == 1) == (0.5 < p):
                nhit = nhit + 1
            if y[i] == 1:
                nnul = nnul + 1

    rate = float(nhit) / float(ntot)
    rrate = float(nnul) / float(ntot)
    
    print ""
    print "---------------------------------------"
    print "Predictor success rate on test set:", str(nhit) + "/" + str(ntot), round(100 * rate, 2), "%"
    print "Null success rate on test set     :", str(nnul) + "/" + str(ntot), round(100 * rrate, 2), "%"
    
    return [rate, rrate]
Example #4
optimizer = optim.SGD(model.parameters(), lr=0.0611, momentum=0.9)

epochs = 200  # Increase the number of epochs for better accuracy.
batch_size = 32
n_batches = Xtrain.size()[0] // batch_size

costs = []
test_accuracies = []
for i in range(epochs):
    cost = 0.
    for j in range(n_batches):
        Xbatch = Xtrain[j * batch_size:(j + 1) * batch_size]
        Ybatch = Ytrain[j * batch_size:(j + 1) * batch_size]
        cost += train(model, loss, optimizer, Xbatch, Ybatch)

    Ypred = predict(model, Xtest)
    print("Epoch: %d, cost(train): %.6f, cost(test): %.6f" %
          ((i + 1), cost, loss.forward(torch.from_numpy(Ypred), Ytest)))

    costs.append(cost)

TrainPred = predict(model, Xtrain)

# We will plot 2 graphs.

# The training data in red, and the trained model in blue
plt.plot(sc.inverse_transform(Ytrain), 'r')
plt.plot(sc.inverse_transform(TrainPred))
plt.show()

# The test data in red, and the trained model in blue
Example #5
    def forward(self, x):
        return predict(self.model, x)
def tplinker_predict(config, test_data_path, model_state_path):

    config = config.eval_config
    hyper_parameters = config["hyper_parameters"]

    os.environ["TOKENIZERS_PARALLELISM"] = "true"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config["device_num"])
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    data_home = config["data_home"]
    experiment_name = config["exp_name"]
    # test_data_path = os.path.join(data_home, experiment_name, config["test_data"])
    batch_size = hyper_parameters["batch_size"]
    rel2id_path = os.path.join(data_home, experiment_name, config["rel2id"])
    save_res_dir = os.path.join(config["save_res_dir"], experiment_name)
    max_test_seq_len = hyper_parameters["max_test_seq_len"]
    sliding_len = hyper_parameters["sliding_len"]
    force_split = hyper_parameters["force_split"]
    # for reproducibility
    torch.backends.cudnn.deterministic = True
    if force_split:
        split_test_data = True
        print("force to split the test dataset!")
    # read test data
    test_data = json.load(open(test_data_path, "r", encoding="utf-8"))
    # get tokenizer
    tokenize, get_tok2char_span_map = get_tokenizer(config["encoder"],
                                                    config["bert_path"])
    # get data token num
    max_tok_num = get_token_num(test_data, tokenize)
    max_seq_len = min(max_tok_num, max_test_seq_len)

    # data preprocessor
    preprocessor = Preprocessor(
        tokenize_func=tokenize,
        get_tok2char_span_map_func=get_tok2char_span_map)
    data = preprocessor.split_into_short_samples(test_data,
                                                 max_seq_len,
                                                 sliding_len=sliding_len,
                                                 encoder=config["encoder"],
                                                 data_type="test")

    rel2id = json.load(open(rel2id_path, "r", encoding="utf-8"))
    handshaking_tagger = HandshakingTaggingScheme(rel2id=rel2id,
                                                  max_seq_len=max_seq_len)
    metrics = MetricsCalculator(handshaking_tagger)

    # get data maker and model
    if config["encoder"] == "BERT":
        data_maker = get_data_bert_data_maker(config["bert_path"],
                                              handshaking_tagger)
        rel_extractor = get_tplinker_bert_model(config["bert_path"], rel2id,
                                                hyper_parameters)
    elif config["encoder"] == "BiLSTM":
        token2idx_path = os.path.join(data_home, experiment_name,
                                      config["token2idx"])
        data_maker, token2idx = get_data_bilstm_data_maker(
            token2idx_path, handshaking_tagger)
        rel_extractor = get_tplinker_lstm_model(token2idx, hyper_parameters,
                                                rel2id)

    # load model
    rel_extractor.load_state_dict(
        torch.load(model_state_path, map_location=torch.device('cpu')))
    rel_extractor.eval()

    result = predict(config, data, data_maker, max_seq_len, batch_size, device,
                     rel_extractor, True, handshaking_tagger)

    with open("./results/nyt_demo/predict_result.json", "w",
              encoding="utf-8") as f:
        f.write(json.dumps(result, ensure_ascii=False, indent=2))
Example #7
nn_params = res.x

# Obtain Theta1 and Theta2 back from nn_params
Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],
                    (hidden_layer_size, (input_layer_size + 1)))

Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],
                    (num_labels, (hidden_layer_size + 1)))

# After the training completes, we will proceed to report the training accuracy of your classifier by computing the percentage of examples it got correct. If your implementation is correct, you should see a reported
# training accuracy of about 95.3% (this may vary by about 1% due to the random initialization). It is possible to get higher training accuracies by training the neural network for more iterations. We encourage you to try
# training the neural network for more iterations (e.g., set `maxiter` to 400) and also vary the regularization parameter $\lambda$. With the right learning settings, it is possible to get the neural network to perfectly fit the training set.

# In[44]:

pred = utils.predict(Theta1, Theta2, X)
print('Training Set Accuracy: %f' % (np.mean(pred == y) * 100))
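
# A hedged sketch (not from the original notebook) of what a two-layer
# feed-forward predict like utils.predict typically does, assuming sigmoid units:
def sigmoid_(z):
    return 1.0 / (1.0 + np.exp(-z))

def predict_two_layer(Theta1, Theta2, X):
    m = X.shape[0]
    a1 = np.concatenate([np.ones((m, 1)), X], axis=1)    # add bias column
    a2 = sigmoid_(a1 @ Theta1.T)                          # hidden-layer activations
    a2 = np.concatenate([np.ones((m, 1)), a2], axis=1)    # add bias column
    a3 = sigmoid_(a2 @ Theta2.T)                          # output-layer activations
    return np.argmax(a3, axis=1)                          # index of the most likely class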

# ## 3 Visualizing the Hidden Layer
#
# One way to understand what your neural network is learning is to visualize the representations captured by the hidden units. Informally, given a particular hidden unit, one way to visualize what it computes is to find an input $x$ that will cause it to activate (that is, to have an activation value
# ($a_i^{(l)}$) close to 1). For the neural network you trained, notice that the $i^{th}$ row of $\Theta^{(1)}$ is a 401-dimensional vector that represents the parameter for the $i^{th}$ hidden unit. If we discard the bias term, we get a 400 dimensional vector that represents the weights from each input pixel to the hidden unit.
#
# Thus, one way to visualize the “representation” captured by the hidden unit is to reshape this 400 dimensional vector into a 20 × 20 image and display it (It turns out that this is equivalent to finding the input that gives the highest activation for the hidden unit, given a “norm” constraint on the input (i.e., $||x||_2 \le 1$)).
#
# The next cell does this by using the `displayData` function and it will show you an image with 25 units,
# each corresponding to one hidden unit in the network. In your trained network, you should find that the hidden units correspond roughly to detectors that look for strokes and other patterns in the input.

# In[45]:

utils.displayData(Theta1[:, 1:])
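
# An equivalent manual view of a single hidden unit (illustrative only; assumes
# the 400 non-bias weights of one row of Theta1 map back to a 20 x 20 patch):
import matplotlib.pyplot as plt
unit = Theta1[0, 1:]                      # drop the bias weight -> 400 values
plt.imshow(unit.reshape(20, 20, order='F'), cmap='gray')
plt.axis('off')
plt.show()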
Example #8
# Custom object needed for inference and training
custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': None}

print('Loading model...')

# Load model into GPU / CPU
model = load_model(args.model, custom_objects=custom_objects, compile=False)

print('\nModel loaded ({0}).'.format(args.model))

# Input images
inputs = load_images( glob.glob(args.input) )
print('\nLoaded ({0}) images of size {1}.'.format(inputs.shape[0], inputs.shape[1:]))

# Compute results
outputs = predict(model, inputs)

#matplotlib problem on ubuntu terminal fix
#matplotlib.use('TkAgg')

#import sys
#sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')

#import cv2 as cv

#src = cv.imread('/home/eliaswyq/DenseDepth/Segmentation/1579006203141429000.png')
#seg = load_images('/home/eliaswyq/DenseDepth/Segmentation/1579006203141429000.png')
#plt.savefig('segmentation.png')


#plt.figure(figsize=(76.8,19.2))
##############################################################################
# TRAINING
print()
print('Training... ')
with tf.Session(graph=g, config=config) as sess:
    [avg_loss_plot, val_accuracy_plot, test_accuracy_plot] = train(sess=sess, epochs=epochs,
                                                             training_set=(X_train, y_train),
                                                             validation_set=(X_valid, y_valid),
                                                             test_set=(X_test, y_test),
                                                             batch_size=batch_size,
                                                             initialize=True)
                                                             
    np.save(os.path.join(store_folder, Name + '_avg_loss_plot.npy'), avg_loss_plot)
    np.save(os.path.join(store_folder, Name + '_val_accuracy_plot.npy'), val_accuracy_plot)
    np.save(os.path.join(store_folder, Name + '_test_accuracy_plot.npy'), test_accuracy_plot)
##############################################################################
# PREDICTION

    # LABELS
    y_pred = predict(sess, X_test, return_proba=False)
    test_acc = 100*np.sum((y_pred == y_test)/len(y_test))
    print('Test Accuracy: %.3f%%' % (test_acc))
    with open(os.path.join(store_folder, Name + '_AccuracyTest.txt'), 'w') as f:
        f.write('%.3f%%' % (test_acc))

    # PROBABILITIES
    np.set_printoptions(precision=3, suppress=True)
    y_pred_proba = predict(sess, X_test, return_proba=True)
    print(y_pred_proba)
    np.save(os.path.join(store_folder, Name + '_pred_proba.npy'), y_pred_proba)
Example #10
def main():
    # Data Loader (Input Pipeline)
    print('===>Model Type:', args.model_type)
    print('loading dataset...')
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size, 
                                               num_workers=args.num_workers,
                                               drop_last=True,
                                               shuffle=True)
    
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=args.test_batch_size, 
                                              num_workers=args.num_workers,
                                              drop_last=True,
                                              shuffle=False)
    # Define models
    print('building model...')
    clf = CNN(input_channel=input_channel, n_outputs=num_classes).cuda()
    print(clf.parameters)

    # Optimizer
    if args.optim == 'adam':
        optimizer = torch.optim.Adam(clf.parameters(), lr=args.lr, \
                         betas=(args.beta1, args.momentum), weight_decay=args.weight_decay)
    if args.optim == 'sgd_mom':
        optimizer = torch.optim.SGD(clf.parameters(), lr=args.lr, \
                                    momentum=args.momentum, weight_decay=args.weight_decay)

    # learning rate schedule
    if args.lr_scheduler == 'linear':
        adjust_lr_linear = True
    else:
        scheduler = learning_rate_scheduler(optimizer, args)
        adjust_lr_linear = False
    print('Adjust lr linear [True/False]:', adjust_lr_linear)
    
    best_test_acc=0
    mean_pure_ratio=0
    mean_pure_ratio1=0


    if not args.resume:
        epoch=0
        train_acc=0
        
        # evaluate models with random weights
        test_acc, test_loss = evaluate(test_loader, clf, epoch)
        print('Epoch [%d/%d] Test Accuracy on the 10000 test data: %.4f %%, Pure Ratio %.4f %%' % \
                 (epoch+1, args.n_epoch, test_acc, mean_pure_ratio))
        # save results
        with open(txtfile, "a") as myfile:
            myfile.write(str(int(epoch)) + ': ' + str(train_acc) +' ' + str(test_acc) + ' ' \
                          + str(mean_pure_ratio) + "\n")
    else:
        args.start_epoch, best_test_acc, optimizer, clf = \
                               resume_checkpoint(optimizer, clf, model_dir, model_str)

    # training
    for epoch in range(args.start_epoch, args.n_epoch):
        # train models
        clf.train()

        # learning rate scheduler step
        if adjust_lr_linear:
            if args.model_type=='sigua_sl':
                if args.optim == 'adam':  
                    print('adjust learning rate adam sl')
                    adjust_learning_rate_adam_sl(optimizer, epoch)
                else:
                    adjust_learning_rate_sgd(optimizer, epoch)
            if args.model_type=='sigua_bc':
                if args.optim == 'adam':  
                    adjust_learning_rate_adam_bc(optimizer, epoch)
                else:
                    adjust_learning_rate_sgd(optimizer, epoch)
        else: 
            scheduler.step()

        print('Training %s...' % model_str)
        train_acc, pure_ratio_list = train(train_loader, epoch, clf, optimizer, args)
        # evaluate models
        print('Evaluating %s...' % model_str)
        test_acc, test_loss = evaluate(test_loader, clf, epoch)
        
        # save results
        if args.model_type=='sigua_sl':
            print('Epoch [%d/%d] Test Accuracy on the %s test data: %.4f %%, Pure Ratio %.4f %%' % \
                              (epoch+1, args.n_epoch, len(test_dataset), test_acc, mean_pure_ratio))
            mean_pure_ratio = sum(pure_ratio_list)/len(pure_ratio_list)
            with open(txtfile, "a") as myfile:
                myfile.write(str(int(epoch)) + ': ' + str(train_acc) + ' ' + str(test_acc) + ' ' \
                              + str(mean_pure_ratio) + "\n")
        else:
            print('Epoch [%d/%d] Test Accuracy on the %s test data: %.4f %%' % \
                         (epoch+1, args.n_epoch, len(test_dataset), test_acc))
            with open(txtfile, "a") as myfile:
                myfile.write(str(int(epoch)) + ': ' + str(train_acc) +' ' + str(test_acc) + "\n")

        # remember best prec@1 and save checkpoint
        is_best=test_acc > best_test_acc
        best_test_acc=max(test_acc, best_test_acc)
        if args.save_model:
            save_checkpoint({
            'epoch': epoch+1,
            'state_dict': clf.state_dict(),
            'best_prec1': best_test_acc,
            'optimizer': optimizer.state_dict(),
            }, is_best, model_dir, model_str)

    print('Predicting %s...' % model_str)
    preds, true=predict(test_loader, clf)
    for p, t in zip(preds, true):
        with open(txtfile.replace('.txt', '_pred.txt'), "a") as myfile:
            myfile.write(str(p) +' ' + str(t) + "\n")
    exp_ev_codes = ['EXP', 'IDA', 'IPI', 'IMP', 'IGI', 'IEP']
    comp_an_ev_codes = ['ISS', 'ISO', 'ISA', 'ISM', 'IGC', 'IBA', 'IBD', 'IKR', 'IRD', 'RCA']
    auth_stmt_ev_codes = ['TAS', 'NAS']
    cur_ev_codes = ['IC', 'ND']
    elec_ev_codes = ['IEA']


    NUM_FEATURES = 8555
    #NUM_FEATURES = 53

    ev_codes = exp_ev_codes + comp_an_ev_codes + auth_stmt_ev_codes + cur_ev_codes
    GO_terms = GO_utils.get_go_terms_descendants(biomart_file_path, gene2go_file_path, gene_count_file_path, obo_file_path, ev_codes=ev_codes)
    GO_terms = GO_utils.sort_go_terms(GO_terms)

    term = GO_terms[300]
    utils.predict(term, NUM_FEATURES, rpkm_file_path)
    '''
    for t in GO_terms[0:10]:
        print t.id, ' ', len(t.genes)

    for term in GO_terms:
        utils.predict(term)
        break
    '''

    # Logistic Regression with Cross-Validation, L1 Norm (must use liblinear solver for L1)
    #costs = []
    '''
    num_folds = 5   # number of folds to use for cross-validation
    loss_function = 'l1'  # Loss function to use. Must be either 'l1' or 'l2'
    logreg_cv_L1 = linear_model.LogisticRegressionCV(cv=num_folds, penalty=loss_function, solver='liblinear')
Example #12
    return (value - decay) / vmax

def one_unnormalize(value, decay, vmax):
    return value * vmax + decay

def is_number(s):
    try: 
        float(s)
        return True
    except ValueError:
        return False

if __name__ == "__main__":
    data = csv_to_numpy("data.csv")
    km_list, price_list = data[:,0], data[:,1]

    thetas = csv_to_numpy("thetas.csv")
    t0, t1 = thetas[0], thetas[1]

    useless, km_decay, km_max = normalize(data[:,0])
    useless, price_decay, price_max = normalize(data[:,1])

    while True:
        km = input("Enter your km: ")
        if not is_number(km):
            print("Please enter number")
        else:
            n_km = one_normalize(float(km), km_decay, km_max)
            n_predicted_price = predict(n_km, t0, t1)
            predicted_price = one_unnormalize(n_predicted_price, price_decay, price_max)
            print("For {} km, price is {}".format(km, predicted_price))
Example #13
word_to_id, char_to_id, tag_to_id = [
    {v: k for k, v in x.items()}
    for x in [model.id_to_word, model.id_to_char, model.id_to_tag]
]


print('Reloading previous model...')
_, f_eval = model.build(training=False, **parameters)
model.reload()


print "--------"
print opts.test_folder
for test_file in os.listdir(opts.test_folder):

    if test_file.endswith(".conll"):
        out_file = default_prefix + test_file.replace(".conll", "") + ".txt"

        test_sentences = loader.load_sentences(opts.test_folder + "/" + test_file, lower, zeros)
        
        update_tag_scheme(test_sentences, tag_scheme)

        test_data = prepare_dataset2(
            test_sentences, word_to_id, char_to_id, tag_to_id, model.feature_maps, lower
        )

        print "input: ", test_file, ":", len(test_sentences), len(test_data)
        print "output: ", opts.out_folder + "/" + out_file

        predict(parameters, f_eval, test_sentences,
                test_data, model.id_to_tag, opts.out_folder + "/" + out_file, add_O_tags=False)
Example #14
import argparse

parser = argparse.ArgumentParser(description="Generate text using charLSTM.")

parser.add_argument(
    '--initial_letters',
    type=str,
    default="Give me some ",
    help='Initial letters to start with. Default is "Give me some "')

parser.add_argument('--num_letters',
                    type=int,
                    default=100,
                    help='Number of letters to generate. Default is 100')

args = parser.parse_args()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Running on " + str(device))

model = md.get_model().to(device)
model.load_state_dict(torch.load("model/model_state_dict"))

print(
    predict(model,
            device,
            initial_letters=args.initial_letters,
            num_letters=args.num_letters,
            top_k=5))
Example #15
test_meta = pd.read_csv(path_to_data + 'test_set_metadata.csv')

g_wtable, eg_wtable = utils.get_wtables(path_to_data)

g_clfs = utils.load_models(split_count, True, g_wtable)
eg_clfs = utils.load_models(split_count, False, eg_wtable)

folds = StratifiedKFold(n_splits=split_count, shuffle=True)

g_features = pd.read_csv(path_to_data + 'test_g_features.csv', header=0)

meta = g_features['object_id']
g_features = g_features.drop(['object_id'], axis=1)

g_preds = utils.predict(g_clfs, g_features, folds)

g_preds_99 = utils.predict_99(g_preds)
g_preds_df = utils.store_preds(g_preds, utils.g_class_names(), g_preds_99,
                               meta)

for i in utils.eg_class_names():
    g_preds_df[i] = 0
g_preds_df = g_preds_df.reindex_axis(['object_id'] + utils.g_class_names() +
                                     utils.eg_class_names() + ['class_99'],
                                     axis=1)

g_preds_df.to_csv('predictions.csv', header=True, mode='a', index=False)

for i_c, data_chunk in enumerate(
        pd.read_csv(path_to_data + 'test_eg_features.csv',
Example #16
# data processing, change to time-param/hour sequence (4320, 9)
#raw_data = raw_data[:, 2:]
#raw_data = raw_data.astype(float)
#test_data = copy.deepcopy(raw_data)

################################### feature scaling ###################################

#mean = np.mean(test_data, axis=0 )
#std = np.std(test_data, axis=0 )
#test_data = (test_data - mean) / (std + 1e-20)

# data processing, change to time-param/day sequence (240,432)
#test_data = np.reshape(raw_data, (-1, featNum*9 ))

# predict
result = utils.predict(train_data, w, b)
#result = utils.predict(test_data, w, b)

output = np.stack((result, ground_truth))
output = output.T

output = output.astype(int)

count = 0
for data in range(0, output.shape[0]):
    if (output[data][0] == output[data][1]):
        count += 1
#print("index\t", data)
print("Correct ratio= {0:.2f} %".format(100 * count / int(output.shape[0])))

np.savetxt('output.csv', output, delimiter=',')
Example #17
File: train.py Project: mr-easy/charLSTM
            for j, letter in enumerate(seq):
                encoded_x[j, i, utils.letter_to_index[letter]] = 1
                encoded_y[j, i, utils.letter_to_index[y[b + i][j]]] = 1
        if (b + batch_size <= x.shape[0]):
            yield encoded_x, encoded_y
        else:
            yield encoded_x[:, :last_batch_size -
                            1, :], encoded_y[:, :last_batch_size - 1, :]


x, y = prepare_data(data, seq_len)

print("Random sentence without any training:")
print(
    predict(model,
            device,
            initial_letters=random.choice(initial_letters),
            top_k=9))

loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

# TRAINING LOOP
epoch_losses = []
iteration = 0
for e in range(1, num_epochs + 1):
    running_losses = []

    for batch_x, batch_y in get_batches(x, y, batch_size):

        iteration += 1
Example #18
# Script for predicting PM2.5 in real time, run as a crontab task

import utils

utils.predict()
Example #19
def main():
    ''' Prediction application with the following parameters:
        - image (image to predict)
        - checkpoint (checkpoint file)
        - top_k (top K classes)
        - category_names (JSON file with categories)
        - gpu (use GPU for prediction)
    '''

    parser = argparse.ArgumentParser(
        description='The neural network prediction application.')
    '''
    parser.add_argument("image", 
                        type = str,
                        help="Path name of the image to predict. Default image path: 'flowers/test/17/image_03830.jpg'",
                        #nargs="?", 
                        action="store", 
                        default="flowers/test/17/image_03830.jpg")         
    '''
    parser.add_argument("image",
                        type=str,
                        help="Path name of the image to predict.",
                        action="store")
    parser.add_argument(
        "checkpoint",
        type=str,
        help=
        "Checkpoint file with the saved model. Default file name: 'checkpoint.pth'",
        nargs="?",
        action="store",
        default="checkpoint.pth")
    parser.add_argument("--top_k",
                        type=int,
                        help="Top K classes. Default value: 5",
                        action="store",
                        default=5)
    parser.add_argument(
        "--category_names",
        type=str,
        help="JSON file with category names. Default value: 'cat_to_name.json'",
        action="store",
        default="cat_to_name.json")
    parser.add_argument(
        "--gpu",
        help="Use GPU. If not specified the default device will be used.",
        action="store_true",
        default=False)

    args = parser.parse_args()

    print(args)

    # Validate
    utils.validate_positive('top_k', args.top_k, True)

    # Handle device_name
    if args.gpu:
        device_name = 'gpu'
    else:
        device_name = 'cpu'

    print('')
    print('**********************************')
    print('*** Prediction process started ***')
    print(f'***           ({device_name.upper()})            ***')
    print('**********************************')
    print('')

    # Load model, predict & display probabilities
    model = utils.load_model(args.checkpoint, device_name)
    probs, classes = utils.predict(args.image, model, args.top_k, device_name)
    utils.display(probs, classes, args.category_names)

    print('')
    print('***********************************')
    print('*** Prediction process finished ***')
    print('***********************************')
    print('')
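
# Hypothetical command-line invocation (the script name is an assumption; the
# image path and file-name defaults match the argparse setup above):
#   python predict.py flowers/test/17/image_03830.jpg checkpoint.pth --top_k 5 --category_names cat_to_name.json --gpu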
Example #20
def ekf_estimate(system, model, state_var, save_video_dir=None, deviation=0.):
    """ Estimate the system with extended Kalman filter

        :param system: the system to estimate,
        :param model: the model we build to mimic the system,
        :param state_var: estimated state variance
        :param save_video_dir: if given, step wise images will be saved and recorded as a video
        :param deviation: deviation added upon each dimension of the initial state
        :returns
            x_est: estimated states by extended Kalman filter based on the images,
            Sigma_est: estimated observation uncertainties
            x_true: the recorded true system states
            o_list: list of obtained observation
    """

    system.reset()
    x = system.get_pos()
    # set initial deviation
    x = x + (np.ones_like(x) * deviation)
    model.set_norm(system.norm_xyz, system.norm_q)

    Sigma = np.eye(model.dim_state) * state_var
    Q = np.eye(model.dim_state) * state_var

    x_true = []  # true system states
    o_list = []  # observations
    x_est = []  # estimated states
    Sigma_est = []  # estimated covariances
    R_list = []  # uncertainty matrix for observations

    # init the display for video recording
    recorder = None
    if save_video_dir is not None:
        recorder = VideoRecorder(save_video_dir, norm_xyz=system.norm_xyz)

    # init observation
    img = system.render()

    print("Simulating the system with EKF...")
    for t in tqdm(range(system.horizon)):
        o, cov_o = model.wrap(img)

        # estimate observation model H
        o_func = lambda _x: model.emit(_x)
        H = finite_diff(o_func, x, dim_out=model.dim_observation)

        # get the observation variance
        R = cov_o

        # correct the state using observation and H
        x, Sigma = correct(x, Sigma, o, R, H, model.emit)

        # collect current step data
        R_list.append(np.diag(R))
        x_est.append(x)
        Sigma_est.append(Sigma)
        o_list.append(o)
        x_sys = system.get_pos()
        x_true.append(x_sys)

        if recorder is not None:
            emit = model.generate(x)
            recorder.record(img, emit, x_sys, x)

        if t < system.horizon - 1:
            # evolve the system by one step

            img, u, dt = system.step()
            dt = float(dt)

            # estimate state transition matrix A, B
            x_func = lambda _x: model.step(_x, u, dt)
            u_func = lambda _u: model.step(x, _u, dt)
            A = finite_diff(x_func, x, dim_out=model.dim_state)
            B = finite_diff(u_func, u, dim_out=model.dim_state)

            # get the transition mapping
            sim_step = lambda _x, _u: model.step(_x, _u, dt)

            # predict the next state; let Qt scale with dt, since a longer time step carries more uncertainty
            x, Sigma = predict(x, Sigma, u, A, B, Q*(dt**2), sim_step)

    if recorder is not None:
        recorder.finalize()

    # show the sigma of z
    print("Aver. inferred variance is {}".format(np.mean(R_list)))

    # revoke the normalization, prepare for evaluation
    x_est = np.array(x_est)
    x_est[:, :3, :] *= system.norm_xyz
    x_est[:, 3:, :] *= system.norm_q
    Sigma_est = np.array(Sigma_est)
    x_true = np.array(x_true)
    x_true[:, :3, :] *= system.norm_xyz
    x_true[:, 3:, :] *= system.norm_q
    o_list = np.array(o_list)

    return x_est, Sigma_est, x_true, o_list
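
The loop above relies on predict and correct helpers that are not shown here. A hedged sketch of the standard EKF equations they presumably implement (signatures matched to the calls above, not the project's exact code):

import numpy as np

def correct(x, Sigma, o, R, H, emit_fn):
    # Measurement update: apply the Kalman gain to the innovation o - emit_fn(x).
    S = H @ Sigma @ H.T + R
    K = Sigma @ H.T @ np.linalg.inv(S)
    x_new = x + K @ (o - emit_fn(x))
    Sigma_new = (np.eye(Sigma.shape[0]) - K @ H) @ Sigma
    return x_new, Sigma_new

def predict(x, Sigma, u, A, B, Q, step_fn):
    # Time update: propagate the mean through the nonlinear step and the
    # covariance through its linearization A; B (the control Jacobian) is
    # accepted for signature compatibility but already folded into step_fn.
    x_pred = step_fn(x, u)
    Sigma_pred = A @ Sigma @ A.T + Q
    return x_pred, Sigma_pred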
Example #21
              loss='binary_crossentropy',
              metrics=[
                  'binary_accuracy', dice_coefficient, precision_smooth,
                  recall_smooth
              ])
print("Number of parameters: ", model.count_params())
pretrained_weights = './weights/sample_low.hdf5'

from keras.models import load_model
model.load_weights(pretrained_weights)

num_stochastic_T = 30
for i in range(6):
    image = x_test[i]
    gt = y_test[i]
    prediction, aleatoric, epistemic, scores = utils.predict(
        model, image, gt, T=num_stochastic_T)
    x_test[i, :, :, 1] = prediction * 255
    x_test[i, :, :, 2] = (epistemic + aleatoric) * 10 * 255

plt.rcParams['figure.figsize'] = [20, 5]
M = 6
list_index = np.random.randint(low=0, high=x_test.shape[0], size=M)
plt.figure(figsize=(25, 25))
fig, ax = plt.subplots(4, M)
for k, ind in enumerate(list_index):
    ax[0, k].imshow(x_test[ind, :, :, 0], cmap='gray')
    ax[1, k].imshow(y_test[ind, :, :, 0], cmap='gray')
    ax[2, k].imshow(x_test[ind, :, :, 1], cmap='gray')
    ax[3, k].imshow(x_test[ind, :, :, 2])
    ax[0, k].axis('off')
    ax[1, k].axis('off')
Example #22
J = 10
# total number of teaching materials to present
textbook = 500
# number of materials used for estimation
test_textbook_list = [100]
# number of materials presented between estimations
between_textbook_list = [1]
# group index
k = 1
lambds = [1, 2, 3, 4, 5]

for lambd in lambds:
    oracle = Oracle(eta=eta, lambd=lambd)
    min_w = oracle.estimate_min_w(
        pd.concat([train_X, test_X]), pd.concat([train_y, test_y]))
    print('{}: {}'.format(min_w, predict(test_X, test_y, min_w)))
    W_init = oracle.make_W_init(J=J)
    W = W_init.copy()
    train_X_ = train_X.copy()
    train_y_ = train_y.copy()

    now = datetime.datetime.now()
    now_str = now.strftime('%Y%m%d%H%M%S')
    result_path = 'result/wine_{}_{}_{}'.format(now_str, k, lambd)
    logging.basicConfig(
        filename='./logs/log_wine_{0:%Y%m%d%H%M%S}_{1}_{2}.log'.format(now, k, lambd), level=logging.DEBUG
    )

    logging.debug(
        './logs/log_{0:%Y%m%d%H%M%S}_{1}_{2}.log'.format(now, k, lambd))
    logging.debug('lambd')
Example #23
    args = parser.parse_args()

    # load dictionary
    word2idx = utils.read_json(args.vocab)
    idx2word = {value: key for key, value in word2idx.items()}

    # load model #
    params = torch.load(args.model)

    model = lstm_lm.LSTMLM(params)
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    loss_function = nn.NLLLoss()
    load_model(params, model, optimizer)

    if params["use_gpu"]:
        model.cuda()

    model.eval()  # change state to evaluation mode

    # process input word
    test = args.test
    print(test)
    i = 0
    while (i != 100):
        test = [word2idx[test] if test in word2idx else word2idx["<unk>"]]
        test = np.array(test, dtype=int)
        test_batch = utils.batchify(test, 1, 1, word2idx)
        test = utils.predict(model, test_batch, idx2word, params["use_gpu"])
        i += 1
Example #24
parameters = model(X_train, Y_train, X_test, Y_test, 0.0001, 200, 32)



joblib.dump(parameters, 'parameters_test.pkl')
parameters = joblib.load('parameters_test.pkl')





# Predict all test image
X_test = X_test.T
for i in range(30):
  my_image = X_test[i].reshape(3072, 1)
  my_image_prediction = predict(my_image, parameters)
  print("Original digit: " + str(testResponseData[i]))
  #plt.imshow()
  print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)) + "\n")


# Predict my own image
#fname = 'Convolutional-Neural-Network/cnn_tensorflow/test_low/seven.jpg'
fname = 'test_low/seven.jpg'
image = cv2.imread(fname)
my_image = cv2.resize(image,(32,32))
my_image = cv2.subtract(255, my_image)
plt.imshow(my_image)
#my_image = np.vectorize(lambda x: 255 - x)(np.ndarray.flatten(my_image))
#my_image = (np.ndarray.flatten(image))
my_image = my_image.reshape((32*32*3, 1))
Example #25
def trainWithRawData(path,epochNum):
    try:
        print('[+] loading modelParams...', end='', flush=True)
        modelParams=torch.load(path)
        print('Done')
    except IOError:
        print("Error: 没有找到文件或读取文件失败")
    writer=SummaryWriter('../log/'+date+'/DenseNet201/') # create the /log/<date>/DenseNet201 directory layout
    train_dataset,val_dataset = CropDataset.split_Dataset(data_dir, train_val_ratio, IMAGE_SIZE,trian_transform, val_transform)
    train_dataLoader = DataLoader(train_dataset,BATCH_SIZE,num_workers=16, shuffle=True)
    val_dataLoader = DataLoader(val_dataset,BATCH_SIZE,num_workers=1, shuffle=False)
    model = getmodel()
    criterion = nn.CrossEntropyLoss().cuda()    
    model.load_state_dict(modelParams['state_dict'])
    min_loss=modelParams['val_loss']
    print('val_correct is %f'%(modelParams['val_correct']))
    print('min_loss is :%f'%(min_loss))   
    min_acc=max(modelParams['val_correct'],0.81)
    optinizerSave=modelParams['optimizer']
    patience=0
    lr=1e-4
    momentum=0.9
    for epoch in range(epochNum):
        print('Epoch {}/{}'.format(epoch, epochNum - 1))
        print('-' * 10)
        if epoch==0:
            optimizer=torch.optim.SGD(params=model.parameters(),lr=lr,momentum=momentum)
            print('begin lr is ',lr)
        else:
            optimizer=torch.optim.SGD(params=model.parameters(),lr=lr,momentum=momentum)           
        if patience==3:
            patience=0
            model.load_state_dict(torch.load('../model/DenseNet201/'+date+'_loss_best.pth')['state_dict'])
            lr=lr/5
            print('lr decreased, lr now is %f'%(lr))
        optimizer = torch.optim.Adam(model.parameters(),lr = lr,amsgrad=True,weight_decay=1e-4)
        print('now patience is %d '%(patience))
        # track the running loss and acc during training
        running_loss = utils.RunningMean()
        running_corrects = utils.RunningMean()

        for batch_idx, (inputs, labels) in enumerate(train_dataLoader):
            model.train(True)
            n_batchsize=inputs.size(0)
            inputs = inputs.cuda()
            labels = labels.cuda()
            optimizer.zero_grad()
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, 1)
            loss = criterion(outputs, labels)
            running_loss.update(loss.item(),1)
            running_corrects.update(torch.sum(preds == labels.data).data,n_batchsize)
            loss.backward()
            optimizer.step()
            # print training stats every 10 batches
            if batch_idx%10==9:
                print('(%s)[epoch:%d,batch:%d]:acc: %f,loss:%f'%(str(datetime.datetime.now()),epoch,batch_idx,running_corrects.value,running_loss.value))
                niter = epoch * len(train_dataset)/BATCH_SIZE + batch_idx
                writer.add_scalar('Train/Acc',running_corrects.value,niter)
                writer.add_scalar('Train/Loss',running_loss.value,niter)
                # if there are more than 300 batches, validate every 300 batches
                if batch_idx%300==299: 
                    lx,px=utils.predict(model,val_dataLoader)
                    log_loss = criterion(px,lx)
                    log_loss = log_loss.item()
                    _, preds = torch.max(px, dim=1)
                    accuracy = torch.mean((preds == lx).float())
                    writer.add_scalar('Val/Acc',accuracy,niter)
                    writer.add_scalar('Val/Loss',log_loss,niter)
                    print('(%s)[epoch:%d,batch:%d]: val_acc:%f,val_loss:%f,val_total_len:%d'%(str(datetime.datetime.now()),epoch,batch_idx,accuracy,log_loss,len(val_dataset)))
        print('(%s)[epoch:%d] :acc: %f,loss:%f,lr:%f,patience:%d'%(str(datetime.datetime.now()),epoch,running_corrects.value,running_loss.value,lr,patience))       
        
        # validate on the validation set after the epoch finishes
        lx,px=utils.predict(model,val_dataLoader)
        log_loss = criterion(px,lx)
        log_loss = log_loss.item()
        _, preds = torch.max(px, dim=1)
        accuracy = torch.mean((preds == lx).float())
        writer.add_scalar('Val/Acc',accuracy,(epoch+1) * len(train_dataset)/BATCH_SIZE)
        writer.add_scalar('Val/Loss',log_loss,(epoch+1) * len(train_dataset)/BATCH_SIZE)
        print('(%s)[epoch:%d]: val_acc:%f,val_loss:%f,'%(str(datetime.datetime.now()),epoch,accuracy,log_loss))
        
        # save a model snapshot if the validation loss drops below min_loss
        if  log_loss < min_loss:
            try:
                fileName = date+'_loss_best.pth'
                utils.snapshot('../model/DenseNet121/', fileName, {
                       'epoch': epoch + 1,
                       'state_dict': model.state_dict(),
                       'optimizer': optimizer.state_dict(),
                       'val_loss': log_loss,
                       'val_correct':accuracy })          
                patience = 0
                min_loss=log_loss
                print('save new model loss,now loss is ',min_loss)
            except IOError:
                print("Error: 没有找到文件或读取文件失败")    
        else:
            patience += 1    
            
        # save a model snapshot if the accuracy exceeds min_acc
        if accuracy>min_acc:
            try:
                fileName = date+'_acc_best.pth'
                utils.snapshot('../model/DenseNet121/', fileName, {
                       'epoch': epoch + 1,
                       'state_dict': model.state_dict(),
                       'optimizer': optimizer.state_dict(),
                       'val_loss': log_loss,
                       'val_correct':accuracy })             
                min_acc=accuracy
                print('save new model acc,now acc is ',min_acc.item())    
            except IOError:
                print("Error: 没有找到文件或读取文件失败")
Example #26
# Load model into GPU / CPU
model = load_model(args.model, custom_objects=custom_objects, compile=False)

print('\nModel loaded ({0}).'.format(args.model))


def getDepth():

    outputs = predict(model, inputs)
    print(inputs.shape, outputs.shape)
    outputs = outputs.squeeze()
    outputs = cv2.resize(outputs, (1280, 960), interpolation=cv2.INTER_CUBIC)
    outputs_np = np.array(outputs)
    outputs_np = np.abs(outputs)


foldername = '/home/tourani/Desktop/2014-06-26-09-24-58/front_undistort/'
folder_depth = '/home/tourani/Desktop/2014-06-26-09-24-58/front_undistort_depth/'
for filename in os.listdir(foldername):
    ff = str(foldername) + filename
    x = np.clip(np.asarray(Image.open(ff), dtype=float) / 255, 0, 1)
    outputs = predict(model, x)
    outputs = outputs.squeeze()
    outputs = cv2.resize(outputs, (1280, 960), interpolation=cv2.INTER_CUBIC)
    outputs_np = np.array(outputs)
    outputs_np = np.abs(outputs)
    file_name = folder_depth + filename.split('.')[0] + '.npy'
    #print(file_name)
    #time.sleep(40)
    np.save(file_name, outputs_np)
Example #27
def train(epochNum):
    writer=SummaryWriter('../log/'+date+'/DenseNet201/') # create the /log/<date>/DenseNet201 directory layout
    train_dataset,val_dataset = CropDataset.split_Dataset(data_dir, train_val_ratio, IMAGE_SIZE,trian_transform, val_transform)
    train_dataLoader = DataLoader(train_dataset,BATCH_SIZE,num_workers=16, shuffle=True)
    val_dataLoader = DataLoader(val_dataset,BATCH_SIZE,num_workers=1, shuffle=False)
    model = getmodel()
    criterion = nn.CrossEntropyLoss().cuda()
    min_loss=4.1
    print('min_loss is :%f'%(min_loss))
    min_acc=0.80
    patience=0
    lr=0.0
    momentum=0.9
    for epoch in range(epochNum):
        print('Epoch {}/{}'.format(epoch, epochNum - 1))
        print('-' * 10)
        
        # train only the fully connected layer in the first epochs
        if epoch==0 or epoch==1 or epoch==2: 
            lr=1e-3
            optimizer = torch.optim.Adam(model.fresh_params(),lr = lr,amsgrad=True,weight_decay=1e-4)
        else:
            optimizer = torch.optim.Adam(model.parameters(),lr = lr,amsgrad=True,weight_decay=1e-4)    
            
        if epoch==3 or epoch==4 or epoch==5:
            lr=0.00003
            momentum=0.95
            print('set lr=:%f,momentum=%f'%(lr,momentum))
        if epoch==6:
            lr=1e-4
            momentum=0.9
            print('set lr=:%f,momentum=%f'%(lr,momentum))
        if patience==2: # if the loss has not dropped below min_loss for two epochs, reload the best snapshot and divide the learning rate by 5
            patience=0
            model.load_state_dict(torch.load('../model/DenseNet201/'+date+'_loss_best.pth')['state_dict'])
            lr=lr/5
            print('loss has increased, lr is divided by 5, lr now is :%f'%(lr))
        
        # track the running loss and acc during training
        running_loss = utils.RunningMean()
        running_corrects = utils.RunningMean()
            
        for batch_idx, (inputs, labels) in enumerate(train_dataLoader):    
            model.train(True) # put the model in training mode
            n_batchsize=inputs.size(0)
            inputs = inputs.cuda()
            labels = labels.cuda()
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            running_loss.update(loss.item(),1)  # accumulate this batch's loss
            _, preds = torch.max(outputs.data, 1)
            running_corrects.update(torch.sum(preds == labels.data).data,n_batchsize)   # accumulate this batch's accuracy
            loss.backward()
            optimizer.step()
            
            # print training stats every 10 batches
            if batch_idx%10==9:
                print('(%s)[epoch:%d,batch:%d]:acc: %f,loss:%f'%(str(datetime.datetime.now()),epoch,batch_idx,running_corrects.value,running_loss.value))
                niter = epoch * len(train_dataset)/BATCH_SIZE + batch_idx
                writer.add_scalar('Train/Acc',running_corrects.value,niter)
                writer.add_scalar('Train/Loss',running_loss.value,niter)
                # if there are more than 300 batches, validate every 300 batches
                if batch_idx%300==299: 
                    lx,px=utils.predict(model,val_dataLoader)
                    log_loss = criterion(px,lx)
                    log_loss = log_loss.item()
                    _, preds = torch.max(px, dim=1)
                    accuracy = torch.mean((preds == lx).float())
                    writer.add_scalar('Val/Acc',accuracy,niter)
                    writer.add_scalar('Val/Loss',log_loss,niter)
                    print('(%s)[epoch:%d,batch:%d]: val_acc:%f,val_loss:%f,val_total_len:%d'%(str(datetime.datetime.now()),epoch,batch_idx,accuracy,log_loss,len(val_dataset)))
        print('(%s)[epoch:%d] :acc: %f,loss:%f,lr:%f,patience:%d'%(str(datetime.datetime.now()),epoch,running_corrects.value,running_loss.value,lr,patience))       
        
        # validate on the validation set after the epoch finishes
        lx,px=utils.predict(model,val_dataLoader)
        log_loss = criterion(px,lx)
        log_loss = log_loss.item()
        _, preds = torch.max(px, dim=1)
        accuracy = torch.mean((preds == lx).float())
        writer.add_scalar('Val/Acc',accuracy,(epoch+1) * len(train_dataset)/BATCH_SIZE)
        writer.add_scalar('Val/Loss',log_loss,(epoch+1) * len(train_dataset)/BATCH_SIZE)
        print('(%s)[epoch:%d]: val_acc:%f,val_loss:%f,'%(str(datetime.datetime.now()),epoch,accuracy,log_loss))
        
        # save a model snapshot if the validation loss drops below min_loss
        if  log_loss < min_loss:
            try:
                fileName = date+'_loss_best.pth'
                utils.snapshot('../model/DenseNet201/', fileName, {
                       'epoch': epoch + 1,
                       'state_dict': model.state_dict(),
                       'optimizer': optimizer.state_dict(),
                       'val_loss': log_loss,
                       'val_correct':accuracy })          
                patience = 0
                min_loss=log_loss
                print('save new model loss,now loss is ',min_loss)
            except IOError:
                print("Error: 没有找到文件或读取文件失败")    
        else:
            patience += 1    
            
        # save a model snapshot if the accuracy exceeds min_acc
        if accuracy>min_acc:
            try:
                fileName = date+'_acc_best.pth'
                utils.snapshot('../model/DenseNet201/', fileName, {
                       'epoch': epoch + 1,
                       'state_dict': model.state_dict(),
                       'optimizer': optimizer.state_dict(),
                       'val_loss': log_loss,
                       'val_correct':accuracy })             
                min_acc=accuracy
                print('save new model acc,now acc is ',min_acc.item())    
            except IOError:
                print("Error: 没有找到文件或读取文件失败")
Example #28
def train():
    train_dataset = FurnitureDataset('train2',
                                     transform=preprocess_with_augmentation)
    val_dataset = FurnitureDataset('val', transform=preprocess)
    training_data_loader = DataLoader(dataset=train_dataset,
                                      num_workers=8,
                                      batch_size=BATCH_SIZE,
                                      shuffle=True)
    validation_data_loader = DataLoader(dataset=val_dataset,
                                        num_workers=1,
                                        batch_size=BATCH_SIZE,
                                        shuffle=False)

    model = get_model()

    criterion = nn.CrossEntropyLoss().cuda()

    nb_learnable_params = sum(p.numel() for p in model.fresh_params())
    print(f'[+] nb learnable params {nb_learnable_params}')

    min_loss = float("inf")
    lr = 0
    patience = 0
    for epoch in range(30):
        print(f'epoch {epoch}')
        if epoch == 1:
            lr = 0.00003
            print(f'[+] set lr={lr}')
        if patience == 2:
            patience = 0
            model.load_state_dict(torch.load('best_val_weight.pth'))
            lr = lr / 5
            print(f'[+] set lr={lr}')
        if epoch == 0:
            lr = 0.001
            print(f'[+] set lr={lr}')
            optimizer = torch.optim.Adam(model.fresh_params(), lr=lr)
        else:
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=lr,
                                         weight_decay=0.0001)

        running_loss = RunningMean()
        running_score = RunningMean()

        model.train()
        pbar = tqdm(training_data_loader, total=len(training_data_loader))
        for inputs, labels in pbar:
            batch_size = inputs.size(0)

            inputs = Variable(inputs)
            labels = Variable(labels)
            if use_gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, dim=1)

            loss = criterion(outputs, labels)
            running_loss.update(loss.data[0], 1)
            running_score.update(torch.sum(preds != labels.data), batch_size)

            loss.backward()
            optimizer.step()

            pbar.set_description(
                f'{running_loss.value:.5f} {running_score.value:.3f}')
        print(
            f'[+] epoch {epoch} {running_loss.value:.5f} {running_score.value:.3f}'
        )

        lx, px = utils.predict(model, validation_data_loader)
        log_loss = criterion(Variable(px), Variable(lx))
        log_loss = log_loss.data[0]
        _, preds = torch.max(px, dim=1)
        accuracy = torch.mean((preds != lx).float())
        print(f'[+] val {log_loss:.5f} {accuracy:.3f}')

        if log_loss < min_loss:
            torch.save(model.state_dict(), 'best_val_weight.pth')
            print(
                f'[+] val score improved from {min_loss:.5f} to {log_loss:.5f}. Saved!'
            )
            min_loss = log_loss
            patience = 0
        else:
            patience += 1
Example #29
    n_price_list, price_decay, price_max = normalize(np.array(price_list))

    # Hyperparameters
    learning_rate = 0.5
    generation = 200

    # Learning loop
    cost_history = []
    accuracy_history = []
    curve_history = []
    for i in range(generation):
        sum_for_t0 = 0
        sum_for_t1 = 0
        current_curve = []
        for km, price in zip(n_km_list, n_price_list):
            prediction = predict(km, t0, t1)
            current_curve.append(
                unnormalize(prediction, price_decay, price_max))
            error = prediction - price
            sum_for_t0 += error
            sum_for_t1 += error * km

        # Regression
        t0 -= learning_rate * (data_mean * sum_for_t0)
        t1 -= learning_rate * (data_mean * sum_for_t1)

        # Evolution debug
        sum_for_t0 *= data_mean
        precision = 1 - abs(sum_for_t0**2)
        print(
            "generation {}: error = {:.2f}, precision = {:.2f}, T0 = {:.2f}, T1 = {:.2f}"
Example #30
                                    input_image_path = path.join(
                                        sequencePath,
                                        date) + '/' + str(i) + '.png'

                                    # Read and predict on a test image
                                    input_image = cv2.imread(
                                        input_image_path)  #640,600,3

                                    input_image_large = cv2.resize(
                                        input_image, (2048, 1024),
                                        interpolation=cv2.INTER_AREA)
                                    input_tensor = graph.get_tensor_by_name(
                                        'input_placeholder:0')
                                    predicted_image = predict(
                                        input_image_large, input_tensor, model,
                                        dataset, sess)

                                    # Convert colorspace (palette is in RGB) and save prediction result
                                    predicted_image = cv2.cvtColor(
                                        predicted_image, cv2.COLOR_BGR2RGB)
                                    predicted_image = cv2.resize(
                                        predicted_image, (640, 600),
                                        cv2.INTER_AREA)

                                    # sky cannot be in the lower part of the image. More efficient to use this when predicting,
                                    # but very easy to put in here...

                                    predicted_image[400:, :, :] = np.zeros(
                                        (200, 640, 3), dtype=int)
                                    kernelSize = 70
Example #31
            elif optimizer == "adam":
                t = t + 1
                parameters, v, s = update_parameters_with_adam(parameters, gradients, v, s,
                                                               t, learning_rate, beta1, beta2, epsilon)

        # end

        # print the cost every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
        if print_cost and i % 100 == 0:
            costs.append(cost)

    # plot how the cost evolves
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("损失图")
    plt.show()

    return parameters


if __name__ == '__main__':

    train_X, train_Y = load_dataset()

    parameters = model(train_X, train_Y, optimizer="momentum")

    predictions = predict(train_X, train_Y, parameters)
Example #32
    return test


from utils import getModel, predict

# Training accuracy was 96.0%.
# Testing accuracy was 90.0% (N = 50).
correct = 0.0

for ei in range(len(features)):
    # select all but the one at position `ei`:
    training = np.ones(len(features), bool)
    training[ei] = False
    testing = ~training
    model = getModel(features[training], is_virginica[training])
    predictions = predict(model, features[testing])
    correct += np.sum(predictions == is_virginica[testing])
acc = correct / float(len(features))
print('Accuracy: {0:.1%}'.format(acc))


###########################################
############## SEEDS DATASET ##############
###########################################

from main.ch02.load import load_dataset

feature_names = [
    'area',
    'perimeter',
Example #33
J = 10
# total number of teaching materials to present
textbook = 500
# number of materials used for estimation
test_textbook_list = [100]
# number of materials presented between estimations
between_textbook_list = [1]
# group index
k = 1

lambds = [1, 2, 3, 4, 5]
for lambd in lambds:
    oracle = Oracle(eta=eta, lambd=lambd)
    min_w = oracle.estimate_min_w(pd.concat([train_X, test_X]),
                                  pd.concat([train_y, test_y]))
    print('{}: {}'.format(min_w, predict(test_X, test_y, min_w)))
    W_init = oracle.make_W_init(J=J)
    W = W_init.copy()
    train_X_ = train_X.copy()
    train_y_ = train_y.copy()

    now = datetime.datetime.now()
    now_str = now.strftime('%Y%m%d%H%M%S')
    result_path = 'result/insect_{}_{}_{}'.format(now_str, k, lambd)
    logging.basicConfig(
        filename='./logs/log_insect_{0:%Y%m%d%H%M%S}_{1}_{2}.log'.format(
            now, k, lambd),
        level=logging.DEBUG)
    logging.debug('./logs/log_{0:%Y%m%d%H%M%S}_{1}_{2}.log'.format(
        now, k, lambd))
    logging.debug('min_w')
Example #34
File: main.py Project: gsvic/fmriFlow
op = args.operator

if op == "vb":
    flow = flow.visualizeBrain()

elif op == "v":
    nsamples = args.nsamples
    flow = flow.visualize(nsamples=int(nsamples))

elif op == "vc":
    k = args.k
    flow = flow.clustering(int(k)).visualize()

elif op == "ts":
    k = args.k
    flow = flow.clustering(int(k))
    flow.execute()
    with open("model", "a+") as output:
        pickle.dump(flow.last.result, output, pickle.HIGHEST_PROTOCOL)
        exit("Model Saved")

elif op == "pr":
    utils.predict(args.model, args.vector)
    exit()

else:
    exit("Operator not found")

flow.execute()
Example #35
from utils import predict, BBox
import torch

font = cv2.FONT_HERSHEY_DUPLEX
image = np.fromstring(image, np.uint8)
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
updated_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
updated_image = cv2.resize(updated_image, (320, 240))
image_mean = np.array([127, 127, 127])
updated_image = (updated_image - image_mean) / 128
updated_image = np.transpose(updated_image, [2, 0, 1])
updated_image = np.expand_dims(updated_image, axis=0)
updated_image = updated_image.astype(np.float32)
confidences, boxes = face_detector.run(None,
                                       {face_detector_input: updated_image})
boxes, _, _ = predict(image.shape[1], image.shape[0], confidences, boxes, 0.7)

bmi_value = -1
bmi_class = 'Not Found'
if len(boxes) > 0:
    print(boxes)

    box = boxes[0]
    out_size = 112
    img = image.copy()
    height, width, _ = img.shape
    x1 = box[0]
    y1 = box[1]
    x2 = box[2]
    y2 = box[3]
    x1 = int(x1 - 0.1 * x1)
Example #36
def predict(in_file, out_file):
    xvals, yvals = utils.load_data(in_file)
    network = build_network()
    predictions = utils.predict(xvals, network, 'circle.tflearn')
    print('Accuracy: {}%'.format(utils.get_accuracy(yvals, predictions)))
    utils.write_predictions(xvals, predictions, out_file)
Example #37
    # load model:
    pytorch_model = PTModel().float()
    checkpoint = torch.load("data/nyu.pth.tar", map_location=torch.device('cpu'))
    pytorch_model.load_state_dict(checkpoint['state_dict'])
    pytorch_model.eval()

    # Load input imgs, not adapted for a video yet:
    file = "data/_in/classroom__rgb_00283.jpg"
    img = np.asarray(Image.open(file), dtype='float32')
    rgb_height, rgb_width = img.shape[:2]
    xx, yy = worldCoords(width=rgb_width, height=rgb_height)
    inputRGB = np.clip(img / 255, 0, 1)

    # Compute results
    start = time.time()
    output = predict(pytorch_model, inputRGB)
    print(f"Predicted in {time.time() - start} s.")
   
    # save image:
    # im = Image.fromarray(np.uint8(output * 255))
    # im.save("data/_out/sample_2_depth.png")

    # Compute PCD and visualize:
    output = scale_up(2, output) * 10.0
    pcd = posFromDepth(output.copy(), xx, yy)
    
    # Open3d pcd:
    pcl = o3d.geometry.PointCloud()
    pcl.points = o3d.utility.Vector3dVector(pcd)
    pcl.colors = o3d.utility.Vector3dVector(inputRGB.reshape(rgb_width * rgb_height, 3))
    # Flip it, otherwise the pointcloud will be upside down
Example #38
import argparse
import utils

parser = argparse.ArgumentParser(description='predict module')

parser.add_argument('checkpoint')
parser.add_argument('img_path')
parser.add_argument('--gpu', dest="gpu", default="gpu")
parser.add_argument('--top_k', dest="top_k", default=5, type=int)
parser.add_argument('--category_names',
                    dest="category_names",
                    default="cat_to_name.json")
args = parser.parse_args()
checkpoint = args.checkpoint
img_path = args.img_path
core = args.gpu
top_k = args.top_k
category = args.category_names

model = utils.load_model(checkpoint)
probs, classes = utils.predict(img_path, model, core, category, top_k)
print(probs)
print(classes)
Example #39
    minibatches = get_minibatch(X_train, y_train)

    for iter in range(1, 100 + 1):
        idx = np.random.randint(0, len(minibatches))
        X_mini, y_mini = minibatches[idx]

        grad = get_minibatch_grad(model, X_mini, y_mini)

        for layer in grad:
            velocity[layer] = gamma * velocity[layer] + 1e-3 * grad[layer]
            model[layer] += velocity[layer]

    return model


if __name__ == '__main__':
    X, y = make_moons(n_samples=5000, random_state=42, noise=0.1)
    x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=123)

    mean_accuracy = []

    for j in range(15):
        model = make_network()
        model = sgd(model, x_train, y_train)
        y_pred = predict(model, x_test, y_test)

        acc = accuracy(y_test, y_pred)
        mean_accuracy.append(acc)

    print(np.mean(mean_accuracy))