def process(tag_name, tc, save_flag=False, try_times=2000):
    """Repeat training and verification, appending each run's score to a CSV.

    Args:
        tag_name: label used both as the output directory and as part of
            every generated file name.
        tc: teacher-data provider; must offer save_teacher(), create_dataset()
            and get_all_features().
        save_flag: when True, also dump each run's teacher/verify datasets
            to per-iteration CSV files via tc.save_teacher().
        try_times: number of train/verify iterations to run.
    """
    _dir = create_dir([tag_name])

    with open(tag_name + "/av_verify_report" + tag_name + ".csv", "w") as fw:
        for i in range(try_times):
            print("--try count: {0}/{1}--".format(i, try_times))

            # Build the teacher (training) data for this iteration.
            verify_data, teacher_dates, x, y = None, None, None, None
            if save_flag:
                _save_name_teacher = tag_name + "/av_teaching_data" + tag_name + "_{0:05d}".format(i) + ".csv" # file names handed to tc.save_teacher() for persisting teacher/verify data
                _save_name_verify = tag_name + "/av_verify_data" + tag_name + "_{0:05d}".format(i) + ".csv"
                verify_data, teacher_dates, x, y = tc.save_teacher(save_name_teacher=_save_name_teacher, save_name_verify=_save_name_verify) # dataset contents change on every call
            else:
                verify_data, teacher_dates, x, y = tc.create_dataset() # dataset contents change on every call
            features_dict = tc.get_all_features()
            training_data = (x, y)


            # Train and persist the classifier, then log its score.
            clf, score = learning.learn(training_data)
            mc.save(clf, tag_name)
            fw.write("{0},{1}\n".format(i, score))

            # Overfitting check (author noted it is not currently useful).
            dates = sorted(verify_data.keys())                   # dates NOT used for training
            if len(dates) == 0:
                continue
            result = predict.predict2(clf, dates, features_dict) # verify against held-out data
Esempio n. 2
0
def main():
    """Load the dataset, train a classifier, and report test-set metrics."""
    features, labels = load_data()

    classifier, iters, error, metrics = learning.learn(features, labels)
    precision, recall, f1 = metrics

    print(f"Best C: {iters}")
    print(f"Test set error: {error}")
    print(f"Precision: {precision}")
    print(f"Recall: {recall}")
    print(f"F1 score: {f1}")
Esempio n. 3
0
def main():
    """Train on the full dataset and print each evaluation metric."""
    X, Y = load_data()

    classifier, iters, error, (precision, recall, f1) = learning.learn(X, Y)

    # Table-driven printing keeps the label/value pairs in one place.
    for label, value in (
        ("Best C", iters),
        ("Test set error", error),
        ("Precision", precision),
        ("Recall", recall),
        ("F1 score", f1),
    ):
        print("{0}: {1}".format(label, value))
Esempio n. 4
0
def create_clf(clfque):
    """Load a cached classifier from disk (or train one) and enqueue it.

    If ``clf3.pckl`` exists it is unpickled; otherwise a new classifier is
    trained via ``lern.learn()`` and cached to that file. Either way the
    classifier is put on ``clfque`` for the consumer.

    NOTE(review): ``pickle.load`` executes arbitrary code from the file --
    only safe while clf3.pckl is trusted local output.
    """
    if os.path.isfile('clf3.pckl'):
        # Context manager guarantees the handle is closed even if
        # unpickling raises (the original leaked it on this path).
        with open('clf3.pckl', 'rb') as clf_file:
            clf = pickle.load(clf_file)
    else:
        clf = lern.learn()
        # Cache the trained classifier so later runs skip training.
        with open('clf3.pckl', 'wb') as clf_file:
            pickle.dump(clf, clf_file)

    clfque.put(clf)
Esempio n. 5
0
def solveOne(setN, setName, ansName):
    """Solve one data set: load it, learn rating predictions, report RMSE.

    Args:
        setN: ordinal number of the data set (used only in log messages).
        setName: name/path of the training-set file passed to loadData().
        ansName: name/path of the answer (test) file passed to loadData().
    """
    startTime = time()
    maxRating, nUsers, nItems, trainX, trainY, testX, testY = loadData(setName, ansName)

    print("Set #{0}: read {1} users, {2} items, {3} train set ratings and {4} test set ratings".format(setN, nUsers,
                                                                                                       nItems,
                                                                                                       len(trainX),
                                                                                                       len(testX)))
    bestL, predictedRs = learn(maxRating, nUsers, nItems, trainX, trainY, testX)
    print("Set #{0}: RMSE = {1}, best λ = {2}".format(setN, errors.rmse(predictedRs, testY), bestL))
    endTime = time()
    # Fixed typo in the log message: "Leaning" -> "Learning".
    print("Set #{0}: Learning done in {1} seconds".format(setN, endTime - startTime))
Esempio n. 6
0
def main():
    """Train a classifier, print evaluation metrics and elapsed wall time."""
    startTime = time.time()
    X, Y = load_data()

    classifier, iters, error, (precision, recall, f1) = learning.learn(X, Y)

    # One metric per line, driven from a single label/value table.
    for name, value in (("Best L", iters),
                        ("Test set error", error),
                        ("Precision", precision),
                        ("Recall", recall),
                        ("F1 score", f1)):
        print("{0}: {1}".format(name, value))

    endTime = time.time()
    print("Done working in {0} seconds".format(endTime - startTime))
Esempio n. 7
0
def solveOne(setN, setName, ansName):
    """Solve one data set: load it, learn rating predictions, report RMSE.

    Args:
        setN: ordinal number of the data set (used only in log messages).
        setName: name/path of the training-set file passed to loadData().
        ansName: name/path of the answer (test) file passed to loadData().
    """
    startTime = time()
    maxRating, nUsers, nItems, trainX, trainY, testX, testY = loadData(
        setName, ansName)

    print(
        "Set #{0}: read {1} users, {2} items, {3} train set ratings and {4} test set ratings"
        .format(setN, nUsers, nItems, len(trainX), len(testX)))
    bestL, predictedRs = learn(maxRating, nUsers, nItems, trainX, trainY,
                               testX)
    print("Set #{0}: RMSE = {1}, best λ = {2}".format(
        setN, errors.rmse(predictedRs, testY), bestL))
    endTime = time()
    # Fixed typo in the log message: "Leaning" -> "Learning".
    print("Set #{0}: Learning done in {1} seconds".format(
        setN, endTime - startTime))
def process(tag_name, tc, save_flag=False, try_times=2000):
    """Repeat training and verification, appending each run's score to a CSV.

    Args:
        tag_name: label used as the output directory and in every file name.
        tc: teacher-data provider; must offer save_teacher(),
            create_dataset() and get_all_features().
        save_flag: when True, also dump each run's teacher/verify datasets
            to per-iteration CSV files via tc.save_teacher().
        try_times: number of train/verify iterations to run.
    """
    _dir = create_dir([tag_name])

    with open(tag_name + "/av_verify_report" + tag_name + ".csv", "w") as fw:
        for i in range(try_times):
            print("--try count: {0}/{1}--".format(i, try_times))

            # Build the teacher (training) data for this iteration.
            verify_data, teacher_dates, x, y = None, None, None, None
            if save_flag:
                _save_name_teacher = tag_name + "/av_teaching_data" + tag_name + "_{0:05d}".format(
                    i) + ".csv"  # file name handed to tc.save_teacher() for persisting the teacher data
                _save_name_verify = tag_name + "/av_verify_data" + tag_name + "_{0:05d}".format(
                    i) + ".csv"
                verify_data, teacher_dates, x, y = tc.save_teacher(
                    save_name_teacher=_save_name_teacher,
                    save_name_verify=_save_name_verify
                )  # dataset contents change on every call
            else:
                verify_data, teacher_dates, x, y = tc.create_dataset(
                )  # dataset contents change on every call
            features_dict = tc.get_all_features()
            training_data = (x, y)

            # Train and persist the classifier, then log its score.
            clf, score = learning.learn(training_data)
            mc.save(clf, tag_name)
            fw.write("{0},{1}\n".format(i, score))

            # Overfitting check (author noted it is not currently useful).
            dates = sorted(verify_data.keys())  # dates NOT used for training
            if len(dates) == 0:
                continue
            result = predict.predict2(clf, dates,
                                      features_dict)  # verify against held-out data
def main():
    """Estimate target distance via vision, pick a firing delay with KNN,
    and send that delay to the hardware over a serial port.
    """
    #######  Estimate Distance  ########
    vision.img_capture()
    distance = vision.img_distance()
    print("Distance estimated ", distance)
    if distance == 0:
        # NOTE(review): exits with status 0 even though this is a failure
        # path -- confirm a non-zero exit code isn't expected by callers.
        print("Distance estimation Failed! ")
        exit(0)
    #######  Get KNN Model  ########
    knn_model = learning.learn()

    #######  Predict hit Success/Failure  ########
    # Score the same estimated distance against each candidate delay 1..5.
    d = distance
    temp = {'Distance': [d, d, d, d, d], 'Delay': [1, 2, 3, 4, 5]}
    x1 = pd.DataFrame(data=temp)
    y_pred = learning.knn_predict(knn_model, x1)
    print("Prediction successful!")
    print(y_pred)

    # Take the largest row index among those sharing the maximum score.
    idx = np.where(y_pred == np.amax(y_pred))
    print(idx[0])
    idx = np.amax(idx)
    print(idx)

    # Rows are 0-indexed but delays start at 1.
    delay = idx + 1
    print('Delay to be applied: ', delay)

    #######  Execute hit  ########
    # 'com5' is a Windows-specific port name; adjust per machine.
    ser = serial.Serial()
    ser.baudrate = 9600
    ser.port = 'com5'
    ser.open()
    # assumes delay fits in one byte (0-255) -- TODO confirm
    i = bytearray([delay])
    #send value here
    ser.write(i)
    print("sent")
    ser.close()
Esempio n. 10
0
    'chopper': set_chopper_params,
    'pong': set_pong_params
}

# Core user-interface dispatch: the command-line arguments select a mode.

# stress: run a mutation stress test with the given mutation count.
if len(sys.argv) == 3 and sys.argv[1] == 'stress':
    mutation_count = int(sys.argv[2])
    set_stress_test_params()
    b = brain.Brain(1)
    b.mutation_stress_test(mutation_count)

# train: configure the named environment, then train a fresh brain in it.
elif len(sys.argv) == 3 and sys.argv[1] == 'train':
    param_setup = environment_settings[sys.argv[2]]
    param_setup()
    result = learning.learn(environments[sys.argv[2]])

# analyze: load a saved brain from file and visualise its performance.
elif len(sys.argv) == 4 and sys.argv[1] == 'analyze':
    load_brain = brain.load_brain_from_file(sys.argv[3])
    param_setup = environment_settings[sys.argv[2]]
    param_setup()
    learning.visualize_performance(load_brain, environments[sys.argv[2]])

# improve: continue training a saved brain in the named environment.
elif len(sys.argv) == 4 and sys.argv[1] == 'improve':
    load_brain = brain.load_brain_from_file(sys.argv[3])
    param_setup = environment_settings[sys.argv[2]]
    param_setup()
    result = learning.learn_from_existing(load_brain,
                                          environments[sys.argv[2]])
else:
    print('INVALID USAGE')
Esempio n. 11
0
 def learning(self):
     """Read core/target paths from the UI text fields and run training.

     NOTE(review): the method name matches the `learning` module, but the
     call below is a global lookup, so it still resolves to the module.
     """
     cores_path = str(self.text_cores.text())
     target_path = str(self.text_target.text())

     learning.learn(cores_path, target_path)
Esempio n. 12
0
def learn(args):
    """Run learning for the date range and lag given on the command line.

    NOTE(review): the original (Japanese) docstring said "fetch predicted
    values from the prediction model", but the body delegates to
    learning.learn() -- confirm which description is intended.

    Args:
        args: parsed arguments exposing start_date, end_date and var_lag.
    """
    learning.learn(args.start_date, args.end_date, args.var_lag)
Esempio n. 13
0
# Show the original document next to its perceptual-hash rendering.
# NOTE: the tuple comma chains imshow and set_title in one statement;
# both calls run for their side effects only.
ax1.imshow(np.reshape(train_set[NN, :], (Nr, Nc)),
           cmap=plt.get_cmap('gray'),
           aspect='auto'), ax1.set_title("Original Document " + str(Nr) + "x" +
                                         str(Nc))
ax2.imshow(np.reshape(trfeat_set[NN, :rN**2], (rN, rN)),
           cmap=plt.get_cmap('gray'),
           aspect='auto'), ax2.set_title("Perceptual Hash " + str(rN) + "x" +
                                         str(int(rN / 2)))
plt.colorbar()

plt.show()

#%% Learning on training set
import learning
# Only for hash-based features (phash/ahash/whash) -- original note was
# garbled; confirm against the feature-extraction options.
clasf = learning.learn(trfeat_set, y_train, method=c_method)

#%% Prediction on test set

# create_sample: get a batch of Nbatch images per class, full resolution.
test_set, y_test, Nr, Nc = preproc.create_sample(dataset,
                                                 nitems,
                                                 label='test',
                                                 Nbatch=n_val)
# Extract the same feature type used for training from the test images.
tefeat_set = features.feat_extr(test_set,
                                Nr,
                                Nc,
                                feat_type=f_method,
                                hash_size=hash_s)
# Free the raw test images; only the extracted features are needed now.
del test_set
def process(tag_name, tc, feature_func, save_flag=False, try_times=2000):
	"""Repeat training and verification, saving aggregated results to CSV.

	Args:
		tag_name: label used as the output directory and in every file name.
		tc: teacher-data provider (save_teacher / create_dataset /
			get_all_features).
		feature_func: not referenced in this body -- presumably consumed
			elsewhere; TODO confirm with callers.
		save_flag: when True, also dump per-iteration teacher/verify data
			and per-date results.
		try_times: number of train/verify iterations to run.
	"""
	create_dir([tag_name])

	with open(tag_name + "/av_verify_report" + tag_name + ".csv", "w") as fw:
		for i in range(try_times):
			print("--try count: {0}/{1}--".format(i, try_times))

			# Build the teacher (training) data for this iteration.
			verify_data, teacher_dates, teacher_features, teacher_flags = None, None, None, None
			if save_flag:
				_save_name_teacher = tag_name + "/av_teaching_data" + tag_name + "_{0:05d}".format(i) + ".csv" # file names handed to tc.save_teacher() for persisting teacher/verify data
				_save_name_verify = tag_name + "/av_verify_data" + tag_name + "_{0:05d}".format(i) + ".csv"
				verify_data, teacher_dates, teacher_features, teacher_flags = tc.save_teacher(save_name_teacher=_save_name_teacher, save_name_verify=_save_name_verify) # dataset contents change on every call
			else:
				verify_data, teacher_dates, teacher_features, teacher_flags = tc.create_dataset() # dataset contents change on every call
			features_dict = tc.get_all_features()
			training_data = (teacher_features, teacher_flags)
			dates = sorted(verify_data.keys())                   # dates NOT used for training

			# Train, checkpointing the classifier to a per-iteration pickle.
			clf = learning.learn(training_data, tag_name + "/av_entry_temp{0}_{1:05d}.pickle".format(tag_name, i))
			result = predict.predict2(clf, dates, features_dict) # verify against held-out data
			
			# Optionally save this iteration's per-date results as well.
			if save_flag:
				with open(tag_name + "/av_verify_data" + tag_name + "_{0:05d}".format(i) + "_result.csv", "w") as fw_result:
					for date in dates:
						if date in result:
							try:      # author note: this try is probably unnecessary
								fw_result.write(str(date))
								fw_result.write(",")
								fw_result.write(str(verify_data[date]))
								fw_result.write(",")
								fw_result.write(str(result[date]))
								fw_result.write("\n")
							except:
								pass

			# Aggregate prediction errors into a histogram per true class.
			scale = 10
			zero = [0.000001] * scale # seeded with 0.000001 so the sum() used as a denominator can never be zero
			one = [0.000001] * scale
			for date in dates:
				if date in result:
					try:              # author note: this try is probably unnecessary
						c = verify_data[date]
						val = c - result[date]
						if int(c) == 0:
							zero[abs(int(val * scale))] += 1
						elif int(c) == 1:
							one[abs(int(val * scale))] += 1
						#print(val)
					except:
						pass

			# Append this iteration's normalized tallies to the report file.
			zero = [str(x / sum(zero)) for x in zero] # normalize
			one = [str(x / sum(one)) for x in one]
			fw.write("{0},".format(i))
			fw.write(",".join(zero))
			fw.write(",,") # blank columns keep the two groups apart when viewed in Excel
			fw.write(",".join(one))
			fw.write("\n")
Esempio n. 15
0
def main(argus):
    """Build the model, then train it on the VeReMi split chosen by *argus*."""
    model = build_model()
    kfold = argus.kfold
    # Dataset directory is '<attack>_<type>' under the fixed prefix.
    prefix = 'VeReMi-Dataset/'
    data_path = prefix + argus.at + '_' + argus.t
    learning.learn(model, data_path)
Esempio n. 16
0
 def relearn(self):
     """Re-train the model, then redirect the browser to the stats page."""
     learning.learn()
     # CherryPy signals an HTTP redirect by raising HTTPRedirect.
     raise cherrypy.HTTPRedirect("/stats")