from sklearn.ensemble import GradientBoostingRegressor

def GradientBoosted(X_train, X_test, y_train, y_test):
    mod = GradientBoostingRegressor()
    mod.fit(X_train, y_train)
    print("Done training")
    gb_labels = mod.predict(X_test)
    print("Done testing")
    gb_score = mod.score(X_test, y_test)  # R^2 on the test set
    return gb_score, gb_labels
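The helper above returns both the R^2 score and the predictions; a minimal usage sketch (the synthetic data and variable names here are illustrative, not part of the original):

from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split

# synthetic regression data purely for illustration
X, y = make_regression(n_samples=500, n_features=8, noise=0.1, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
score, labels = GradientBoosted(X_train, X_test, y_train, y_test)
print(score)  # R^2 on the held-out split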
Example no. 2
def predict_using_local_model(self):
    gbr = GradientBoostingRegressor()
    gbr.fit(self.train_x, self.train_y)
    print('R^2 of gbr on the training set: ' +
          str(gbr.score(self.train_x, self.train_y)))
    start_time = time.time()
    predictions = gbr.predict(self.test_x)
    predict_time = time.time() - start_time
    print('Prediction time for gbr is ' + str(predict_time) + '\n')
    predictions = predictions.astype('uint8')  # cast to 8-bit unsigned labels
    return predictions
Example no. 3
def prediction():
    global train_x, train_y, test_x
    gbr = GradientBoostingRegressor()
    gbr.fit(train_x, train_y)
    print('R^2 of gbr on the training set: ' +
          str(gbr.score(train_x, train_y)))
    start_time = time.time()
    predictions = gbr.predict(test_x)
    predict_time = time.time() - start_time
    print('Prediction time for gbr is ' + str(predict_time) + '\n')
    predictions = predictions.astype('uint8')
    print(predictions)
    return predictions
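Both snippets above score the model on the same data it was fitted to, which overstates quality; a minimal sketch of scoring on a held-out split instead (variable names follow the example, the split itself is an assumption):

from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split

X_tr, X_val, y_tr, y_val = train_test_split(train_x, train_y, test_size=0.2, random_state=0)
gbr = GradientBoostingRegressor()
gbr.fit(X_tr, y_tr)
print('Held-out R^2:', gbr.score(X_val, y_val))  # usually lower than the training-set score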
Example no. 4
#!/usr/bin/env python

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
from sklearn.utils import shuffle
import numpy as np

boston = datasets.load_boston()  # note: load_boston was removed in scikit-learn 1.2
X, Y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, Y_train = X[:offset], Y[:offset]
X_test, Y_test = X[offset:], Y[offset:]

regressor = GradientBoostingRegressor(n_estimators=120, learning_rate=0.2, max_depth=2,
                                      random_state=0, loss='squared_error')  # 'ls' was renamed to 'squared_error' in scikit-learn 1.0
regressor.fit(X_train, Y_train)
score = regressor.score(X_test, Y_test)
print(score)
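Whether 120 estimators is enough can be checked with staged_predict, which yields the ensemble's predictions after each boosting stage; a short sketch continuing from the variables above:

from sklearn.metrics import mean_squared_error

# test MSE after each boosting stage
test_mse = [mean_squared_error(Y_test, y_pred)
            for y_pred in regressor.staged_predict(X_test)]
best_stage = int(np.argmin(test_mse)) + 1
print('Lowest test MSE at stage %d: %.4f' % (best_stage, min(test_mse)))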
Example no. 5
from sklearn.linear_model import SGDRegressor as SGDR
from sklearn.ensemble import GradientBoostingRegressor as GBR
from sklearn.neighbors import KNeighborsRegressor
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Read the data
point = pd.read_csv("csv_data/points_p.csv")
world_x = point["world_x"]
world_y = point["world_y"]

# Read the label columns from the data
# eye = point[['eye_x', 'eye_y', 'eye_w', 'eye_h', 'pipil_x', 'pupil_y', 'pupil_w', 'pupil_h']]
eye = point[['eye_x', 'eye_y', 'pipil_x', 'pupil_y', 'pupil_w', 'pupil_h']]
print(eye)

# clf = SGDR(loss='huber',penalty='l2',alpha=0.9,max_iter=1000)
clf = GBR(max_depth=10)
# clf = KNeighborsRegressor(n_neighbors=20, weights="distance", algorithm="ball_tree", leaf_size=50)
clf.fit(eye, world_x)
joblib.dump(clf, "model/world_x.pkl")
print('score:', clf.score(eye, world_x))

clf.fit(eye, world_y)
joblib.dump(clf, "model/world_y.pkl")
print('score:', clf.score(eye, world_y))

# print('regression coefficients:', clf.coef_)
# print('intercept:', clf.intercept_)
print('mae \t mean_absolute_error')
print('mse \t mean_squared_error')
print('r2 \t coefficient of determination')
print(70 * '-')
exit()
# Visualize model performance
plt.figure()
x = np.arange(training_data_input.shape[0])
plt.plot(x, training_data_output, color='r', label='origin y')
color_list = ['k.', 'b.', 'go', 'yv', 'c*', 'm^']  # color/marker format strings
for i, pre_y in enumerate(pre_y_list):  # iterate over each regression model's predictions
    plt.plot(x, pre_y_list[i], color_list[i], label=model_names[i])  # draw each model's prediction line
plt.title('regression result comparison')  # title
plt.legend(loc='upper right')
plt.xlabel('test data number')
plt.ylabel('real and predicted values')
# plt.savefig("regression compare.jpg", dpi=500)
plt.show()

# Apply the model
print('regression prediction:')
print('predict data \t real data')
new_pre_y = model_gbr.predict(test_data_input)  # predict with the GBR model
model_gbr_score = model_gbr.score(test_data_input, test_data_output)
print("The score of model_gbr is : %f" % model_gbr_score)
for i in range(len(test_data_input)):
    print('  %.2f \t %.2f' % (new_pre_y[i], test_data_output[i]))  # print predicted vs. actual for each data point

# if __name__ == "__main__":
#     svm_baseline()
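For completeness, a minimal sketch of loading the two models saved earlier in this example and predicting on fresh samples (new_eye is a hypothetical DataFrame with the same columns as eye):

import joblib

model_x = joblib.load("model/world_x.pkl")
model_y = joblib.load("model/world_y.pkl")
# new_eye must contain the same feature columns used at fit time (hypothetical input)
pred_x = model_x.predict(new_eye)
pred_y = model_y.predict(new_eye)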