def xgboost_cv():
    train_start_date = '2016-03-05'
    train_end_date = '2016-04-06'
    test_start_date = '2016-04-11'
    test_end_date = '2016-04-16'
    sub_start_date = '2016-02-05'
    sub_end_date = '2016-03-05'
    sub_test_start_date = '2016-03-05'
    sub_test_end_date = '2016-03-10'
    user_index, training_data, label = make_train_set(train_start_date, train_end_date,
                                                      test_start_date, test_end_date)
    X_train, X_test, y_train, y_test = train_test_split(training_data, label,
                                                        test_size=0.2, random_state=0)
    dtrain = xgb.DMatrix(X_train, label=y_train)
    dtest = xgb.DMatrix(X_test, label=y_test)
    param = {'max_depth': 10, 'eta': 0.05, 'silent': 1, 'objective': 'binary:logistic'}
    num_round = 4000
    param['nthread'] = 4
    param['eval_metric'] = "auc"
    plst = list(param.items())
    plst += [('eval_metric', 'logloss')]
    evallist = [(dtest, 'eval'), (dtrain, 'train')]
    bst = xgb.train(plst, dtrain, num_round, evallist)
    # offline evaluation window
    sub_user_index, sub_training_data, sub_label = make_train_set(sub_start_date, sub_end_date,
                                                                  sub_test_start_date, sub_test_end_date)
    test = xgb.DMatrix(sub_training_data)
    y = bst.predict(test)
    pred = sub_user_index.copy()
    y_true = sub_user_index.copy()
    pred['label'] = y
    y_true['label'] = sub_label
    report(pred, y_true)
def xgboost_cv():
    train_start_date = '2016-03-06'
    train_end_date = '2016-04-06'
    test_start_date = '2016-04-06'
    test_end_date = '2016-04-11'
    sub_start_date = '2016-03-11'
    sub_end_date = '2016-04-11'
    sub_test_start_date = '2016-04-11'
    sub_test_end_date = '2016-04-16'
    user_index, training_data, label = make_train_set(train_start_date, train_end_date,
                                                      test_start_date, test_end_date)
    X_train, X_test, y_train, y_test = train_test_split(training_data, label,
                                                        test_size=0.2, random_state=0)
    del user_index, training_data, label
    dtrain = xgb.DMatrix(X_train.values, label=y_train)  # todo: missing=-999.0
    dtest = xgb.DMatrix(X_test.values, label=y_test)
    del X_train, X_test, y_train, y_test
    # param = {'max_depth': 10, 'eta': 0.05, 'silent': 1, 'objective': 'binary:logistic'}
    param = {'learning_rate': 0.1, 'n_estimators': 1000, 'max_depth': 3, 'min_child_weight': 5,
             'gamma': 0, 'subsample': 1.0, 'colsample_bytree': 0.8, 'scale_pos_weight': 1,
             'eta': 0.05, 'silent': 1, 'objective': 'binary:logistic'}
    num_round = 4000
    param['nthread'] = 6
    param['eval_metric'] = "auc"
    plst = list(param.items())
    plst += [('eval_metric', 'logloss')]
    evallist = [(dtest, 'eval'), (dtrain, 'train')]
    bst = xgb.train(plst, dtrain, num_round, evallist)
    del dtrain
    print('saving model...')
    # flag = 'basic'
    # bst.save_model('./cache/' + flag + '_model')
    sub_user_index, sub_training_data, sub_label = make_train_set(sub_start_date, sub_end_date,
                                                                  sub_test_start_date, sub_test_end_date,
                                                                  test=True)
    test = xgb.DMatrix(sub_training_data.values)
    y = bst.predict(test)
    pred = sub_user_index.copy()
    pred['label'] = y
    # keep only the highest-scoring candidate per user
    pred = pred.sort_values(by=['label'], ascending=False).groupby(['user_id'], as_index=False).first()
    # sweep the decision threshold and keep the best offline score
    limits = np.linspace(0, 0.5, 100)
    scores = np.zeros((1, 100))
    count = 0
    for i in limits:
        print('--------------------------------------------------------------------')
        print('limit=%s' % str(i))
        p = pred[pred['label'] > i]
        scores[:, count] = report(p, sub_label)
        count += 1
    print('max score : %s\nmax limit : %s' % (np.max(scores), limits[np.argmax(scores)]))
def xgboost_cv2():
    train_start_date = '2016-03-05'
    train_end_date = '2016-04-06'
    test_start_date = '2016-04-06'
    test_end_date = '2016-04-11'
    sub_start_date = '2016-03-10'
    sub_end_date = '2016-04-11'
    sub_test_start_date = '2016-04-11'
    sub_test_end_date = '2016-04-16'
    user_index, training_data, label = make_train_set(train_start_date, train_end_date,
                                                      test_start_date, test_end_date)
    X_train, X_test, y_train, y_test = train_test_split(training_data, label,
                                                        test_size=0.2, random_state=0)
    dtrain = xgb.DMatrix(X_train.values, label=y_train)
    dtest = xgb.DMatrix(X_test.values, label=y_test)
    param = {'learning_rate': 0.05, 'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 1,
             'gamma': 0, 'subsample': 0.8, 'colsample_bytree': 0.8, 'scale_pos_weight': 1,
             'eta': 0.05, 'silent': 1, 'objective': 'binary:logistic', 'eval_metric': 'auc'}
    num_round = 300
    param['nthread'] = 5
    # param['eval_metric'] = "auc"
    # plst = param.items()
    # plst += [('eval_metric', 'logloss')]
    evallist = [(dtest, 'eval'), (dtrain, 'train')]
    bst = xgb.train(param, dtrain, num_round, evallist)
    # use this window to compute the offline score
    sub_user_index, sub_training_data, sub_label = make_train_set(sub_start_date, sub_end_date,
                                                                  sub_test_start_date, sub_test_end_date)
    test = xgb.DMatrix(sub_training_data.values)
    y = bst.predict(test)
    pred = sub_user_index.copy()
    # real labels for cate 8 during the test window
    y_true = get_labels_8(sub_test_start_date, sub_test_end_date)
    # y_true = sub_user_index.copy()
    pred['label'] = y  # predicted probability for the test window
    # print(pred[(pred.label >= 0.12)].shape)
    # print("y_true:")
    # print(y_true)
    # pred = pred[(pred.label >= 0.35)]
    # print(len(pred))
    # print(pred)
    # sweep the decision threshold from 0.00 to 0.29 and record the offline score
    ans = []
    for i in range(0, 30):
        pred = sub_user_index.copy()
        pred['label'] = y
        pred = pred[pred.label >= i / 100]
        # print(pred)
        rep = report(pred, y_true)
        print('%s : score:%s' % (i / 100, rep))
        ans.append([i / 100, rep])
    print('ans:%s' % ans)
    threshold = sorted(ans, key=getKey, reverse=True)[0][0]
    bestscore = sorted(ans, key=getKey, reverse=True)[0][1]
    print('best threshold:%s' % threshold)
    print('best score:%s' % bestscore)
def gbdt_cv():
    train_start_date = '2016-03-05'
    train_end_date = '2016-04-06'
    test_start_date = '2016-04-06'
    test_end_date = '2016-04-11'
    sub_start_date = '2016-03-10'
    sub_end_date = '2016-04-11'
    sub_test_start_date = '2016-04-11'
    sub_test_end_date = '2016-04-16'
    user_index, training_data, label = make_train_set(train_start_date, train_end_date,
                                                      test_start_date, test_end_date)
    X_train, X_test, y_train, y_test = train_test_split(training_data, label,
                                                        test_size=0.2, random_state=0)
    param = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 1.0, 'learning_rate': 0.01,
             'min_samples_leaf': 1, 'random_state': 3, 'max_features': 0.8}
    clf = gbdt(**param)  # unpack the params as keyword arguments
    clf.fit(X_train, y_train)
    # use this window to compute the offline score
    sub_user_index, sub_training_data, sub_label = make_train_set(sub_start_date, sub_end_date,
                                                                  sub_test_start_date, sub_test_end_date)
    test = sub_training_data.values
    y = clf.predict_proba(test)[:, 1]  # positive-class probability, so the thresholds below are meaningful
    pred = sub_user_index.copy()
    # real labels for cate 8 during the test window
    y_true = get_labels_8(sub_test_start_date, sub_test_end_date)
    # y_true = sub_user_index.copy()
    pred['label'] = y  # predicted probability for the test window
    # sweep the decision threshold from 0.00 to 0.29 and record the offline score
    ans = []
    for i in range(0, 30):
        pred = sub_user_index.copy()
        pred['label'] = y
        pred = pred[pred.label >= i / 100]
        # print(pred)
        rep = report(pred, y_true)
        print('%s : score:%s' % (i / 100, rep))
        ans.append([i / 100, rep])
    print('ans:%s' % ans)
    threshold = sorted(ans, key=getKey, reverse=True)[0][0]
    bestscore = sorted(ans, key=getKey, reverse=True)[0][1]
    print('best threshold:%s' % threshold)
    print('best score:%s' % bestscore)
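# `getKey` is used as the sort key in the threshold sweeps above but is not defined in this
# section. A minimal sketch of what it is assumed to do, given that each `ans` entry is a
# [threshold, score] pair and the list is sorted by score in descending order:
def getKey(item):
    # sort on the score element of a [threshold, score] pair
    return item[1]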
def xgboost_report_submission():
    train_start_date = '2016-03-08'
    train_end_date = '2016-04-09'
    result_start_date = '2016-04-09'
    result_end_date = '2016-04-14'
    valid_start_date = '2016-03-01'
    valid_end_date = '2016-04-02'
    valid_result_start_date = '2016-04-02'
    valid_result_end_date = '2016-04-07'
    test_start_date = '2016-03-15'  # not used in this function; see the submission sketch below
    test_end_date = '2016-04-16'
    user_index, training_data, label = make_train_set(train_start_date, train_end_date,
                                                      result_start_date, result_end_date)
    x_train, x_test, y_train, y_test = train_test_split(training_data.values, label.values,
                                                        test_size=0.2, random_state=0)
    dtrain = xgb.DMatrix(x_train, label=y_train)
    dtest = xgb.DMatrix(x_test, label=y_test)
    param = {'learning_rate': 0.1, 'n_estimators': 1000, 'max_depth': 3, 'min_child_weight': 5,
             'gamma': 0, 'subsample': 1.0, 'colsample_bytree': 0.8, 'scale_pos_weight': 1,
             'eta': 0.05, 'silent': 1, 'objective': 'binary:logistic'}
    num_round = 283
    param['nthread'] = 4
    # param['eval_metric'] = "auc"
    plst = list(param.items())
    plst += [('eval_metric', 'logloss')]
    evallist = [(dtest, 'eval'), (dtrain, 'train')]
    bst = xgb.train(plst, dtrain, num_round, evallist)
    # Report with the validation set
    valid_user_index, valid_training_data = make_test_set(valid_start_date, valid_end_date)
    valid_training_data = xgb.DMatrix(valid_training_data.values)
    pred_y = bst.predict(valid_training_data)
    valid_pred = valid_user_index.copy()
    valid_pred['label'] = pred_y
    valid_pred = valid_pred[valid_pred['label'] >= 0.014]
    # keep only the highest-scoring candidate per user
    valid_pred = valid_pred.sort_values('label', ascending=False).groupby('user_id').first().reset_index()
    valid_true = get_true(valid_result_start_date, valid_result_end_date)
    report(valid_pred, valid_true)
    # tighten the threshold step by step and report the score at each level
    for limit in [0.016, 0.018, 0.02, 0.022, 0.024, 0.026, 0.028, 0.03]:
        valid_pred = valid_pred[valid_pred['label'] >= limit]
        print(limit)
        report(valid_pred, valid_true)
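# `xgboost_report_submission` defines a test window (test_start_date/test_end_date) that it
# never uses. A minimal sketch, assuming make_test_set() also accepts that window and that the
# returned index frame carries 'user_id' and 'sku_id' columns, of how the trained booster and a
# threshold picked offline could produce a submission file. The helper name, default threshold,
# and output path are illustrative, not part of the original code.
def make_submission(bst, test_start_date, test_end_date, threshold=0.02):
    sub_user_index, sub_test_data = make_test_set(test_start_date, test_end_date)
    y = bst.predict(xgb.DMatrix(sub_test_data.values))
    sub = sub_user_index.copy()
    sub['label'] = y
    # keep the highest-scoring item per user, then apply the offline threshold
    sub = sub.sort_values('label', ascending=False).groupby('user_id', as_index=False).first()
    sub = sub[sub['label'] >= threshold]
    sub[['user_id', 'sku_id']].to_csv('./sub/submission.csv', index=False)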