import pickle

import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import (accuracy_score, matthews_corrcoef, precision_score,
                             f1_score, roc_auc_score, roc_curve, auc)
# RotationForestClassifier is assumed to come from the third-party
# 'rotation_forest' package; adjust the import to wherever it is defined
# in this project. sensitivity() and specificity() are project helpers
# (see the sketch at the end of this file).
from rotation_forest import RotationForestClassifier


def cross_val(x_train, y_train):
    """Repeated 10-fold stratified cross-validation of a Rotation Forest classifier."""
    skf = StratifiedKFold(n_splits=10)
    model = RotationForestClassifier(n_estimators=100, random_state=47,
                                     verbose=4, n_jobs=-2)
    accuracy, mcc, precision, roc_auc = [], [], [], []
    Sensitivity, Specificity, score, f1 = [], [], [], []
    # Repeat the 10-fold CV ten times; note that without shuffle=True the
    # splits are identical in every repetition.
    for _ in range(10):
        for train_index, test_index in skf.split(x_train, y_train):
            X_train, X_test = x_train[train_index], x_train[test_index]
            Y_train, Y_test = y_train[train_index], y_train[test_index]
            model.fit(X_train, Y_train)
            y_predict = model.predict(X_test)
            score.append(model.score(X_test, Y_test))
            accuracy.append(accuracy_score(Y_test, y_predict))
            mcc.append(matthews_corrcoef(Y_test, y_predict))
            precision.append(precision_score(Y_test, y_predict))
            f1.append(f1_score(Y_test, y_predict))
            roc_auc.append(roc_auc_score(Y_test, y_predict))
            Sensitivity.append(sensitivity(Y_test, y_predict))
            Specificity.append(specificity(Y_test, y_predict))
    # Persist the last fitted model.
    with open('./data/rotation_forest_knn_human100.pkl', 'wb') as f:
        pickle.dump(model, f)
    print("****************************************")
    print("Accuracy: ", np.mean(accuracy))
    print("MCC: ", np.mean(mcc))
    print("Precision: ", np.mean(precision))
    print("ROC AUC score: ", np.mean(roc_auc))
    print("F1 score: {}\n".format(np.mean(f1)))
    print("Sensitivity: ", np.mean(Sensitivity))
    print("Specificity: ", np.mean(Specificity))
def robust_cross_val(x_train, y_train, x_test, y_test, folds):
    """Repeated stratified cross-validation plus an independent test-set evaluation."""
    # shuffle=True is required when a random_state is passed to StratifiedKFold.
    skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=47)
    model = RotationForestClassifier(n_estimators=100, random_state=47,
                                     verbose=4, n_jobs=-2)
    accuracy, mcc, precision, roc_auc = [], [], [], []
    Sensitivity, Specificity, auc_score, f1, score = [], [], [], [], []
    # Repeat the k-fold CV ten times to reduce the variance of the estimates.
    for _ in range(10):
        for train_index, test_index in skf.split(x_train, y_train):
            X_train, X_test = x_train[train_index], x_train[test_index]
            Y_train, Y_test = y_train[train_index], y_train[test_index]
            model.fit(X_train, Y_train)
            y_predict = model.predict(X_test)
            score.append(model.score(X_test, Y_test))
            accuracy.append(accuracy_score(Y_test, y_predict))
            mcc.append(matthews_corrcoef(Y_test, y_predict))
            precision.append(precision_score(Y_test, y_predict))
            roc_auc.append(roc_auc_score(Y_test, y_predict))
            # auc() expects curve coordinates, not labels, so build them with roc_curve.
            fpr, tpr, _ = roc_curve(Y_test, y_predict)
            auc_score.append(auc(fpr, tpr))
            f1.append(f1_score(Y_test, y_predict))
            Sensitivity.append(sensitivity(Y_test, y_predict))
            Specificity.append(specificity(Y_test, y_predict))
    # Persist the last fitted model.
    with open('../data/rotation_forest_human.pkl', 'wb') as f:
        pickle.dump(model, f)
    res = "{} folds\n".format(folds)
    res += "******************** Cross Validation Score ********************\n"
    res += "Accuracy: {}\n".format(np.mean(accuracy))
    res += "MCC: {}\n".format(np.mean(mcc))
    res += "Precision: {}\n".format(np.mean(precision))
    res += "ROC AUC score: {}\n".format(np.mean(roc_auc))
    res += "AUC score: {}\n".format(np.mean(auc_score))
    res += "F1 score: {}\n".format(np.mean(f1))
    res += "Sensitivity: {}\n".format(np.mean(Sensitivity))
    res += "Specificity: {}\n".format(np.mean(Specificity))
    # Evaluate the final model on the held-out independent test set.
    y_test_predict = model.predict(x_test)
    res += "\n******************** Independent Test Score ********************\n"
    res += "Accuracy: {}\n".format(accuracy_score(y_test, y_test_predict))
    res += "MCC: {}\n".format(matthews_corrcoef(y_test, y_test_predict))
    res += "Precision: {}\n".format(precision_score(y_test, y_test_predict))
    res += "ROC AUC score: {}\n".format(roc_auc_score(y_test, y_test_predict))
    fpr, tpr, _ = roc_curve(y_test, y_test_predict)
    res += "AUC score: {}\n".format(auc(fpr, tpr))
    res += "F1 score: {}\n".format(f1_score(y_test, y_test_predict))
    res += "Sensitivity: {}\n".format(sensitivity(y_test, y_test_predict))
    res += "Specificity: {}\n\n\n".format(specificity(y_test, y_test_predict))
    with open('../data/rotation_forest_human_result.txt', 'a') as f:
        f.write(res)
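# The sensitivity() and specificity() metrics used above are not part of
# scikit-learn; they are assumed to be helpers defined elsewhere in this
# project. A minimal sketch of how they could be computed from the binary
# confusion matrix (an assumption, not the project's actual implementation):
from sklearn.metrics import confusion_matrix


def sensitivity(y_true, y_pred):
    # Sensitivity (recall of the positive class): TP / (TP + FN).
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    return tp / (tp + fn)


def specificity(y_true, y_pred):
    # Specificity (recall of the negative class): TN / (TN + FP).
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    return tn / (tn + fp)


# Example call with placeholder NumPy arrays (names are illustrative only):
# robust_cross_val(x_train, y_train, x_test, y_test, folds=10)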