## Logistic regression: fit on the raw train split, report metrics on train and test.
# NOTE(review): the source line was truncated mid-statement; the estimator
# construction is assumed to be LogisticRegression(random_state=229) — confirm
# against the full file.
logisticRegr = LogisticRegression(random_state=229)
logisticRegr.fit(x_train, y_train)
y_pred = logisticRegr.predict(x_test)
train_pred = logisticRegr.predict(x_train)

## train metrics
# pos_label="Success" — labels here are the strings "Success"/"Failure".
print("accuracy:", metrics.accuracy_score(y_train, train_pred))
print("recall:", metrics.recall_score(y_train, train_pred, pos_label="Success"))
print("precision:", metrics.precision_score(y_train, train_pred, pos_label="Success"))
print("f1-score:", metrics.f1_score(y_train, train_pred, pos_label="Success"))
print("======Classification report========")
print(metrics.classification_report(y_train, train_pred))
plot_confusion_matrix(y_train, train_pred, classes=["Failure", "Success"])
plt.show()
# decision_function returns signed distances to the separating hyperplane;
# these act as the ranking scores for the ROC curve.
y_score = logisticRegr.decision_function(x_train)
plot_roc_curve(y_train, y_score)

## test metrics
print("accuracy:", metrics.accuracy_score(y_test, y_pred))
print("recall:", metrics.recall_score(y_test, y_pred, pos_label="Success"))
print("precision:", metrics.precision_score(y_test, y_pred, pos_label="Success"))
print("f1-score:", metrics.f1_score(y_test, y_pred, pos_label="Success"))
print("======Classification report========")
print(metrics.classification_report(y_test, y_pred))
# Fix: the MLP section plots a test-set confusion matrix but this section
# did not — added for consistent evaluation across models.
plot_confusion_matrix(y_test, y_pred, classes=["Failure", "Success"])
plt.show()
## MLP
# Multi-layer perceptron trained on the SMOTE-resampled split (x_sm, y_sm);
# the test split (x_test, y_test) is left untouched.
clf = MLPClassifier(
    activation='logistic',
    hidden_layer_sizes=(100, 50, 50, 20),
    random_state=229,
    learning_rate_init=0.0001,
    max_iter=500,
    alpha=0.05,
)
clf.fit(x_sm, y_sm)
y_pred = clf.predict(x_test)
train_pred = clf.predict(x_sm)

## train metrics
# NOTE(review): these "train" metrics are computed on the oversampled set,
# so they are not directly comparable to metrics on the original class balance.
print("accuracy:", metrics.accuracy_score(y_sm, train_pred))
print("recall:", metrics.recall_score(y_sm, train_pred, pos_label=1))
print("precision:", metrics.precision_score(y_sm, train_pred, pos_label=1))
print("f1-score:", metrics.f1_score(y_sm, train_pred, pos_label=1))
print("======Classification report========")
print(metrics.classification_report(y_sm, train_pred))
plot_confusion_matrix(y_sm, train_pred, classes=[0, 1])
plt.show()
# Positive-class probabilities (column 1) are the ROC scores.
probs = clf.predict_proba(x_sm)[:, 1]
plot_roc_curve(y_sm, probs, pos_label=1)

## test metrics
print("accuracy:", metrics.accuracy_score(y_test, y_pred))
print("recall:", metrics.recall_score(y_test, y_pred, pos_label=1))
print("precision:", metrics.precision_score(y_test, y_pred, pos_label=1))
print("f1-score:", metrics.f1_score(y_test, y_pred, pos_label=1))
print("======Classification report========")
print(metrics.classification_report(y_test, y_pred))
plot_confusion_matrix(y_test, y_pred, classes=[0, 1])
plt.show()