def _report_metrics(y_true, y_hat, y_probs, classes, pos_label=1):
    """Print accuracy/recall/precision/F1 plus the classification report,
    then show the confusion matrix and ROC curve for one data split.

    Args:
        y_true: ground-truth labels.
        y_hat: predicted labels for the same split.
        y_probs: positive-class scores/probabilities for the ROC curve.
        classes: class labels passed to plot_confusion_matrix.
        pos_label: label treated as the positive class in binary metrics.
    """
    print("accuracy:", metrics.accuracy_score(y_true, y_hat))
    print("recall:", metrics.recall_score(y_true, y_hat, pos_label=pos_label))
    print("precision:", metrics.precision_score(y_true, y_hat, pos_label=pos_label))
    print("f1-score:", metrics.f1_score(y_true, y_hat, pos_label=pos_label))
    print("======Classification report========")
    print(metrics.classification_report(y_true, y_hat))
    plot_confusion_matrix(y_true, y_hat, classes=classes)
    plt.show()
    plot_roc_curve(y_true, y_probs, pos_label=pos_label)


# Fit on the SMOTE-resampled training set, then report both splits.
clf.fit(x_sm, y_sm)
y_pred = clf.predict(x_test)
train_pred = clf.predict(x_sm)

## train metrics
# NOTE(review): these are computed on the *resampled* (balanced) training
# data, so they will look rosier than metrics on the original distribution.
probs = clf.predict_proba(x_sm)[:, 1]
_report_metrics(y_sm, train_pred, probs, classes=[0, 1], pos_label=1)

## test metrics
probs = clf.predict_proba(x_test)[:, 1]
_report_metrics(y_test, y_pred, probs, classes=[0, 1], pos_label=1)
# Evaluate the fitted logistic-regression model on train and test splits.
# Labels are the strings "Failure"/"Success", so every binary metric needs
# pos_label="Success" spelled out explicitly.

## train metrics
print("accuracy:", metrics.accuracy_score(y_train, train_pred))
print("recall:", metrics.recall_score(y_train, train_pred, pos_label="Success"))
print("precision:", metrics.precision_score(y_train, train_pred, pos_label="Success"))
print("f1-score:", metrics.f1_score(y_train, train_pred, pos_label="Success"))
print("======Classification report========")
print(metrics.classification_report(y_train, train_pred))
plot_confusion_matrix(y_train, train_pred, classes=["Failure", "Success"])
plt.show()
# Use raw decision-function scores for the ROC curve (valid ranking scores).
y_score = logisticRegr.decision_function(x_train)
# BUG FIX: pos_label was missing — with string labels, roc_curve cannot infer
# the positive class (the sibling block passes pos_label explicitly).
plot_roc_curve(y_train, y_score, pos_label="Success")

## test metrics
print("accuracy:", metrics.accuracy_score(y_test, y_pred))
print("recall:", metrics.recall_score(y_test, y_pred, pos_label="Success"))
print("precision:", metrics.precision_score(y_test, y_pred, pos_label="Success"))
print("f1-score:", metrics.f1_score(y_test, y_pred, pos_label="Success"))
print("======Classification report========")
print(metrics.classification_report(y_test, y_pred))
plot_confusion_matrix(y_test, y_pred, classes=["Failure", "Success"])
plt.show()
y_score = logisticRegr.decision_function(x_test)
# BUG FIX: y_score was computed but never used — the test ROC curve was
# never drawn, unlike the train split above.
plot_roc_curve(y_test, y_score, pos_label="Success")
# Evaluate `clf` on the original (non-resampled) train and test splits.
# Labels are the strings "Failure"/"Success", so every binary metric needs
# pos_label="Success" spelled out explicitly.

## train metrics
print("accuracy:", metrics.accuracy_score(y_train, train_pred))
print("recall:", metrics.recall_score(y_train, train_pred, pos_label="Success"))
print("precision:", metrics.precision_score(y_train, train_pred, pos_label="Success"))
print("f1-score:", metrics.f1_score(y_train, train_pred, pos_label="Success"))
print("======Classification report========")
print(metrics.classification_report(y_train, train_pred))
plot_confusion_matrix(y_train, train_pred, classes=["Failure", "Success"])
plt.show()
# Positive-class probabilities: column 1 of predict_proba.
probs = clf.predict_proba(x_train)[:, 1]
# BUG FIX: pos_label was missing — with string labels, roc_curve cannot infer
# the positive class (the sibling block passes pos_label explicitly).
plot_roc_curve(y_train, probs, pos_label="Success")

## test metrics
print("accuracy:", metrics.accuracy_score(y_test, y_pred))
print("recall:", metrics.recall_score(y_test, y_pred, pos_label="Success"))
print("precision:", metrics.precision_score(y_test, y_pred, pos_label="Success"))
print("f1-score:", metrics.f1_score(y_test, y_pred, pos_label="Success"))
print("======Classification report========")
print(metrics.classification_report(y_test, y_pred))
plot_confusion_matrix(y_test, y_pred, classes=["Failure", "Success"])
plt.show()
probs = clf.predict_proba(x_test)[:, 1]
# BUG FIX: probs was computed but never used — the test ROC curve was
# never drawn, unlike the train split above.
plot_roc_curve(y_test, probs, pos_label="Success")