Example #1
# Assumed imports for this snippet; load_binary is a local helper (not shown)
# that returns a train/test split of a binary classification dataset.
import matplotlib.pyplot as plt
from sklearn.linear_model import RidgeClassifier
from yellowbrick.classifier import PrecisionRecallCurve


def draw_binary(outpath=None):
    _, ax = plt.subplots(figsize=(9, 6))

    X_train, X_test, y_train, y_test = load_binary(split=True)

    oz = PrecisionRecallCurve(RidgeClassifier(), ax=ax)
    oz.fit(X_train, y_train)
    oz.score(X_test, y_test)
    oz.poof(outpath=outpath)
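
load_binary is not defined in the snippet itself. A minimal sketch of what such a helper might look like, assuming it simply wraps a scikit-learn binary dataset and train_test_split; the dataset choice and split parameters below are illustrative, not from the original:

# Hypothetical helper, not part of the original example.
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split


def load_binary(split=True):
    # Load a binary classification dataset, optionally as a train/test split.
    X, y = load_breast_cancer(return_X_y=True)
    if split:
        return train_test_split(X, y, test_size=0.2, random_state=42)
    return X, y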
Example #2
def precision_recall_f1(model, classes, X_train, Y_train, X_test, Y_test):
    # Draw per-class precision-recall curves with iso-F1 reference curves
    # overlaid; note that the classes argument is accepted but never used.
    from yellowbrick.classifier import PrecisionRecallCurve
    viz = PrecisionRecallCurve(model,
                               per_class=True,
                               iso_f1_curves=True,
                               fill_area=False,
                               micro=False)
    viz.fit(X_train, Y_train)
    viz.score(X_test, Y_test)
    viz.poof()
Example #3
def precision_recall(model, classes, X_train, Y_train, X_test, Y_test):
    from yellowbrick.classifier import PrecisionRecallCurve

    # The train/test split is passed in as arguments; classes is unused.

    # Create the visualizer, fit, score, and poof it
    viz = PrecisionRecallCurve(model)
    viz.fit(X_train, Y_train)
    viz.score(X_test, Y_test)
    viz.poof()
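
A hedged usage sketch for this helper; the dataset, model, and class names below are illustrative, not from the original:

# Hypothetical usage, not part of the original example.
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
precision_recall(RandomForestClassifier(), ["malignant", "benign"],
                 X_train, y_train, X_test, y_test)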
Example #4
# Assumed imports; load_multiclass is a local helper (not shown) that
# returns a train/test split of a multiclass dataset.
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from yellowbrick.classifier import PrecisionRecallCurve


def draw_multiclass(outpath=None, simple=True):
    _, ax = plt.subplots(figsize=(9, 6))

    X_train, X_test, y_train, y_test = load_multiclass()

    if simple:
        oz = PrecisionRecallCurve(RandomForestClassifier(), ax=ax)
    else:
        oz = PrecisionRecallCurve(MultinomialNB(),
                                  ax=ax,
                                  per_class=True,
                                  iso_f1_curves=True,
                                  fill_area=False,
                                  micro=False)

    oz.fit(X_train, y_train)
    oz.score(X_test, y_test)
    oz.poof(outpath=outpath)
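
As with load_binary above, load_multiclass is a local helper not shown in the snippet. A minimal sketch, assuming it wraps a scikit-learn multiclass dataset; digits is chosen here because its non-negative pixel counts also satisfy MultinomialNB:

# Hypothetical helper, not part of the original example.
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split


def load_multiclass():
    # Load a multiclass dataset and return a train/test split.
    X, y = load_digits(return_X_y=True)
    return train_test_split(X, y, test_size=0.2, random_state=42)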
Example #5
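This excerpt begins mid-script: rf is an already-fitted classifier (a random forest, judging by the name) and y_pred_thresh holds class predictions made at a custom probability threshold. A hedged sketch of how those names might have been produced, assuming an existing train/test split; the threshold value is illustrative:

# Hypothetical setup, not from the original excerpt.
from sklearn.ensemble import RandomForestClassifier

rf = RandomForestClassifier(random_state=42).fit(X_train, y_train)
# Predict the positive class at a 0.3 threshold instead of the default 0.5.
y_pred_thresh = (rf.predict_proba(X_test)[:, 1] >= 0.3).astype(int)
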
from sklearn.metrics import classification_report

print(classification_report(y_test, y_pred_thresh))

### Precision-Recall curve

from scikitplot.metrics import plot_precision_recall

# scikit-plot's plot_precision_recall expects the full (n_samples, n_classes)
# probability matrix, not just the positive-class column.
rf_probas = rf.predict_proba(X_test)
plot_precision_recall(y_test, rf_probas)

from yellowbrick.classifier import PrecisionRecallCurve

viz = PrecisionRecallCurve(rf)
viz.fit(X_train, y_train)
viz.score(X_test, y_test)
viz.poof()

# Discrimination Threshold - probability or score at which the positive class is chosen over the negative class

from yellowbrick.classifier import DiscriminationThreshold

viz = DiscriminationThreshold(rf)
viz.fit(X_train, y_train)
viz.poof()

# Average Precision

from sklearn.metrics import average_precision_score

average_precision_score(
    y_test,
    rf_probas[:, 1],  # assumed completion: the truncated call needs the positive-class scores
)
Example #6
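This excerpt also assumes names defined earlier in its source script: model is the classifier under evaluation, classes the label names, and X_fclass_train/X_fclass_test are feature matrices reduced by univariate f_classif selection. A minimal sketch of how such matrices might be built; the value of k is illustrative:

# Hypothetical feature selection, not from the original excerpt.
from sklearn.feature_selection import SelectKBest, f_classif

selector = SelectKBest(f_classif, k=10).fit(X_train, y_train)
X_fclass_train = selector.transform(X_train)
X_fclass_test = selector.transform(X_test)
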
visualizer = ClassPredictionError(model, classes=classes)
visualizer.fit(X_fclass_train, y_train)
visualizer.score(X_fclass_test, y_test)
visualizer.poof(outpath="bag_class_errorf_classIF.png")

visualizer = DiscriminationThreshold(model)
visualizer.fit(X_fclass_train,
               y_train)  # Fit the training data to the visualizer
visualizer.score(X_fclass_test, y_test)
visualizer.poof(outpath="bag_descrimination_thresholdf_classIF.png")

# Create the visualizer, fit, score, and poof it
viz = PrecisionRecallCurve(model)
viz.fit(X_fclass_train, y_train)
viz.score(X_fclass_test, y_test)
viz.poof(outpath="bag_precision_recall_curvef_classIF.png")

# KNeighborsClassifier with f_classif features
model = KNeighborsClassifier()
model.fit(X_fclass_train, y_train)

visualizer = ClassificationReport(model, classes=classes)
visualizer.fit(X_fclass_train, y_train)  # Fit the visualizer and the model
visualizer.score(X_fclass_test, y_test)  # Evaluate the model on the test data
visualizer.poof(outpath="kneear_classification_report_fclassIF.png")

visualizer = ClassPredictionError(model, classes=classes)
visualizer.fit(X_fclass_train, y_train)
visualizer.score(X_fclass_test, y_test)
visualizer.poof(outpath="kneear_class_error_fclassIF.png")
Example #7
    # Run model with 4-fold cross validation. Report mean accuracy.
    scores = cross_val_score(mlp, X_train, y_train, cv=4)
    print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))

    # Plot ROC, AUC.
    classes = ["Normal", "Pre-Ictal", "Seizure"]
    visualizer = ROCAUC(mlp, classes=classes)

    visualizer.fit(X_train, y_train)  # Fit the training data to the visualizer
    visualizer.score(X_test, y_test)  # Evaluate the model on the test data
    ROC_title = "ROCAUC_{}.png".format(animal_id)
    g = visualizer.poof(outpath=ROC_title)  # Save plot with a unique title

    # Plot the precision-recall curve.
    viz = PrecisionRecallCurve(mlp)
    viz.fit(X_train, y_train)  # Fit the training data to the visualizer
    viz.score(X_test, y_test)  # Evaluate the model on the test data
    PR_title = "PR_{}.png".format(animal_id)
    viz.poof(outpath=PR_title)  # Save plot with a unique title

    # Plot the loss curve, i.e. the cost function over training iterations.
    loss_values = mlp.loss_curve_
    plt.plot(loss_values)
    Loss_title = "Loss_{}.png".format(animal_id)
    plt.savefig(Loss_title)  # save before show(), which clears the current figure
    plt.show()
sys.stdout.close()

Example #8
from yellowbrick.classifier import PrecisionRecallCurve


def plot_pr(model, X_train, y_train, X_valid, y_valid):
    # Fit on the training split, then score and draw the curve on the
    # validation split.
    visualizer = PrecisionRecallCurve(model)
    visualizer.fit(X_train, y_train)
    visualizer.score(X_valid, y_valid)
    visualizer.poof()
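
A hedged usage sketch; the dataset and model below are illustrative, not from the original:

# Hypothetical usage, not part of the original example.
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=42)
plot_pr(LogisticRegression(max_iter=1000), X_train, y_train, X_valid, y_valid)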