Example #1
    def _runtestset(self, model, test_loader, path_to_weights, fold):
        if path_to_weights:
            weights = torch.load(path_to_weights)
            model.load_state_dict(weights)
            model.to(self.device)
            # Make Predictions
            with torch.no_grad():
                model.eval()
                y_truth, y_predicted = [], []
                for data in test_loader:
                    images = data['image'].to(self.device, dtype=torch.float)
                    labels = data['label'].to(self.device, dtype=torch.long)
                    output = model(images)
                    output_pb = F.softmax(output.cpu(), dim=1)
                    top_ps, top_class = output_pb.topk(1, dim=1)
                    y_predicted.extend(top_class.flatten().numpy().tolist())
                    y_truth.extend(labels.cpu().flatten().numpy().tolist())

                metrics = self._get_evaluation_metric(y_truth, y_predicted)
                logging.info("Test_Fold_{}_Metrics".format(fold))
                for arg in metrics:
                    print("{}: {}".format(arg, metrics[arg]))
                    logging.info("{}: {}".format(arg, metrics[arg]))
                logging.info("=" * 40)

                bc = BinaryClassification(y_truth,
                                          y_predicted,
                                          labels=["Benign", "Malignant"])
                bc.plot_roc_curve()
                auc_name = "Test_fold_{}_roc.png".format(fold)
                plt.savefig(auc_name)
                plt.clf()
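Note that this example feeds hard class labels (`top_class`) into `BinaryClassification`, so the ROC curve collapses to a single operating point. A minimal sketch of the usual alternative, collecting the softmax probability of the positive class instead (assuming class index 1 corresponds to "Malignant"):

# Sketch: collect continuous positive-class scores rather than argmax labels
y_scores = []
for data in test_loader:
    images = data['image'].to(self.device, dtype=torch.float)
    output_pb = F.softmax(model(images).cpu(), dim=1)
    y_scores.extend(output_pb[:, 1].numpy().tolist())  # P(class == 1, "Malignant")

# With continuous scores, the ROC curve is traced over all thresholds
bc = BinaryClassification(y_truth, y_scores, labels=["Benign", "Malignant"])
bc.plot_roc_curve()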
Example #2
import matplotlib.pyplot as plt
from plot_metric.functions import BinaryClassification


def getROCCurve(model, X_test, y_test):
    # Probability of the positive class for each test sample
    y_pred = model.predict_proba(X_test)[:, 1]

    bc = BinaryClassification(y_test, y_pred, labels=["Win", "Loss"])

    plt.figure(figsize=(5, 5))
    bc.plot_roc_curve()
    plt.show()
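For reference, a minimal, self-contained way to exercise this helper; the LogisticRegression and synthetic dataset below are illustrative, and any scikit-learn estimator with `predict_proba` would work:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

# Illustrative synthetic binary-classification data
X, y = make_classification(n_samples=500, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = LogisticRegression(max_iter=1000).fit(X_train, y_train)
getROCCurve(model, X_test, y_test)  # draws the ROC curve for the test split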
Example #3
    def plot_roc_curve(self, yproba, name):
        '''Plot the ROC curve for the given model (requires a model that can produce class probabilities).'''

        # Visualisation with plot_metric
        bc = BinaryClassification(self.y_test, yproba, labels=["No Disease", "Heart Disease"])

        # Figures
        plt.figure(figsize=(6, 6))
        bc.plot_roc_curve()
        pth = Path(self.graphics_path, 'roc_curve_' + name).with_suffix('.png')
        plt.savefig(pth)
        plt.close()
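A hedged usage sketch, assuming the enclosing class holds `y_test` and `graphics_path` attributes and that `clf`, `X_test`, and `evaluator` (all illustrative names) exist:

yproba = clf.predict_proba(X_test)[:, 1]      # positive-class probabilities
evaluator.plot_roc_curve(yproba, name='rf')   # saves roc_curve_rf.png under graphics_path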
Example #4
def drawCurve(results, target, TPR, FPR):
    # results: predicted scores (y_pred); target: ground-truth labels (y_true)
    # The TPR and FPR parameters are unused here.

    # Equivalent approach with scikit-learn (note an ROC curve plots FPR on x, TPR on y):
    # fpr, tpr, thresholds = metrics.roc_curve(target, results, pos_label=0)
    # plt.plot(fpr, tpr)
    # plt.show()

    bc = BinaryClassification(target, results, labels=["Class1", "Class2"], threshold=0.25)
    plt.figure(figsize=(5, 5))

    bc.plot_roc_curve()
    plt.show()
Example #5
import matplotlib.pyplot as plt
from plot_metric.functions import BinaryClassification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# X, y: feature matrix and binary labels, assumed to be defined above
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.5,
                                                    random_state=2)

# Building Classifier
clf = RandomForestClassifier(n_estimators=50, random_state=23)

# Train our classifier
model = clf.fit(X_train, y_train)

# Predict test set
y_pred = clf.predict_proba(X_test)[:, 1]

# Visualisation with plot_metric
bc = BinaryClassification(y_test, y_pred, labels=["Class 1", "Class 2"])

# Figures
plt.figure(figsize=(15, 10))
plt.subplot2grid(shape=(2, 6), loc=(0, 0), colspan=2)
bc.plot_roc_curve()
plt.subplot2grid((2, 6), (0, 2), colspan=2)
bc.plot_precision_recall_curve()
plt.subplot2grid((2, 6), (0, 4), colspan=2)
bc.plot_class_distribution()
plt.subplot2grid((2, 6), (1, 1), colspan=2)
bc.plot_confusion_matrix()
plt.subplot2grid((2, 6), (1, 3), colspan=2)
bc.plot_confusion_matrix(normalize=True)

# Save figure (the filename below is illustrative)
plt.savefig("binary_classification_plots.png")
Example #6
import os
from typing import Optional

import matplotlib.pyplot as plt
import numpy as np
from plot_metric.functions import BinaryClassification

# `cfg` is assumed to be the project's configuration module,
# which provides the METADATA_REGISTRY default output folder.


def save_classification_plots(
    y_true: np.ndarray,
    y_proba: np.ndarray,
    threshold: np.float64,
    prefix: Optional[str] = None,
    destination: Optional[str] = None,
):
    """
    Build and save binary classification performance evaluation plots.

    Args:
        y_true: Ground truth (correct) labels.
        y_proba: Predicted probabilities of the positive class returned by a classifier.
        threshold: Classification pipeline optimal threshold.
        prefix: Classification plots prefix, i.e. the pipeline name. Defaults to None.
        destination: Folder where the report should be saved. Defaults to ``METADATA_REGISTRY``.
    """
    destination = cfg.METADATA_REGISTRY if destination is None else destination
    fname = ("classification_plots.png" if prefix is None else prefix +
             "_classification_plots.png")
    path = os.path.join(destination, fname)

    bc = BinaryClassification(y_true, y_proba, labels=["No fire", "Fire"])

    plt.figure(figsize=(15, 10))
    plt.subplot2grid(shape=(2, 6), loc=(0, 0), colspan=2)
    bc.plot_roc_curve(threshold=threshold)
    plt.subplot2grid((2, 6), (0, 2), colspan=2)
    bc.plot_precision_recall_curve(threshold=threshold)
    plt.subplot2grid((2, 6), (0, 4), colspan=2)
    bc.plot_class_distribution(threshold=threshold)
    plt.subplot2grid((2, 6), (1, 1), colspan=2)
    bc.plot_confusion_matrix(threshold=threshold)
    plt.subplot2grid((2, 6), (1, 3), colspan=2)
    bc.plot_confusion_matrix(threshold=threshold, normalize=True)

    plt.savefig(path)
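A minimal usage sketch; the synthetic labels and scores, the prefix, and the destination below are all illustrative:

import numpy as np

rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, size=200)                            # fake ground-truth labels
y_proba = np.clip(y_true * 0.6 + rng.random(200) * 0.4, 0, 1)    # fake positive-class scores

save_classification_plots(
    y_true,
    y_proba,
    threshold=0.5,
    prefix="demo",      # illustrative pipeline name
    destination=".",    # write locally instead of METADATA_REGISTRY
)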
Example #7
# metrics are used to find accuracy or error
from sklearn import metrics
# using metrics module for accuracy calculation
print("ACCURACY OF THE MODEL: ", metrics.accuracy_score(y_test, y_pred))

# Classification report: precision, recall, F1-score and accuracy
matrix = classification_report(y_test, y_pred)
print('Classification report : \n', matrix)

# In[291]:

# ROC / AUC curve
from plot_metric.functions import BinaryClassification
# Visualisation with plot_metric
y_pred = clf.predict_proba(X_test)[:, 1]
bc = BinaryClassification(y_test, y_pred, labels=["Class 1", "Class 2"])

# Figures
plt.figure(figsize=(5, 5))
bc.plot_roc_curve()
plt.show()

# #### Model 2: KNN

# In[292]:

# Model 2: KNN
from sklearn.neighbors import KNeighborsClassifier
neighbors = np.arange(1, 30)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
Example #8
        correct_faces = sum(ensemble_vote_all(faces_ii_testing, classifiers))
        incorrect_faces = len(faces_testing) - correct_faces
        correct_non_faces = len(non_faces_testing) - sum(ensemble_vote_all(non_faces_ii_testing, classifiers))
        incorrect_non_faces = len(non_faces_testing) - correct_non_faces
        
        correct_faces_score = ensemble_score_all(faces_ii_testing, classifiers)
        incorrect_non_faces_score = ensemble_score_all(non_faces_ii_testing, classifiers)
        face_label = np.array([1] * len(correct_faces_score) + [0] * len(incorrect_non_faces_score))
        face_predict = np.array(ensemble_vote_all(faces_ii_testing, classifiers) + ensemble_vote_all(non_faces_ii_testing, classifiers))
        
        # plot confusion matrix
        cf = plot_confusion_matrix(correct_faces, incorrect_faces, correct_non_faces, incorrect_non_faces)
        cf.figure.savefig("results/cf_round_{}.png".format(num))
        # plot roc curve
        # Visualisation with plot_metric
        bc = BinaryClassification(face_label, face_predict, labels=["Class 1", "Class 2"])
        f1 = f1_score(face_label, face_predict)
        print("f1: ", f1)
        # Figures
        plt.figure(figsize=(5,5))
        fpr, tpr, thres, auc = bc.plot_roc_curve()
        plt.savefig('results/round_{}_roc.jpg'.format(num))
        face_pct = float(correct_faces) / len(faces_testing) * 100
        non_face_pct = float(correct_non_faces) / len(non_faces_testing) * 100
        print('..done.\n\nResult:\n      Faces: {}/{}  ({}%)\n  non-Faces: {}/{}  ({}%)'.format(
            correct_faces, len(faces_testing), face_pct,
            correct_non_faces, len(non_faces_testing), non_face_pct))
        
        print('False Positive Rate: {}, True Positive Rate: {}'.format(fpr, tpr))
        print('Classifier Results: ', ab.stats)

        haar_imgs = vis_haar(classifiers, faces_testing[0])
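As in Example #1, `face_predict` holds hard 0/1 votes, so the ROC curve reduces to a single operating point. The continuous ensemble scores computed just above would trace a fuller curve; a sketch, assuming `ensemble_score_all` returns one real-valued score per sample:

        # Reuse the continuous scores already computed above instead of the hard votes
        face_scores = np.array(list(correct_faces_score) + list(incorrect_non_faces_score))
        bc = BinaryClassification(face_label, face_scores, labels=["Class 1", "Class 2"])
        plt.figure(figsize=(5, 5))
        bc.plot_roc_curve()
        plt.savefig('results/round_{}_roc_scores.jpg'.format(num))  # illustrative filename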
Example #9
    def metric_plots_2(self, actual=None, pred=None, threshold=0.5):
        '''
        TODO: not currently used due to a package issue (needs checking).
        Plots the ROC curve, precision-recall curve, class distribution,
        and confusion matrix (raw and normalised).
        '''
        if (actual is not None) and (pred is not None):
            bc = BinaryClassification(y_true=actual,
                                      y_pred=pred,
                                      labels=["Class 0", "Class 1"],
                                      threshold=threshold)
            # Figures
            plt.figure(figsize=(20, 15))
            plt.subplot2grid(shape=(2, 6), loc=(0, 0), colspan=2)
            # ROC curve
            bc.plot_roc_curve(threshold=threshold)
            plt.subplot2grid((2, 6), (0, 2), colspan=2)
            # Precision-recall curve
            bc.plot_precision_recall_curve(threshold=threshold)
            plt.subplot2grid((2, 6), (0, 4), colspan=2)
            # Class distribution
            bc.plot_class_distribution(threshold=threshold)
            plt.subplot2grid((2, 6), (1, 1), colspan=2)
            # Confusion matrix
            bc.plot_confusion_matrix(threshold=threshold)
            plt.subplot2grid((2, 6), (1, 3), colspan=2)
            # Normalised confusion matrix
            bc.plot_confusion_matrix(threshold=threshold, normalize=True)
            plt.show()
            # Classification report at the chosen threshold
            print(
                classification_report(
                    actual, [0 if i <= threshold else 1 for i in pred]))
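An illustrative invocation, assuming `pred` carries positive-class probabilities from any fitted classifier (`reporter`, `clf`, `X_test`, and `y_test` are placeholder names):

probs = clf.predict_proba(X_test)[:, 1]  # positive-class probabilities
reporter.metric_plots_2(actual=y_test, pred=probs, threshold=0.5)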