Example #1
    def detailed_analysis(self):
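        """Analyse the test-set predictions of the fitted classifier: build a
        confusion matrix with summary statistics, plot the precision-recall
        curve and the ROC curve for class 1, compute the ROC AUC, draw a radar
        chart of the performance metrics and explore sensitivity/specificity
        at different probability thresholds, logging every step."""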
        print_to_consol(
            'Making a confusion matrix for test set classification outcomes')

        matrix_stats = confusion_matrix_and_stats(self.y_test, self.y_pred,
                                                  self.directory)

        logging.info(f'Detailed analysis of confusion matrix for test set. \n'
                     f'True positives: {matrix_stats["TP"]} \n'
                     f'True negatives: {matrix_stats["TN"]} \n'
                     f'False positives: {matrix_stats["FP"]} \n'
                     f'False negatives: {matrix_stats["FN"]} \n'
                     f'Classification accuracy: {matrix_stats["acc"]} \n'
                     f'Classification error: {matrix_stats["err"]} \n'
                     f'Sensitivity: {matrix_stats["sensitivity"]} \n'
                     f'Specificity: {matrix_stats["specificity"]} \n'
                     f'False positive rate: {matrix_stats["FP-rate"]} \n'
                     f'False negative rate: {matrix_stats["FN-rate"]} \n'
                     f'Precision: {matrix_stats["precision"]} \n'
                     f'F1-score: {matrix_stats["F1-score"]} \n')

        print_to_consol(
            'Plotting precision recall curve for test set class 1 probabilities'
        )

        logging.info(
            f'Plotting precision recall curve for class 1 in test set probabilities. \n'
        )

        plot_precision_recall_vs_threshold(self.y_test, self.y_pred_proba_ones,
                                           self.directory)

        print_to_consol(
            'Plotting ROC curve and calculating AUC for test set class 1 probabilities'
        )

        logging.info(
            f'Plotting ROC curve for class 1 in test set probabilities. \n')

        self.fpr, self.tpr, self.thresholds = plot_roc_curve(
            self.y_test, self.y_pred_proba_ones, self.directory)

        AUC = round(
            roc_auc_score(self.y_test, self.y_pred_proba_ones) * 100, 2)

        logging.info(
            f'Calculating AUC for ROC curve for class 1 in test set probabilities: {AUC} \n'
        )

        print_to_consol('Make a radar plot for performance metrics')

        radar_dict = {
            'Classification accuracy': matrix_stats["acc"],
            'Classification error': matrix_stats["err"],
            'Sensitivity': matrix_stats["sensitivity"],
            'Specificity': matrix_stats["specificity"],
            'False positive rate': matrix_stats["FP-rate"],
            'False negative rate': matrix_stats["FN-rate"],
            'Precision': matrix_stats["precision"],
            'F1-score': matrix_stats["F1-score"],
            'ROC AUC': AUC
        }

        plot_radar_chart(radar_dict, self.directory)

        print_to_consol(
            'Exploring probability thresholds, sensitivity, specificity for class 1'
        )

        threshold_dict = evaluate_threshold(self.tpr, self.fpr,
                                            self.thresholds)

        logging.info(
            f'Exploring different probability thresholds and sensitivity-specificity trade-offs. \n'
            f'Threshold 0.2: {threshold_dict["0.2"]} \n'
            f'Threshold 0.3: {threshold_dict["0.3"]} \n'
            f'Threshold 0.4: {threshold_dict["0.4"]} \n'
            f'Threshold 0.5: {threshold_dict["0.5"]} \n'
            f'Threshold 0.6: {threshold_dict["0.6"]} \n'
            f'Threshold 0.7: {threshold_dict["0.7"]} \n'
            f'Threshold 0.8: {threshold_dict["0.8"]} \n'
            f'Threshold 0.9: {threshold_dict["0.9"]} \n')

        end = datetime.now()
        duration = end - self.start

        logging.info(
            f'Prediction and analysis lasted {duration} \n')

        logging.info(f'Prediction and analysis completed \n')

        print_to_consol('Prediction and analysis completed')
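
The helper evaluate_threshold() is not shown on this page. The sketch below is only an assumption about its behaviour, inferred from how it is called and logged above: it is taken to return a dict keyed by the probed probability thresholds ('0.2' to '0.9'), with tpr, fpr and thresholds assumed to be the arrays produced by sklearn.metrics.roc_curve inside plot_roc_curve.

# Hypothetical sketch of evaluate_threshold(), not the project's own helper.
def evaluate_threshold(tpr, fpr, thresholds,
                       probe_points=(0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)):
    threshold_dict = {}
    for t in probe_points:
        # roc_curve returns thresholds in decreasing order; take the first
        # index at which the threshold has fallen to or below the probed value
        idx = next((i for i, thr in enumerate(thresholds) if thr <= t),
                   len(thresholds) - 1)
        threshold_dict[str(t)] = {'sensitivity': float(tpr[idx]),
                                  'specificity': float(1 - fpr[idx])}
    return threshold_dict
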
Example #2
    def detailed_analysis(self):
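        """Analyse the multiclass test-set predictions before and after
        calibration: build confusion matrices and classification reports,
        draw radar charts of the metrics, calibrate the best classifier on
        the calibration split and save it to disk, estimate a 95% confidence
        interval, and write the calibrated predictions and probabilities to
        CSV files."""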
        print_to_consol(
            'Making a confusion matrix for test set classification outcomes')

        matrix_stats, report = confusion_matrix_and_stats_multiclass(
            self.y_test, self.y_pred, 'before_cal', self.directory)

        logging.info(f'Detailed analysis of confusion matrix for test set. \n'
                     f'True positives: {matrix_stats["TP"]} \n'
                     f'True negatives: {matrix_stats["TN"]} \n'
                     f'False positives: {matrix_stats["FP"]} \n'
                     f'False negatives: {matrix_stats["FN"]} \n'
                     f'Classification accuracy: {matrix_stats["acc"]} \n'
                     f'Classification error: {matrix_stats["err"]} \n'
                     f'Sensitivity: {matrix_stats["sensitivity"]} \n'
                     f'Specificity: {matrix_stats["specificity"]} \n'
                     f'False positive rate: {matrix_stats["FP-rate"]} \n'
                     f'False negative rate: {matrix_stats["FN-rate"]} \n'
                     f'Precision: {matrix_stats["precision"]} \n'
                     f'F1-score: {matrix_stats["F1-score"]} \n')

        logging.info(
            f'Classification report on test set before calibration. \n'
            f'{report} \n')

        print_to_consol('Make a radar plot for performance metrics')

        radar_dict = {
            'Classification accuracy': matrix_stats["acc"],
            'Classification error': matrix_stats["err"],
            'Sensitivity': matrix_stats["sensitivity"],
            'Specificity': matrix_stats["specificity"],
            'False positive rate': matrix_stats["FP-rate"],
            'False negative rate': matrix_stats["FN-rate"],
            'Precision': matrix_stats["precision"],
            'F1-score': matrix_stats["F1-score"],
            'ROC AUC': None
        }

        plot_radar_chart(radar_dict, self.directory)

        print_to_consol(
            'Calibrating classifier and writing to disk; getting new accuracy')

        self.calibrated_clf, clf_acc = calibrate_classifier(
            self.model, self.X_cal_scaled, self.y_cal)

        date = datetime.strftime(datetime.now(), '%Y%m%d_%H%M')
        joblib.dump(
            self.calibrated_clf,
            os.path.join(self.directory,
                         'best_calibrated_predictor_' + date + '.pkl'))

        logging.info(
            f'Calibrated the best classifier with X_cal and y_cal and new accuracy {clf_acc}\n'
            f'Writing file to disk in {self.directory} \n')

        print_to_consol(
            'Getting 95% confidence interval for calibrated classifier')

        alpha, upper, lower = get_confidence_interval(
            self.X_train_scaled, self.y_train, self.X_test_scaled, self.y_test,
            self.calibrated_clf, self.directory, self.bootiter, 'calibrated')

        logging.info(f'{alpha}% confidence interval {upper}% and {lower}% \n'
                     f'for calibrated classifier. \n')

        print_to_consol('Running prediction for calibrated classifier')

        print_to_consol(
            'Getting class predictions and probabilities for test set with calibrated classifier'
        )

        test_stats_cal, self.y_pred_cal, self.y_pred_proba_cal = testing_predict_stats_multiclass(
            self.calibrated_clf, self.X_test_scaled, self.y_test)

        y_pred_cal_out = os.path.join(self.directory,
                                      "y_pred_after_calibration.csv")
        np.savetxt(y_pred_cal_out, self.y_pred_cal, delimiter=",")

        y_pred_proba_cal_out = os.path.join(
            self.directory, "y_pred_proba_after_calibration.csv")
        np.savetxt(y_pred_proba_cal_out, self.y_pred_proba_cal, delimiter=",")

        logging.info(
            f'Writing y_pred and y_pred_proba after calibration to disk. \n'
            f'Predicting on the test set with calibrated classifier. \n'
            f'Storing classes for calibrated classifier in y_pred and probabilities in y_pred_proba. \n'
        )

        print_to_consol(
            'Calculate prediction stats for y_pred and y_pred_proba of test set with calibrated classifier'
        )

        logging.info(
            f'Basic stats on the test set with calibrated classifier. \n'
            f'Prediction accuracy on the test set: {test_stats_cal["predict_acc"]} \n'
            f'Class distribution in the test set: {test_stats_cal["class_distribution"]} \n'
            f'Matthews Correlation Coefficient: {test_stats_cal["mcc"]} \n')

        print_to_consol(
            'Making a confusion matrix for test set classification outcomes with calibrated classifier'
        )

        matrix_stats_cal, report_cal = confusion_matrix_and_stats_multiclass(
            self.y_test, self.y_pred_cal, 'after_cal', self.directory)

        logging.info(
            f'Detailed analysis of confusion matrix for test set with calibrated classifier. \n'
            f'True positives: {matrix_stats_cal["TP"]} \n'
            f'True negatives: {matrix_stats_cal["TN"]} \n'
            f'False positives: {matrix_stats_cal["FP"]} \n'
            f'False negatives: {matrix_stats_cal["FN"]} \n'
            f'Classification accuracy: {matrix_stats_cal["acc"]} \n'
            f'Classification error: {matrix_stats_cal["err"]} \n'
            f'Sensitivity: {matrix_stats_cal["sensitivity"]} \n'
            f'Specificity: {matrix_stats_cal["specificity"]} \n'
            f'False positive rate: {matrix_stats_cal["FP-rate"]} \n'
            f'False negative rate: {matrix_stats_cal["FN-rate"]} \n'
            f'Precision: {matrix_stats_cal["precision"]} \n'
            f'F1-score: {matrix_stats_cal["F1-score"]} \n')

        logging.info(
            f'Classification report on test set after calibration. \n'
            f'{report_cal} \n')

        print_to_consol(
            'Make a radar plot for performance metrics with calibrated classifier'
        )

        radar_dict_cal = {
            'Classification accuracy': matrix_stats_cal["acc"],
            'Classification error': matrix_stats_cal["err"],
            'Sensitivity': matrix_stats_cal["sensitivity"],
            'Specificity': matrix_stats_cal["specificity"],
            'False positive rate': matrix_stats_cal["FP-rate"],
            'False negative rate': matrix_stats_cal["FN-rate"],
            'Precision': matrix_stats_cal["precision"],
            'F1-score': matrix_stats_cal["F1-score"],
            'ROC AUC': None
        }

        plot_radar_chart(radar_dict_cal, self.directory)

        end = datetime.now()
        duration = end - self.start

        logging.info(f'Training lasted {duration} \n')

        logging.info(f'Training completed \n')

        print_to_consol('Training completed')
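
print_to_consol() itself is not part of these snippets. Judging by Example #4 below, which prints the same messages inline between 80-character banners of '*' characters, it is assumed to be a thin wrapper along these lines:

# Hypothetical sketch of print_to_consol(), mirroring the inline banners
# printed in Example #4.
def print_to_consol(message):
    print('*' * 80)
    print('*    ' + message)
    print('*' * 80)
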
Example #3
    def detailed_analysis(self):
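        """Analyse the binary test-set predictions, then calibrate and
        re-analyse: confusion matrix, precision-recall curve, ROC curve with
        AUC, radar chart and probability-threshold exploration are produced
        for the uncalibrated classifier, the classifier is calibrated on the
        calibration split and saved to disk with a 95% confidence interval,
        and the same analysis (plus a histogram of class 1 probabilities) is
        repeated for the calibrated predictions."""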
        print_to_consol(
            'Making a confusion matrix for test set classification outcomes')

        matrix_stats = confusion_matrix_and_stats(self.y_test, self.y_pred,
                                                  'before_cal', self.directory)

        logging.info(f'Detailed analysis of confusion matrix for test set. \n'
                     f'True positives: {matrix_stats["TP"]} \n'
                     f'True negatives: {matrix_stats["TN"]} \n'
                     f'False positives: {matrix_stats["FP"]} \n'
                     f'False negatives: {matrix_stats["FN"]} \n'
                     f'Classification accuracy: {matrix_stats["acc"]} \n'
                     f'Classification error: {matrix_stats["err"]} \n'
                     f'Sensitivity: {matrix_stats["sensitivity"]} \n'
                     f'Specificity: {matrix_stats["specificity"]} \n'
                     f'False positive rate: {matrix_stats["FP-rate"]} \n'
                     f'False negative rate: {matrix_stats["FN-rate"]} \n'
                     f'Precision: {matrix_stats["precision"]} \n'
                     f'F1-score: {matrix_stats["F1-score"]} \n')

        print_to_consol(
            'Plotting precision recall curve for test set class 1 probabilities'
        )

        logging.info(
            f'Plotting precision recall curve for class 1 in test set probabilities. \n'
        )

        plot_precision_recall_vs_threshold(self.y_test, self.y_pred_proba_ones,
                                           self.directory)

        print_to_consol(
            'Plotting ROC curve and calculating AUC for test set class 1 probabilities'
        )

        logging.info(
            f'Plotting ROC curve for class 1 in test set probabilities. \n')

        self.fpr, self.tpr, self.thresholds = plot_roc_curve(
            self.y_test, self.y_pred_proba_ones, self.directory)

        AUC = round(
            roc_auc_score(self.y_test, self.y_pred_proba_ones) * 100, 2)

        logging.info(
            f'Calculating AUC for ROC curve for class 1 in test set probabilities: {AUC} \n'
        )

        print_to_consol('Make a radar plot for performance metrics')

        radar_dict = {
            'Classification accuracy': matrix_stats["acc"],
            'Classification error': matrix_stats["err"],
            'Sensitivity': matrix_stats["sensitivity"],
            'Specificity': matrix_stats["specificity"],
            'False positive rate': matrix_stats["FP-rate"],
            'False negative rate': matrix_stats["FN-rate"],
            'Precision': matrix_stats["precision"],
            'F1-score': matrix_stats["F1-score"],
            'ROC AUC': AUC
        }

        plot_radar_chart(radar_dict, self.directory)

        print_to_consol(
            'Exploring probability thresholds, sensitivity, specificity for class 1'
        )

        threshold_dict = evaluate_threshold(self.tpr, self.fpr,
                                            self.thresholds)

        logging.info(
            f'Exploring different probability thresholds and sensitivity-specificity trade-offs. \n'
            f'Threshold 0.2: {threshold_dict["0.2"]} \n'
            f'Threshold 0.3: {threshold_dict["0.3"]} \n'
            f'Threshold 0.4: {threshold_dict["0.4"]} \n'
            f'Threshold 0.5: {threshold_dict["0.5"]} \n'
            f'Threshold 0.6: {threshold_dict["0.6"]} \n'
            f'Threshold 0.7: {threshold_dict["0.7"]} \n'
            f'Threshold 0.8: {threshold_dict["0.8"]} \n'
            f'Threshold 0.9: {threshold_dict["0.9"]} \n')

        print_to_consol(
            'Calibrating classifier and writing to disk; getting new accuracy')

        self.calibrated_clf, clf_acc = calibrate_classifier(
            self.model, self.X_cal_scaled, self.y_cal)

        date = datetime.strftime(datetime.now(), '%Y%m%d_%H%M')
        joblib.dump(
            self.calibrated_clf,
            os.path.join(self.directory,
                         'best_calibrated_predictor_' + date + '.pkl'))

        logging.info(
            f'Calibrated the best classifier with X_cal and y_cal and new accuracy {clf_acc}\n'
            f'Writing file to disk in {self.directory} \n')

        print_to_consol(
            'Getting 95% confidence interval for calibrated classifier')

        alpha, upper, lower = get_confidence_interval(
            self.X_train_scaled, self.y_train, self.X_test_scaled, self.y_test,
            self.calibrated_clf, self.directory, self.bootiter, 'calibrated')

        logging.info(f'{alpha}% confidence interval {upper}% and {lower}% \n'
                     f'for calibrated classifier. \n')

        print_to_consol('Running prediction for calibrated classifier')

        print_to_consol(
            'Getting class predictions and probabilities for test set with calibrated classifier'
        )

        test_stats_cal, self.y_pred_cal, self.y_pred_proba_cal = testing_predict_stats(
            self.calibrated_clf, self.X_test_scaled, self.y_test)

        logging.info(
            f'Predicting on the test set with calibrated classifier. \n'
            f'Storing classes for calibrated classifier in y_pred and probabilities in y_pred_proba. \n'
        )

        print_to_consol(
            'Calculate prediction stats for y_pred and y_pred_proba of test set with calibrated classifier'
        )

        logging.info(
            f'Basic stats on the test set with calibrated classifier. \n'
            f'Prediction accuracy on the test set: {test_stats_cal["predict_acc"]} \n'
            f'Class distribution in the test set: {test_stats_cal["class_distribution"]} \n'
            f'Matthews Correlation Coefficient: {test_stats_cal["mcc"]} \n'
            f'Average number of class 1 samples: {test_stats_cal["class_one"]} \n'
            f'Average number of class 0 samples: {test_stats_cal["class_zero"]} \n'
            f'Null accuracy: {test_stats_cal["null_acc"]} \n')

        print_to_consol(
            'Plotting histogram for class 1 prediction probabilities for test set'
        )

        #store the predicted probabilities for class 1 of test set
        self.y_pred_proba_cal_ones = self.y_pred_proba_cal[:, 1]

        plot_hist_pred_proba(self.y_pred_proba_cal_ones, self.directory)

        logging.info(
            f'Plotting prediction probabilities for class 1 in test set in histogram for calibrated classifier. \n'
        )

        print_to_consol(
            'Making a confusion matrix for test set classification outcomes with calibrated classifier'
        )

        matrix_stats_cal = confusion_matrix_and_stats(self.y_test,
                                                      self.y_pred_cal,
                                                      'after_cal',
                                                      self.directory)

        logging.info(
            f'Detailed analysis of confusion matrix for test set with calibrated classifier. \n'
            f'True positives: {matrix_stats_cal["TP"]} \n'
            f'True negatives: {matrix_stats_cal["TN"]} \n'
            f'False positives: {matrix_stats_cal["FP"]} \n'
            f'False negatives: {matrix_stats_cal["FN"]} \n'
            f'Classification accuracy: {matrix_stats_cal["acc"]} \n'
            f'Classification error: {matrix_stats_cal["err"]} \n'
            f'Sensitivity: {matrix_stats_cal["sensitivity"]} \n'
            f'Specificity: {matrix_stats_cal["specificity"]} \n'
            f'False positive rate: {matrix_stats_cal["FP-rate"]} \n'
            f'False negative rate: {matrix_stats_cal["FN-rate"]} \n'
            f'Precision: {matrix_stats_cal["precision"]} \n'
            f'F1-score: {matrix_stats_cal["F1-score"]} \n')

        print_to_consol(
            'Plotting precision recall curve for test set class 1 probabilities with calibrated classifier'
        )

        logging.info(
            f'Plotting precision recall curve for class 1 in test set probabilities with calibrated classifier. \n'
        )

        plot_precision_recall_vs_threshold(self.y_test,
                                           self.y_pred_proba_cal_ones,
                                           self.directory)

        print_to_consol(
            'Plotting ROC curve and calculating AUC for test set class 1 probabilities with calibrated classifier'
        )

        logging.info(
            f'Plotting ROC curve for class 1 in test set probabilities with calibrated classifier. \n'
        )

        self.fpr_cal, self.tpr_cal, self.thresholds_cal = plot_roc_curve(
            self.y_test, self.y_pred_proba_cal_ones, self.directory)

        AUC_cal = round(
            roc_auc_score(self.y_test, self.y_pred_proba_cal_ones) * 100, 2)

        logging.info(
            f'Calculating AUC for ROC curve for class 1 in test set probabilities with calibrated classifier: {AUC_cal} \n'
        )

        print_to_consol(
            'Make a radar plot for performance metrics with calibrated classifier'
        )

        radar_dict_cal = {
            'Classification accuracy': matrix_stats_cal["acc"],
            'Classification error': matrix_stats_cal["err"],
            'Sensitivity': matrix_stats_cal["sensitivity"],
            'Specificity': matrix_stats_cal["specificity"],
            'False positive rate': matrix_stats_cal["FP-rate"],
            'False negative rate': matrix_stats_cal["FN-rate"],
            'Precision': matrix_stats_cal["precision"],
            'F1-score': matrix_stats_cal["F1-score"],
            'ROC AUC': AUC_cal
        }

        plot_radar_chart(radar_dict_cal, self.directory)

        print_to_consol(
            'Exploring probability thresholds, sensitivity, specificity for class 1 with calibrated classifier'
        )

        threshold_dict_cal = evaluate_threshold(self.tpr_cal, self.fpr_cal,
                                                self.thresholds_cal)

        logging.info(
            f'Exploring different probability thresholds and sensitivity-specificity trade-offs \n'
            f'for calibrated classifier. \n'
            f'Threshold 0.2: {threshold_dict_cal["0.2"]} \n'
            f'Threshold 0.3: {threshold_dict_cal["0.3"]} \n'
            f'Threshold 0.4: {threshold_dict_cal["0.4"]} \n'
            f'Threshold 0.5: {threshold_dict_cal["0.5"]} \n'
            f'Threshold 0.6: {threshold_dict_cal["0.6"]} \n'
            f'Threshold 0.7: {threshold_dict_cal["0.7"]} \n'
            f'Threshold 0.8: {threshold_dict_cal["0.8"]} \n'
            f'Threshold 0.9: {threshold_dict_cal["0.9"]} \n')

        end = datetime.now()
        duration = end - self.start

        logging.info(f'Training lasted {duration} \n')

        logging.info(f'Training completed \n')

        print_to_consol('Training completed')
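
get_confidence_interval() is also not shown. Given that it receives the number of bootstrap iterations (self.bootiter) and that its result is logged as percentages, one plausible, purely illustrative reading is a bootstrap of the classifier's test-set accuracy; the training arguments and the output directory are accepted but left unused in this sketch, whereas the real helper may refit the model or write a plot with them.

# Hypothetical bootstrap-based sketch of get_confidence_interval(), consistent
# with the call signature and log message above but not taken from the
# project's code.
import numpy as np
from sklearn.metrics import accuracy_score

def get_confidence_interval(X_train, y_train, X_test, y_test, clf, directory,
                            bootiter, tag, alpha=95):
    rng = np.random.default_rng()
    X_test, y_test = np.asarray(X_test), np.asarray(y_test)
    scores = []
    for _ in range(bootiter):
        # resample the test set with replacement and score the fitted classifier
        idx = rng.integers(0, len(y_test), len(y_test))
        scores.append(accuracy_score(y_test[idx], clf.predict(X_test[idx])))
    lower = round(np.percentile(scores, (100 - alpha) / 2) * 100, 2)
    upper = round(np.percentile(scores, 100 - (100 - alpha) / 2) * 100, 2)
    return alpha, upper, lower
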
Example #4
    def detailed_analysis(self):
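        """Analyse the test-set predictions (confusion matrix, precision-recall
        curve, ROC curve with AUC, radar chart, probability-threshold
        exploration), then calibrate the best classifier on the calibration
        split and save it to disk; progress banners are printed directly with
        '*' lines rather than via print_to_consol."""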
        print('*' * 80)
        print(
            '*    Making a confusion matrix for test set classification outcomes'
        )
        print('*' * 80)

        matrix_stats = confusion_matrix_and_stats(self.y_test, self.y_pred,
                                                  self.directory)

        logging.info(f'Detailed analysis of confusion matrix for test set. \n'
                     f'True positives: {matrix_stats["TP"]} \n'
                     f'True negatives: {matrix_stats["TN"]} \n'
                     f'False positives: {matrix_stats["FP"]} \n'
                     f'False negatives: {matrix_stats["FN"]} \n'
                     f'Classification accuracy: {matrix_stats["acc"]} \n'
                     f'Classification error: {matrix_stats["err"]} \n'
                     f'Sensitivity: {matrix_stats["sensitivity"]} \n'
                     f'Specificity: {matrix_stats["specificity"]} \n'
                     f'False positive rate: {matrix_stats["FP-rate"]} \n'
                     f'False negative rate: {matrix_stats["FN-rate"]} \n'
                     f'Precision: {matrix_stats["precision"]} \n'
                     f'F1-score: {matrix_stats["F1-score"]} \n')

        print('*' * 80)
        print(
            '*    Plotting precision recall curve for test set class 1 probabilities'
        )
        print('*' * 80)

        logging.info(
            f'Plotting precision recall curve for class 1 in test set probabilities. \n'
        )

        plot_precision_recall_vs_threshold(self.y_test, self.y_pred_proba_ones,
                                           self.directory)

        print('*' * 80)
        print(
            '*    Plotting ROC curve and calculating AUC for test set class 1 probabilities'
        )
        print('*' * 80)

        logging.info(
            f'Plotting ROC curve for class 1 in test set probabilities. \n')

        self.fpr, self.tpr, self.thresholds = plot_roc_curve(
            self.y_test, self.y_pred_proba_ones, self.directory)

        AUC = round(
            roc_auc_score(self.y_test, self.y_pred_proba_ones) * 100, 2)

        logging.info(
            f'Calculating AUC for ROC curve for class 1 in test set probabilities: {AUC} \n'
        )

        print('*' * 80)
        print('*    Make a radar plot for performance metrics')
        print('*' * 80)

        radar_dict = {
            'Classification accuracy': matrix_stats["acc"],
            'Classification error': matrix_stats["err"],
            'Sensitivity': matrix_stats["sensitivity"],
            'Specificity': matrix_stats["specificity"],
            'False positive rate': matrix_stats["FP-rate"],
            'False negative rate': matrix_stats["FN-rate"],
            'Precision': matrix_stats["precision"],
            'F1-score': matrix_stats["F1-score"],
            'ROC AUC': AUC
        }

        plot_radar_chart(radar_dict, self.directory)

        print('*' * 80)
        print(
            '*    Exploring probability thresholds, sensitivity, specificity for class 1 '
        )
        print('*' * 80)

        threshold_dict = evaluate_threshold(self.tpr, self.fpr,
                                            self.thresholds)

        logging.info(
            f'Exploring different probability thresholds and sensitivity-specificity trade-offs. \n'
            f'Threshold 0.2: {threshold_dict["0.2"]} \n'
            f'Threshold 0.3: {threshold_dict["0.3"]} \n'
            f'Threshold 0.4: {threshold_dict["0.4"]} \n'
            f'Threshold 0.5: {threshold_dict["0.5"]} \n'
            f'Threshold 0.6: {threshold_dict["0.6"]} \n'
            f'Threshold 0.7: {threshold_dict["0.7"]} \n'
            f'Threshold 0.8: {threshold_dict["0.8"]} \n'
            f'Threshold 0.9: {threshold_dict["0.9"]} \n')

        print('*' * 80)
        print(
            '*    Calibrating classifier and writing to disk; getting new accuracy'
        )
        print('*' * 80)

        self.calibrated_clf, clf_acc = calibrate_classifier(
            self.model, self.X_cal, self.y_cal)

        date = datetime.strftime(datetime.now(), '%Y%m%d_%H%M')
        joblib.dump(
            self.calibrated_clf,
            os.path.join(self.directory,
                         'best_calibrated_predictor_' + date + '.pkl'))

        logging.info(
            f'Calibrated the best classifier with X_cal and y_cal and new accuracy {clf_acc}\n'
            f'Writing file to disk in {self.directory} \n')

        end = datetime.now()
        duration = end - self.start

        logging.info(f'Training lasted {duration} \n')

        logging.info(f'Training completed \n')

        print('*' * 80)
        print('*    Training completed')
        print('*' * 80)
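
calibrate_classifier() is the remaining helper these examples rely on. A minimal sketch built on scikit-learn's CalibratedClassifierCV is given below as an assumption only; the exact keyword names and the availability of cv='prefit' vary between scikit-learn versions, and the real helper may use a different calibration method.

# Hypothetical sketch of calibrate_classifier(), based on scikit-learn's
# CalibratedClassifierCV rather than on the project's own implementation.
from sklearn.calibration import CalibratedClassifierCV

def calibrate_classifier(fitted_model, X_cal, y_cal):
    # learn a sigmoid (Platt) calibration mapping on the held-out
    # calibration split, leaving the already-fitted model untouched
    calibrated_clf = CalibratedClassifierCV(fitted_model, method='sigmoid',
                                            cv='prefit')
    calibrated_clf.fit(X_cal, y_cal)
    # accuracy of the calibrated classifier on the calibration data
    clf_acc = calibrated_clf.score(X_cal, y_cal)
    return calibrated_clf, clf_acc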