Example #1
    def get_training_testing_prediction_stats(self):
        print('*' * 80)
        print('*    Getting basic stats for training set and cross-validation')
        print('*' * 80)

        training_stats, y_train_pred, y_train_pred_proba = training_cv_stats(
            self.model, self.X_train, self.y_train, self.cv)

        logging.info(
            f'Basic stats achieved for training set and {self.cv}-fold CV \n'
            f'Accuracy for each individual fold of the {self.cv} CV folds: {training_stats["acc_cv"]} \n'
            f'Accuracy across all {self.cv} CV folds: {training_stats["acc"]} \n'
            f'ROC_AUC across all {self.cv} CV folds: {training_stats["roc_auc"]} \n'
            f'Recall across all {self.cv} CV folds: {training_stats["recall"]} \n'
            f'Precision across all {self.cv} CV folds: {training_stats["precision"]} \n'
            f'F1 score across all {self.cv} CV folds: {training_stats["f1-score"]} \n'
            f'Storing cross-validated y_train classes in y_train_pred \n'
            f'Storing cross-validated y_train probabilities in y_train_pred_proba \n'
        )

        print('*' * 80)
        print('*    Getting class predictions and probabilities for test set')
        print('*' * 80)

        test_stats, self.y_pred, self.y_pred_proba = testing_predict_stats(
            self.model, self.X_test, self.y_test)

        logging.info(
            f'Predicting on the test set. \n'
            f'Storing classes in y_pred and probabilities in y_pred_proba \n')

        print('*' * 80)
        print(
            '*    Calculate prediction stats for y_pred and y_pred_proba of test set'
        )
        print('*' * 80)

        logging.info(
            f'Basic stats on the test set. \n'
            f'Prediction accuracy on the test set: {test_stats["predict_acc"]} \n'
            f'Class distribution in the test set: {test_stats["class_distribution"]} \n'
            f'Matthews Correlation Coefficient: {test_stats["mcc"]} \n'
            f'Average number of class 1 samples: {test_stats["class_one"]} \n'
            f'Average number of class 0 samples: {test_stats["class_zero"]} \n'
            f'Null accuracy: {test_stats["null_acc"]} \n')

        print('*' * 80)
        print(
            '*    Plotting histogram for class 1 prediction probabilities for test set'
        )
        print('*' * 80)

        # store the predicted probabilities for class 1 of the test set
        self.y_pred_proba_ones = self.y_pred_proba[:, 1]

        plot_hist_pred_proba(self.y_pred_proba_ones, self.directory)

        logging.info(
            f'Plotting a histogram of prediction probabilities for class 1 in the test set. \n'
        )
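The helpers training_cv_stats and testing_predict_stats used above come from the surrounding project. As a rough illustration of the quantities logged for the test set (prediction accuracy, class distribution, Matthews correlation coefficient, class counts, null accuracy), here is a minimal scikit-learn based sketch of testing_predict_stats; the name, signature, and dictionary keys are taken from the call sites above, while the body is an assumption rather than the project's actual implementation.

import numpy as np
from sklearn.metrics import accuracy_score, matthews_corrcoef

def testing_predict_stats(model, X_test, y_test):
    """Illustrative sketch: predict on the test set and collect basic stats."""
    y_pred = model.predict(X_test)
    y_pred_proba = model.predict_proba(X_test)

    classes, counts = np.unique(y_test, return_counts=True)
    stats = {
        'predict_acc': accuracy_score(y_test, y_pred),
        'class_distribution': dict(zip(classes.tolist(), counts.tolist())),
        'mcc': matthews_corrcoef(y_test, y_pred),
        'class_one': int(np.sum(y_test == 1)),
        'class_zero': int(np.sum(y_test == 0)),
        # null accuracy: accuracy of always predicting the majority class
        'null_acc': float(counts.max() / counts.sum()),
    }
    return stats, y_pred, y_pred_proba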
Example #2
    def predict(self):

        print_to_consol('Running prediction for provided classifier')

        print_to_consol(
            'Getting class predictions and probabilities for test set')

        test_stats, self.y_pred, self.y_pred_proba = testing_predict_stats(
            self.model, self.X_test_scaled, self.y_test)

        logging.info(
            f'Predicting on the test set. \n'
            f'Storing classes in y_pred and probabilities in y_pred_proba \n')

        print_to_consol(
            'Calculate prediction stats for y_pred and y_pred_proba of test set'
        )

        logging.info(
            f'Basic stats on the test set. \n'
            f'Prediction accuracy on the test set: {test_stats["predict_acc"]} \n'
            f'Class distribution in the test set: {test_stats["class_distribution"]} \n'
            f'Matthews Correlation Coefficient: {test_stats["mcc"]} \n'
            f'Average number of class 1 samples: {test_stats["class_one"]} \n'
            f'Average number of class 0 samples: {test_stats["class_zero"]} \n'
            f'Null accuracy: {test_stats["null_acc"]} \n')

        print_to_consol(
            'Plotting histogram for class 1 prediction probabilities for test set'
        )

        # store the predicted probabilities for class 1 of the test set
        self.y_pred_proba_ones = self.y_pred_proba[:, 1]

        plot_hist_pred_proba(self.y_pred_proba_ones, self.directory)

        logging.info(
            f'Plotting a histogram of prediction probabilities for class 1 in the test set. \n'
        )
    def detailed_analysis(self):
        print_to_consol(
            'Making a confusion matrix for test set classification outcomes')

        matrix_stats = confusion_matrix_and_stats(self.y_test, self.y_pred,
                                                  'before_cal', self.directory)

        logging.info(f'Detailed analysis of confusion matrix for test set. \n'
                     f'True positives: {matrix_stats["TP"]} \n'
                     f'True negatives: {matrix_stats["TN"]} \n'
                     f'False positives: {matrix_stats["FP"]} \n'
                     f'False negatives: {matrix_stats["FN"]} \n'
                     f'Classification accuracy: {matrix_stats["acc"]} \n'
                     f'Classification error: {matrix_stats["err"]} \n'
                     f'Sensitivity: {matrix_stats["sensitivity"]} \n'
                     f'Specificity: {matrix_stats["specificity"]} \n'
                     f'False positive rate: {matrix_stats["FP-rate"]} \n'
                     f'False negative rate: {matrix_stats["FN-rate"]} \n'
                     f'Precision: {matrix_stats["precision"]} \n'
                     f'F1-score: {matrix_stats["F1-score"]} \n')

        print_to_consol(
            'Plotting precision recall curve for test set class 1 probabilities'
        )

        logging.info(
            f'Plotting precision recall curve for class 1 in test set probabilities. \n'
        )

        plot_precision_recall_vs_threshold(self.y_test, self.y_pred_proba_ones,
                                           self.directory)

        print_to_consol(
            'Plotting ROC curve and calculating AUC for test set class 1 probabilities'
        )

        logging.info(
            f'Plotting ROC curve for class 1 in test set probabilities. \n')

        self.fpr, self.tpr, self.thresholds = plot_roc_curve(
            self.y_test, self.y_pred_proba_ones, self.directory)

        AUC = round(
            roc_auc_score(self.y_test, self.y_pred_proba_ones) * 100, 2)

        logging.info(
            f'Calculating AUC for ROC curve for class 1 in test set probabilities: {AUC} \n'
        )

        print_to_consol('Make a radar plot for performance metrics')

        radar_dict = {
            'Classification accuracy': matrix_stats["acc"],
            'Classification error': matrix_stats["err"],
            'Sensitivity': matrix_stats["sensitivity"],
            'Specificity': matrix_stats["specificity"],
            'False positive rate': matrix_stats["FP-rate"],
            'False negative rate': matrix_stats["FN-rate"],
            'Precision': matrix_stats["precision"],
            'F1-score': matrix_stats["F1-score"],
            'ROC AUC': AUC
        }

        plot_radar_chart(radar_dict, self.directory)

        print_to_consol(
            'Exploring probability thresholds, sensitivity, specificity for class 1'
        )

        threshold_dict = evaluate_threshold(self.tpr, self.fpr,
                                            self.thresholds)

        logging.info(
            f'Exploring different probability thresholds and sensitivity-specificity trade-offs. \n'
            f'Threshold 0.2: {threshold_dict["0.2"]} \n'
            f'Threshold 0.3: {threshold_dict["0.3"]} \n'
            f'Threshold 0.4: {threshold_dict["0.4"]} \n'
            f'Threshold 0.5: {threshold_dict["0.5"]} \n'
            f'Threshold 0.6: {threshold_dict["0.6"]} \n'
            f'Threshold 0.7: {threshold_dict["0.7"]} \n'
            f'Threshold 0.8: {threshold_dict["0.8"]} \n'
            f'Threshold 0.9: {threshold_dict["0.9"]} \n')

        print_to_consol(
            'Calibrating classifier and writing to disk; getting new accuracy')

        self.calibrated_clf, clf_acc = calibrate_classifier(
            self.model, self.X_cal_scaled, self.y_cal)

        date = datetime.strftime(datetime.now(), '%Y%m%d_%H%M')
        joblib.dump(
            self.calibrated_clf,
            os.path.join(self.directory,
                         'best_calibrated_predictor_' + date + '.pkl'))

        logging.info(
            f'Calibrated the best classifier with X_cal and y_cal; new accuracy: {clf_acc} \n'
            f'Writing the file to disk in {self.directory} \n')

        print_to_consol(
            'Getting 95% confidence interval for calibrated classifier')

        alpha, upper, lower = get_confidence_interval(
            self.X_train_scaled, self.y_train, self.X_test_scaled, self.y_test,
            self.calibrated_clf, self.directory, self.bootiter, 'calibrated')

        logging.info(f'{alpha}% confidence interval between {lower}% and {upper}% \n'
                     f'for calibrated classifier. \n')

        print_to_consol('Running prediction for calibrated classifier')

        print_to_consol(
            'Getting class predictions and probabilities for test set with calibrated classifier'
        )

        test_stats_cal, self.y_pred_cal, self.y_pred_proba_cal = testing_predict_stats(
            self.calibrated_clf, self.X_test_scaled, self.y_test)

        logging.info(
            f'Predicting on the test set with calibrated classifier. \n'
            f'Storing classes for the calibrated classifier in y_pred_cal and probabilities in y_pred_proba_cal. \n'
        )

        print_to_consol(
            'Calculate prediction stats for y_pred_cal and y_pred_proba_cal of test set with calibrated classifier'
        )

        logging.info(
            f'Basic stats on the test set with calibrated classifier. \n'
            f'Prediction accuracy on the test set: {test_stats_cal["predict_acc"]} \n'
            f'Class distribution in the test set: {test_stats_cal["class_distribution"]} \n'
            f'Matthews Correlation Coefficient: {test_stats_cal["mcc"]} \n'
            f'Average number of class 1 samples: {test_stats_cal["class_one"]} \n'
            f'Average number of class 0 samples: {test_stats_cal["class_zero"]} \n'
            f'Null accuracy: {test_stats_cal["null_acc"]} \n')

        print_to_consol(
            'Plotting histogram for class 1 prediction probabilities for test set'
        )

        # store the predicted probabilities for class 1 of the test set
        self.y_pred_proba_cal_ones = self.y_pred_proba_cal[:, 1]

        plot_hist_pred_proba(self.y_pred_proba_cal_ones, self.directory)

        logging.info(
            f'Plotting a histogram of prediction probabilities for class 1 in the test set for the calibrated classifier. \n'
        )

        print_to_consol(
            'Making a confusion matrix for test set classification outcomes with calibrated classifier'
        )

        matrix_stats_cal = confusion_matrix_and_stats(self.y_test,
                                                      self.y_pred_cal,
                                                      'after_cal',
                                                      self.directory)

        logging.info(
            f'Detailed analysis of confusion matrix for test set with calibrated classifier. \n'
            f'True positives: {matrix_stats_cal["TP"]} \n'
            f'True negatives: {matrix_stats_cal["TN"]} \n'
            f'False positives: {matrix_stats_cal["FP"]} \n'
            f'False negatives: {matrix_stats_cal["FN"]} \n'
            f'Classification accuracy: {matrix_stats_cal["acc"]} \n'
            f'Classification error: {matrix_stats_cal["err"]} \n'
            f'Sensitivity: {matrix_stats_cal["sensitivity"]} \n'
            f'Specificity: {matrix_stats_cal["specificity"]} \n'
            f'False positive rate: {matrix_stats_cal["FP-rate"]} \n'
            f'False negative rate: {matrix_stats_cal["FN-rate"]} \n'
            f'Precision: {matrix_stats_cal["precision"]} \n'
            f'F1-score: {matrix_stats_cal["F1-score"]} \n')

        print_to_consol(
            'Plotting precision recall curve for test set class 1 probabilities with calibrated classifier'
        )

        logging.info(
            f'Plotting precision recall curve for class 1 in test set probabilities with calibrated classifier. \n'
        )

        plot_precision_recall_vs_threshold(self.y_test,
                                           self.y_pred_proba_cal_ones,
                                           self.directory)

        print_to_consol(
            'Plotting ROC curve and calculating AUC for test set class 1 probabilities with calibrated classifier'
        )

        logging.info(
            f'Plotting ROC curve for class 1 in test set probabilities with calibrated classifier. \n'
        )

        self.fpr_cal, self.tpr_cal, self.thresholds_cal = plot_roc_curve(
            self.y_test, self.y_pred_proba_cal_ones, self.directory)

        AUC_cal = round(
            roc_auc_score(self.y_test, self.y_pred_proba_cal_ones) * 100, 2)

        logging.info(
            f'Calculating AUC for ROC curve for class 1 in test set probabilities with calibrated classifier: {AUC_cal} \n'
        )

        print_to_consol(
            'Make a radar plot for performance metrics with calibrated classifier'
        )

        radar_dict_cal = {
            'Classification accuracy': matrix_stats_cal["acc"],
            'Classification error': matrix_stats_cal["err"],
            'Sensitivity': matrix_stats_cal["sensitivity"],
            'Specificity': matrix_stats_cal["specificity"],
            'False positive rate': matrix_stats_cal["FP-rate"],
            'False negative rate': matrix_stats_cal["FN-rate"],
            'Precision': matrix_stats_cal["precision"],
            'F1-score': matrix_stats_cal["F1-score"],
            'ROC AUC': AUC_cal
        }

        plot_radar_chart(radar_dict_cal, self.directory)

        print_to_consol(
            'Exploring probability thresholds, sensitivity, specificity for class 1 with calibrated classifier'
        )

        threshold_dict_cal = evaluate_threshold(self.tpr_cal, self.fpr_cal,
                                                self.thresholds_cal)

        logging.info(
            f'Exploring different probability thresholds and sensitivity-specificity trade-offs \n'
            f'for calibrated classifier. \n'
            f'Threshold 0.2: {threshold_dict_cal["0.2"]} \n'
            f'Threshold 0.3: {threshold_dict_cal["0.3"]} \n'
            f'Threshold 0.4: {threshold_dict_cal["0.4"]} \n'
            f'Threshold 0.5: {threshold_dict_cal["0.5"]} \n'
            f'Threshold 0.6: {threshold_dict_cal["0.6"]} \n'
            f'Threshold 0.7: {threshold_dict_cal["0.7"]} \n'
            f'Threshold 0.8: {threshold_dict_cal["0.8"]} \n'
            f'Threshold 0.9: {threshold_dict_cal["0.9"]} \n')

        end = datetime.now()
        duration = end - self.start

        logging.info(f'Training lasted for {duration} \n')

        logging.info(f'Training completed \n')

        print_to_consol('Training completed')
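For reference, two more of the helpers used above can be approximated as follows. These are minimal sketches assuming scikit-learn: only the call sites and return shapes from the example are taken from the original code, and the real evaluate_threshold and calibrate_classifier may differ in their details.

import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score

def evaluate_threshold(tpr, fpr, thresholds):
    """Illustrative sketch: report sensitivity (TPR) and specificity (1 - FPR)
    at a range of probability cut-offs, read off the ROC curve arrays."""
    threshold_dict = {}
    for cutoff in (0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9):
        # index of the ROC point whose threshold is closest to the cut-off
        idx = int(np.argmin(np.abs(np.asarray(thresholds) - cutoff)))
        threshold_dict[str(cutoff)] = {'sensitivity': float(tpr[idx]),
                                       'specificity': float(1 - fpr[idx])}
    return threshold_dict

def calibrate_classifier(fitted_model, X_cal, y_cal):
    """Illustrative sketch: fit a sigmoid (Platt) calibration mapping on the
    held-out calibration set and report the calibrated accuracy on it."""
    # 'prefit' assumes the model is already fitted; only the calibration
    # mapping is learned from X_cal / y_cal
    calibrated_clf = CalibratedClassifierCV(fitted_model, method='sigmoid',
                                            cv='prefit')
    calibrated_clf.fit(X_cal, y_cal)
    clf_acc = round(accuracy_score(y_cal, calibrated_clf.predict(X_cal)) * 100, 2)
    return calibrated_clf, clf_acc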