def __store_stats(self):
        """Summarize learner and drift-detector statistics, print them, and
        persist them to ``<project_path><learner>_<detector>.<setting>.txt``.

        Reads the per-drift memory/runtime samples collected during the run;
        when no drift was ever located, falls back to a single end-of-run
        snapshot (asizeof / total runtime) so the report still has values.
        """
        learner_name = TornadoDic.get_short_names(self.learner.LEARNER_NAME)
        detector_name = self.drift_detector.DETECTOR_NAME
        detector_setting = self.drift_detector.get_settings()

        # File name embeds the first detector-setting element, e.g. "hat_ddm.0.01".
        file_name = learner_name + "_" + detector_name + "." + detector_setting[
            0]

        lrn_error_rate = PredictionEvaluator.calculate_error_rate(
            self.learner.get_global_confusion_matrix())
        # dl = detection delay; tp/fp/fn = true/false positives, false negatives
        # relative to the known actual drift points within the acceptance interval.
        dl, tp, fp, fn = DriftDetectionEvaluator.calculate_dl_tp_fp_fn(
            self.__located_drift_points, self.__actual_drift_points,
            self.__drift_acceptance_interval)

        if len(self.__located_drift_points) != 0:
            # Drifts were located: average the samples recorded at each drift.
            lrn_mem = numpy.mean(self.__learner_memory_usage)
            lrn_ave_runtime = numpy.mean(self.__learner_runtime)
            lrn_total_runtime = self.learner.get_total_running_time()
            ddm_mem = numpy.mean(self.__drift_detection_memory_usage)
            ddm_avg_runtime = numpy.mean(self.__drift_detection_runtime)
            ddm_total_runtime = self.drift_detector.TOTAL_RUNTIME
        else:
            # No drift located: no samples exist, so measure once right now.
            lrn_mem = asizeof.asizeof(self.learner, limit=20)
            lrn_ave_runtime = self.learner.get_total_running_time()
            lrn_total_runtime = lrn_ave_runtime
            ddm_mem = asizeof.asizeof(self.drift_detector, limit=20)
            ddm_avg_runtime = self.drift_detector.TOTAL_RUNTIME
            ddm_total_runtime = ddm_avg_runtime

        stats = learner_name + " + " + detector_name + ": " + "\n\t" + \
                "Classifier Error-rate: " + "%0.2f" % (100 * lrn_error_rate) + "%" + "," + "\n\t" + \
                "Classifier Average Memory Usage (bytes): " + "%0.2f" % lrn_mem + "," + "\n\t" + \
                "Classifier Average Runtime (ms): " + "%0.2f" % lrn_ave_runtime + "," + "\n\t" + \
                "Classifier Total Runtime (ms): " + "%0.2f" % lrn_total_runtime + "," + "\n\t" + \
                "Detection Delay: " + "%0.2f" % dl + " TP: " + str(tp) + " FP: " + str(fp) + " FN: " + str(fn) + "," + "\n\t" + \
                "Average Detection Memory Usage (bytes): " + "%0.2f" % ddm_mem + "," + "\n\t" + \
                "Average Detection Runtime (ms): " + "%0.2f" % ddm_avg_runtime + "," + "\n\t" + \
                "Total Detection Runtime (ms): " + "%0.2f" % ddm_total_runtime + "," + "\n\t" + \
                "Drift Points detected: " + str(self.__located_drift_points)

        print(stats)
        # Open only once the stats are ready, and use a context manager so the
        # handle is closed even if write() raises (the original opened the file
        # up-front and leaked it on any intervening exception).
        with open(self.__project_path + file_name.lower() + ".txt", "w") as st_wr:
            st_wr.write(stats)
    def __store_stats(self):
        """Summarize the learner's statistics, print them, and persist them to
        ``<project_path><learner_short_name>.txt``.

        Reports the global error rate, a one-shot memory measurement of the
        learner object, and its total running time.
        """
        lrn_error_rate = PredictionEvaluator.calculate_error_rate(self.learner.get_global_confusion_matrix())
        # Single end-of-run snapshot of the learner's footprint and runtime.
        lrn_mem = asizeof.asizeof(self.learner, limit=20)
        lrn_runtime = self.learner.get_total_running_time()

        stats = self.learner.LEARNER_NAME + "\n\t" + \
                "Classifier Error-rate: " + "%0.2f" % (100 * lrn_error_rate) + "%" + "\n\t" + \
                "Classifier Memory Usage (bytes): " + "%0.2f" % lrn_mem + "\n\t" + \
                "Classifier Runtime (ms): " + "%0.2f" % lrn_runtime

        print(stats)

        # Context manager guarantees the file is closed even if write() raises;
        # the original opened the file before computing stats and leaked the
        # handle on any intervening exception.
        file_name = TornadoDic.get_short_names(self.learner.LEARNER_NAME).lower() + ".txt"
        with open(self.__project_path + file_name, "w") as st_wr:
            st_wr.write(stats)