Example #1
    def test_real_graph(self):
        perf_list = self.get_real_graph()
        # Dump the raw rate curves for manual inspection
        print([p.score.TPR for p in perf_list])
        print([p.score.TNR for p in perf_list])
        print([p.score.FPR for p in perf_list])
        print([p.score.FNR for p in perf_list])

        # Quality targets used to derive the thresholds below
        Minimum_true_positive_rate = 0.9
        Acceptable_false_negative_rate = 0.1
        Minimum_true_negative_rate = 0.9
        Acceptable_false_positive_rate = 0.1

        # Derive one threshold per quality target; the helper names state whether
        # the guarantee holds above ("upper") or below the threshold
        thre_max_TPR, val_TPR = self.quality_evaluator.get_threshold_where_upper_are_more_than_xpercent_TP(
            perfs_list=perf_list, percent=Minimum_true_positive_rate)
        thre_max_FNR, val_FNR = self.quality_evaluator.get_threshold_where_upper_are_less_than_xpercent_FN(
            perfs_list=perf_list, percent=Acceptable_false_negative_rate)
        thre_max_TNR, val_TNR = self.quality_evaluator.get_threshold_where_below_are_more_than_xpercent_TN(
            perfs_list=perf_list, percent=Minimum_true_negative_rate)
        thre_max_FPR, val_FPR = self.quality_evaluator.get_threshold_where_below_are_less_than_xpercent_FP(
            perfs_list=perf_list, percent=Acceptable_false_positive_rate)
        thre_max_F1, val_F1 = self.quality_evaluator.get_max_threshold_for_max_F1(
            perfs_list=perf_list)

        self.logger.info(
            f"Found TPR threshold {thre_max_TPR} (value {val_TPR}) / above it, more than 90% are true positives"
        )
        self.logger.info(
            f"Found TNR threshold {thre_max_TNR} (value {val_TNR}) / below it, more than 90% are true negatives"
        )
        self.logger.info(
            f"Found FNR threshold {thre_max_FNR} (value {val_FNR}) / above it, fewer than 10% are false negatives"
        )
        self.logger.info(
            f"Found FPR threshold {thre_max_FPR} (value {val_FPR}) / below it, fewer than 10% are false positives"
        )
        self.logger.info(f"Found F1 threshold {thre_max_F1} (value {val_F1})")

        # Store the thresholds in a calibrator configuration so they can be drawn on the plot
        tmp_conf = calibrator_conf.Default_calibrator_conf()
        tmp_conf.thre_below_at_least_xpercent_TNR = thre_max_TNR
        tmp_conf.thre_upper_at_most_xpercent_FNR = thre_max_FNR
        tmp_conf.thre_upper_at_least_xpercent_TPR = thre_max_TPR
        tmp_conf.thre_below_at_most_xpercent_FPR = thre_max_FPR
        tmp_conf.maximum_F1 = thre_max_F1

        self.plotmaker.print_graph_with_thresholds(
            perf_list,
            thresholds_handler=tmp_conf,
            output_path=self.output_folder,
            file_name="real_graph.png")

        self.assertAlmostEqual(thre_max_TPR, 0.8, delta=0.1)
        self.assertAlmostEqual(thre_max_TNR, 0.2, delta=0.1)
        self.assertAlmostEqual(thre_max_FNR, 0.8, delta=0.1)
        self.assertAlmostEqual(thre_max_FPR, 0.1,
                               delta=0.1)  # Would have been 0.2 without delta
        self.assertAlmostEqual(thre_max_F1, 0.5, delta=0.1)
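For reference, here is a standalone sketch of the kind of threshold search exercised above, assuming only that each performance entry exposes a threshold and a score with a TPR attribute. The Perf/Score dataclasses and the helper name below are illustrative stand-ins, not douglas-quaid's actual API.

# Illustrative sketch only: simplified stand-ins, not the library's classes.
from dataclasses import dataclass
from typing import List, Optional, Tuple


@dataclass
class Score:
    TPR: float


@dataclass
class Perf:
    threshold: float
    score: Score


def lowest_threshold_with_min_tpr(perfs: List[Perf], min_tpr: float) -> Optional[Tuple[float, float]]:
    # Keep only the points that satisfy the target rate, then take the smallest threshold
    candidates = [(p.threshold, p.score.TPR) for p in perfs if p.score.TPR >= min_tpr]
    return min(candidates, key=lambda c: c[0]) if candidates else None


if __name__ == "__main__":
    perfs = [Perf(t / 10, Score(TPR=t / 10)) for t in range(11)]
    print(lowest_threshold_with_min_tpr(perfs, 0.9))  # (0.9, 0.9)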
Example #2
    def test_get_min_min_decreasing(self):
        perf_list = self.get_decreasing_graph()
        print([p.score.TPR for p in perf_list])
        thre, val = self.quality_evaluator.get_optimal_for_optimized_attribute(
            perfs_list=perf_list,
            attribute="TPR",
            higher=False,
            rightmost=True,
            is_increasing=False)
        self.logger.info(f"Found value {thre}")
        tmp_conf = calibrator_conf.Default_calibrator_conf()
        tmp_conf.thre_upper_at_least_xpercent_TPR = thre
        self.plotmaker.print_graph_with_thresholds(
            perf_list,
            thresholds_handler=tmp_conf,
            output_path=self.output_folder,
            file_name="min_min_dec.png")
        self.assertAlmostEqual(thre, 1.0, delta=0.1)
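As a hedged illustration of what this test expects (with higher=False and rightmost=True on a decreasing TPR curve, the search should land near a threshold of 1.0), here is a self-contained "rightmost minimum" sketch. The curve representation and function name are assumptions for illustration, not the evaluator's real code.

# Illustrative sketch only: not similarity_graph_quality_evaluator's implementation.
from typing import List, Tuple


def rightmost_minimum(curve: List[Tuple[float, float]]) -> Tuple[float, float]:
    # curve is a list of (threshold, value) points; return the rightmost point holding the minimal value
    min_value = min(value for _, value in curve)
    return max(point for point in curve if point[1] == min_value)


if __name__ == "__main__":
    decreasing = [(t / 10, 1.0 - t / 10) for t in range(11)]
    print(rightmost_minimum(decreasing))  # (1.0, 0.0)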
Example #3
    def evaluate_list_results(self, list_results: List[Dict],
                              gt_path: pathlib.Path,
                              output_path: pathlib.Path,
                              cal_conf: Optional[calibrator_conf.Default_calibrator_conf] = None) -> List[perf_datastruct.Perf]:
        # Build a fresh configuration per call rather than sharing a mutable default instance
        if cal_conf is None:
            cal_conf = calibrator_conf.Default_calibrator_conf()

        # Load the ground truth file
        gt_graph = graph_datastructure.load_visjs_to_graph(gt_path)

        # tmp_cal_conf = calibrator_conf.Default_calibrator_conf()

        # Call the graph evaluator on this pair: result_list + gt_graph
        self.logger.debug("Extracting performance list")
        perf_eval = similarity_graph_quality_evaluator.similarity_graph_quality_evaluator(cal_conf)
        perfs_list = perf_eval.get_perf_list(list_results, gt_graph)  # ==> List of scores
        self.logger.debug(f"Fetched performance list: {pformat(perfs_list)}")

        # Do the same for decisions
        perf_eval.get_perf_list_decision(list_results, gt_graph, output_folder=output_path)

        return perfs_list
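A rough, self-contained sketch of the kind of comparison get_perf_list() performs for each threshold: predicted edges versus ground-truth edges turned into TPR/FPR. The edge-set representation and the tpr_fpr function below are assumptions made for illustration, not the evaluator's internals.

# Illustrative sketch only: simplified edge-set comparison, not douglas-quaid's code.
from typing import FrozenSet, Set, Tuple

Edge = FrozenSet[str]


def edge(a: str, b: str) -> Edge:
    return frozenset((a, b))


def tpr_fpr(predicted: Set[Edge], truth: Set[Edge], universe: Set[Edge]) -> Tuple[float, float]:
    # Confusion counts over all candidate edges
    tp = len(predicted & truth)
    fp = len(predicted - truth)
    fn = len(truth - predicted)
    tn = len(universe - predicted - truth)
    tpr = tp / (tp + fn) if (tp + fn) else 0.0
    fpr = fp / (fp + tn) if (fp + tn) else 0.0
    return tpr, fpr


if __name__ == "__main__":
    truth = {edge("A", "B"), edge("B", "C")}
    predicted = {edge("A", "B"), edge("A", "C")}
    universe = {edge("A", "B"), edge("B", "C"), edge("A", "C")}
    print(tpr_fpr(predicted, truth, universe))  # (0.5, 1.0)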
Example #4
    def test_calibrator_launch_FN_TP(self):
        self.logger.debug("Launching calibration... (tests)")
        new_calibrator_conf = calibrator_conf.Default_calibrator_conf()

        # Calibration targets and threshold-sampling parameters
        new_calibrator_conf.Acceptable_false_positive_rate = 0.1
        new_calibrator_conf.Acceptable_false_negative_rate = 0.1
        new_calibrator_conf.PTS_NB = 100
        new_calibrator_conf.MIN_THRESHOLD = 0
        new_calibrator_conf.MAX_THRESHOLD = 1
        new_calibrator_conf.NB_TO_CHECK = 3

        self.calibrator_instance.set_calibrator_conf(
            tmp_calibrator_conf=new_calibrator_conf)
        list_algos = self.calibrator_instance.calibrate_douglas_quaid(
            folder_of_pictures=self.micro_dataset_input_path,
            ground_truth_file=self.micro_dataset_gt_path,
            output_folder=self.micro_dataset_output_path / "FPFN")
        # Each calibrated algorithm must keep its yes-to-maybe threshold below (or equal to) its maybe-to-no threshold
        for algo in list_algos:
            self.assertTrue(
                algo.threshold_yes_to_maybe <= algo.threshold_maybe_to_no)
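The final assertion encodes the ordering of the decision bands. As a hedged sketch of what that ordering implies, assuming a lower distance means a more confident match; the AlgoThresholds dataclass and decide() helper are stand-ins, not the calibrator's datastructures.

# Illustrative sketch only: stand-in types, not douglas-quaid's calibrator output.
from dataclasses import dataclass


@dataclass
class AlgoThresholds:
    threshold_yes_to_maybe: float
    threshold_maybe_to_no: float


def decide(distance: float, thresholds: AlgoThresholds) -> str:
    # Three decision bands, valid only when threshold_yes_to_maybe <= threshold_maybe_to_no
    if distance <= thresholds.threshold_yes_to_maybe:
        return "YES"
    if distance <= thresholds.threshold_maybe_to_no:
        return "MAYBE"
    return "NO"


if __name__ == "__main__":
    t = AlgoThresholds(threshold_yes_to_maybe=0.3, threshold_maybe_to_no=0.6)
    print([decide(d, t) for d in (0.1, 0.5, 0.9)])  # ['YES', 'MAYBE', 'NO']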