Example #1
0
    def _compute_perfs_list(self, list_results: List,
                            gt_graph: GraphDataStruct):
        """Score *list_results* against the ground-truth graph at PTS_NB evenly
        spaced thresholds and return one Perf entry per evaluation point.

        :param list_results: raw matching results to be evaluated
        :param gt_graph: ground-truth graph the results are scored against
        :return: List[perf_datastruct.Perf] — one (score, threshold) pair per point
        """
        # Init a void performance list
        perfs_list: List[perf_datastruct.Perf] = []

        # Distance between two consecutive thresholds (loop-invariant, hoisted)
        step = ((self.cal_conf.MAX_THRESHOLD - self.cal_conf.MIN_THRESHOLD)
                / self.cal_conf.PTS_NB)

        # For each evaluation points
        for i in range(self.cal_conf.PTS_NB):
            # BUGFIX: the previous formula `i * step` ignored MIN_THRESHOLD,
            # so the sweep always started at 0 instead of the configured
            # lower bound. Offset the ramp by MIN_THRESHOLD.
            curr_threshold = self.cal_conf.MIN_THRESHOLD + i * step
            self.logger.info(
                f"Current threshold computation : {curr_threshold}")

            # Compute score for this threshold
            tmp_score = self.compute_score_for_one_threshold(
                list_results, gt_graph, curr_threshold)
            self.logger.info(f"Current score for this threshold : {tmp_score}")

            # Store the score in the performance datastructure
            tmp_perf = perf_datastruct.Perf(tmp_score, curr_threshold)

            # Add to performance list
            perfs_list.append(tmp_perf)

        return perfs_list
Example #2
0
    def get_decreasing_graph(self):
        """Build a synthetic performance curve of 20 points whose TPR falls
        linearly from 1 toward 0 as the threshold climbs from 0 toward 1."""
        NB_POINTS = 20

        def _make_point(idx):
            # One synthetic stats entry with only the TPR field populated.
            stats = stats_datastruct.Stats_datastruct()
            stats.TPR = 1 - (idx / NB_POINTS)
            return perf_datastruct.Perf(score=stats,
                                        threshold=idx / NB_POINTS)

        return [_make_point(idx) for idx in range(NB_POINTS)]
Example #3
0
    def get_real_graph(self):
        """Build a synthetic but realistic-looking performance curve:
        TPR/FPR ramp up, TNR/FNR ramp down (each pair offset by a small
        delta), and F1 rises until the midpoint then plateaus there."""
        NB_POINTS = 20
        DELTA = 0.05

        curve = []
        plateau_f1 = 0
        for idx in range(NB_POINTS):
            ratio = idx / NB_POINTS

            stats = stats_datastruct.Stats_datastruct()
            stats.TPR = ratio              # increasing
            stats.FPR = ratio + DELTA      # increasing, shifted up
            stats.TNR = 1 - ratio          # decreasing
            stats.FNR = 1 - ratio - DELTA  # decreasing, shifted down

            # F1 follows the ramp during the first half, then stays frozen
            # at the last value it reached before the midpoint.
            if idx < NB_POINTS / 2:
                plateau_f1 = ratio
            stats.F1 = plateau_f1

            curve.append(perf_datastruct.Perf(score=stats, threshold=ratio))

        return curve
Example #4
0
    def launch(self):
        """Sweep MAX_DIST_FOR_NEW_CLUSTER across [min_threshold, max_threshold),
        running one full server/client evaluation round per threshold, then
        plot all collected performances.

        Side effects: creates folders under datasets/OUTPUT, starts/stops a
        server instance per iteration, and writes a plot to the output folder.
        """
        # ========= INPUTS =========
        # Input files folder
        image_folder = get_homedir() / "datasets" / "MINI_DATASET"
        # Ground truth file
        gt = get_homedir() / "datasets" / "MINI_DATASET_VISJS.json"
        # Output general folder
        output_folder = get_homedir() / "datasets" / "OUTPUT"
        output_folder.mkdir(parents=True, exist_ok=True)

        # ========= GOAL =========
        perfs = []

        iterations_limit = 50  # Or nb of iteration if complete exploration

        max_threshold = 1
        min_threshold = 0

        # Distance between two consecutive thresholds (loop-invariant, hoisted)
        step = (max_threshold - min_threshold) / iterations_limit

        # ========= CONFIGURATION CHOSING =========

        for i in range(iterations_limit):

            # BUGFIX: offset by min_threshold so the sweep actually starts at
            # the configured lower bound (the old `i * step` formula ignored
            # it; identical while min_threshold == 0, wrong otherwise).
            curr_threshold = min_threshold + i * step
            self.logger.info(
                f"Current threshold computation : {curr_threshold}")

            # If an instance survives from the previous round, drop it
            if self.server_launcher is not None:
                del self.server_launcher

            # Put configuration in place
            self.server_launcher = instance_handler.Instance_Handler()
            self.server_launcher.dist_conf.MAX_DIST_FOR_NEW_CLUSTER = curr_threshold

            # Create output folder for this configuration (same name as before,
            # built with an f-string instead of str()+join)
            tmp_output = output_folder / f"{curr_threshold}_threshold"
            tmp_output.mkdir(parents=True, exist_ok=True)

            # ========= CONFIGURATION LAUNCH =========

            # Launch Server
            self.server_launcher.launch()
            time.sleep(2)

            # Launch client tester
            self.client_launcher = evaluator.InternalClusteringQualityEvaluator(
            )
            perf_overview = self.client_launcher.get_storage_graph(
                image_folder, gt, tmp_output)
            self.logger.warning(f"Perf overview added : {perf_overview}")

            perfs.append(perf_datastruct.Perf(perf_overview, curr_threshold))

            # Wait for client end

            # ========= TIDY UP FOR NEXT ROUND =========

            # Flush server
            self.server_launcher.flush_db()

            # Shutdown server
            self.server_launcher.stop()

            # Wait for shutdown (wait for workers to shutdown, usually longer than db)
            while not self.server_launcher.check_worker():
                time.sleep(1)  # Enough ?
                self.logger.warning("Waiting for workers to stop .. ")

            # Remove all workers
            self.server_launcher.flush_workers()
            time.sleep(2)

        # Print plot
        TwoD_plot = two_dimensions_plot.TwoDimensionsPlot()
        TwoD_plot.print_graph(perfs, output_folder)