Code example #1
0
    def run(self) -> Table:
        """Compute three gain tables from the aggregate metric table —
        hybrid over direct, transitive over direct, and hybrid over
        transitive — save each as a CSV under PATH_TO_GAIN, and register
        the written files in self.export_paths.

        Returns:
            The aggregate MetricTable the gains were derived from.
        """
        agg_metric_table = MetricTable(
            path_to_table=PATH_TO_METRIC_TABLE_AGGREGATE)

        # (output file name, comparison-dict factory) for each gain table;
        # the compute/sort/save pipeline is identical for all three.
        gain_exports = [
            ("hybrid_over_direct.csv", create_comparison_dict),
            ("transitive_over_direct.csv", create_comparison_dict_transitive),
            ("hybrid_over_transitive.csv",
             create_comparison_dict_hybrid_over_transitive),
        ]
        for file_name, make_comparison_dict in gain_exports:
            export_path = os.path.join(PATH_TO_GAIN, file_name)
            agg_metric_table.calculate_gain_between_techniques(
                make_comparison_dict()).sort(DATASET_COLUMN_ORDER).save(
                    export_path)
            self.export_paths.append(export_path)

        # NOTE(review): PATH_TO_RQ2_GAIN is registered as an export but
        # nothing here writes it (the save call was commented out upstream)
        # — confirm the file is produced elsewhere before relying on it.
        self.export_paths.append(PATH_TO_RQ2_GAIN)
        return agg_metric_table
Code example #2
0
File: test_gain.py  Project: thearod5/Tracer
    def assert_scores(self, table, expected_values):
        """Assert the gain table built from *table* contains exactly one
        row and that every (metric name, value) pair in *expected_values*
        matches that row once rounded to N_SIG_FIGS."""
        comparisons = {self.TEST_DATASET_NAME: ("old", "new")}
        gain_df = MetricTable(table).calculate_gain_between_techniques(
            comparisons).table

        self.assertEqual(1, len(gain_df))
        row = gain_df.iloc[0]
        for metric_name, expected in expected_values:
            self.assertEqual(round(expected, N_SIG_FIGS), row[metric_name])
Code example #3
0
 def test_gain(self):
     """Smoke test: computing the gain between the 'old' and 'new'
     techniques on the fixture data runs without error; the resulting
     gain table is printed for manual inspection."""
     comparisons = {self.TEST_DATASET_NAME: ("old", "new")}
     table = MetricTable(self.data)
     print(table.calculate_gain_between_techniques(comparisons))
Code example #4
0
    def run(self) -> Table:
        """For every artifact-path permutation in POSSIBLE_PATHS, evaluate
        each dataset's best direct, transitive, and hybrid technique on that
        path, accumulate the per-path metric and gain (hybrid over direct)
        tables, and export both as melted CSVs.

        Returns:
            The aggregated gain table accumulated across all paths.
            NOTE(review): the annotation says Table but a pandas DataFrame
            is returned; callers appear to accept it, so it is unchanged.
        """
        tracer = Tracer()

        def get_metrics(d_name, t_def: str):
            # Thin pass-through; kept so add_metrics reads clearly.
            return tracer.get_metrics(d_name, t_def)

        def add_metrics(d_name, t_def: str, t_type: str, p_name: str):
            # Evaluate one technique and record its metrics with
            # identifying metadata (dataset, path, technique type, name).
            t_metrics = get_metrics(d_name, t_def)
            metric_table.add(
                t_metrics,
                {
                    DATASET_COLNAME: d_name,
                    "path": p_name,
                    "type": t_type,
                    NAME_COLNAME: t_def,
                },
            )

        aggregate_gain = None
        aggregate_metric = None
        for path in POSSIBLE_PATHS:
            metric_table = MetricTable()
            comparison_dict = {}
            path_name = path_to_str(path)
            # The rewritten path depends only on `path`, not on the dataset;
            # hoisted out of the dataset loop (was recomputed per dataset).
            new_path = [str(path[0]), str(path[1]), str(path[2])]

            for dataset_name in DATASET_COLUMN_ORDER:
                # direct
                direct_technique_def = change_paths_in_technique(
                    get_best_direct_technique(dataset_name), new_path)
                add_metrics(dataset_name, direct_technique_def, DIRECT_ID,
                            path_name)

                # transitive
                transitive_technique_def = change_paths_in_technique(
                    get_best_transitive_technique(dataset_name), new_path)
                add_metrics(dataset_name, transitive_technique_def,
                            TRANSITIVE_ID, path_name)

                # hybrid
                hybrid_technique_definition = change_paths_in_technique(
                    get_best_hybrid_technique(dataset_name), new_path)
                add_metrics(dataset_name, hybrid_technique_definition,
                            HYBRID_ID, path_name)

                # Gain is measured for hybrid relative to direct.
                comparison_dict[dataset_name] = (direct_technique_def,
                                                 hybrid_technique_definition)

            gain_table = metric_table.calculate_gain_between_techniques(
                comparison_dict)
            gain_table.table["path"] = path_name

            # Newest path's rows are prepended, matching the original order.
            aggregate_gain = (gain_table.table if aggregate_gain is None else
                              pd.concat([gain_table.table, aggregate_gain]))
            aggregate_metric = (metric_table.table
                                if aggregate_metric is None else pd.concat(
                                    [metric_table.table, aggregate_metric]))

        # Export once after all paths are accumulated. The original saved
        # and re-appended these paths on every loop iteration, producing
        # duplicate entries in self.export_paths and redundant file writes;
        # the final file contents are identical.
        MetricTable(aggregate_metric).create_lag_norm_inverted(
            drop_old=True).melt_metrics().save(METRIC_TABLE_EXPORT_PATH)
        self.export_paths.append(METRIC_TABLE_EXPORT_PATH)

        MetricTable(aggregate_gain).melt_metrics().save(GAIN_TABLE_EXPORT_PATH)
        self.export_paths.append(GAIN_TABLE_EXPORT_PATH)

        return aggregate_gain