# Assumed imports for this snippet: the TensorFlow 1.x API and the Keras backend.
# Evaluator, TrainCF, TrainCFR and TrainCFRT are project-local modules whose
# import paths are not shown in the original excerpt.
import tensorflow as tf
from keras import backend as K


class ExperimentRunner:
    _log_names = [
        '10x2_1W',
        '10x2_3W',
        '10x2_1S',
        '10x2_3S',
        '10x5_1W',
        '10x5_3W',
        '10x5_1S',
        '10x5_3S',
        '10x20_1W',
        '10x20_3W',
        '10x20_1S',
        '10x20_3S',
        '5x5_1W',
        '5x5_3W',
        '5x5_1S',
        '5x5_3S',
        '50x5_1W',
        '50x5_3W',
        '50x5_1S',
        '50x5_3S',
        'BPI2017_W',
        'BPI2017_S',
        'BPI2012_W',
        'BPI2012_W_2',
        'BPI2012_S'
    ]

    def __init__(self, use_old_model, use_time, port, python_port, train, evaluate):
        self._use_old_model = use_old_model
        self._use_time = use_time
        self._port = port
        self._python_port = python_port
        self._train = train
        self._evaluate = evaluate

        if use_old_model:
            self._models_folder = 'old_model'
        else:
            self._models_folder = 'new_model'

        self._evaluator = Evaluator(self._port, self._python_port)
        print(self._models_folder)

    def _run_single_experiment(self, log_name):
        # Train and/or evaluate the configured models for a single event log.
        print('log_name:', log_name)
        print('use_time:', self._use_time)
        print('train:', self._train)
        print('evaluate:', self._evaluate)

        if self._use_time:
            if self._train:
                TrainCFRT.train(log_name, self._models_folder, self._use_old_model)
            if self._evaluate:
                self._evaluator.evaluate_time(log_name, self._models_folder)
        else:
            if self._train:
                TrainCF.train(log_name, self._models_folder, self._use_old_model)
                TrainCFR.train(log_name, self._models_folder, self._use_old_model)
            if self._evaluate:
                self._evaluator.evaluate_all(log_name, self._models_folder)

    def run_experiments(self, input_log_name):
        # Run one experiment if a log name is given, otherwise iterate over all logs.
        # Limit TensorFlow to four intra-/inter-op threads and register the
        # session with the Keras backend before training or evaluating.
        config = tf.ConfigProto(intra_op_parallelism_threads=4, inter_op_parallelism_threads=4,
                                allow_soft_placement=True)
        session = tf.Session(config=config)
        K.set_session(session)

        if input_log_name is not None:
            self._run_single_experiment(input_log_name)
        else:
            for log_name in self._log_names:
                self._run_single_experiment(log_name)
Code Example #2
            # Per-road deltas against the evaluator's cumulative totals; the
            # extracted-length line is inferred from the symmetric reference line.
            total_len_extracted = evaluator.total_len_ext - total_len_extracted_previous
            total_len_reference = evaluator.total_len_ref - total_len_reference_previous
            extra = 'Total distance extracted: {} km\n'.format(
                round(total_len_extracted / 1000, 2))
            extra += 'Total distance reference: {} km\n'.format(
                round(total_len_reference / 1000, 2))
            total_len_extracted_previous = evaluator.total_len_ext
            total_len_reference_previous = evaluator.total_len_ref

            save_results(road_name, statistics, evaluation, extractions,
                         mapping_style, extra)

            # Accumulate this road's statistics into the running totals.
            for k, v in statistics.items():
                if k in total_statistics:
                    total_statistics[k] += v
                else:
                    total_statistics[k] = v

        stop = mapping_tool.stop

# Aggregate and save overall results when more than one segment was evaluated.
if evaluation_count > 1:
    evaluation = evaluator.evaluate_all()

    total_len_extracted = evaluator.total_len_ext
    total_len_reference = evaluator.total_len_ref
    extra = 'Total distance extracted: {} km\n'.format(
        round(total_len_extracted / 1000, 2))
    extra += 'Total distance reference: {} km\n'.format(
        round(total_len_reference / 1000, 2))

    save_results('Total', total_statistics, evaluation, extra_text=extra)
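
For context, the per-road distances in the loop above come from differencing
the evaluator's cumulative length totals; a self-contained sketch of that
pattern, with made-up numbers and hypothetical names:

# Standalone illustration of the cumulative-delta pattern (numbers are made up).
cumulative_totals_m = [1200.0, 3500.0, 4100.0]  # cumulative length in metres after each road
previous_total = 0.0
for total in cumulative_totals_m:
    per_road = total - previous_total           # length contributed by this road alone
    print('Distance: {} km'.format(round(per_road / 1000, 2)))
    previous_total = total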
Code Example #3
        # Per-segment metrics for the current evaluation result.
        print('Completeness: {} %'.format(
            round(100 * evaluation.completeness, 2)))
        print('Correctness: {} %'.format(round(100 * evaluation.correctness,
                                               2)))
        print('Correctness*: {} %'.format(
            round(100 * evaluation.correctness_all, 2)))
        print('Quality: {} %'.format(round(100 * evaluation.quality, 2)))
        print('Quality*: {} %'.format(round(100 * evaluation.quality_all, 2)))
        print('Redundancy: {} %'.format(round(100 * evaluation.redundancy, 2)))
        print('Mean distance: {} m'.format(round(evaluation.mean_distance, 2)))
        print('RMSE: {} m'.format(round(evaluation.rmse, 2)))
        print('------------')

        segments_tested += 1
        # Periodically print cumulative metrics over all segments tested so far.
        if print_intermediate_averages and segments_tested % intermediate_avg_interval == 0:
            eval_total = evaluator.evaluate_all()
            print('------------')
            print('Segments tested: {}'.format(segments_tested))
            print('Completeness: {} %'.format(
                round(100 * eval_total.completeness, 2)))
            print('Correctness: {} %'.format(
                round(100 * eval_total.correctness, 2)))
            print('Correctness*: {} %'.format(
                round(100 * eval_total.correctness_all, 2)))
            print('Quality: {} %'.format(round(100 * eval_total.quality, 2)))
            print('Quality*: {} %'.format(
                round(100 * eval_total.quality_all, 2)))
            print('Redundancy: {} %'.format(
                round(100 * eval_total.redundancy, 2)))
            print('Mean distance: {} m'.format(
                round(eval_total.mean_distance, 2)))