Example #1
 def test_evaluate(self):
     evaluator = Evaluator(model_dir=self.model_dir,
                           input_shape=self.input_shape)
     BAR, EAR = evaluator.evaluate(basic_model=self.basic_model,
                                   evaluate_model=self.evaluate_model,
                                   valid_stocks=self.valid_stocks,
                                   rounds=3)
     print(BAR, EAR)
     self.assertNotEqual(BAR, EAR)
Example #2
def prec_recall(data, gt):
    search_engine = SearchEngine(data)

    print('\n> Running Evaluation...\n', end='')
    evaluator = Evaluator(search_engine, gt)
    prec, avg_prec_recall = evaluator.evaluate()

    mkdir(EVALUATION_PATH)
    save_to_csv(prec, os.path.join(EVALUATION_PATH, 'precision.csv'))
    save_to_csv(avg_prec_recall, os.path.join(EVALUATION_PATH, 'avg_prec_recall.csv'), index=True)
    print('\n Results of evaluation saved to directory "%s"' % os.path.relpath(EVALUATION_PATH, PROJ_ROOT))
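The snippet above relies on a save_to_csv helper that is not shown. Below is a minimal sketch of what such a helper could look like, assuming pandas is available and that prec / avg_prec_recall are objects pandas can turn into a DataFrame; only the name and the index parameter come from the call sites above, the body is hypothetical:

import pandas as pd

def save_to_csv(results, path, index=False):
    # Hypothetical helper: coerce the results into a DataFrame and write them out as CSV.
    df = results if isinstance(results, pd.DataFrame) else pd.DataFrame(results)
    df.to_csv(path, index=index)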
Example #3
def eval_run_func(params):
    from evaluation.evaluator import Evaluator

    # get input parameters
    model_dir = params['model_dir']
    basic_model = params['basic_model']
    evaluate_model = params['evaluate_model']
    input_shape = params['input_shape']
    rounds = params['rounds']
    valid_stocks = params['valid_stocks']
    _evaluator = Evaluator(model_dir=model_dir, input_shape=input_shape)
    BAR, EAR = _evaluator.evaluate(basic_model, evaluate_model, valid_stocks,
                                   rounds)
    return BAR, EAR
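Because eval_run_func reads everything from a single params dict, a caller only needs to assemble that dict. A minimal, hypothetical invocation follows; the key names come from the function body above, all values are placeholders:

params = {
    'model_dir': 'models/',              # placeholder path
    'basic_model': 'baseline_model',     # placeholder identifier
    'evaluate_model': 'candidate_model', # placeholder identifier
    'input_shape': (30, 5),              # placeholder shape
    'rounds': 3,
    'valid_stocks': ['AAPL', 'MSFT'],    # placeholder tickers
}
BAR, EAR = eval_run_func(params)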
Example #4
        for x, y in buffered_points_ref:
            img_ref[y][x] = (0, 0.7, 0)

        mapping_tool = start_mapping(img, img_ref, mapping_style)
        statistics = mapping_tool.get_statistics()
        extractions = mapping_tool.extractions

        points = []
        for extraction in extractions:
            points.extend(extraction.get_pixels())

        if len(points) > 0:
            evaluation_count += 1
            mpp = meter_per_pixel(center.lat, zoom_level)
            evaluation, _ = evaluator.evaluate(points, points_ref, points_ref,
                                               buffer_width, mpp)

            total_len_extracted = evaluator.total_len_ext - total_len_extracted_previous
            total_len_reference = evaluator.total_len_ref - total_len_reference_previous
            extra = 'Total distance extracted: {} km\n'.format(
                round(total_len_extracted / 1000, 2))
            extra += 'Total distance reference: {} km\n'.format(
                round(total_len_reference / 1000, 2))
            total_len_extracted_previous = evaluator.total_len_ext
            total_len_reference_previous = evaluator.total_len_ref

            save_results(road_name, statistics, evaluation, extractions,
                         mapping_style, extra)

            for k, v in statistics.items():
                if k in total_statistics:
Example #5
                fig, ax = plt.subplots()
                ax.plot(ex, ey, 'r-')
                ax.plot(sx, sy, 'b-')
                ax.plot(rx, ry, 'g-')
                plt.show()

        all_references = set()
        for road_ref in roads:
            road_pixels = road_ref.pixels(size, zoom_level, center=center)
            for x, y in road_pixels:
                if 0 <= x < len(img[0]) and 0 <= y < len(img):
                    all_references.add((x, y))

        evaluation, matched_ext = evaluator.evaluate(smoothed_extraction,
                                                     reference, all_references,
                                                     buffer_width, mpp)

        print('------------')
        print('Completeness: {} %'.format(
            round(100 * evaluation.completeness, 2)))
        print('Correctness: {} %'.format(round(100 * evaluation.correctness,
                                               2)))
        print('Correctness*: {} %'.format(
            round(100 * evaluation.correctness_all, 2)))
        print('Quality: {} %'.format(round(100 * evaluation.quality, 2)))
        print('Quality*: {} %'.format(round(100 * evaluation.quality_all, 2)))
        print('Redundancy: {} %'.format(round(100 * evaluation.redundancy, 2)))
        print('Mean distance: {} m'.format(round(evaluation.mean_distance, 2)))
        print('RMSE: {} m'.format(round(evaluation.rmse, 2)))
        print('------------')
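For orientation, and only assuming the Evaluator follows the conventional buffer-based road-extraction scores (the snippet does not confirm this), the printed ratios are usually defined in terms of matched and total lengths:

# Hypothetical, conventional definitions (not taken from the snippet):
# completeness = matched_reference_len / total_reference_len
# correctness  = matched_extraction_len / total_extraction_len
# quality      = matched_extraction_len / (total_extraction_len + unmatched_reference_len)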