Example #1
def test_onnx(self):
    # Export encoder and decoder to ONNX if needed, then check that the
    # ONNX metric is not worse than the expected target metric.
    self.exporter.export_to_onnx_model_if_not_yet(model=self.encoder_name, model_type='encoder')
    self.exporter.export_to_onnx_model_if_not_yet(model=self.decoder_name, model_type='decoder')
    evaluator = Evaluator(deepcopy(self.config), RunnerType.ONNX)
    metric_onnx = evaluator.validate()
    target_metric = evaluator.expected_outputs.get('target_metric')
    self.assertGreaterEqual(metric_onnx, target_metric)
Example #2
def test_run_ir_model(self):
    # Skip unless IR export is enabled, then export to OpenVINO IR and
    # check the metric against the expected target.
    if not self.config.get('export_ir'):
        return
    self.exporter.export_to_ir_model_if_not_yet(model=self.res_model_name, model_type=None)
    evaluator = Evaluator(deepcopy(self.config), RunnerType.OpenVINO)
    ir_metric = evaluator.validate()
    target_metric = evaluator.expected_outputs.get('target_metric')
    self.assertGreaterEqual(ir_metric, target_metric)
Example #3
@classmethod
def setUpClass(cls):
    # Build the evaluation config, download the checkpoint if it is missing,
    # and create a shared Evaluator for all tests in the class.
    test_config = get_config(config_file, section='eval')
    cls.config = test_config
    cls.config.update({'expected_outputs': expected_outputs})
    if not os.path.exists(cls.config.get("model_path")):
        download_checkpoint(cls.config.get("model_path"),
                            cls.config.get("model_url"))
    cls.validator = Evaluator(config=cls.config)
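The snippet above shows only the class-level fixture. For context, a minimal sketch of how such a setUpClass could sit inside a unittest.TestCase subclass follows; the class name, config path, and expected_outputs value are illustrative assumptions rather than values from the original project, and the checkpoint-download step is omitted for brevity.

    import unittest

    from text_recognition.utils.get_config import get_config
    from text_recognition.utils.evaluator import Evaluator


    class EvaluatorSmokeTest(unittest.TestCase):  # hypothetical class name
        @classmethod
        def setUpClass(cls):
            # Hypothetical config path and expected metric, for illustration only.
            config_file = 'configs/eval.yml'
            expected_outputs = {'target_metric': 0.0}
            cls.config = get_config(config_file, section='eval')
            cls.config.update({'expected_outputs': expected_outputs})
            cls.validator = Evaluator(config=cls.config)

        def test_metric_reaches_target(self):
            # The computed metric should be at least the expected target.
            metric = self.validator.validate()
            target = self.config['expected_outputs']['target_metric']
            self.assertGreaterEqual(metric, target)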
Example #4
def test_onnx(self):
    # Export the complete model to ONNX and check the metric against the target.
    self.exporter.export_complete_model()
    evaluator = Evaluator(deepcopy(self.config), RunnerType.ONNX)
    metric_onnx = evaluator.validate()
    target_metric = evaluator.expected_outputs.get('target_metric')
    self.assertGreaterEqual(metric_onnx, target_metric)
Example #5
import argparse
import os

from text_recognition.utils.get_config import get_config
from text_recognition.utils.evaluator import Evaluator


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config')
    return parser.parse_args()


if __name__ == '__main__':
    arguments = parse_args()
    test_config = get_config(arguments.config, section='eval')
    validator = Evaluator(test_config)
    if 'model_folder' in test_config:
        # Evaluate every checkpoint in the folder and keep the best one.
        model_folder = test_config.get('model_folder')
        best_model, best_result = None, 0
        for model in os.listdir(model_folder):
            validator.runner.reload_model(os.path.join(model_folder, model))
            result = validator.validate()
            if result > best_result:
                best_result = result
                best_model = os.path.join(model_folder, model)
        print('model = {}'.format(best_model))
        result = best_result
    else:
        result = validator.validate()
    print('Result metric is: {}'.format(result))