import logging as log
import sys


def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
    try:
        # Parse the command-line arguments (project-local helper; its import is elided here)
        config, models, source, annotations, definitions, extensions, result = build_argparser()
        # Bundle the parsed paths into a single test-parameters object
        test_parameters = parameters(config, models, source, annotations, definitions, extensions)
        # Create the table that collects per-test results
        output_handler = out_hand(result)
        output_handler.create_table()
        # In this variant the accuracy tests always run directly on the host machine
        executor_type = 'host_machine'
        accuracy_check(executor_type, test_parameters, output_handler, log)
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
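# The `parameters` object above is only a bundle of the parsed command-line
# paths that is handed on to accuracy_check(). Its definition is not part of
# this listing; a minimal sketch, assuming a plain data container whose field
# names mirror the call in main() above:
from dataclasses import dataclass


@dataclass
class parameters:
    config: str
    models: str
    source: str
    annotations: str
    definitions: str
    extensions: str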
def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
    try:
        # In this variant the executor type also comes from the command line
        (config, source, annotations, definitions,
         extensions, result_table, executor_type) = build_argparser()
        test_parameters = parameters(source, annotations, definitions, extensions)
        # Expand the configuration file into a concrete list of tests
        test_list = parser.get_test_list(config, test_parameters)
        log.info('Create result table with name: {}'.format(result_table))
        output_handler = out_hand(result_table)
        output_handler.create_table()
        log.info('Start {} accuracy tests'.format(len(test_list)))
        accuracy_check(executor_type, test_list, output_handler, log)
        log.info('End accuracy tests')
        log.info('Work is done!')
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
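# Both main() variants write results through the same handler interface:
# out_hand(path), create_table(), and, in the benchmark entry point further
# below, add_row_to_table(). The class itself is not shown; a minimal
# CSV-backed sketch of that interface, in which everything beyond the method
# names (column set, delimiter, stringified fields) is an assumption:
import csv


class out_hand:
    def __init__(self, table_path):
        self._table_path = table_path

    def create_table(self):
        # Start a fresh table with a header row (column names are illustrative)
        with open(self._table_path, 'w', newline='') as table:
            csv.writer(table, delimiter=';').writerow(['Executor', 'Test', 'Result'])

    def add_row_to_table(self, process_executor, test, test_process):
        # Append one row per completed test; a real handler would pull concrete
        # fields out of these objects instead of stringifying them
        with open(self._table_path, 'a', newline='') as table:
            csv.writer(table, delimiter=';').writerow(
                [str(process_executor), str(test), str(test_process)])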
        # Tail of build_parser(): reject a bad configuration path, then return the parsed arguments
        raise ValueError('Wrong path to configuration file!')
    return config, result, executor_type


def inference_benchmark(executor_type, test_list, output_handler, log):
    # Pick the executor (e.g. 'host_machine') that will run the tests
    process_executor = executor.get_executor(executor_type, log)
    for test in test_list:
        # Wrap each test in a process, run it, and record the outcome
        test_process = process.get_process(test, process_executor, log)
        test_process.execute()
        log.info('Saving test result in file')
        output_handler.add_row_to_table(process_executor, test, test_process)


if __name__ == '__main__':
    try:
        log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
        config, result_table, executor_type = build_parser()
        test_list = config_parser.process_config(config, log)
        log.info('Create result table with name: {}'.format(result_table))
        output_handler = out_hand(result_table)
        output_handler.create_table()
        log.info('Start {} inference tests'.format(len(test_list)))
        inference_benchmark(executor_type, test_list, output_handler, log)
        log.info('End inference tests')
        log.info('Work is done!')
    except Exception as exp:
        log.error(str(exp))
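# Only the tail of build_parser() survives at the top of this listing; the
# function evidently validates the configuration path and returns the parsed
# (config, result, executor_type) triple. A minimal argparse-based
# reconstruction, in which the flag names and the default are assumptions:
import argparse
import os


def build_parser():
    args = argparse.ArgumentParser()
    args.add_argument('-c', '--config', required=True,
                      help='path to the configuration file')      # flag name assumed
    args.add_argument('-r', '--result', required=True,
                      help='path to the result table')            # flag name assumed
    args.add_argument('-e', '--executor_type', default='host_machine',
                      help='environment in which the tests run')  # flag name assumed
    parsed = args.parse_args()
    config = parsed.config
    if not os.path.isfile(config):
        raise ValueError('Wrong path to configuration file!')
    return config, parsed.result, parsed.executor_type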