def run(model, config_file):
    global nn, pre_process, post_process

    filename, file_extension = os.path.splitext(model)
    supported_files = ['.so', '.pb']

    if file_extension not in supported_files:
        raise Exception("""
            Unknown file type. Got %s%s.
            Please check the model file (-m).
            Only .pb (protocol buffer) or .so (shared object) file is supported.
            """ % (filename, file_extension))

    config = load_yaml(config_file)
    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)

    if file_extension == '.so':  # Shared library
        nn = NNLib()
        nn.load(model)
    elif file_extension == '.pb':  # Protocol Buffer file
        # only load tensorflow if user wants to use GPU
        from lmnet.tensorflow_graph_runner import TensorflowGraphRunner
        nn = TensorflowGraphRunner(model)

    run_impl(config)
def run(model, config_file):
    global nn, pre_process, post_process

    filename, file_extension = os.path.splitext(model)
    supported_files = ['.so', '.pb']

    if file_extension not in supported_files:
        raise Exception("""
            Unknown file type. Got %s%s.
            Please check the model file (-m).
            Only .pb (protocol buffer) or .so (shared object) file is supported.
            """ % (filename, file_extension))

    config = load_yaml(config_file)
    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)

    if file_extension == '.so':  # Shared library
        nn = NNLib()
        nn.load(model)
    elif file_extension == '.pb':  # Protocol Buffer file
        # only load tensorflow if user wants to use GPU
        from lmnet.tensorflow_graph_runner import TensorflowGraphRunner
        nn = TensorflowGraphRunner(model)

    if config.TASK == "IMAGE.CLASSIFICATION":
        run_classification(config)

    if config.TASK == "IMAGE.OBJECT_DETECTION":
        run_object_detection(config)

    if config.TASK == "IMAGE.SEMANTIC_SEGMENTATION":
        run_sementic_segmentation(config)
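# A minimal sketch of how the dispatching run(model, config_file) above could be
# wired to a command line, assuming argparse. The "-m" flag mirrors the hint in
# the error message; the "-c" flag and the help strings are assumptions for
# illustration, not the project's actual CLI.
import argparse


def main():
    parser = argparse.ArgumentParser(description="Run inference with a .so or .pb model")
    parser.add_argument("-m", "--model", required=True, help="path to a .so or .pb model file")
    parser.add_argument("-c", "--config_file", required=True, help="path to the YAML config")
    args = parser.parse_args()
    run(args.model, args.config_file)


if __name__ == "__main__":
    main()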
def run_prediction(input_image, model, config_file, max_percent_incorrect_values=0.1, trial=1):
    if not input_image or not model or not config_file:
        logger.error('Please check usage with --help option')
        exit(1)

    config = load_yaml(config_file)

    # load the image
    image_data = load_image(input_image)
    raw_image = image_data

    # pre process for image
    image_data, bench_pre = _timerfunc(_pre_process, (image_data, config.PRE_PROCESSOR, config.DATA_FORMAT), trial)

    # add the batch dimension
    image_data = np.expand_dims(image_data, axis=0)

    # run the model to inference
    output, bench_inference = _timerfunc(_run, (model, image_data, config), trial)

    logger.info('Output: (before post process)\n{}'.format(output))

    # post process for output
    output, bench_post = _timerfunc(_post_process, (output, config.POST_PROCESSOR), trial)

    logger.info('Output: (after post process)\n{}'.format(output))

    # json output
    json_output = JsonOutput(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
        data_format=config.DATA_FORMAT,
        bench={
            "total": (bench_pre + bench_post + bench_inference) / trial,
            "pre": bench_pre / trial,
            "post": bench_post / trial,
            "inference": bench_inference / trial,
        },
    )

    image_from_json = ImageFromJson(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
    )

    output_dir = "output"
    outputs = output
    raw_images = [raw_image]
    image_files = [input_image]

    json_obj = json_output(outputs, raw_images, image_files)
    _save_json(output_dir, json_obj)

    filename_images = image_from_json(json_obj, raw_images, image_files)
    _save_images(output_dir, filename_images)

    logger.info(
        "Benchmark avg result(sec) for {} trials: pre_process: {} inference: {} post_process: {} Total: {}".format(
            trial,
            bench_pre / trial,
            bench_inference / trial,
            bench_post / trial,
            (bench_pre + bench_post + bench_inference) / trial,
        )
    )
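# run_prediction above assumes a _timerfunc helper that calls a function `trial`
# times and returns the last result together with the accumulated wall-clock time
# (the caller divides by `trial` to report averages). This is a hypothetical
# sketch of such a helper using time.perf_counter; the real implementation may differ.
import time


def _timerfunc(func, args, trial=1):
    start = time.perf_counter()
    for _ in range(trial):
        result = func(*args)
    elapsed = time.perf_counter() - start
    # total elapsed time over all trials; averaging is left to the caller
    return result, elapsed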
def run(model, config_file, port=80):
    global nn, pre_process, post_process, config, stream, pool

    filename, file_extension = os.path.splitext(model)
    supported_files = ['.so', '.pb']

    if file_extension not in supported_files:
        raise Exception("""
            Unknown file type. Got %s%s.
            Please check the model file (-m).
            Only .pb (protocol buffer) or .so (shared object) file is supported.
            """ % (filename, file_extension))

    if file_extension == '.so':  # Shared library
        nn = NNLib()
        nn.load(model)
    elif file_extension == '.pb':  # Protocol Buffer file
        # only load tensorflow if user wants to use GPU
        from lmnet.tensorflow_graph_runner import TensorflowGraphRunner
        nn = TensorflowGraphRunner(model)

    stream = VideoStream(CAMERA_SOURCE, CAMERA_WIDTH, CAMERA_HEIGHT, CAMERA_FPS)

    config = load_yaml(config_file)
    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)

    pool = Pool(processes=1, initializer=_init_worker)

    try:
        server = ThreadedHTTPServer(('', port), MotionJpegHandler)
        print("server starting")
        server.serve_forever()
    except KeyboardInterrupt as e:
        print("KeyboardInterrupt in server - ending server")
        stream.release()
        pool.terminate()
        pool.join()
        server.socket.close()
        server.shutdown()

    return
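# The streaming demo above assumes a ThreadedHTTPServer class. A common way to
# build one with only the standard library is to mix ThreadingMixIn into
# HTTPServer, as sketched here. MotionJpegHandler (a BaseHTTPRequestHandler that
# writes multipart JPEG frames) is defined elsewhere in the original code and is
# not reproduced here.
from http.server import HTTPServer
from socketserver import ThreadingMixIn


class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle each HTTP request in its own thread so one slow MJPEG client
    does not block the others."""
    daemon_threads = True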
def run(library, config_file):
    global nn, pre_process, post_process

    nn = NNLib()
    nn.load(library)
    nn.init()

    config = load_yaml(config_file)

    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)

    if config.TASK == "IMAGE.CLASSIFICATION":
        run_classification(config)

    if config.TASK == "IMAGE.OBJECT_DETECTION":
        run_object_detection(config)
def run_prediction(input_image, model, config_file, max_percent_incorrect_values=0.1):
    if not input_image or not model or not config_file:
        print('Please check usage with --help option')
        exit(1)

    config = load_yaml(config_file)

    # run the model
    output, raw_image = _run(model, input_image, config)

    print('Output: (before post process)')
    print(output)

    # post process for output
    output = _post_process(output, config.POST_PROCESSOR)

    print('Output: ')
    print(output)

    # json output
    json_output = JsonOutput(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
        data_format=config.DATA_FORMAT,
    )

    image_from_json = ImageFromJson(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
    )

    output_dir = "output"
    outputs = output
    raw_images = [raw_image]
    image_files = [input_image]

    json_obj = json_output(outputs, raw_images, image_files)
    _save_json(output_dir, json_obj)

    filename_images = image_from_json(json_obj, raw_images, image_files)
    _save_images(output_dir, filename_images)
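# Hypothetical invocation of run_prediction above; the file paths are placeholders,
# not files shipped with the project. Results are written to the "output" directory
# by _save_json and _save_images.
if __name__ == '__main__':
    run_prediction('input.jpg', 'model.so', 'config.yaml')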