def main():
    """Run synchronous OpenVINO inference.

    Parses command-line arguments, builds the inference-engine network,
    prepares input through the IO adapter, runs ``infer_sync`` for the
    requested number of iterations, and reports timing/latency/FPS.
    Exits with status 1 on any failure.
    """
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    try:
        model_wrapper = openvino_io_model_wrapper()
        data_transformer = transformer()
        # Renamed from 'io' to avoid shadowing the stdlib 'io' module.
        io_handler = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        # nstreams is passed as None: stream count is irrelevant in 'sync' mode.
        iecore = utils.create_ie_core(args.extension, args.cldnn_config, args.device,
                                      args.nthreads, None, args.dump, 'sync', log)
        net = utils.create_network(iecore, args.model_xml, args.model_bin, log)
        input_shapes = utils.get_input_shape(model_wrapper, net)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        utils.reshape_input(net, args.batch_size)
        log.info('Prepare input data')
        io_handler.prepare_input(net, args.input)
        log.info('Create executable network')
        exec_net = utils.load_network(iecore, net, args.device, args.priority, 1)
        log.info('Starting inference ({} iterations) on {}'.format(args.number_iter, args.device))
        # Renamed from 'time' to avoid shadowing the stdlib 'time' module.
        result, inference_time = infer_sync(exec_net, args.number_iter,
                                            io_handler.get_slice_input)
        average_time, latency, fps = process_result(inference_time, args.batch_size,
                                                    args.mininfer)
        if not args.raw_output:
            io_handler.process_output(result, log)
            result_output(average_time, fps, latency, log)
        else:
            raw_result_output(average_time, fps, latency)
        # Release inference-engine objects explicitly, in dependency order.
        del net
        del exec_net
        del iecore
    except Exception as ex:
        # Top-level boundary: report the error and signal failure to the shell.
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
def main():
    """Run asynchronous OpenVINO inference.

    Parses command-line arguments, builds and configures the model,
    prepares input through the IO adapter, runs ``infer_async`` with the
    requested number of parallel requests, and reports timing/FPS.
    Exits with status 1 on any failure.
    """
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO, stream=sys.stdout)
    args = build_parser().parse_args()
    try:
        model_wrapper = openvino_io_model_wrapper()
        data_transformer = openvino_transformer()
        # Renamed from 'io' to avoid shadowing the stdlib 'io' module.
        io_handler = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        core = utils.create_core(args.extension, args.intel_gpu_config, args.device,
                                 args.nthreads, args.nstreams, args.dump, 'async', log)
        model = utils.create_model(core, args.model_xml, args.model_bin, log)
        utils.configure_model(core, model, args.device, args.default_device, args.affinity)
        input_shapes = utils.get_input_shape(model_wrapper, model)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        utils.reshape_input(model, args.batch_size)
        log.info('Prepare input data')
        io_handler.prepare_input(model, args.input)
        log.info('Create executable network')
        compiled_model = utils.compile_model(core, model, args.device, args.priority)
        log.info('Starting inference ({} iterations) with {} requests on {}'.format(
            args.number_iter, args.requests, args.device))
        # Renamed from 'time' to avoid shadowing the stdlib 'time' module.
        result, inference_time = infer_async(compiled_model, args.number_iter,
                                             args.requests, io_handler.get_slice_input)
        average_time, fps = process_result(inference_time, args.batch_size,
                                           args.number_iter)
        if not args.raw_output:
            io_handler.process_output(result, log)
            result_output(average_time, fps, log)
        else:
            raw_result_output(average_time, fps)
        # Release runtime objects explicitly, in dependency order.
        del model
        del compiled_model
        del core
    except Exception as ex:
        # Top-level boundary: report the error and signal failure to the shell.
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)