def __init__(self, proto, model, labelmap_file, gpu_mode, trt_mode, batch_size, resolution, precision):
    """Set up a TensorRT inference wrapper for a Caffe-style detection model.

    Args:
        proto: path to the deploy prototxt describing the network input.
        model: path to the trained model weights.
        labelmap_file: label map path (unused here; kept for interface compatibility).
        gpu_mode: GPU-mode flag (unused here; kept for interface compatibility).
        trt_mode: TensorRT-mode flag (unused here; kept for interface compatibility).
        batch_size: maximum TensorRT engine batch size.
        resolution: optional "WxH" string; when non-empty the prototxt's
            input_shape is rewritten in place, otherwise the resolution is
            read back out of the prototxt.
        precision: 16 or 32 — selects the engine float precision.
    """
    self.batch_size = batch_size
    TRT_PRECISION_TO_DATATYPE = {
        16: trt.DataType.HALF,
        32: trt.DataType.FLOAT
    }
    trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[precision]
    tyolo_model_path = model
    tyolo_deploy_path = proto
    trt_engine_path = PATHS.get_engine_path(trt_engine_datatype, batch_size)
    print("trt_engine_path:", trt_engine_path)
    # exist_ok=True replaces the former bare try/except around makedirs.
    os.makedirs(os.path.dirname(trt_engine_path), exist_ok=True)

    # Override native resolution with command line or prototxt.
    try:
        import re
        if resolution != "":
            wh = resolution.split('x')
            w = int(wh[0])
            h = int(wh[1])
            # 'with' guarantees the file handle is closed even on error.
            with open(tyolo_deploy_path, 'r') as f:
                contents = f.read()
            # Patch height, then width, inside the prototxt input_shape block.
            # NOTE(review): the lookbehind must match the prototxt's exact
            # whitespace — patterns kept byte-identical to the original.
            contents = re.sub(
                '(?<=input: "data"\ninput_shape {\n dim: 1\n dim: 3\n dim: )[0-9]+',
                str(h), contents)
            contents = re.sub(
                '(?<=input: "data"\ninput_shape {\n dim: 1\n dim: 3\n dim: ' + str(h) + '\n dim: )[0-9]+',
                str(w), contents)
            with open(tyolo_deploy_path, 'w') as f:
                f.write(contents)
        else:
            with open(tyolo_deploy_path, 'r') as f:
                contents = f.read()
            # Read height first; it is needed to locate the width field.
            dim = re.search(
                '(?<=input: "data"\ninput_shape {\n dim: 1\n dim: 3\n dim: )[0-9]+',
                contents)
            h = dim.group(0)
            dim = re.search(
                '(?<=input: "data"\ninput_shape {\n dim: 1\n dim: 3\n dim: ' + h + '\n dim: )[0-9]+',
                contents)
            w = int(dim.group(0))
            h = int(h)
        import utils.model as model_utils
        model_utils.ModelData.INPUT_SHAPE = (3, h, w)
    except Exception:
        # Best-effort: a malformed resolution string or an unexpected
        # prototxt layout falls back to the model defaults.
        import logging
        logging.info('Bad resolution, using defaults')

    # Set up all TensorRT data structures needed for inference.
    self.trt_inference_wrapper = inference_utils.TRTInference(
        tyolo_deploy_path, trt_engine_path, tyolo_model_path,
        trt_engine_datatype=trt_engine_datatype,
        batch_size=batch_size)
def parse_commandline_arguments():
    """Parses command line arguments and adjusts internal data structures.

    Returns:
        dict with keys: 'inference_backend', 'max_batch_size',
        'force_inference', 'results_dir', 'trt_engine_path',
        'trt_engine_datatype' (the latter two are None for tensorflow).
    """
    # Define script command line arguments
    parser = argparse.ArgumentParser(
        description='Run object detection evaluation on VOC2007 dataset.')
    parser.add_argument('inference_backend', metavar='INFERENCE_BACKEND',
                        type=str, choices=['tensorrt', 'tensorflow'],
                        default='tensorrt', nargs='?',
                        help='inference backend to run evaluation with')
    parser.add_argument('-p', '--precision', type=int, choices=[32, 16],
                        default=32,
                        help='desired TensorRT float precision to build an engine with')
    parser.add_argument('-b', '--max_batch_size', type=int, default=64,
                        help='max TensorRT engine batch size')
    parser.add_argument('-f', '--force_inference', action='store_true',
                        help='force model inference even if detections exist')
    parser.add_argument('-w', '--workspace_dir',
                        help='sample workspace directory')
    parser.add_argument('-fc', '--flatten_concat',
                        help='path of built FlattenConcat plugin')
    parser.add_argument('-voc', '--voc_dir', help='VOC2007 root directory')

    # Parse arguments passed
    args = parser.parse_args()

    # Adjust global Paths data structure
    adjust_paths(args)

    # Verify Paths after adjustments. This also exits script if verification fails
    PATHS.verify_all_paths(should_verify_voc=True)

    # Fetch directory to save inference results to, create it if it doesn't exist
    trt_engine_datatype = None
    trt_engine_path = None
    if args.inference_backend == 'tensorrt':
        # In case of TensorRT we also fetch engine data type and engine path
        trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
        trt_engine_path = PATHS.get_engine_path(trt_engine_datatype,
                                                args.max_batch_size)
        # exist_ok=True avoids the check-then-create race of the original
        # os.path.exists() + makedirs() pair.
        os.makedirs(os.path.dirname(trt_engine_path), exist_ok=True)
        results_dir = PATHS.get_voc_model_detections_path(
            'tensorrt', trt_engine_datatype)
    elif args.inference_backend == 'tensorflow':
        results_dir = PATHS.get_voc_model_detections_path('tensorflow')
    os.makedirs(results_dir, exist_ok=True)

    # Return parsed arguments for further functions to use
    parsed = {
        'inference_backend': args.inference_backend,
        'max_batch_size': args.max_batch_size,
        'force_inference': args.force_inference,
        'results_dir': results_dir,
        'trt_engine_path': trt_engine_path,
        'trt_engine_datatype': trt_engine_datatype
    }
    return parsed
def parse_commandline_arguments():
    """Parses command line arguments and adjusts internal data structures.

    Returns:
        dict with keys: 'input_img_path', 'max_batch_size',
        'trt_engine_datatype', 'trt_engine_path'.
    """
    # Define script command line arguments
    parser = argparse.ArgumentParser(
        description='Run object detection inference on input image.')
    parser.add_argument('input_img_path', metavar='INPUT_IMG_PATH',
                        help='an image file to run inference on')
    parser.add_argument('-p', '--precision', type=int, choices=[32, 16],
                        default=32,
                        help='desired TensorRT float precision to build an engine with')
    parser.add_argument('-b', '--max_batch_size', type=int, default=1,
                        help='max TensorRT engine batch size')
    parser.add_argument('-w', '--workspace_dir',
                        help='sample workspace directory')
    parser.add_argument('-fc', '--flatten_concat',
                        help='path of built FlattenConcat plugin')

    # Parse arguments passed
    args = parser.parse_args()

    # Set FlattenConcat TRT plugin path and
    # workspace dir path if passed by user
    if args.flatten_concat:
        PATHS.set_flatten_concat_plugin_path(args.flatten_concat)
    if args.workspace_dir:
        PATHS.set_workspace_dir_path(args.workspace_dir)
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + makedirs() pair.
    os.makedirs(PATHS.get_workspace_dir_path(), exist_ok=True)

    # Verify Paths after adjustments. This also exits script if verification fails
    PATHS.verify_all_paths()

    # Fetch TensorRT engine path and datatype
    trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
    trt_engine_path = PATHS.get_engine_path(trt_engine_datatype,
                                            args.max_batch_size)
    os.makedirs(os.path.dirname(trt_engine_path), exist_ok=True)

    parsed = {
        'input_img_path': args.input_img_path,
        'max_batch_size': args.max_batch_size,
        'trt_engine_datatype': trt_engine_datatype,
        'trt_engine_path': trt_engine_path
    }
    return parsed
def parse_commandline_arguments():
    """Parses command line arguments and adjusts internal data structures.

    Returns:
        argparse.Namespace augmented with 'trt_engine_datatype' and
        'trt_engine_path' attributes.
    """
    # Define script command line arguments
    parser = argparse.ArgumentParser(
        description='Run object detection inference on input image.')
    parser.add_argument('input_img_path', metavar='INPUT_IMG_PATH',
                        help='an image file to run inference on')
    parser.add_argument('-p', '--precision', type=int, choices=[32, 16],
                        default=32,
                        help='desired TensorRT float precision to build an engine with')
    parser.add_argument('-b', '--max_batch_size', type=int, default=1,
                        help='max TensorRT engine batch size')
    parser.add_argument('-w', '--workspace_dir',
                        help='sample workspace directory')
    parser.add_argument('-o', '--output',
                        help='path of the output file',
                        default=os.path.join(PATHS.get_sample_root(),
                                             'image_inferred.jpg'))

    # Parse arguments passed
    args = parser.parse_args()

    # Set workspace dir path if passed by user
    if args.workspace_dir:
        PATHS.set_workspace_dir_path(args.workspace_dir)
    # exist_ok=True replaces the former bare try/except: pass, which
    # would also have hidden real failures such as permission errors.
    os.makedirs(PATHS.get_workspace_dir_path(), exist_ok=True)

    # Verify Paths after adjustments. This also exits script if verification fails
    PATHS.verify_all_paths()

    # Fetch TensorRT engine path and datatype
    args.trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
    args.trt_engine_path = PATHS.get_engine_path(args.trt_engine_datatype,
                                                 args.max_batch_size)
    os.makedirs(os.path.dirname(args.trt_engine_path), exist_ok=True)

    return args
def parse_commandline_arguments():
    """Parses command line arguments and adjusts internal data structures.

    Returns:
        argparse.Namespace augmented with 'trt_engine_datatype' and
        'trt_engine_path' attributes.
    """
    # Define script command line arguments
    parser = argparse.ArgumentParser(
        description='Run object detection inference on input image.')
    parser.add_argument('--input_img_path', metavar='INPUT_IMG_PATH',
                        help='an image file to run inference on')
    parser.add_argument('-p', '--precision', type=int, choices=[32, 16, 8],
                        default=32,
                        help='desired TensorRT float precision to build an engine with')
    parser.add_argument('-b', '--max_batch_size', type=int, default=1,
                        help='max TensorRT engine batch size')
    parser.add_argument('-w', '--workspace_dir',
                        help='sample workspace directory')
    parser.add_argument('-fc', '--flatten_concat',
                        help='path of built FlattenConcat plugin')
    parser.add_argument('-d', '--calib_dataset',
                        default='../VOCdevkit/VOC2007/JPEGImages',
                        help='path to the calibration dataset')
    # NOTE(review): with default=True any supplied value arrives as a
    # non-empty string, which is also truthy — so '-c False' still enables
    # the webcam path. Kept as-is for interface compatibility; confirm
    # intended semantics with callers before changing.
    parser.add_argument('-c', '--camera', default=True,
                        help='if True, will run webcam application')

    # Parse arguments passed
    args = parser.parse_args()

    # Set FlattenConcat TRT plugin path and
    # workspace dir path if passed by user
    if args.flatten_concat:
        PATHS.set_flatten_concat_plugin_path(args.flatten_concat)
    if args.workspace_dir:
        PATHS.set_workspace_dir_path(args.workspace_dir)
    # exist_ok=True replaces the former bare try/except: pass, which
    # would also have hidden real failures such as permission errors.
    os.makedirs(PATHS.get_workspace_dir_path(), exist_ok=True)

    # Verify Paths after adjustments. This also exits script if verification fails
    PATHS.verify_all_paths()

    # Fetch TensorRT engine path and datatype
    args.trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
    args.trt_engine_path = PATHS.get_engine_path(args.trt_engine_datatype,
                                                 args.max_batch_size)
    os.makedirs(os.path.dirname(args.trt_engine_path), exist_ok=True)

    return args
def parse_commandline_arguments():
    """Parses command line arguments and adjusts internal data structures.

    Raises:
        ValueError: if no data directory is given via -d/--data or the
            TRT_DATA_DIR environment variable.

    Returns:
        dict with keys: 'inference_backend', 'max_batch_size',
        'force_inference', 'results_dir', 'trt_engine_path',
        'trt_engine_datatype' (the latter two are None for tensorflow).
    """
    # Define script command line arguments
    parser = argparse.ArgumentParser(
        description='Run object detection evaluation on VOC2007 dataset.')
    parser.add_argument('inference_backend', metavar='INFERENCE_BACKEND',
                        type=str, choices=['tensorrt', 'tensorflow'],
                        default='tensorrt', nargs='?',
                        help='inference backend to run evaluation with')
    parser.add_argument('-p', '--precision', type=int, choices=[32, 16],
                        default=32,
                        help='desired TensorRT float precision to build an engine with')
    parser.add_argument('-b', '--max_batch_size', type=int, default=64,
                        help='max TensorRT engine batch size')
    parser.add_argument('-f', '--force_inference', action='store_true',
                        help='force model inference even if detections exist')
    parser.add_argument('-w', '--workspace_dir',
                        help='sample workspace directory')
    parser.add_argument(
        '-d', '--data',
        help=
        "Specify the data directory where it is saved in. $TRT_DATA_DIR will be overwritten by this argument."
    )
    # parse_known_args: unknown flags are tolerated (shared CLI prefix).
    args, _ = parser.parse_known_args()

    # Explicit -d/--data wins over the TRT_DATA_DIR environment variable.
    data_dir = os.environ.get('TRT_DATA_DIR',
                              None) if args.data is None else args.data
    if data_dir is None:
        raise ValueError(
            "Data directory must be specified by either `-d $DATA` or environment variable $TRT_DATA_DIR."
        )
    adjust_paths(args, data_dir)

    extract_voc_data_if_needed()

    # Verify Paths after adjustments. This also exits script if verification fails
    PATHS.verify_all_paths(should_verify_voc=True)

    # Fetch directory to save inference results to, create it if it doesn't exist
    trt_engine_datatype = None
    trt_engine_path = None
    if args.inference_backend == 'tensorrt':
        # In case of TensorRT we also fetch engine data type and engine path
        trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
        trt_engine_path = PATHS.get_engine_path(trt_engine_datatype,
                                                args.max_batch_size)
        # exist_ok=True avoids the check-then-create race of the original
        # os.path.exists() + makedirs() pair.
        os.makedirs(os.path.dirname(trt_engine_path), exist_ok=True)
        results_dir = PATHS.get_voc_model_detections_path(
            'tensorrt', trt_engine_datatype)
    elif args.inference_backend == 'tensorflow':
        results_dir = PATHS.get_voc_model_detections_path('tensorflow')
    os.makedirs(results_dir, exist_ok=True)

    # Return parsed arguments for further functions to use
    parsed = {
        'inference_backend': args.inference_backend,
        'max_batch_size': args.max_batch_size,
        'force_inference': args.force_inference,
        'results_dir': results_dir,
        'trt_engine_path': trt_engine_path,
        'trt_engine_datatype': trt_engine_datatype
    }
    return parsed
def parse_commandline_arguments():
    """Parses command line arguments and adjusts internal data structures.

    Raises:
        ValueError: if no data directory is given via -d/--data or the
            TRT_DATA_DIR environment variable.

    Returns:
        argparse.Namespace augmented with 'trt_engine_datatype' and
        'trt_engine_path' attributes.
    """
    # Define script command line arguments
    parser = argparse.ArgumentParser(
        description='Run object detection inference on input image.')
    parser.add_argument('input_img_path', metavar='INPUT_IMG_PATH',
                        help='an image file to run inference on')
    parser.add_argument('-p', '--precision', type=int, choices=[32, 16],
                        default=32,
                        help='desired TensorRT float precision to build an engine with')
    parser.add_argument('-b', '--max_batch_size', type=int, default=1,
                        help='max TensorRT engine batch size')
    parser.add_argument('-w', '--workspace_dir',
                        help='sample workspace directory')
    parser.add_argument('-o', '--output',
                        help='path of the output file',
                        default=os.path.join(PATHS.get_sample_root(),
                                             'image_inferred.jpg'))
    parser.add_argument(
        '-d', '--data',
        help=
        "Specify the data directory where it is saved in. $TRT_DATA_DIR will be overwritten by this argument."
    )
    # parse_known_args: unknown flags are tolerated (shared CLI prefix).
    args, _ = parser.parse_known_args()

    # Explicit -d/--data wins over the TRT_DATA_DIR environment variable.
    data_dir = os.environ.get('TRT_DATA_DIR',
                              None) if args.data is None else args.data
    if data_dir is None:
        raise ValueError(
            "Data directory must be specified by either `-d $DATA` or environment variable $TRT_DATA_DIR."
        )
    PATHS.set_data_dir_path(data_dir)

    # Set workspace dir path if passed by user
    if args.workspace_dir:
        PATHS.set_workspace_dir_path(args.workspace_dir)
    # exist_ok=True replaces the former bare try/except: pass, which
    # would also have hidden real failures such as permission errors.
    os.makedirs(PATHS.get_workspace_dir_path(), exist_ok=True)

    # Verify Paths after adjustments. This also exits script if verification fails
    PATHS.verify_all_paths()

    # Fetch TensorRT engine path and datatype
    args.trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
    args.trt_engine_path = PATHS.get_engine_path(args.trt_engine_datatype,
                                                 args.max_batch_size)
    os.makedirs(os.path.dirname(args.trt_engine_path), exist_ok=True)

    return args