Example #1
def build_trt_pb(model_name, pb_path, download_dir='data'):
    """Build TRT model from the original TF model, and save the graph
    into a pb file for faster access in the future.

    The code was mostly taken from the following example by NVIDIA.
    https://github.com/NVIDIA-Jetson/tf_trt_models/blob/master/examples/detection/detection.ipynb
    """
    from tf_trt_models.detection import download_detection_model
    from tf_trt_models.detection import build_detection_graph
    from utils.egohands_models import get_egohands_model

    if 'coco' in model_name:
        config_path, checkpoint_path = \
            download_detection_model(model_name, download_dir)
    else:
        config_path, checkpoint_path = \
            get_egohands_model(model_name)
    frozen_graph_def, input_names, output_names = build_detection_graph(
        config=config_path, checkpoint=checkpoint_path)
    trt_graph_def = trt.create_inference_graph(
        input_graph_def=frozen_graph_def,
        outputs=output_names,
        max_batch_size=1,
        max_workspace_size_bytes=1 << 26,
        precision_mode='FP16',
        minimum_segment_size=50)
    with open(pb_path, 'wb') as pf:
        pf.write(trt_graph_def.SerializeToString())
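
For symmetry, the saved pb can later be loaded back without rebuilding. A minimal sketch, assuming TF 1.x and the same pb_path (the helper name is hypothetical):

def load_trt_pb(pb_path):
    # Hypothetical helper: read the serialized TRT-optimized GraphDef back from disk
    import tensorflow as tf
    trt_graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as f:
        trt_graph_def.ParseFromString(f.read())
    return trt_graph_def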
Example #2
def obj_det_graph(obj_model, obj_model_dir, trt_graph_path):
    make_new = 'obj' in sys.argv
    if os.path.exists(trt_graph_path) and not make_new:
        trt_graph = tf.GraphDef()
        with open(trt_graph_path, 'rb') as f:
            trt_graph.ParseFromString(f.read())
    else:
        config_path, checkpoint_path = download_detection_model(
            obj_model, obj_model_dir)
        frozen_graph, input_names, output_names = build_detection_graph(
            config=config_path, checkpoint=checkpoint_path)
        print("Making a TRT graph for the object detection model")
        trt_graph = trt.create_inference_graph(
            input_graph_def=frozen_graph,
            outputs=output_names,
            max_batch_size=1,
            max_workspace_size_bytes=1 << 26,
            precision_mode='FP32',
            minimum_segment_size=50)
        with open(trt_graph_path, 'wb') as f:
            f.write(trt_graph.SerializeToString())

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_sess = tf.Session(config=tf_config)

    tf.import_graph_def(trt_graph, name=OBJ_PREFIX)
    tf_input = tf_sess.graph.get_tensor_by_name(OBJ_PREFIX + '/input:0')
    tf_scores = tf_sess.graph.get_tensor_by_name(OBJ_PREFIX + '/scores:0')
    tf_boxes = tf_sess.graph.get_tensor_by_name(OBJ_PREFIX + '/boxes:0')
    tf_classes = tf_sess.graph.get_tensor_by_name(OBJ_PREFIX + '/classes:0')
    return tf_sess, tf_scores, tf_boxes, tf_classes, tf_input
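
A usage sketch for the handles returned above, assuming an HxWx3 uint8 array named image (the model name and paths below are placeholders):

sess, tf_scores, tf_boxes, tf_classes, tf_input = obj_det_graph(
    'ssd_mobilenet_v1_coco', 'data', 'data/ssd_mobilenet_v1_coco_trt.pb')
scores, boxes, classes = sess.run(
    [tf_scores, tf_boxes, tf_classes],
    feed_dict={tf_input: image[None, ...]})  # prepend a batch dimension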
Example #3
    def build_trt_graph(self):
        MODEL = self.cfg['model']
        PRECISION_MODE = self.cfg['precision_model']
        CONFIG_FILE = "data/" + MODEL + '.config'  # ./data/ssd_inception_v2_coco.config
        CHECKPOINT_FILE = 'data/' + MODEL + '/model.ckpt'  # ./data/ssd_inception_v2_coco/model.ckpt
        FROZEN_MODEL_NAME = MODEL + '_trt_' + PRECISION_MODE + '.pb'
        TRT_MODEL_DIR = 'data'
        LOGDIR = 'logs/' + MODEL + '_trt_' + PRECISION_MODE

        # Download the model; the returned paths should match CONFIG_FILE / CHECKPOINT_FILE above.
        config_path, checkpoint_path = download_detection_model(MODEL, 'data')

        frozen_graph_def, input_names, output_names = build_detection_graph(
            config=CONFIG_FILE, checkpoint=CHECKPOINT_FILE)

        tf.reset_default_graph()
        trt_graph_def = trt.create_inference_graph(
            input_graph_def=frozen_graph_def,
            outputs=output_names,
            max_batch_size=1,
            max_workspace_size_bytes=1 << 25,
            precision_mode=PRECISION_MODE,
            minimum_segment_size=50)
        tf.train.write_graph(trt_graph_def,
                             TRT_MODEL_DIR,
                             FROZEN_MODEL_NAME,
                             as_text=False)

        train_writer = tf.summary.FileWriter(LOGDIR)
        train_writer.add_graph(tf.get_default_graph())
        train_writer.flush()
        train_writer.close()

        return trt_graph_def
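
The summary written by the FileWriter can then be inspected with TensorBoard, e.g. (assuming the model and precision from the comments above):

tensorboard --logdir logs/ssd_inception_v2_coco_trt_FP16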
Example #4
def load_model(model_name):

    # Download and load the model
    config_path, checkpoint_path = download_detection_model(
        model_name, './models/')

    tr_graph, input_names, output_names = build_detection_graph(
        config=config_path, checkpoint=checkpoint_path)

    print('Input names: {}'.format(input_names))
    print('Output names: {}'.format(output_names))

    return tr_graph
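
Note that build_detection_graph only freezes the TF graph; this function does not run the TRT conversion. A usage sketch for the returned graph, assuming the standard TF Object Detection API tensor names (the model name is a placeholder):

graph_def = load_model('ssd_mobilenet_v2_coco')  # placeholder model name
detection_graph = tf.Graph()
with detection_graph.as_default():
    tf.import_graph_def(graph_def, name='')
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')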
Example #5
    def build_trt_graph(self):
        MODEL             = self.cfg['model']
        PRECISION_MODE    = self.cfg['precision_model']
        CONFIG_FILE       = "data/" + MODEL + '.config'   # ./data/ssd_inception_v2_coco.config 
        CHECKPOINT_FILE   = 'data/' + MODEL + '/model.ckpt'    # ./data/ssd_inception_v2_coco/model.ckpt
        FROZEN_MODEL_NAME = MODEL+'_trt_' + PRECISION_MODE + '.pb'
        TRT_MODEL_DIR     = 'data'
        LOGDIR            = 'logs/' + MODEL + '_trt_' + PRECISION_MODE

        if not os.path.exists(os.path.join(TRT_MODEL_DIR, FROZEN_MODEL_NAME)):
            config_path, checkpoint_path = download_detection_model(MODEL, 'data')
    
            frozen_graph_def, _, _ = build_detection_graph(
                config=config_path,
                checkpoint=checkpoint_path,
                score_threshold=0.5,
                force_nms_cpu=False)
    
            tf.reset_default_graph()
            trt_graph_def = trt.create_inference_graph(
                input_graph_def=frozen_graph_def,
                outputs=get_output_names(MODEL),
                max_batch_size=1,
                max_workspace_size_bytes=1 << 30,
                precision_mode=PRECISION_MODE,
                minimum_segment_size=50
            )
#            tf.train.write_graph(trt_graph_def, TRT_MODEL_DIR,
#                                 FROZEN_MODEL_NAME, as_text=False)
#    
#            train_writer = tf.summary.FileWriter(LOGDIR)
#            train_writer.add_graph(tf.get_default_graph())
#            train_writer.flush()
#            train_writer.close()
            with open(os.path.join(TRT_MODEL_DIR, FROZEN_MODEL_NAME), 'wb') as f:
                f.write(trt_graph_def.SerializeToString())
        else:
            print("It Works")
            trt_graph_def = tf.GraphDef()
            with tf.gfile.GFile(os.path.join(TRT_MODEL_DIR, FROZEN_MODEL_NAME), 'rb') as f:
                trt_graph_def.ParseFromString(f.read())
        

        return trt_graph_def
Example #6
def createModel(config_path, checkpoint_path, graph_path):
    """ Create a TensorRT Model.
    config_path (string) - The path to the model config file.
    checkpoint_path (string) - The path to the model checkpoint file(s).
    graph_path (string) - The path to the model graph.
    returns (Model) - The TRT model built or loaded from the input files.
    """

    global build_graph, prev_classes

    trt_graph = None
    input_names = None

    if build_graph:
        frozen_graph, input_names, output_names = build_detection_graph(
            config=config_path, checkpoint=checkpoint_path)

        trt_graph = trt.create_inference_graph(
            input_graph_def=frozen_graph,
            outputs=output_names,
            max_batch_size=1,
            max_workspace_size_bytes=1 << 25,
            precision_mode='FP16',
            minimum_segment_size=50)

        with open(graph_path, 'wb') as f:
            f.write(trt_graph.SerializeToString())

        with open('config.txt', 'r+') as json_file:
            data = json.load(json_file)
            data['model'] = [{'input_names': input_names}]
            json_file.seek(0)
            json_file.truncate()
            json.dump(data, json_file)

    else:
        with open(graph_path, 'rb') as f:
            trt_graph = tf.GraphDef()
            trt_graph.ParseFromString(f.read())
        with open('config.txt') as json_file:
            data = json.load(json_file)
            input_names = data['model'][0]['input_names']

    return Model(trt_graph, input_names)
Example #7
def loadCheckpoint(model_path, write_nodes):
    ''' Load a graph model from a training checkpoint. '''
    model_path = os.path.join(MODELS_DIR, model_path)
    if not os.path.exists(model_path):
        cprint.fatal(f'Error: the path {model_path} does not exist.', interrupt=True)

    config_file = os.path.join(model_path, 'pipeline.config')
    if not os.path.isfile(config_file):
        cprint.fatal(f'Error: the config file {config_file} does not exist.', interrupt=True)

    checkpoint_file = os.path.join(model_path, 'model.ckpt')
    if not os.path.isfile(checkpoint_file + '.meta'):
        cprint.fatal(f'Error: the checkpoint file {checkpoint_file} does not exist.', interrupt=True)

    graph_def, input_names, output_names = build_detection_graph(config=config_file, checkpoint=checkpoint_file,
                                                                 score_threshold=0.3, batch_size=1,
                                                                 force_nms_cpu=FORCE_NMS_CPU)
    if write_nodes:
        writeNodes(model_path, graph_def)

    return graph_def, input_names, output_names
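
writeNodes is not shown in this example. A minimal sketch of what such a helper might look like, assuming it simply dumps node names for inspection:

def writeNodes(model_path, graph_def):
    # Hypothetical helper: write one graph node name per line next to the model.
    nodes_file = os.path.join(model_path, 'nodes.txt')
    with open(nodes_file, 'w') as f:
        for node in graph_def.node:
            f.write(node.name + '\n')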
Example #8
def load_model(model_name):
    
    trt_output_file = f'./models/{model_name}_trt.pb'

    trt_graph = tf.compat.v1.GraphDef()

    if os.path.exists(trt_output_file):
        print(f'Loading model {trt_output_file}...')
        with tf.io.gfile.GFile(trt_output_file, 'rb') as f:
            trt_graph.ParseFromString(f.read())
            print(f'{trt_output_file} loaded.')
    else:
        # Lazy load these dependencies
        import sys
        sys.path.insert(1, '/')
        from tf_trt_models.detection import download_detection_model
        from tf_trt_models.detection import build_detection_graph
        
        config_path, checkpoint_path = download_detection_model(
            model_name, './models/')

        frozen_graph, input_names, output_names = build_detection_graph(
            config=config_path,
            checkpoint=checkpoint_path
        )

        print(f'Converting {model_name} to trt..')
        trt_graph = trt.create_inference_graph(
            input_graph_def=frozen_graph,
            outputs=output_names,
            max_batch_size=1,
            max_workspace_size_bytes=1 << 25,
            precision_mode='FP16',
            minimum_segment_size=50
        )
        with open(trt_output_file, 'wb') as f:
            f.write(trt_graph.SerializeToString())
            print(f'{trt_output_file} saved.')

    return trt_graph
Example #9
import tensorflow.contrib.tensorrt as trt
from tf_trt_models.detection import download_detection_model, build_detection_graph

frozen_graph, input_names, output_names = build_detection_graph(
    config='training/pipeline.config',  # build_detection_graph takes 'config', not 'config_path'
    checkpoint='training/model.ckpt-28607'
    #score_threshold=0.3,
    #batch_size=1
)


trt_graph_def = trt.create_inference_graph(
    #input_graph_def='person_inference_graph/person_inference_graph.pb',
    input_graph_def=frozen_graph,
    outputs=output_names,
    #outputs=['detection_boxes', 'detection_classes', 'detection_scores', 'num_detections'],
    max_batch_size=1,
    max_workspace_size_bytes=1 << 25,
    precision_mode='FP16',
    minimum_segment_size=50
)

pb_path = 'ssd_mobilenet_v1_coco_people/{}_trt.pb'.format('ssd_mobilenet_v1_coco_people')
with open(pb_path, 'wb') as pf:
    pf.write(trt_graph_def.SerializeToString())
Example #10
if args.model is None:
    print(
        "Please provide a -m flag followed by the name of your directory in the /data folder that contains the frozen graph. EX: -m my_retrained_network"
    )
    exit()

config_path = './data/' + args.model + '/pipeline.config'
checkpoint_path = './data/' + args.model + '/model.ckpt'
if args.number is not None:
    checkpoint_path += '-' + args.number

print("Building detection graph from model " + args.model + "...")
frozen_graph, input_names, output_names = build_detection_graph(
    config=config_path,
    checkpoint=checkpoint_path,
    score_threshold=0.3,
    batch_size=1)

# score_threshold is the score below which boxes are thrown out
# batch_size is 1 on the Nano for speed

print("Creating Jetson optimized graph...")
trt_graph = trt.create_inference_graph(input_graph_def=frozen_graph,
                                       outputs=output_names,
                                       max_batch_size=1,
                                       max_workspace_size_bytes=1 << 25,
                                       precision_mode='FP16',
                                       minimum_segment_size=50)

# convert the frozen graph into a TRT-optimized graph for the Jetson
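
This snippet stops after the conversion; persisting the result would follow the same pattern as the other examples. A sketch, with an assumed output location:

trt_output_path = './data/' + args.model + '/frozen_inference_graph_trt.pb'  # assumed path
with open(trt_output_path, 'wb') as f:
    f.write(trt_graph.SerializeToString())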
Example #11
import tensorflow.contrib.tensorrt as trt
from tf_trt_models.detection import build_detection_graph
from tx2_config import MODEL, DATA_DIR, CONFIG_FILE, CHECKPOINT_FILE, SERIAL_FILE
from common.colors import cprint

cprint('Freezing tensorflow graph...','blue')
try:
    frozen_graph, input_names, output_names = build_detection_graph(
        config=CONFIG_FILE,
        checkpoint=CHECKPOINT_FILE,
        score_threshold=0.3,
        force_nms_cpu=False,
        batch_size=1
    )
    cprint('Tensorflow graph frozen successfully','green')
except Exception as e:
    cprint('Failed to freeze graph','error')
    cprint(str(e),'error')
    raise  # without a valid frozen_graph the conversion below cannot proceed


cprint('Creating inference graph...','blue')

try:
    trt_graph = trt.create_inference_graph(
        input_graph_def=frozen_graph,
        outputs=output_names,
        max_batch_size=1,
        max_workspace_size_bytes=1 << 25,
        precision_mode='FP16',
        minimum_segment_size=50
    )
    cprint('Inference graph created successfully','green')
except Exception as e:
    # Completes the truncated try block, mirroring the handler above.
    cprint('Failed to create inference graph','error')
    cprint(str(e),'error')
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='tf-trt model.')
    parser.add_argument('--path', help='path to checkpoint dir.')
    parser.add_argument('--output', help='Output dir.', default='model')
    parser.add_argument('--force_nms_cpu', help='Force NMS CPU', action='store_true')
    parser.add_argument('--threshold', help='Score threshold', default=0.5, type=float)
    args = parser.parse_args()

    model_dir = args.output
    if not tf.gfile.Exists(model_dir):
        tf.gfile.MkDir(model_dir)

    if args.model:
        config_path, checkpoint_path = download_detection_model(args.model, 'data')

    elif args.path:
        if not tf.gfile.Exists(args.path):
            print('Error: Checkpoint dir does not exist!')
            return

        config_path = os.path.join(args.path, 'pipeline.config')
        checkpoint_path = os.path.join(args.path,'model.ckpt')

    else:
        print('Error: Either model or path is not specified in the argument.')
        return

    frozen_graph, input_names, output_names = build_detection_graph(
        config=config_path,
        force_nms_cpu=args.force_nms_cpu,
        checkpoint=checkpoint_path,
        batch_size=1
    )
    print(input_names, output_names)
    base_name = os.path.splitext(os.path.basename(checkpoint_path))[0]
    save_model_file_name = base_name + '_frozen.pb'
    with open(os.path.join(model_dir, save_model_file_name), 'wb') as f:
        f.write(frozen_graph.SerializeToString())


    converter = trt.TrtGraphConverter(
        input_graph_def=frozen_graph,
        nodes_blacklist=output_names, #output nodes
        max_batch_size=1,
        is_dynamic_op=False,
        max_workspace_size_bytes=1 << 25,  # or trt.DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES
        precision_mode=trt.TrtPrecisionMode.FP16,
        minimum_segment_size=50)
    trt_graph = converter.convert()
    # trt_graph = trt.create_inference_graph(
    #     input_graph_def=frozen_graph,
    #     outputs=output_names,
    #     max_batch_size=1,
    #     max_workspace_size_bytes=1 << 25,
    #     precision_mode='FP16',
    #     minimum_segment_size=3
    # )

    trt_engine_opts = len([1 for n in trt_graph.node if str(n.op) == 'TRTEngineOp'])
    print("trt_engine_opts = {}".format(trt_engine_opts))

    base_name = os.path.splitext(os.path.basename(checkpoint_path))[0]
    save_model_file_name = base_name + '_frozen_fp16.pb'
    with open(os.path.join(model_dir, save_model_file_name), 'wb') as f:
        f.write(trt_graph.SerializeToString())
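
TrtGraphConverter is the newer TF 1.x conversion API (TF 1.14+); the commented-out create_inference_graph call above is the older contrib equivalent used by the other examples. Counting TRTEngineOp nodes, as done here, is a quick sanity check that TRT segments were actually created.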
Example #13
def main(args):
    svo_filepath = None
    if len(args) > 1:
        svo_filepath = args[1]

    # This main thread runs the object detection; the capture thread is started later

    # What tensorflow model to download and load
    MODEL_NAME = 'ssd_mobilenet_v1_coco_2018_01_28'
    #MODEL_NAME = 'ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03'
    #MODEL_NAME = 'ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03'
    #MODEL_NAME = 'ssd_mobilenet_v1_coco_2018_01_28'
    #MODEL_NAME = 'faster_rcnn_nas_coco_2018_01_28' # Accurate but heavy

    # What tensorRT model to download and load
    TRT_MODEL_NAME = 'ssd_mobilenet_v2_coco'
    TRTDIR = './data/' + TRT_MODEL_NAME
    TRTFILENAME = 'frozen_inference_graph.pb'
    PATH_TO_FROZEN_TRTGRAPH = TRTDIR + '/' + TRTFILENAME  # note: the '/' separator is needed here

    # Path to the frozen non-TRT detection graph. This is the actual model used for object detection.
    PATH_TO_FROZEN_GRAPH = 'data/' + TRT_MODEL_NAME + '/frozen_inference_graph.pb'


    # Check if the model is already present
    if not os.path.isfile(PATH_TO_FROZEN_GRAPH):
        print("Downloading model " + MODEL_NAME + "...")

        MODEL_FILE = MODEL_NAME + '.tar.gz'
        MODEL_PATH = 'data/' + MODEL_NAME + '.tar.gz'
        DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

        opener = urllib.request.URLopener()
        opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_PATH)
        tar_file = tarfile.open(MODEL_PATH)
        for file in tar_file.getmembers():
            file_name = os.path.basename(file.name)
            if 'frozen_inference_graph.pb' in file_name:
                tar_file.extract(file, 'data/')

    # List of strings used to add the correct label to each box.
    PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
    NUM_CLASSES = 90

    INPUT_NAME='image_tensor'
    BOXES_NAME='detection_boxes'
    CLASSES_NAME='detection_classes'
    SCORES_NAME='detection_scores'
    MASKS_NAME='detection_masks'
    NUM_DETECTIONS_NAME='num_detections'

    output_names = [BOXES_NAME, CLASSES_NAME, SCORES_NAME, NUM_DETECTIONS_NAME]
    input_names =[INPUT_NAME]

    # Start the capture thread with the ZED input
    print("Starting the ZED")
    capture_thread = Thread(target=capture_thread_func, kwargs={'svo_filepath': svo_filepath})
    capture_thread.daemon = True # so exiting the main thread cleans up these as well
    capture_thread.start()
    # Shared resources
    global image_np_global, depth_np_global, new_data, exit_signal

    detection_graph = tf.Graph()
    with detection_graph.as_default():
        if not usingTensorRTOptimisation: # Load a (frozen) Tensorflow model into memory.
            print("Loading model " + MODEL_NAME)
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        else:
            
            print("Checking if trt graph already exists")
            graph_def = tf.GraphDef()
            if not load_frozen_graph_from_file(PATH_TO_FROZEN_GRAPH,graph_def): #returns true if the pb file is found

                print("File not found, loading model " + TRT_MODEL_NAME)
                config_path, checkpoint_path = download_detection_model(TRT_MODEL_NAME, 'data')
                print("Building graph from checkpoints and config file")
                graph_def, input_names, output_names = build_detection_graph(
                    config=config_path,
                    checkpoint=checkpoint_path,
                    score_threshold=0.3,
                    batch_size=1,
                    force_nms_cpu=False
                )
                print("Saving model")
                save_frozen_graph_to_file(TRTDIR,TRTFILENAME,graph_def)
                # print("Loading model " + MODEL_NAME)
                # with detection_graph.as_default():
                #     with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
                #         serialized_graph = fid.read()
                #         graph_def.ParseFromString(serialized_graph)



                print("Converting graph to trt graph")
                converter = trt.TrtGraphConverter(
                    input_graph_def=graph_def,
                    nodes_blacklist=output_names+input_names) #output nodes

                trt_graph = converter.convert()

                print("Serializing and saving trt graph to file")
                save_frozen_graph_to_file(TRTDIR,TRTFILENAME,trt_graph)
            print("loaded")
            tf.import_graph_def(graph_def, name='')
            print("imported")


    

    # Limit the GPU memory usage taken by TF (see https://www.tensorflow.org/guide/using_gpu)
    config = tf.ConfigProto()
    #config.gpu_options.per_process_gpu_memory_fraction = 0.5
    config.gpu_options.allow_growth = True

    # Loading label map
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
                                                                use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    video = cv2.VideoWriter('video.avi', cv2.VideoWriter_fourcc(*'DIVX'), 1, (width, height))
    print("Video recorder initialized")

    # Detection
    with detection_graph.as_default():
        print(detection_graph.get_operations())
        with tf.Session(config=config, graph=detection_graph) as sess:
            # writer = tf.summary.FileWriter('logs', tf.compat.v1.get_default_graph())
            # sleep(20)

            while not exit_signal:
                try:
                    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                    if new_data:
                        lock.acquire()
                        image_np = np.copy(image_np_global)
                        depth_np = np.copy(depth_np_global)
                        new_data = False
                        lock.release()

                        image_np_expanded = np.expand_dims(image_np, axis=0)


                        image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name(INPUT_NAME+":0")
                        # Each box represents a part of the image where a particular object was detected.
                        boxes = tf.compat.v1.get_default_graph().get_tensor_by_name(BOXES_NAME+":0")
                        # Each score represents the level of confidence for each object.
                        # The score is shown on the result image together with the class label.
                        scores = tf.compat.v1.get_default_graph().get_tensor_by_name(SCORES_NAME+":0")
                        classes = tf.compat.v1.get_default_graph().get_tensor_by_name(CLASSES_NAME+":0")
                        num_detections = tf.compat.v1.get_default_graph().get_tensor_by_name(NUM_DETECTIONS_NAME+":0")
                        # Actual detection.
                        (boxes, scores, classes, num_detections) = sess.run(
                            [boxes, scores, classes, num_detections],
                            feed_dict={image_tensor: image_np_expanded})

                        num_detections_ = num_detections.astype(int)[0]

                        # Visualization of the results of a detection.
                        image_np = display_objects_distances(
                            image_np,
                            depth_np,
                            num_detections_,
                            np.squeeze(boxes),
                            np.squeeze(classes).astype(np.int32),
                            np.squeeze(scores),
                            category_index)

                        #cv2.imshow('ZED object detection', cv2.resize(image_np, (width, height)))
                        video.write(cv2.resize(image_np, (width, height)))
                        print("Frame written")

                        if cv2.waitKey(10) & 0xFF == ord('q'):
                            cv2.destroyAllWindows()
                            video.release()
                            exit_signal = True
                    else:
                        sleep(0.01)
                except KeyboardInterrupt:
                    print("saving video")
                    video.release()
                    # writer.close()  # 'writer' is only defined if the FileWriter above is enabled

            sess.close()
            video.release()

    exit_signal = True
    capture_thread.join()
    # writer.close()  # see note above: 'writer' is never defined in this snippet
Example #14
from tf_trt_models.tf_trt_models.detection import download_detection_model, build_detection_graph

# Options in model zoo: ssd_inception_v2_coco, ssd_mobilenet_v2_coco, ssd_mobilenet_v1_coco, ssdlite_mobilenet_v2_coco, ssd_mobilenet_v2_quantized_coco

# Options in Nvidia TRT Downloader: ssd_inception_v2_coco, ssd_mobilenet_v1_coco, ssd_mobilenet_v2_coco, ssd_resnet_50_fpn_coco, faster_rcnn_resnet50_coco, faster_rcnn_nas, mask_rcnn_resnet50_atrous_coco at 300x300

MODEL = 'ssd_mobilenet_v1_coco' if len(sys.argv) < 2 else sys.argv[1]

print("Downloading model " + MODEL + "...")
config_path, checkpoint_path = download_detection_model(MODEL, './data')

print("Building detection graph from model " + MODEL + "...")
frozen_graph, input_names, output_names = build_detection_graph(
    config=config_path,
    checkpoint=checkpoint_path,
    force_nms_cpu=False,
    score_threshold=0.3,
    #iou_threshold=0.5,
    batch_size=1)

# download the detection model and then build the graph locally
# score_threshold is the score below which boxes are thrown out
# iou_threshold is the intersection-over-union ratio for non-max suppression
# batch_size is 1 on the Nano for speed

print("Creating Jetson optimized graph...")
trt_graph = trt.create_inference_graph(input_graph_def=frozen_graph,
                                       outputs=output_names,
                                       max_batch_size=1,
                                       max_workspace_size_bytes=1 << 25,
                                       precision_mode='FP16',
                                       minimum_segment_size=50)
Example #15
import sys
import os
import argparse
import tensorflow.contrib.tensorrt as trt
from tf_trt_models.detection import download_detection_model, build_detection_graph

# Parse the model config, checkpoint, and output paths for the TRT conversion.
parser = argparse.ArgumentParser(description='convert_rt_model.')
parser.add_argument('-c', '--config', default='./parallels.config')
parser.add_argument('-m', '--model', default='./model.ckpt')
parser.add_argument('-o',
                    '--output',
                    default='./exported_graphs/frozen_inference_graph_trt.pb')

args = parser.parse_args()

frozen_graph, input_names, output_names = build_detection_graph(
    config=args.config,
    checkpoint=args.model,
    score_threshold=0.3,
    batch_size=1)

trt_graph = trt.create_inference_graph(input_graph_def=frozen_graph,
                                       outputs=output_names,
                                       max_batch_size=1,
                                       max_workspace_size_bytes=1 << 25,
                                       precision_mode='FP16',
                                       minimum_segment_size=50)

with open(args.output, 'wb') as f:
    f.write(trt_graph.SerializeToString())
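
A possible invocation, assuming the script is saved as convert_rt_model.py (the name suggested by the parser description):

python convert_rt_model.py -c ./parallels.config -m ./model.ckpt -o ./exported_graphs/frozen_inference_graph_trt.pb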