Example #1
        if parsed['inference_backend'] == 'tensorrt':
            # TRTInference initialization sets up all TensorRT
            # structures, creates the engine if it doesn't already
            # exist and saves it to a file for future use
            trt_inference_wrapper = inference_utils.TRTInference(
                parsed['trt_engine_path'], ssd_model_uff_path,
                parsed['trt_engine_datatype'], parsed['max_batch_size'])
            # Outputs from TensorRT are handled differently than
            # outputs from Tensorflow, which is why a separate
            # function is used to produce the detections from them
            produce_tensorrt_detections(detection_files,
                trt_inference_wrapper, parsed['max_batch_size'],
                voc_image_numbers, voc_image_path)
        elif parsed['inference_backend'] == 'tensorflow':
            # In the case of Tensorflow, all we need to
            # initialize inference is the frozen model...
            tf_inference_wrapper = \
                inference_utils.TensorflowInference(ssd_model_pb_path)
            # ...and after initializing it, we can
            # proceed to producing detections
            produce_tensorflow_detections(detection_files,
                tf_inference_wrapper, parsed['max_batch_size'],
                voc_image_numbers, voc_image_path)


    # Flush detections to files to make sure evaluation is correct
    for key in detection_files:
        detection_files[key].flush()

    # Do mAP computation based on saved detections
    voc_mAP_utils.do_python_eval(parsed['results_dir'])

    # Close detection files; they are not needed anymore
    for key in detection_files:
        detection_files[key].close()
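
Note: in both examples, each per-class detection file (det_test_<class>.txt) is later read by voc_mAP_utils.do_python_eval. A minimal sketch of the line format such files typically hold, assuming the standard Pascal VOC detection format; the helper name and signature below are illustrative and not part of the sample:

    def write_detection(detection_file, image_number, confidence, bbox):
        # Assumed standard Pascal VOC detection line:
        # <image_id> <confidence> <xmin> <ymin> <xmax> <ymax>
        xmin, ymin, xmax, ymax = bbox
        detection_file.write('{} {:.6f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
            image_number, confidence, xmin, ymin, xmax, ymax))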
Example #2
def main():
    # Parse command line arguments
    parsed = parse_commandline_arguments()

    # Check if inference should be skipped (if model inference
    # results are already computed, we don't need to recompute
    # them for VOC mAP computation)
    skip_inference = should_skip_inference(parsed)
    # If inference will not be skipped, we create
    # files to store its results in
    detection_files = {}
    if not skip_inference:
        for voc_class in VOC_CLASSES:
            detection_files[voc_class] = open(
                os.path.join(parsed['results_dir'],
                             'det_test_{}.txt'.format(voc_class)), 'w')

    # Fetch frozen model .pb path...
    ssd_model_pb_path = PATHS.get_model_pb_path(MODEL_NAME)
    # ...and .uff path, if needed (converting .pb to .uff if not already done)
    if parsed['inference_backend'] == 'tensorrt':
        ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)

    # This block of code sets up and performs inference, if needed
    if not skip_inference:
        # Preprocess VOC dataset if necessary by resizing images
        preprocess_voc()

        # Fetch image list and input .ppm files path
        with open(PATHS.get_voc_image_set_path(), 'r') as f:
            voc_image_numbers = f.readlines()
            voc_image_numbers = [line.strip() for line in voc_image_numbers]
        voc_image_path = PATHS.get_voc_ppm_img_path()

        # The Tensorflow and TensorRT code paths differ slightly,
        # so each backend is handled individually
        if parsed['inference_backend'] == 'tensorrt':
            # TRTInference initialization sets up all TensorRT
            # structures, creates the engine if it doesn't already
            # exist, and finally saves it to a file for future use
            from utils.inference_trt import TRTInference
            trt_inference_wrapper = TRTInference(parsed['trt_engine_path'],
                                                 ssd_model_uff_path,
                                                 parsed['trt_engine_datatype'],
                                                 parsed['max_batch_size'])
            # Outputs from TensorRT are handled differently than
            # outputs from Tensorflow, which is why a separate
            # function is used to produce the detections from them
            produce_tensorrt_detections(detection_files, trt_inference_wrapper,
                                        parsed['max_batch_size'],
                                        voc_image_numbers, voc_image_path)
        elif parsed['inference_backend'] == 'tensorflow':
            # In the case of Tensorflow, all we need to
            # initialize inference is the frozen model...
            from utils.inference_tf import TensorflowInference
            tf_inference_wrapper = TensorflowInference(ssd_model_pb_path)
            # ...and after initializing it, we can
            # proceed to producing detections
            produce_tensorflow_detections(detection_files,
                                          tf_inference_wrapper,
                                          parsed['max_batch_size'],
                                          voc_image_numbers, voc_image_path)

    # Flush detections to files to make sure evaluation is correct
    for key in detection_files:
        detection_files[key].flush()

    # Do mAP computation based on saved detections
    voc_mAP_utils.do_python_eval(parsed['results_dir'])

    # Close detection files; they are not needed anymore
    for key in detection_files:
        detection_files[key].close()
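
For completeness, a plausible sketch of the should_skip_inference check used at the top of main(). This is an assumption, not the sample's actual implementation: here inference is skipped only when every per-class detection file already exists in the results directory (VOC_CLASSES is the same list used when opening the detection files above).

    import os

    def should_skip_inference(parsed):
        # Hypothetical criterion: skip inference if all per-class
        # detection files are already present in results_dir
        return all(
            os.path.isfile(os.path.join(
                parsed['results_dir'],
                'det_test_{}.txt'.format(voc_class)))
            for voc_class in VOC_CLASSES)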