import os
import tarfile

from utils.paths import PATHS  # assumption: the same path helper imported in the snippets below


def extract_voc_data_if_needed():
    """Unpack the VOC test archive unless it has already been extracted."""
    if os.path.exists(PATHS.get_voc_dir_path()):
        return
    voc_archive_path = PATHS.get_data_file_path('VOCtest_06-Nov-2007.tar')
    print("Unpacking {}".format(voc_archive_path))
    with tarfile.open(voc_archive_path, "r") as tar:
        tar.extractall(path=PATHS.get_sample_root())
    print("Unpacking done!")
Example #2
def parse_commandline_arguments():
    """Parses command line arguments and adjusts internal data structures."""

    # Define script command line arguments
    parser = argparse.ArgumentParser(
        description='Run object detection inference on input image.')
    parser.add_argument('input_img_path',
                        metavar='INPUT_IMG_PATH',
                        help='an image file to run inference on')
    parser.add_argument(
        '-p',
        '--precision',
        type=int,
        choices=[32, 16],
        default=32,
        help='desired TensorRT float precision to build an engine with')
    parser.add_argument('-b',
                        '--max_batch_size',
                        type=int,
                        default=1,
                        help='max TensorRT engine batch size')
    parser.add_argument('-w',
                        '--workspace_dir',
                        help='sample workspace directory')
    parser.add_argument("-o",
                        "--output",
                        help="path of the output file",
                        default=os.path.join(PATHS.get_sample_root(),
                                             "image_inferred.jpg"))

    # Parse arguments passed
    args = parser.parse_args()

    # Set workspace dir path if passed by user
    if args.workspace_dir:
        PATHS.set_workspace_dir_path(args.workspace_dir)

    # Create the workspace directory if it does not already exist
    os.makedirs(PATHS.get_workspace_dir_path(), exist_ok=True)

    # Verify paths after adjustments; this exits the script if verification fails
    PATHS.verify_all_paths()

    # Fetch TensorRT engine path and datatype
    args.trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
    args.trt_engine_path = PATHS.get_engine_path(args.trt_engine_datatype,
                                                 args.max_batch_size)
    # Ensure the directory that will hold the serialized engine exists
    os.makedirs(os.path.dirname(args.trt_engine_path), exist_ok=True)

    return args
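# Hypothetical invocation (the script name and paths are illustrative only):
#
#   python detect_objects.py dog.jpg -p 16 -b 1 -w /tmp/trt_workspace
#
# The returned namespace carries both the raw flags and the engine metadata
# derived above:
#
#   args = parse_commandline_arguments()
#   print(args.trt_engine_path, args.trt_engine_datatype)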
def main():
    # Parse command line arguments
    parsed = parse_commandline_arguments()

    # Loading FlattenConcat plugin library using CDLL has a side
    # effect of loading FlattenConcat plugin into internal TensorRT
    # PluginRegistry data structure. This will be needed when parsing
    # network into UFF, since some operations will need to use this plugin
    try:
        ctypes.CDLL(PATHS.get_flatten_concat_plugin_path())
    except OSError:
        print("Error: {}\n{}\n{}".format(
            "Could not load {}".format(PATHS.get_flatten_concat_plugin_path()),
            "Make sure you have compiled the FlattenConcat custom plugin layer",
            "For more details, check README.md"))
        sys.exit(1)

    # Fetch .uff model path, convert from .pb
    # if needed, using prepare_ssd_model
    ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
    if not os.path.exists(ssd_model_uff_path):
        model_utils.prepare_ssd_model(MODEL_NAME)

    # Set up all TensorRT data structures needed for inference
    trt_inference_wrapper = inference_utils.TRTInference(
        parsed.trt_engine_path,
        ssd_model_uff_path,
        trt_engine_datatype=parsed.trt_engine_datatype,
        batch_size=parsed.max_batch_size)

    # Start measuring time
    inference_start_time = time.time()

    # Get TensorRT SSD model output
    detection_out, keep_count_out = \
        trt_inference_wrapper.infer(parsed.input_img_path)

    # Open the input image as a PIL.Image and let analyze_prediction()
    # draw bounding boxes on it based on the model output
    img_pil = Image.open(parsed.input_img_path)
    prediction_fields = len(TRT_PREDICTION_LAYOUT)
    for det in range(int(keep_count_out[0])):
        analyze_prediction(detection_out, det * prediction_fields, img_pil)

    # Output total [img load + inference + drawing bboxes] time
    print("Total time taken for one image: {} ms\n".format(
        int(round((time.time() - inference_start_time) * 1000))))

    # Save the annotated image to the path chosen via --output and report it
    img_pil.save(parsed.output)
    print("Saved output image to: {}".format(parsed.output))
Example #4
import argparse
import sys

import tensorflow as tf

# Make project-local packages importable before importing from them
sys.path.append('../')

from sklearn.externals import joblib  # on scikit-learn >= 0.23, use `import joblib` instead
from models import CAE
from sklearn.linear_model import SGDClassifier
from sklearn.multiclass import OneVsRestClassifier

from cyvlfeat.kmeans import kmeans, kmeans_quantize
from utils.paths import PATHS

tf.logging.set_verbosity(tf.logging.DEBUG)  # TF1-style logging API

summary_save_path_pre = PATHS.get_logs_dir_path()
svm_save_dir = PATHS.get_model_svm_dir_path()
model_save_path_pre = PATHS.get_model_cae_dir_path()

prefix = PATHS.get_sample_root()

batch_size = 64
learning_rate = [1e-3, 1e-4]
lr_decay_epochs = [100]
epochs = 200
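# The paired `learning_rate` / `lr_decay_epochs` lists above suggest a
# piecewise-constant schedule (1e-3 until epoch 100, then 1e-4). A minimal
# sketch of that interpretation; the helper name is illustrative:
def lr_for_epoch(epoch, lrs=learning_rate, boundaries=lr_decay_epochs):
    """Return the learning rate in effect at a given epoch."""
    for i, boundary in enumerate(boundaries):
        if epoch < boundary:
            return lrs[i]
    return lrs[-1]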
'''
The author notes that the model may perform better when a 90-d one-hot
embedding, representing the object class in COCO, is appended to the feature
vector; this can be enabled with '--class_add'.
'''
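# A minimal sketch of the idea described above, assuming a 1-D numpy feature
# vector `feat` and a COCO class index `cls` in [0, 90); the names here are
# illustrative, not from the original code:
import numpy as np

def append_class_embedding(feat, cls, num_classes=90):
    """Concatenate a one-hot class embedding onto a feature vector."""
    one_hot = np.zeros(num_classes, dtype=feat.dtype)
    one_hot[cls] = 1.0
    return np.concatenate([feat, one_hot])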


def arg_parse():
    parser = argparse.ArgumentParser()
    # Truncated in the original snippet; the remainder of this flag and the
    # parse call below are assumptions added so the function runs.
    parser.add_argument('-g', '--gpu', default='0', help='GPU id(s) to use')
    return parser.parse_args()
def parse_commandline_arguments():
    """Parses command line arguments and adjusts internal data structures."""

    # Define script command line arguments
    parser = argparse.ArgumentParser(
        description='Run object detection inference on input image.')
    parser.add_argument('input_img_path',
                        metavar='INPUT_IMG_PATH',
                        help='an image file to run inference on')
    parser.add_argument(
        '-p',
        '--precision',
        type=int,
        choices=[32, 16],
        default=32,
        help='desired TensorRT float precision to build an engine with')
    parser.add_argument('-b',
                        '--max_batch_size',
                        type=int,
                        default=1,
                        help='max TensorRT engine batch size')
    parser.add_argument('-w',
                        '--workspace_dir',
                        help='sample workspace directory')
    parser.add_argument("-o",
                        "--output",
                        help="path of the output file",
                        default=os.path.join(PATHS.get_sample_root(),
                                             "image_inferred.jpg"))
    parser.add_argument(
        '-d',
        '--data',
        help="Specify the directory where the data is stored. "
        "This argument overrides $TRT_DATA_DIR.")

    args, _ = parser.parse_known_args()

    data_dir = os.environ.get('TRT_DATA_DIR',
                              None) if args.data is None else args.data
    if data_dir is None:
        raise ValueError(
            "Data directory must be specified, either via the -d/--data flag "
            "or the $TRT_DATA_DIR environment variable.")
    PATHS.set_data_dir_path(data_dir)

    # Set workspace dir path if passed by user
    if args.workspace_dir:
        PATHS.set_workspace_dir_path(args.workspace_dir)

    # Create the workspace directory if it does not already exist
    os.makedirs(PATHS.get_workspace_dir_path(), exist_ok=True)

    # Verify paths after adjustments; this exits the script if verification fails
    PATHS.verify_all_paths()

    # Fetch TensorRT engine path and datatype
    args.trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
    args.trt_engine_path = PATHS.get_engine_path(args.trt_engine_datatype,
                                                 args.max_batch_size)
    # Ensure the directory that will hold the serialized engine exists
    os.makedirs(os.path.dirname(args.trt_engine_path), exist_ok=True)

    return args
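# Hypothetical invocations (paths are illustrative); the -d flag takes
# precedence over the TRT_DATA_DIR environment variable:
#
#   TRT_DATA_DIR=/data/trt python detect_objects.py dog.jpg -p 16
#   python detect_objects.py dog.jpg -d /data/trt -w /tmp/trt_workspace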