Example #1
def detect(category_index, func_defs):
    # Prepare TF config options
    tf_config = make_tf_config()

    # Prepare directories
    ck_utils.prepare_dir(params["RESULTS_OUT_DIR"])
    ck_utils.prepare_dir(params["ANNOTATIONS_OUT_DIR"])
    ck_utils.prepare_dir(params["IMAGES_OUT_DIR"])
    ck_utils.prepare_dir(params["DETECTIONS_OUT_DIR"])

    # Load the list of image filenames to process
    image_files = ck_utils.load_image_list(
        params["IMAGES_DIR"], params["BATCH_COUNT"] * params["BATCH_SIZE"],
        params["SKIP_IMAGES"])

    with tf.compat.v1.Graph().as_default(), tf.compat.v1.Session(
            config=tf_config) as sess:
        setup_time_begin = time.time()

        # Make TF graph def from frozen graph file
        begin_time = time.time()
        # FIRST HOOK: load graph
        func_defs["load_graph"](params)
        graph_load_time = time.time() - begin_time
        print('Graph loaded in {:.4f}s'.format(graph_load_time))
        # SECOND HOOK: get handles to the input/output tensors
        tensor_dict, input_tensor = func_defs["get_tensor"]()
        setup_time = time.time() - setup_time_begin
        print("setup time is", setup_time)
        ###### END SETUP PHASE
        # Process images
        test_time_begin = time.time()
        image_index = 0
        load_time_total = 0
        detect_time_total = 0
        images_processed = 0
        processed_image_ids = []
        # The loop boundary differs for batch vs. non-batch processing,
        # but the structure of the loop is the same in both cases.
        loop_limit = (len(image_files) if params["ENABLE_BATCH"] == 0
                      else params["BATCH_COUNT"])
        for iter_num in range(loop_limit):

            load_time_begin = time.time()
            # THIRD HOOK: preprocess
            image_data, processed_image_ids, image_size, original_image = func_defs[
                "preprocess"](image_files, iter_num, processed_image_ids,
                              params)

            load_time = time.time() - load_time_begin
            load_time_total += load_time
            # Detect image: common
            detect_time_begin = time.time()
            feed_dict = {input_tensor: image_data}
            output_dict = sess.run(tensor_dict, feed_dict)
            # FOURTH HOOK: convert TensorRT-style output to a plain dict
            output_dict = func_defs["out_conv"](output_dict)

            detect_time = time.time() - detect_time_begin
            # Exclude first image from averaging
            if iter_num > 0 or params["BATCH_COUNT"] == 1:
                detect_time_total += detect_time
                images_processed += 1  # may need revision

            # FIFTH HOOK: process results
            func_defs["postprocess"](image_files, iter_num, image_size,
                                     original_image, image_data, output_dict,
                                     category_index, params)

            if params["FULL_REPORT"]:
                print('Detected in {:.4f}s'.format(detect_time))

    # Save the list of processed image ids so evaluation can be re-run
    # without repeating detection (CK_SKIP_DETECTION=YES)
    with open(params["IMAGE_LIST_FILE"], 'w') as f:
        f.write(json.dumps(processed_image_ids))

    test_time = time.time() - test_time_begin
    detect_avg_time = detect_time_total / images_processed
    load_avg_time = load_time_total / len(processed_image_ids)
    OPENME = {}
    OPENME['setup_time_s'] = setup_time
    OPENME['test_time_s'] = test_time
    OPENME['graph_load_time_s'] = graph_load_time
    OPENME['images_load_time_total_s'] = load_time_total
    OPENME['images_load_time_avg_s'] = load_avg_time
    OPENME['detection_time_total_s'] = detect_time_total
    OPENME['detection_time_avg_s'] = detect_avg_time
    OPENME['avg_time_ms'] = detect_avg_time * 1000
    OPENME['avg_fps'] = 1.0 / detect_avg_time if detect_avg_time > 0 else 0

    with open(params["TIMER_JSON"], 'w') as o:
        json.dump(OPENME, o, indent=2, sort_keys=True)

    return processed_image_ids
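
The function above assumes make_tf_config() and a func_defs hook table defined elsewhere. A minimal sketch of what they might look like, assuming a TF Object Detection API frozen graph (the tensor names are the standard ones that API exports; the preprocess/postprocess stubs and their names are purely illustrative):

import os
import numpy as np
import PIL.Image
import tensorflow as tf

def make_tf_config():
    # Conservative session config: let GPU memory grow on demand.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    return config

def load_graph(params):
    # Import the frozen graph into the default graph of the enclosing session.
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(params["FROZEN_GRAPH"], 'rb') as f:
        graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')

def get_tensor():
    # Standard input/output tensors of an Object Detection API frozen graph.
    graph = tf.compat.v1.get_default_graph()
    tensor_dict = {
        'num_detections': graph.get_tensor_by_name('num_detections:0'),
        'detection_boxes': graph.get_tensor_by_name('detection_boxes:0'),
        'detection_scores': graph.get_tensor_by_name('detection_scores:0'),
        'detection_classes': graph.get_tensor_by_name('detection_classes:0'),
    }
    input_tensor = graph.get_tensor_by_name('image_tensor:0')
    return tensor_dict, input_tensor

def my_preprocess(image_files, iter_num, processed_image_ids, params):
    # Illustrative stub: load one image as a [1, H, W, 3] uint8 batch.
    image = PIL.Image.open(
        os.path.join(params["IMAGES_DIR"], image_files[iter_num])).convert('RGB')
    processed_image_ids.append(image_files[iter_num])
    data = np.array(image, dtype=np.uint8)[np.newaxis, ...]
    return data, processed_image_ids, image.size, image

def my_postprocess(image_files, iter_num, image_size, original_image,
                   image_data, output_dict, category_index, params):
    # Illustrative stub: just report how many objects were found.
    print('{} detections'.format(int(output_dict['num_detections'][0])))

func_defs = {
    "load_graph": load_graph,
    "get_tensor": get_tensor,
    "preprocess": my_preprocess,
    "out_conv": lambda d: d,  # identity when no TensorRT conversion is needed
    "postprocess": my_postprocess,
}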
Example #2
def detect(category_index, func_defs):
    # Prepare TF config options
    tf_config = make_tf_config()

    # Prepare directories
    ck_utils.prepare_dir(params["RESULTS_OUT_DIR"])
    ck_utils.prepare_dir(params["ANNOTATIONS_OUT_DIR"])
    ck_utils.prepare_dir(params["IMAGES_OUT_DIR"])
    ck_utils.prepare_dir(params["DETECTIONS_OUT_DIR"])

    # Load the list of image filenames to process
    image_files = ck_utils.load_image_list(
        params["IMAGES_DIR"], params["BATCH_COUNT"] * params["BATCH_SIZE"],
        params["SKIP_IMAGES"])

    setup_time_begin = time.time()

    # FIRST HOOK: load both frozen graphs, each in its own session,
    # and get handles to their input/output tensors
    begin_time = time.time()
    names = [params["FROZEN_GRAPH"], params["FROZEN_GRAPH_2"]]
    sessions = []
    tensor_dict = []
    input_tensor = []
    for i in range(2):
        sess, td, it = func_defs[i]["load_graph"](names[i])
        sessions.append(sess)
        tensor_dict.append(td)
        input_tensor.append(it)
    graph_load_time = time.time() - begin_time
    print('Graphs loaded in {:.4f}s'.format(graph_load_time))

    setup_time = time.time() - setup_time_begin
    print('Setup time: {:.4f}s'.format(setup_time))
    ###### END SETUP PHASE
    # Process images
    test_time_begin = time.time()
    image_index = 0
    load_time_total = 0
    detect_time_total = 0
    images_processed = 0
    processed_image_ids = []
    # Batch mode is not used here: the loop runs over all images
    loop_limit = len(image_files)
    current_session = 1  # start with the high-precision network
    last_high = 0  # frames since the last high-precision pass
    for iter_num in range(loop_limit):
        load_time_begin = time.time()
        # THIRD HOOK: preprocess
        image_data, processed_image_ids, image_size, original_image = func_defs[
            current_session]["preprocess"](image_files, iter_num,
                                           processed_image_ids, params)

        load_time = time.time() - load_time_begin
        load_time_total += load_time
        # Detect image: common
        detect_time_begin = time.time()
        feed_dict = {input_tensor[current_session]: image_data}

        td = tensor_dict[current_session]
        sess = sessions[current_session]
        with sess.graph.as_default():
            output_dict = sess.run(td, feed_dict)

        # FOURTH HOOK: convert TensorRT-style output to a plain dict
        output_dict = func_defs[current_session]["out_conv"](output_dict)

        detect_time = time.time() - detect_time_begin
        # Exclude first image from averaging
        if iter_num > 0 or params["BATCH_COUNT"] == 1:
            detect_time_total += detect_time
            images_processed += 1  # may need revision

        # FIFTH HOOK: process results
        out_classes = func_defs[current_session]["postprocess"](
            image_files, iter_num, image_size, original_image, image_data,
            output_dict, category_index, params)

        # Pick the network for the next frame: go back to the high-precision
        # model (index 1) if class 2 was detected or more than 2 frames have
        # passed since the last high-precision pass; otherwise use the fast one
        if last_high > 2 or 2 in out_classes:
            current_session = 1
            last_high = 0
        else:
            current_session = 0
            last_high += 1

        if params["FULL_REPORT"]:
            print('Detected in {:.4f}s'.format(detect_time))

    # Save the list of processed image ids so evaluation can be re-run
    # without repeating detection (CK_SKIP_DETECTION=YES)
    with open(params["IMAGE_LIST_FILE"], 'w') as f:
        f.write(json.dumps(processed_image_ids))

    test_time = time.time() - test_time_begin
    detect_avg_time = detect_time_total / images_processed
    load_avg_time = load_time_total / len(processed_image_ids)
    OPENME = {}
    OPENME['setup_time_s'] = setup_time
    OPENME['test_time_s'] = test_time
    OPENME['graph_load_time_s'] = graph_load_time
    OPENME['images_load_time_total_s'] = load_time_total
    OPENME['images_load_time_avg_s'] = load_avg_time
    OPENME['detection_time_total_s'] = detect_time_total
    OPENME['detection_time_avg_s'] = detect_avg_time
    OPENME['avg_time_ms'] = detect_avg_time * 1000
    OPENME['avg_fps'] = 1.0 / detect_avg_time if detect_avg_time > 0 else 0

    with open(params["TIMER_JSON"], 'w') as o:
        json.dump(OPENME, o, indent=2, sort_keys=True)

    return processed_image_ids
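
The distinctive part of Example #2 is the scheduling heuristic at the end of the loop, which alternates between a fast network (index 0) and a high-precision one (index 1). A self-contained restatement of that policy, assuming the same trigger class id 2 and 2-frame budget as the snippet (the function name and keyword defaults are ours):

def next_session(last_high, out_classes, trigger_class=2, max_low_frames=2):
    # Return (session_index, new_last_high): 1 = high-precision, 0 = fast.
    # Re-run the accurate model when the trigger class was just seen, or when
    # the fast model has been used for more than max_low_frames frames.
    if last_high > max_low_frames or trigger_class in out_classes:
        return 1, 0
    return 0, last_high + 1

# The policy stays on the fast model until the budget runs out or class 2 appears:
state = 0
for classes in ([1], [3], [1], [1], [2]):
    session, state = next_session(state, classes)
    print(session, state)  # -> 0 1, 0 2, 0 3, 1 0, 1 0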
Example #3
def detect(category_index):
    # Prepare TF config options
    tf_config = make_tf_config()

    # Prepare directories
    ck_utils.prepare_dir(RESULTS_OUT_DIR)
    ck_utils.prepare_dir(ANNOTATIONS_OUT_DIR)
    ck_utils.prepare_dir(IMAGES_OUT_DIR)
    ck_utils.prepare_dir(DETECTIONS_OUT_DIR)

    # Load processing image filenames
    image_files = ck_utils.load_image_list(IMAGES_DIR, IMAGE_COUNT,
                                           SKIP_IMAGES)

    with tf.Graph().as_default(), tf.Session(config=tf_config) as sess:
        setup_time_begin = time.time()

        # Make TF graph def from frozen graph file
        begin_time = time.time()
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(FROZEN_GRAPH, 'rb') as f:
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
        graph_load_time = time.time() - begin_time
        print('Graph loaded in {:.4f}s'.format(graph_load_time))

        # NOTE: Load checkpoint here when they are needed

        # Get handles to input and output tensors
        tensor_dict, input_tensor = get_handles_to_tensors()

        setup_time = time.time() - setup_time_begin

        # Process images
        # TODO: implement batched mode
        test_time_begin = time.time()
        image_index = 0
        load_time_total = 0
        detect_time_total = 0
        images_processed = 0
        processed_image_ids = []
        for file_counter, image_file in enumerate(image_files):
            if FULL_REPORT or (file_counter + 1) % 10 == 0:
                print('\nDetect image: {} ({} of {})'.format(
                    image_file, file_counter + 1, len(image_files)))

            # Load image
            load_time_begin = time.time()
            image = PIL.Image.open(os.path.join(IMAGES_DIR, image_file))
            image_id = ck_utils.filename_to_id(image_file, DATASET_TYPE)
            processed_image_ids.append(image_id)

            # The array based representation of the image will be used later
            # in order to prepare the result image with boxes and labels on it.
            image_data = load_pil_image_into_numpy_array(image)
            load_time = time.time() - load_time_begin
            load_time_total += load_time

            # Detect image
            detect_time_begin = time.time()
            feed_dict = {input_tensor: image_data}
            output_dict = sess.run(tensor_dict, feed_dict)
            detect_time = time.time() - detect_time_begin

            # Exclude first image from averaging
            if file_counter > 0 or IMAGE_COUNT == 1:
                detect_time_total += detect_time
                images_processed += 1

            # Process results
            # All outputs are float32 numpy arrays, so convert types as appropriate
            # TODO: implement batched mode (0 here is the image index in the batch)
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = (
                output_dict['detection_classes'][0].astype(np.uint8))
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]

            save_detection_txt(image_file, image.size, output_dict,
                               category_index)
            save_detection_img(image_file, image_data[0], output_dict,
                               category_index)

            if FULL_REPORT:
                print('Detected in {:.4f}s'.format(detect_time))

    # Save the list of processed image ids so evaluation can be re-run
    # without repeating detection (CK_SKIP_DETECTION=YES)
    with open(IMAGE_LIST_FILE, 'w') as f:
        f.write(json.dumps(processed_image_ids))

    test_time = time.time() - test_time_begin
    detect_avg_time = detect_time_total / images_processed
    load_avg_time = load_time_total / len(processed_image_ids)

    OPENME = {}
    OPENME['setup_time_s'] = setup_time
    OPENME['test_time_s'] = test_time
    OPENME['graph_load_time_s'] = graph_load_time
    OPENME['images_load_time_s'] = load_time_total
    OPENME['images_load_time_avg_s'] = load_avg_time
    OPENME['detection_time_total_s'] = detect_time_total
    OPENME['detection_time_avg_s'] = detect_avg_time
    OPENME['avg_time_ms'] = detect_avg_time * 1000
    OPENME['avg_fps'] = 1.0 / detect_avg_time if detect_avg_time > 0 else 0

    with open(TIMER_JSON, 'w') as o:
        json.dump(OPENME, o, indent=2, sort_keys=True)

    return processed_image_ids
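
Example #3 relies on two helpers that are not shown: get_handles_to_tensors(), which would look like the get_tensor() sketch after Example #1 (the standard Object Detection API tensor names), and an image loader. A plausible version of the latter, modeled on the Object Detection API tutorial code (treat it as a sketch, not the snippet's actual helper):

import numpy as np

def load_pil_image_into_numpy_array(image):
    # The frozen graph expects a uint8 batch of shape [1, height, width, 3].
    (width, height) = image.size
    data = np.array(image.convert('RGB').getdata(), dtype=np.uint8)
    return data.reshape((1, height, width, 3))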
Example #4
def ck_postprocess(i):
    def my_env(var):
        return i['env'].get(var)

    def dep_env(dep, var):
        return i['deps'][dep]['dict']['env'].get(var)

    def set_in_my_env(var):
        return my_env(var) and my_env(var).lower() in [
            'yes', 'true', 'on', '1'
        ]

    def set_in_dep_env(dep, var):
        return dep_env(dep, var) and dep_env(
            dep, var).lower() in ['yes', 'true', 'on', '1']

    def has_dep_env(dep, var):
        return var in i['deps'][dep]['dict']['env']

    def has_dep(dep):
        return dep in i['deps']

    import os
    import json
    import sys
    import inspect

    # Gain access to other scripts in the same directory as postprocess.py:
    SCRIPT_DIR = os.path.dirname(
        os.path.abspath(inspect.getfile(inspect.currentframe())))
    sys.path.append(SCRIPT_DIR)

    # Gain access to some of the Python dependencies:
    PYTHONPATH = ''
    for dep_name in ['tool-coco', 'lib-python-matplotlib']:
        PYTHONPATH = dep_env(dep_name, 'PYTHONPATH') + ':' + PYTHONPATH

    split_path = set()
    for p in PYTHONPATH.split(":"):
        if p in ["${PYTHONPATH}", "$PYTHONPATH", ""]:
            continue
        split_path.add(p)

    # Allow this script to use numpy, pillow, etc.
    sys.path.extend(list(split_path))

    # Get some parameters directly from the dependencies' environment:
    MODEL_ROOT = dep_env('weights', "CK_ENV_TENSORFLOW_MODEL_ROOT")
    if MODEL_ROOT:
        LABELMAP_FILE = os.path.join(
            MODEL_ROOT,
            dep_env('weights', 'CK_ENV_TENSORFLOW_MODEL_LABELMAP_FILE') or "")
        MODEL_DATASET_TYPE = dep_env('weights',
                                     "CK_ENV_TENSORFLOW_MODEL_DATASET_TYPE")
    else:
        MODEL_ROOT = dep_env('weights', "CK_ENV_ONNX_MODEL_ROOT")
        LABELMAP_FILE = os.path.join(
            MODEL_ROOT,
            dep_env('weights', 'CK_ENV_ONNX_MODEL_CLASSES_LABELS') or "")
        MODEL_DATASET_TYPE = dep_env('weights',
                                     "CK_ENV_ONNX_MODEL_DATASET_TYPE")

    # Annotations can be a directory or a single file, depending on dataset type:
    ANNOTATIONS_PATH = dep_env('dataset', "CK_ENV_DATASET_ANNOTATIONS")

    TIMER_JSON = my_env('CK_TIMER_FILE')

    PREPROCESSED_FILES = dep_env(
        'dataset', 'CK_ENV_DATASET_OBJ_DETECTION_PREPROCESSED_SUBSET_FOF'
    ) or my_env('CK_PREPROCESSED_FOF_WITH_ORIGINAL_DIMENSIONS')

    DETECTIONS_OUT_DIR = my_env('CK_DETECTIONS_OUT_DIR')
    RESULTS_OUT_DIR = my_env('CK_RESULTS_OUT_DIR')
    ANNOTATIONS_OUT_DIR = my_env('CK_ANNOTATIONS_OUT_DIR')

    DATASET_TYPE = dep_env('dataset', "CK_ENV_DATASET_TYPE")
    METRIC_TYPE = (my_env("CK_METRIC_TYPE") or DATASET_TYPE).lower()

    FULL_REPORT = not set_in_my_env("CK_SILENT_MODE")

    import ck_utils
    import converter_annotations
    import converter_results

    ck_utils.prepare_dir(RESULTS_OUT_DIR)
    ck_utils.prepare_dir(ANNOTATIONS_OUT_DIR)

    if METRIC_TYPE != ck_utils.COCO:
        import calc_metrics_coco_tf
        import calc_metrics_kitti
        import calc_metrics_oid
        from object_detection.utils import label_map_util
    else:
        import calc_metrics_coco_pycocotools

    def evaluate(processed_image_ids, categories_list):
        # Convert annotations from the dataset's original format
        # to the format expected by the metrics tool
        if DATASET_TYPE != METRIC_TYPE:
            print('\nConvert annotations from {} to {} ...'.format(
                DATASET_TYPE, METRIC_TYPE))
            annotations = converter_annotations.convert(
                ANNOTATIONS_PATH, ANNOTATIONS_OUT_DIR, DATASET_TYPE,
                METRIC_TYPE)
        else:
            annotations = ANNOTATIONS_PATH

        # Convert detection results from our universal text format
        # to the format expected by the metrics tool
        print('\nConvert results to {} ...'.format(METRIC_TYPE))
        results = converter_results.convert(DETECTIONS_OUT_DIR,
                                            RESULTS_OUT_DIR, DATASET_TYPE,
                                            MODEL_DATASET_TYPE, METRIC_TYPE)

        # Run evaluation tool
        print('\nEvaluate metrics as {} ...'.format(METRIC_TYPE))
        if METRIC_TYPE == ck_utils.COCO:
            mAP, recall, all_metrics = calc_metrics_coco_pycocotools.evaluate(
                processed_image_ids, results, annotations)
        elif METRIC_TYPE == ck_utils.COCO_TF:
            mAP, recall, all_metrics = calc_metrics_coco_tf.evaluate(
                categories_list, results, annotations, FULL_REPORT)
        elif METRIC_TYPE == ck_utils.OID:
            mAP, _, all_metrics = calc_metrics_oid.evaluate(
                results, annotations, LABELMAP_FILE, FULL_REPORT)
            recall = 'N/A'
        else:
            raise ValueError(
                'Metrics type is not supported: {}'.format(METRIC_TYPE))

        OPENME['mAP'] = mAP
        OPENME['recall'] = recall
        OPENME['metrics'] = all_metrics

        return

    OPENME = {}

    with open(PREPROCESSED_FILES, 'r') as f:
        processed_image_filenames = [x.split(';')[0] for x in f.readlines()]

    processed_image_ids = [
        ck_utils.filename_to_id(image_filename, DATASET_TYPE)
        for image_filename in processed_image_filenames
    ]

    if os.path.isfile(TIMER_JSON):
        with open(TIMER_JSON, 'r') as f:
            OPENME = json.load(f)

    # Run evaluation
    ck_utils.print_header('Process results')

    if METRIC_TYPE != ck_utils.COCO:
        category_index = label_map_util.create_category_index_from_labelmap(
            LABELMAP_FILE, use_display_name=True)
        categories_list = category_index.values()
    else:
        categories_list = []

    evaluate(processed_image_ids, categories_list)

    OPENME['frame_predictions'] = (
        converter_results.convert_to_frame_predictions(DETECTIONS_OUT_DIR))
    run_time_state = OPENME.get('run_time_state', {})
    OPENME['execution_time'] = (run_time_state.get('test_time_s', 0) +
                                run_time_state.get('setup_time_s', 0))

    # Store benchmark results
    with open(TIMER_JSON, 'w') as o:
        json.dump(OPENME, o, indent=2, sort_keys=True)

    # Print metrics
    print('\nSummary:')
    print('-------------------------------')
    print('All images loaded in {:.6f}s'.format(
        run_time_state.get('load_images_time_total_s', 0)))
    print('Average image load time: {:.6f}s'.format(
        run_time_state.get('load_images_time_avg_s', 0)))
    print('All images detected in {:.6f}s'.format(
        run_time_state.get('prediction_time_total_s', 0)))
    print('Average detection time: {:.6f}s'.format(
        run_time_state.get('prediction_time_avg_s', 0)))
    print('mAP: {}'.format(OPENME['mAP']))
    print('Recall: {}'.format(OPENME['recall']))
    print('--------------------------------\n')

    return {'return': 0}
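
ck_postprocess() receives a CK-style invocation dictionary. From the my_env()/dep_env() accessors at the top of the function, the expected shape is roughly the following; every path and value below is a placeholder:

i = {
    'env': {
        'CK_TIMER_FILE': 'tmp-ck-timer.json',
        'CK_DETECTIONS_OUT_DIR': 'detections',
        'CK_RESULTS_OUT_DIR': 'results',
        'CK_ANNOTATIONS_OUT_DIR': 'annotations_out',
        'CK_SILENT_MODE': 'no',
    },
    'deps': {
        'weights': {'dict': {'env': {
            'CK_ENV_TENSORFLOW_MODEL_ROOT': '/path/to/model',
            'CK_ENV_TENSORFLOW_MODEL_LABELMAP_FILE': 'label_map.pbtxt',
            'CK_ENV_TENSORFLOW_MODEL_DATASET_TYPE': 'coco',
        }}},
        'dataset': {'dict': {'env': {
            'CK_ENV_DATASET_ANNOTATIONS': '/path/to/annotations',
            'CK_ENV_DATASET_TYPE': 'coco',
            'CK_ENV_DATASET_OBJ_DETECTION_PREPROCESSED_SUBSET_FOF': 'images.txt',
        }}},
        'tool-coco': {'dict': {'env': {'PYTHONPATH': ''}}},
        'lib-python-matplotlib': {'dict': {'env': {'PYTHONPATH': ''}}},
    },
}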