# Example 1
 def predict(img_path, evaluator, cfg, debug=False):
     """Detect objects in a single image and return the foreground hits.

     Args:
         img_path: path to the input image.
         evaluator: object exposing ``process_image(img_path)`` that yields
             regressed ROIs and per-class probabilities.
         cfg: detector configuration; ``cfg["DATA"].CLASSES`` maps label
             indices to class names.
         debug: when True, also write an annotated copy of the image next
             to the input (suffix ``o.jpg``).

     Returns:
         List of dicts with keys ``label``, ``score`` (3-decimal string)
         and ``box`` (list of ints), one per foreground detection.
     """
     regressed_rois, cls_probs = evaluator.process_image(img_path)
     bboxes, labels, scores = od.filter_results(regressed_rois, cls_probs,
                                                cfg)
     if debug:
         # Store the visualization alongside the source image for inspection.
         od.visualize_results(img_path, bboxes, labels, scores, cfg,
                              store_to_path=img_path + "o.jpg")
     # Label 0 is background; keep only foreground detections.
     foreground = np.where(labels > 0)[0]
     return [{
         'label': cfg["DATA"].CLASSES[labels[idx]],
         'score': '%.3f' % (scores[idx]),
         'box': [int(v) for v in bboxes[idx]]
     } for idx in foreground]
def _print_detections(regressed_rois, bboxes, labels, scores, cfg):
    """Print an NMS summary line plus one line per foreground detection.

    Shared by the per-image reporting below (the same code appeared twice
    verbatim in ``evaluate_model``).
    """
    fg_boxes = np.where(labels > 0)
    print("#bboxes: before nms: {}, after nms: {}, foreground: {}".format(
        len(regressed_rois), len(bboxes), len(fg_boxes[0])))
    for i in fg_boxes[0]:
        print("{:<12} (label: {:<2}), score: {:.3f}, box: {}".format(
            cfg["DATA"].CLASSES[labels[i]], labels[i], scores[i],
            [int(v) for v in bboxes[i]]))


def evaluate_model(eval_model, detector_name):
    """Evaluate a trained detector: per-image detections, test-set AP, timing.

    Args:
        eval_model: trained evaluation model accepted by the ``od`` helpers.
        detector_name: name understood by ``get_configuration`` (e.g.
            'FastRCNN' or 'FasterRCNN').

    Side effects: prints detection and AP results, writes visualization
    images, and measures inference time.
    """
    cfg = get_configuration(detector_name)
    cfg['NUM_CHANNELS'] = 3
    print("Map file = ", cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].CLASSES = parse_class_map_file(
        os.path.join("Steer_Bad_Relevant_output", cfg["DATA"].CLASS_MAP_FILE))
    cfg["DATA"].NUM_CLASSES = len(cfg["DATA"].CLASSES)

    # Detect objects in a single image from the project's own test set.
    img_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        r"Steer_Bad_Relevant_output/testImages/Steer_Bad_Front_Zoom (269).jpg")
    regressed_rois, cls_probs = od.evaluate_single_image(
        eval_model, img_path, cfg)
    bboxes, labels, scores = od.filter_results(regressed_rois, cls_probs, cfg)
    _print_detections(regressed_rois, bboxes, labels, scores, cfg)

    od.visualize_results(img_path,
                         bboxes,
                         labels,
                         scores,
                         cfg,
                         store_to_path="Steer_Bad_Relevant_output/output.jpg")
    eval_results = od.evaluate_test_set(eval_model, cfg)

    # Write per-class AP results and the mean AP to output.
    for class_name in eval_results:
        print('AP for {:>15} = {:.4f}'.format(class_name,
                                              eval_results[class_name]))
    print('Mean AP = {:.4f}'.format(np.nanmean(list(eval_results.values()))))

    # NOTE(review): this second image comes from the Grocery sample dataset
    # while the model is configured for the Steer_Bad classes — looks like a
    # leftover from the tutorial this was adapted from; confirm it is wanted.
    img_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        r"../DataSets/Grocery/testImages/WIN_20160803_11_28_42_Pro.jpg")
    regressed_rois, cls_probs = od.evaluate_single_image(
        eval_model, img_path, cfg)
    bboxes, labels, scores = od.filter_results(regressed_rois, cls_probs, cfg)
    _print_detections(regressed_rois, bboxes, labels, scores, cfg)

    # Visualize detections on the image (default output location).
    od.visualize_results(img_path, bboxes, labels, scores, cfg)

    # Measure inference time over repeated runs.
    od.measure_inference_time(eval_model, img_path, cfg, num_repetitions=100)
# Example 4
if __name__ == '__main__':
    # Currently supported detectors: 'FastRCNN', 'FasterRCNN'
    detector_name = get_detector_name(sys.argv)
    cfg = get_configuration(detector_name)

    # Train the detector, then score it on the test set.
    eval_model = od.train_object_detector(cfg)
    eval_results = od.evaluate_test_set(eval_model, cfg)

    # Report per-class AP and the mean AP.
    for class_name in eval_results:
        print('AP for {:>15} = {:.4f}'.format(class_name,
                                              eval_results[class_name]))
    print('Mean AP = {:.4f}'.format(np.nanmean(list(eval_results.values()))))

    # Run the detector on one sample image.
    img_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        r"../DataSets/Grocery/testImages/WIN_20160803_11_28_42_Pro.jpg")
    regressed_rois, cls_probs = od.evaluate_single_image(
        eval_model, img_path, cfg)
    bboxes, labels, scores = od.filter_results(regressed_rois, cls_probs, cfg)

    # Print every foreground detection (label 0 is background).
    fg_boxes = np.where(labels > 0)
    print("#bboxes: before nms: {}, after nms: {}, foreground: {}".format(
        len(regressed_rois), len(bboxes), len(fg_boxes[0])))
    for i in fg_boxes[0]:
        print("{:<12} (label: {:<2}), score: {:.3f}, box: {}".format(
            cfg["DATA"].CLASSES[labels[i]], labels[i], scores[i],
            [int(v) for v in bboxes[i]]))

    # Visualize detections on the image.
    od.visualize_results(img_path, bboxes, labels, scores, cfg)

    # Measure inference time over repeated runs.
    od.measure_inference_time(eval_model, img_path, cfg, num_repetitions=100)