Example #1
0
def find_mapping(detections, objects, object_class):
    """Match detections to ground-truth objects of `object_class` by IoU.

    Returns a list of (detection, object) pairs:
      * (detection, best_object) for every detection of `object_class`,
        where best_object is the highest-IoU ground truth above
        IOU_THRESHOLD, or None when nothing matches (false positive);
      * (None, object) for every ground truth of `object_class` that no
        detection matched (false negative).

    NOTE(review): matching is greedy and non-exclusive — two detections can
    map to the same ground-truth object; confirm this is intended before
    relying on the FP/FN counts.
    """
    detection_object_mapping = []
    for detection in detections:
        # Kitti: only detections of the requested class are considered
        # (this also skips the DontCare class).
        if du.find_class_detection(detection) != object_class:
            continue

        boxA = du.find_box_detection(detection)
        max_iou = 0.0
        best_object = None
        for obj in objects:
            if du.find_class_gt(obj) != object_class:
                continue
            boxB = du.find_box_gt(obj)
            iou = compute_IoU(boxA, boxB)
            # TODO evaluate if ok set iou threshold
            if iou > max_iou and iou > IOU_THRESHOLD:
                max_iou = iou
                best_object = obj

        detection_object_mapping.append((detection, best_object))

    # Hoisted out of the loop: the annotations matched above (was an O(n*m)
    # rescan of the whole mapping per ground-truth object).
    matched_annotations = [ann for _, ann in detection_object_mapping]
    for obj in objects:
        # Unmatched ground truths of the target class become (None, obj);
        # other classes (e.g. Kitti DontCare) are skipped.
        if du.find_class_gt(obj) == object_class and obj not in matched_annotations:
            detection_object_mapping.append((None, obj))

    return detection_object_mapping
Example #2
0
def add_annotation_frequencies(frequencies, annotations):
    """Accumulate class-label counts from `annotations` into `frequencies`.

    frequencies -- dict mapping class label -> count, updated in place.
    annotations -- iterable of ground-truth annotations readable by du.
    """
    for annotation in annotations:
        label = du.find_class_gt(annotation)
        # dict.get replaces the `if label in frequencies.keys()` double lookup.
        frequencies[label] = frequencies.get(label, 0) + 1
Example #3
0
def add_bbox_areas(bbox_areas, annotations):
    """Accumulate bounding-box areas per class label into `bbox_areas`.

    bbox_areas  -- dict mapping class label -> list of areas, updated in place.
    annotations -- iterable of ground-truth annotations readable by du.
    """
    for annotation in annotations:
        label = du.find_class_gt(annotation)
        area = du.box_area(du.find_box_gt(annotation))
        # setdefault replaces the `if label in bbox_areas.keys()` double lookup.
        bbox_areas.setdefault(label, []).append(area)
Example #4
0
def draw_gt(img, annotation):
    """Draw one ground-truth annotation on `img` in blue (BGR): a filled
    label banner above the box, the class name, and the bounding box."""
    blue = (255, 0, 0)
    label = du.find_class_gt(annotation)
    box = du.find_box_gt(annotation)
    x0, y0 = int(box[0]), int(box[1])

    # Filled banner above the box, width scaled roughly to the label length.
    banner_tl = (x0, int(box[1] - 30))
    banner_br = (int(box[0] + len(label) * 10), y0)
    cv2.rectangle(img,
                  banner_tl,
                  banner_br,
                  blue,
                  thickness=-1,
                  lineType=cv2.LINE_8)

    # Class name in black on top of the banner.
    cv2.putText(img, label, (x0, int(box[1] - 10)), font, 0.5,
                (0, 0, 0), 1, cv2.LINE_AA)

    # The bounding box itself.
    cv2.rectangle(img, (x0, y0), (int(box[2]), int(box[3])), blue,
                  thickness=2, lineType=cv2.LINE_8)
Example #5
0
def compute_frame_statistics(mapping):
    """Tally detection outcomes from a (detection, annotation) mapping,
    as produced by find_mapping.

    Returns a (TP, FP, FN) tuple of counts.
    """
    true_pos = 0
    false_pos = 0
    false_neg = 0
    for detection, annotation in mapping:
        if detection is None:
            # Ground truth with no matching detection -> false negative.
            # (A (None, None) pair contributes nothing.)
            if annotation is not None:
                false_neg += 1
            continue
        if annotation is None:
            # Detection with no matching ground truth -> false positive.
            false_pos += 1
            continue
        # Both present: true positive only when the classes agree (they
        # always do here, since the mapping is built for a single class).
        if du.find_class_detection(detection) == du.find_class_gt(annotation):
            true_pos += 1
        else:
            false_neg += 1

    return true_pos, false_pos, false_neg
Example #6
0
def evaluate_performances(detector, test_images_path, gt_path, object_class):
    """Evaluate `detector` on every .png in `test_images_path` against the
    .xml ground truth of the same basename in `gt_path`, for `object_class`.

    Draws ground truths and detections on each frame for visual inspection,
    then returns (precision, recall, f1_score, mean_eval_time). Any metric
    whose denominator is zero is np.nan.
    """
    images = [
        f for f in listdir(test_images_path)
        if isfile(join(test_images_path, f))
    ]
    images = [f for f in images if ".png" in f]
    images = [f for f in images if '._' not in f]  # Avoid mac issues

    final_TP = 0
    final_FP = 0
    final_FN = 0
    cumulative_eval_time = 0.0
    count = 0

    for filename in images:
        filenameXml = filename.replace('.png', '.xml')

        img = cv2.imread(str(test_images_path) + str(filename))

        im = nparray_to_image(img)
        t0 = time.time()
        r = detector.detect_im(im)
        # BUG FIX: a stray trailing comma here used to make eval_time a
        # 1-tuple, silently compensated for with eval_time[0] below.
        eval_time = time.time() - t0
        detections = detector.convert_format(r)

        root = etree.parse(gt_path + filenameXml)
        annotations = root.findall("object")

        TP, FP, FN = evaluate_frame_performance(detections, annotations,
                                                object_class)
        final_TP += TP
        final_FP += FP
        final_FN += FN
        cumulative_eval_time += eval_time

        # TODO complete splitting performances

        # ============ PRINT =============

        for annotation in annotations:
            aclass = du.find_class_gt(annotation)
            if aclass != 'DontCare':
                dr.draw_gt(img, annotation)

        for detection in detections:
            dclass = du.find_class_detection(detection)
            if dclass != 'DontCare':
                dr.draw_detection(img, detection)

        # TODO remove
        mapping = find_mapping(detections, annotations, object_class)
        print(mapping)
        for det, gt in mapping:
            if det is not None:
                pt1 = (int(det['topleft']['x']), int(det['topleft']['y']))
                pt2 = (int(det['bottomright']['x']),
                       int(det['bottomright']['y']))
                cv2.rectangle(img,
                              pt1,
                              pt2, (0, 0, 255),
                              thickness=1,
                              lineType=cv2.LINE_8)

            if gt is not None:
                box = du.find_box_gt(gt)
                pt1 = (int(box[0]), int(box[1]))
                pt2 = (int(box[2]), int(box[3]))
                cv2.rectangle(img,
                              pt1,
                              pt2, (0, 13, 55),
                              thickness=1,
                              lineType=cv2.LINE_8)

        cv2.imshow("Frame", img)
        cv2.waitKey(1)

        count += 1
        print("%s %s" % (count, filename))

    # =============== FINAL EVALUATION ================

    precision = (float(final_TP) / float(final_TP + final_FP)
                 if (final_TP + final_FP) != 0 else np.nan)
    recall = (float(final_TP) / float(final_TP + final_FN)
              if (final_TP + final_FN) != 0 else np.nan)
    # np.isnan replaces the fragile `x is np.nan` identity checks.
    if np.isnan(precision) or np.isnan(recall) or (precision + recall) == 0.0:
        f1_score = np.nan
    else:
        f1_score = 2 * (precision * recall) / (precision + recall)
    # Guard against an empty image folder (previously a ZeroDivisionError).
    mean_eval_time = cumulative_eval_time / len(images) if images else np.nan
    print(precision)
    print(recall)
    print(f1_score)
    print(mean_eval_time)

    return precision, recall, f1_score, mean_eval_time