Ejemplo n.º 1
0
 def set_car_detections(self, detections):
     """Store the latest car detections and, when any detection belongs to
     the collision-avoidance class set, (re)arm the avoidance action timer.
     """
     self.last_car_detections = detections
     for detection in detections:
         label = du.find_class_detection(detection)
         # TODO test better distance from sign
         if label in COLLISION_AVOIDANCE_ACTION_SET:
             self.actions[ACTION_COLLISION_AVOIDANCE] = COLLISION_AVOIDANCE_DURATION_FRAMES
Ejemplo n.º 2
0
def find_mapping(detections, objects, object_class):
    """Greedily pair each detection of *object_class* with the ground-truth
    object of the same class that has the highest IoU above IOU_THRESHOLD.

    Returns a list of (detection, object) tuples; a detection with no match
    is paired with None, and every unmatched ground-truth object is appended
    as (None, object) so misses are visible to the caller.
    """
    mapping = []
    for detection in detections:
        # Kitty skip dontcare class
        if du.find_class_detection(detection) != object_class:
            continue
        det_box = du.find_box_detection(detection)

        best_iou = 0.0
        best_obj = None
        for candidate in objects:
            if du.find_class_gt(candidate) != object_class:
                continue
            iou = compute_IoU(det_box, du.find_box_gt(candidate))
            # TODO evaluate if ok set iou threshold
            if iou > best_iou and iou > IOU_THRESHOLD:
                best_iou = iou
                best_obj = candidate

        mapping.append((detection, best_obj))

    # Ground-truth objects that were never chosen above become (None, obj)
    # entries, i.e. missed detections.
    for candidate in objects:
        # Kitty skip dontcare class
        if du.find_class_gt(candidate) != object_class:
            continue
        if not any(candidate == matched for _, matched in mapping):
            mapping.append((None, candidate))

    return mapping
Ejemplo n.º 3
0
 def set_sign_detections(self, detections):
     """Record the latest sign detections (kept only for rendering) and
     schedule the action that corresponds to each detected sign class.
     """
     with self.lock:
         self.last_sign_detections = detections  # only for rendering
         for detection in detections:
             label = du.find_class_detection(detection)
             # TODO test better distance from sign
             if (label == ACTION_STOP
                     and ACTION_STOP not in self.actions
                     and ACTION_AVOID_STOP not in self.actions):
                 self.actions[ACTION_STOP] = STOP_DURATION_FRAMES
             elif label == ACTION_PEDESTRIAN_CROSSING:
                 # While the sign is visible (and for a period after) slow down.
                 self.actions[ACTION_PEDESTRIAN_CROSSING] = PEDESTRIAN_CROSSING_DURATION_FRAMES
Ejemplo n.º 4
0
def compute_frame_statistics(mapping):
    """Tally detection outcomes for one frame from a (detection, annotation)
    mapping and return the (TP, FP, FN) counts as a tuple.

    A (None, annotation) pair is a miss (FN), a (detection, None) pair is a
    spurious detection (FP), and a fully matched pair is a TP only when the
    predicted class agrees with the ground truth — which it always does here,
    since the mapping is built per class.
    """
    tp = fp = fn = 0
    for detection, annotation in mapping:
        if detection is None and annotation is not None:
            fn += 1
        elif detection is not None and annotation is not None:
            if du.find_class_detection(detection) == du.find_class_gt(annotation):
                tp += 1
            else:
                fn += 1
        elif detection is not None and annotation is None:
            fp += 1

    return tp, fp, fn
Ejemplo n.º 5
0
def evaluate_performances(detector, test_images_path, gt_path, object_class):
    """Evaluate *detector* on every .png under *test_images_path* against the
    XML ground truth in *gt_path* for a single *object_class*.

    For each frame the detector is timed and run, per-frame TP/FP/FN counts
    are accumulated, and a debug view with ground truth, detections and the
    detection↔GT mapping is shown with OpenCV.

    Returns:
        (precision, recall, f1_score, mean_eval_time); statistics that cannot
        be computed (zero denominators, no images) are reported as np.nan.
    """
    images = [
        f for f in listdir(test_images_path)
        if isfile(join(test_images_path, f))
    ]
    images = [f for f in images if ".png" in f]
    images = [f for f in images if '._' not in f]  # Avoid mac issues

    final_TP = 0
    final_FP = 0
    final_FN = 0
    cumulative_eval_time = 0.0
    count = 0

    for filename in images:
        # Ground-truth annotation file shares the image's basename.
        filenameXml = filename.replace('.png', '.xml')

        img = cv2.imread(str(test_images_path) + str(filename))

        im = nparray_to_image(img)
        t0 = time.time()
        r = detector.detect_im(im)
        # BUG FIX: a stray trailing comma previously turned eval_time into a
        # 1-tuple, silently worked around later with eval_time[0].
        eval_time = time.time() - t0
        detections = detector.convert_format(r)

        root = etree.parse(gt_path + filenameXml)
        annotations = root.findall("object")

        TP, FP, FN = evaluate_frame_performance(detections, annotations,
                                                object_class)
        final_TP = final_TP + TP
        final_FP = final_FP + FP
        final_FN = final_FN + FN
        cumulative_eval_time = cumulative_eval_time + eval_time

        # TODO complete splitting performances

        # ============ PRINT =============

        for annotation in annotations:
            aclass = du.find_class_gt(annotation)
            if aclass != 'DontCare':
                dr.draw_gt(img, annotation)

        for detection in detections:
            dclass = du.find_class_detection(detection)
            if dclass != 'DontCare':
                dr.draw_detection(img, detection)

        # TODO remove — debug overlay of the detection↔GT mapping
        mapping = find_mapping(detections, annotations, object_class)
        print(mapping)
        for det, gt in mapping:
            if det is not None:
                pt1 = (int(det['topleft']['x']), int(det['topleft']['y']))
                pt2 = (int(det['bottomright']['x']),
                       int(det['bottomright']['y']))
                cv2.rectangle(img,
                              pt1,
                              pt2, (0, 0, 255),
                              thickness=1,
                              lineType=cv2.LINE_8)

            if gt is not None:
                box = du.find_box_gt(gt)
                pt1 = (int(box[0]), int(box[1]))
                pt2 = (int(box[2]), int(box[3]))
                cv2.rectangle(img,
                              pt1,
                              pt2, (0, 13, 55),
                              thickness=1,
                              lineType=cv2.LINE_8)

        cv2.imshow("Frame", img)
        cv2.waitKey(1)

        count = count + 1
        print("%s %s" % (count, filename))

    # =============== FINAL EVALUATION ================

    predicted = final_TP + final_FP
    actual = final_TP + final_FN
    precision = float(final_TP) / predicted if predicted != 0 else np.nan
    recall = float(final_TP) / actual if actual != 0 else np.nan
    # Guard against NaN explicitly (identity checks against np.nan are fragile)
    # and against a zero denominator.
    if np.isnan(precision) or np.isnan(recall) or (precision + recall) == 0.0:
        f1_score = np.nan
    else:
        f1_score = 2 * (precision * recall) / (precision + recall)
    # Avoid ZeroDivisionError when no images were found.
    mean_eval_time = cumulative_eval_time / len(images) if images else np.nan
    print(precision)
    print(recall)
    print(f1_score)
    print(mean_eval_time)

    return precision, recall, f1_score, mean_eval_time