def convert_to_frame_predictions(detections_dir):
    result = {}
    detection_files = helper.get_files(detections_dir)

    for file_name in detection_files:
        read_file = os.path.join(detections_dir, file_name)

        with open(read_file, 'r') as rf:
            file_info = {}
            # The first line of each detection file holds the image size: "<width> <height>"
            width, height = [int(i) for i in rf.readline().split()]
            file_info["image_height"] = height
            file_info["image_width"] = width
            file_info["detections"] = []

            for line in rf:
                det = helper.Detection(line)
                detection = {}
                detection["bbox"] = [det.x1, det.y1, det.x2, det.y2]
                detection["prob"] = det.score
                detection["class"] = str(det.class_id) + " " + det.class_name
                file_info["detections"].append(detection)

            result[file_name] = file_info

    return result
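
# A hedged usage sketch for convert_to_frame_predictions(). From the code above,
# each detection file starts with a "<width> <height>" line and every following
# line is one detection parsed by helper.Detection(); the exact per-line column
# layout is defined by that helper and is not shown here. The directory path
# below is hypothetical.
def print_frame_prediction_summary(detections_dir='detections/'):
    frame_predictions = convert_to_frame_predictions(detections_dir)
    for image_name, info in frame_predictions.items():
        print('{}: {}x{}, {} detections'.format(
            image_name, info['image_width'], info['image_height'],
            len(info['detections'])))
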
def evaluate(res_dir, annotations, label_map_path, full_report):
    '''
    Calculate OID metrics via the evaluator class included in the TF models repository:
    https://github.com/tensorflow/models/tree/master/research/object_detection/metrics

    Reads pre-computed object detections and groundtruth.

    Args:
      res_dir: directory with pre-computed object detections
      annotations: groundtruth (file with annotations)
      label_map_path: labelmap file
      full_report: if True, report progress for every processed file

    Returns:
      Tuple of (mAP, recall placeholder 0, all evaluated metrics).
    '''
    class EvaluatorConfig:
        metrics_set = ['open_images_V2_detection_metrics']

    eval_config = EvaluatorConfig()

    categories = label_map_util.create_categories_from_labelmap(label_map_path)
    class_map = label_map_util.get_label_map_dict(label_map_path, False, False)

    object_detection_evaluators = evaluator.get_evaluators(
        eval_config, categories)
    # Support a single evaluator
    object_detection_evaluator = object_detection_evaluators[0]

    print('Loading annotations...')
    ann = get_annotations(annotations, class_map)

    files = ck_utils.get_files(res_dir)
    for file_index, file_name in enumerate(files):
        if full_report:
            print('Loading detections and annotations for {} ({} of {}) ...'.
                  format(file_name, file_index + 1, len(files)))
        elif (file_index + 1) % 100 == 0:
            print('Loading detections and annotations: {} of {} ...'.format(
                file_index + 1, len(files)))
        det_file = os.path.join(res_dir, file_name)
        key = os.path.splitext(file_name)[0]
        detection = new_detection(key)
        fill_annotations(detection, ann[key])
        fill_detection(detection, det_file)

        object_detection_evaluator.add_single_ground_truth_image_info(
            detection[standard_fields.DetectionResultFields.key], detection)
        object_detection_evaluator.add_single_detected_image_info(
            detection[standard_fields.DetectionResultFields.key], detection)

    all_metrics = object_detection_evaluator.evaluate()
    mAP = all_metrics['OpenImagesV2_Precision/mAP@0.5IOU']

    return mAP, 0, all_metrics
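
# A hedged usage sketch for evaluate(). All paths below are hypothetical and the
# groundtruth file must be in whatever format get_annotations() expects; only the
# call signature and the returned (mAP, recall placeholder, metrics) tuple come
# from the code above.
def run_oid_evaluation():
    mAP, _, metrics = evaluate(res_dir='results/oid',
                               annotations='annotations/oid_bbox_groundtruth.csv',
                               label_map_path='oid_v2_label_map.pbtxt',
                               full_report=False)
    print('OpenImagesV2 mAP@0.5IOU = {:.4f}'.format(mAP))
    return metrics
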
def convert(detections_dir, target_dir, dataset_type, model_dataset_type,
            metric_type):
    '''
    Convert detection results from our universal text format
    to the format expected by the tool that will calculate metrics.

    Returns either the results directory or the path to the new results file,
    depending on the target results format.
    '''

    if metric_type == helper.COCO_TF or metric_type == helper.OID:
        return detections_dir

    detection_files = helper.get_files(detections_dir)

    if metric_type == helper.COCO:
        return convert_to_coco(detection_files, detections_dir, target_dir,
                               dataset_type, model_dataset_type)

    if metric_type == helper.KITTI:
        return convert_to_kitti(detection_files, detections_dir, target_dir,
                                model_dataset_type)

    raise ValueError('Unknown target results format: {}'.format(metric_type))
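
# A hedged usage sketch for convert(). The helper.* metric-type constants come
# from the code above; the directories and the dataset-type arguments are
# hypothetical (the exact dataset-type values accepted by convert_to_coco() are
# not shown here).
def prepare_results_for_metric(metric_type,
                               detections_dir='detections/',
                               target_dir='converted/'):
    results_path = convert(detections_dir, target_dir,
                           dataset_type='coco',
                           model_dataset_type='coco',
                           metric_type=metric_type)
    print('Results for {} prepared at: {}'.format(metric_type, results_path))
    return results_path
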
def convert_kitti_to_coco(source_dir, target_dir):
    files = helper.get_files(source_dir)
    write_file = os.path.join(target_dir, 'kitti_to_coco_annotations.json')
    ann_counter = 0  # annotations counter
    now = datetime.datetime.now()
    body = {
        "info": {
            "year": now.year,
            "version": "0.0",
            "description": "Annotations converted from kitti- to coco-dataset",
            "contributor": "Unknown",
            "url": "",
            "date_created": str(now)
        },
        "images": [],
        "annotations": [],
        "categories": [],
        "licenses": [{
            "id": 0,
            "name": "None",
            "url": ""
        }],
    }
    images_array = []
    annotations_array = []
    for file_counter, file_name in enumerate(files):
        print('{}: {} of {}'.format(file_name, file_counter + 1, len(files)))

        img_file_name = os.path.splitext(file_name)[0] + ".jpg"
        read_file = os.path.join(source_dir, file_name)

        with open(read_file, 'r') as rf:
            file_id = filename_to_id(file_name, 'kitti')
            width = 0
            height = 0
            file_item = {
                "id": file_id,
                "width": width,
                "height": height,
                "file_name": img_file_name,
                "license": 0,
                "flickr_url": "",
                "coco_url": "",
                "date_captured": str(now),
            }

            images_array.append(file_item)

            for line in rf:
                str_array = line.split()
                class_name = str_array[0].lower()
                if class_name not in helper.KITTI_CLASSES:
                    continue
                category_id = helper.KITTI_CLASSES[class_name]
                ann_counter += 1
                x1 = str_array[4]
                y1 = str_array[5]
                x2 = str_array[6]
                y2 = str_array[7]
                x = float(x1)
                y = float(y1)
                width = round(float(x2) - x, 2)
                height = round(float(y2) - y, 2)
                area = round(width * height, 2)
                annotation = {
                    "id": ann_counter,
                    "image_id": file_id,
                    "category_id": category_id,
                    "segmentation": [],
                    "area": area,
                    "bbox": [x, y, width, height],
                    "iscrowd": 0,
                }
                annotations_array.append(annotation)

    categories = []
    for idx in helper.KITTI2COCO:
        categories.append({
            "id": helper.KITTI2COCO[idx][0],
            "name": helper.KITTI2COCO[idx][1],
            "supercategory": helper.KITTI2COCO[idx][2],
        })

    body["images"] = images_array
    body["annotations"] = annotations_array
    body["categories"] = categories

    with open(write_file, 'w') as wf:
        wf.write(json.dumps(body))
    return write_file
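
# For reference, a KITTI label line looks like (values illustrative)
#   Car 0.00 0 1.55 614.24 181.78 727.31 284.77 1.57 1.73 4.15 1.00 1.75 13.22 1.62
# and convert_kitti_to_coco() only uses column 0 (the class name) and columns 4-7
# (left, top, right, bottom of the 2D box). A hedged usage sketch, with
# hypothetical directories:
def build_coco_annotations_from_kitti(kitti_labels_dir='kitti/training/label_2',
                                      output_dir='annotations/'):
    annotations_file = convert_kitti_to_coco(kitti_labels_dir, output_dir)
    print('COCO-style annotations written to {}'.format(annotations_file))
    return annotations_file
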
def evaluate_via_tf(categories_list, results_dir, txt_annotations_dir,
                    full_report):
    '''
    Calculate COCO metrics via the evaluator class included in the TF models repository:
    https://github.com/tensorflow/models/tree/master/research/object_detection/metrics

    This method uses annotations converted to txt files.
    The conversion is done when the dataset-coco-2014 package is installed.
    '''
    class_name_to_id_map = {}
    for category in categories_list:
        # The converted txt annotations contain class names without spaces,
        # so spaces are removed from the labelmap's class names as well
        # to allow looking up a class id by the class name from an annotation
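        # (e.g. the COCO class "traffic light" is looked up as "trafficlight")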
        class_name = category['name'].split()
        class_name_no_spaces = ''.join(class_name)
        class_name_to_id_map[class_name_no_spaces] = category['id']

    evaluator = coco_evaluation.CocoDetectionEvaluator(categories_list)

    total_dets_count = 0
    total_gts_count = 0
    not_found_gts = []

    files = ck_utils.get_files(results_dir)
    for file_index, file_name in enumerate(files):
        if full_report:
            print('Loading detections and annotations for {} ({} of {}) ...'.
                  format(file_name, file_index + 1, len(files)))
        elif (file_index + 1) % 100 == 0:
            print('Loading detections and annotations: {} of {} ...'.format(
                file_index + 1, len(files)))

        gt_file = os.path.join(txt_annotations_dir, file_name)
        det_file = os.path.join(results_dir, file_name)

        # Skip files for which there is no groundtruth
        # e.g. COCO_val2014_000000013466.jpg
        gts = load_groundtruth(gt_file, class_name_to_id_map)
        if not gts:
            not_found_gts.append(file_name)
            continue

        dets = load_detections(det_file)

        gts_count = gts[gt_field.groundtruth_boxes].shape[0]
        dets_count = dets[det_field.detection_boxes].shape[0]
        total_gts_count += gts_count
        total_dets_count += dets_count

        if full_report:
            print('  Detections: {}'.format(dets_count))
            print('  Groundtruth: {}'.format(gts_count))

        # Groundtruth must be added first, as adding a detected image checks
        # that there is groundtruth for it
        evaluator.add_single_ground_truth_image_info(image_id=file_name,
                                                     groundtruth_dict=gts)
        evaluator.add_single_detected_image_info(image_id=file_name,
                                                 detections_dict=dets)

    all_metrics = evaluator.evaluate()

    if not_found_gts:
        print('Groundtruth not found for {} results:'.format(
            len(not_found_gts)))
        for file_name in not_found_gts:
            print('    {}'.format(file_name))

    print('Total detections: {}'.format(total_dets_count))
    print('Total groundtruths: {}'.format(total_gts_count))
    print('Detection rate: {}'.format(
        float(total_dets_count) / float(total_gts_count)))

    mAP = all_metrics['DetectionBoxes_Precision/mAP']
    recall = all_metrics['DetectionBoxes_Recall/AR@100']
    return mAP, recall, all_metrics
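
# A hedged usage sketch for evaluate_via_tf(). It assumes categories_list is
# produced with the same label_map_util helper used in the OID example above;
# the labelmap file name and directories are hypothetical.
def run_coco_tf_evaluation(label_map_path='mscoco_label_map.pbtxt',
                           results_dir='results/coco',
                           txt_annotations_dir='annotations_txt/'):
    categories_list = label_map_util.create_categories_from_labelmap(label_map_path)
    mAP, recall, metrics = evaluate_via_tf(categories_list, results_dir,
                                           txt_annotations_dir,
                                           full_report=False)
    print('mAP = {:.4f}, AR@100 = {:.4f}'.format(mAP, recall))
    return metrics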