Example 1
import os
import xml.etree.ElementTree as ET
from glob import glob

import cv2
from alfred.utils.log import logger as logging

# NOTE: get() and get_and_check() are XML helpers defined elsewhere in this
# module (presumably in the usual voc2coco style: get() wraps findall(),
# while get_and_check() also validates the match count and returns the
# single element when the expected count is 1).


def vis_voc(img_root, label_root):
    logging.info('img root: {}, label root: {}'.format(img_root, label_root))
    # auto-detect .jpg or .png images
    img_files = glob(os.path.join(img_root, '*.[jp][pn]g'))
    for img_f in img_files:
        if os.path.exists(img_f):
            img = cv2.imread(img_f)
            label_path = os.path.join(
                label_root,
                os.path.basename(img_f).split('.')[0] + '.xml')
            if os.path.exists(label_path):
                # parse the VOC XML annotation and draw each object
                tree = ET.parse(label_path)
                root = tree.getroot()
                for obj in get(root, 'object'):
                    category = get_and_check(obj, 'name', 1).text
                    bndbox = get_and_check(obj, 'bndbox', 1)
                    xmin = int(float(get_and_check(bndbox, 'xmin', 1).text))
                    ymin = int(float(get_and_check(bndbox, 'ymin', 1).text))
                    xmax = int(float(get_and_check(bndbox, 'xmax', 1).text))
                    ymax = int(float(get_and_check(bndbox, 'ymax', 1).text))

                    cv2.putText(img, category, (xmin, ymin),
                                cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 255, 255))
                    cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0),
                                  2, 1)
                cv2.imshow('voc check', img)
                cv2.waitKey(0)
            else:
                logging.warning(
                    'image: {} has no corresponding label: {}'.format(
                        img_f, label_path))
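For reference, here is a minimal sketch of the PASCAL VOC XML layout this parser walks; the tag names follow the VOC convention, and the class and coordinates are made up:

import xml.etree.ElementTree as ET

sample = '''<annotation>
  <object>
    <name>dog</name>
    <bndbox>
      <xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax>
    </bndbox>
  </object>
</annotation>'''

root = ET.fromstring(sample)
for obj in root.findall('object'):
    name = obj.find('name').text
    box = [int(obj.find('bndbox').find(t).text)
           for t in ('xmin', 'ymin', 'xmax', 'ymax')]
    print(name, box)  # -> dog [48, 240, 195, 371]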
Example 2
def vis_det_yolo(img_root, label_root):
    logging.info('img root: {}, label root: {}'.format(img_root, label_root))
    # iterate over YOLO .txt label files; images are assumed to be .jpg
    txt_files = glob(os.path.join(label_root, '*.txt'))
    for txt_f in txt_files:
        img_f = os.path.join(img_root,
                             os.path.basename(txt_f).split('.')[0] + '.jpg')
        if os.path.exists(img_f):
            img = cv2.imread(img_f)
            h, w, _ = img.shape
            if os.path.exists(txt_f):
                with open(txt_f) as f:
                    annos = f.readlines()
                    for ann in annos:
                        ann = ann.strip().split(' ')
                        category = ann[0]
                        x = float(ann[1]) * w
                        y = float(ann[2]) * h
                        bw = float(ann[3]) * w
                        bh = float(ann[4]) * h
                        xmin = int(x - bw / 2)
                        ymin = int(y - bh / 2)
                        xmax = int(x + bw / 2)
                        ymax = int(y + bh / 2)
                        print(xmin, ymin, xmax, ymax, category)
                        cv2.putText(img, category, (xmin, ymin),
                                    cv2.FONT_HERSHEY_COMPLEX, 0.7,
                                    (255, 255, 255))
                        cv2.rectangle(img, (xmin, ymin), (xmax, ymax),
                                      (0, 255, 0), 2, 1)
                cv2.imshow('yolo check', img)
                cv2.waitKey(0)
        else:
            logging.warning('image: {} not found.'.format(img_f))
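The loop above decodes YOLO's normalized center-size format into pixel corners. A minimal, self-contained sketch of that conversion (yolo_to_xyxy is a hypothetical helper name; the numbers are illustrative):

def yolo_to_xyxy(line, img_w, img_h):
    # a label line is 'class cx cy w h', all coordinates normalized to [0, 1]
    cls, cx, cy, bw, bh = line.split()
    cx, cy = float(cx) * img_w, float(cy) * img_h
    bw, bh = float(bw) * img_w, float(bh) * img_h
    return cls, (int(cx - bw / 2), int(cy - bh / 2),
                 int(cx + bw / 2), int(cy + bh / 2))

print(yolo_to_xyxy('0 0.5 0.5 0.25 0.5', 640, 480))
# -> ('0', (240, 120, 400, 360))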
Example 3
def vis_det_txt(img_root, label_root):
    logging.info('img root: {}, label root: {}'.format(img_root, label_root))
    # iterate over .txt label files; images are assumed to be .jpg
    txt_files = glob(os.path.join(label_root, '*.txt'))
    for txt_f in txt_files:
        img_f = os.path.join(img_root,
                             os.path.basename(txt_f).split('.')[0] + '.jpg')
        if os.path.exists(img_f):
            img = cv2.imread(img_f)
            if os.path.exists(txt_f):
                with open(txt_f) as f:
                    annos = f.readlines()
                    for ann in annos:
                        ann = ann.strip().split(' ')
                        if len(ann) == 5:
                            # without prob: class xmin ymin xmax ymax
                            category = ann[0]
                            xmin = int(float(ann[1]))
                            ymin = int(float(ann[2]))
                            xmax = int(float(ann[3]))
                            ymax = int(float(ann[4]))
                            cv2.putText(img, category, (xmin, ymin),
                                        cv2.FONT_HERSHEY_COMPLEX, 0.7,
                                        (255, 255, 255))
                            cv2.rectangle(img, (xmin, ymin), (xmax, ymax),
                                          (0, 255, 0), 2, 1)
                        elif len(ann) == 6:
                            # with prob: class prob xmin ymin xmax ymax
                            category = ann[0]
                            prob = float(ann[1])
                            xmin = int(float(ann[2]))
                            ymin = int(float(ann[3]))
                            xmax = int(float(ann[4]))
                            ymax = int(float(ann[5]))
                            cv2.putText(img, '{} {}'.format(category, prob),
                                        (xmin, ymin), cv2.FONT_HERSHEY_COMPLEX,
                                        0.7, (255, 255, 255))
                            cv2.rectangle(img, (xmin, ymin), (xmax, ymax),
                                          (0, 255, 0), 2, 1)
                cv2.imshow('txt check', img)
                cv2.waitKey(0)
        else:
            logging.warning('image: {} not found.'.format(img_f))
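For clarity, the two plain-text line layouts the branches above accept, with made-up values:

for line in ['car 50 60 200 180',        # class xmin ymin xmax ymax
             'car 0.93 50 60 200 180']:  # class prob xmin ymin xmax ymax
    parts = line.strip().split(' ')
    has_prob = len(parts) == 6
    print(parts[0], float(parts[1]) if has_prob else None, parts[-4:])
# -> car None ['50', '60', '200', '180']
# -> car 0.93 ['50', '60', '200', '180']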
Example 4
import os
import sys
import glob
import xml.etree.ElementTree as ET

import numpy as np
import cv2
from alfred.utils.log import logger as logging
from tqdm import tqdm


logging.warning('this script only supports VOC-format datasets for now.')


def iou(box, clusters):
    """
    Calculates the Intersection over Union (IoU) between a box and k clusters.
    :param box: tuple or array, shifted to the origin (i.e., just width and height)
    :param clusters: numpy array of shape (k, 2) where k is the number of clusters
    :return: numpy array of shape (k,) where k is the number of clusters
    """
    x = np.minimum(clusters[:, 0], box[0])
    y = np.minimum(clusters[:, 1], box[1])
    if np.count_nonzero(x == 0) > 0 or np.count_nonzero(y == 0) > 0:
        raise ValueError("Box has no area")

    intersection = x * y
    box_area = box[0] * box[1]
    cluster_area = clusters[:, 0] * clusters[:, 1]

    iou_ = intersection / (box_area + cluster_area - intersection)
    return iou_
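A quick sanity check of iou() with made-up numbers: a 10x10 box overlaps a 10x10 cluster completely (IoU 1.0), and a 20x20 cluster with intersection 100 over union 400 (IoU 0.25):

box = np.array([10, 10])
clusters = np.array([[10, 10], [20, 20]])
print(iou(box, clusters))  # -> [1.   0.25]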
Example 5
def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
    """
    Load a json file with COCO's instances annotation format.
    Currently supports instance detection, instance segmentation,
    and person keypoints annotations.

    Args:
        json_file (str): full path to the json file in COCO instances annotation format.
        image_root (str): the directory where the images in this json file exists.
        dataset_name (str): the name of the dataset (e.g., coco_2017_train).
            If provided, this function will also put "thing_classes" into
            the metadata associated with this dataset.
        extra_annotation_keys (list[str]): list of per-annotation keys that should also be
            loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
            "category_id", "segmentation"). The values for these keys will be returned as-is.
            For example, the densepose annotations are loaded in this way.

    Returns:
        list[dict]: a list of dicts in dl_lib standard format. (See
        `Using Custom Datasets </tutorials/datasets.html>`_ )

    Notes:
        1. This function does not read the image files.
           The results do not have the "image" field.
    """
    from pycocotools.coco import COCO

    timer = Timer()
    json_file = PathManager.get_local_path(json_file)
    with contextlib.redirect_stdout(io.StringIO()):
        coco_api = COCO(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))

    id_map = None
    if dataset_name is not None:
        meta = MetadataCatalog.get(dataset_name)
        cat_ids = sorted(coco_api.getCatIds())
        cats = coco_api.loadCats(cat_ids)
        # The categories in a custom json file may not be sorted.
        thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
        meta.thing_classes = thing_classes

        # In COCO, certain category ids are artificially removed,
        # and by convention they are always ignored.
        # We deal with COCO's id issue and translate
        # the category ids to contiguous ids in [0, 80).

        # It works by looking at the "categories" field in the json, so
        # if a user's own json also has non-contiguous ids, we'll
        # apply this mapping as well, but print a warning.
        if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
            if "coco" not in dataset_name:
                logger.warning(
                    """
Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
"""
                )
        id_map = {v: i for i, v in enumerate(cat_ids)}
        meta.thing_dataset_id_to_contiguous_id = id_map
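        # e.g. COCO's 80 thing classes use ids 1..90 with gaps, so here
        # id_map becomes {1: 0, 2: 1, 3: 2, ..., 90: 79}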

    # sort indices for reproducible results
    img_ids = sorted(list(coco_api.imgs.keys()))
    # imgs is a list of dicts, each looks something like:
    # {'license': 4,
    #  'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
    #  'file_name': 'COCO_val2014_000000001268.jpg',
    #  'height': 427,
    #  'width': 640,
    #  'date_captured': '2013-11-17 05:57:24',
    #  'id': 1268}
    imgs = coco_api.loadImgs(img_ids)
    # anns is a list[list[dict]], where each dict is an annotation
    # record for an object. The inner list enumerates the objects in an image
    # and the outer list enumerates over images. Example of anns[0]:
    # [{'segmentation': [[192.81,
    #     247.09,
    #     ...
    #     219.03,
    #     249.06]],
    #   'area': 1035.749,
    #   'iscrowd': 0,
    #   'image_id': 1268,
    #   'bbox': [192.81, 224.8, 74.73, 33.43],
    #   'category_id': 16,
    #   'id': 42986},
    #  ...]
    anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]

    if "minival" not in json_file:
        # The popular valminusminival & minival annotations for COCO2014 contain this bug.
        # However the ratio of buggy annotations there is tiny and does not affect accuracy.
        # Therefore we explicitly white-list them.
        ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
        assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
            json_file
        )

    imgs_anns = list(zip(imgs, anns))

    logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))

    dataset_dicts = []

    ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or [])
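    # e.g. passing extra_annotation_keys=['densepose'] (see the docstring)
    # would copy each annotation's 'densepose' value through as-is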

    num_instances_without_valid_segmentation = 0

    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        record["file_name"] = os.path.join(image_root, img_dict["file_name"])
        record["height"] = img_dict["height"]
        record["width"] = img_dict["width"]
        image_id = record["image_id"] = img_dict["id"]

        objs = []
        for anno in anno_dict_list:
            # Check that the image_id in this annotation is the same as
            # the image_id we're looking at.
            # This fails only when the data parsing logic or the annotation file is buggy.

            # The original COCO valminusminival2014 & minival2014 annotation files
            # actually contains bugs that, together with certain ways of using COCO API,
            # can trigger this assertion.
            assert anno["image_id"] == image_id

            assert anno.get("ignore", 0) == 0

            obj = {key: anno[key] for key in ann_keys if key in anno}

            segm = anno.get("segmentation", None)
            if segm:  # either list[list[float]] or dict(RLE)
                if not isinstance(segm, dict):
                    # filter out invalid polygons (< 3 points)
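                    # (each polygon is a flat [x0, y0, x1, y1, ...] list,
                    #  so 3 points means at least 6 values)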
                    segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
                    if len(segm) == 0:
                        num_instances_without_valid_segmentation += 1
                        continue  # ignore this instance
                obj["segmentation"] = segm

            keypts = anno.get("keypoints", None)
            if keypts:  # list[int]
                for idx, v in enumerate(keypts):
                    if idx % 3 != 2:
                        # COCO's segmentation coordinates are floating points in [0, H or W],
                        # but keypoint coordinates are integers in [0, H-1 or W-1]
                        # Therefore we assume the coordinates are "pixel indices" and
                        # add 0.5 to convert to floating point coordinates.
                        keypts[idx] = v + 0.5
                obj["keypoints"] = keypts

            obj["bbox_mode"] = BoxMode.XYWH_ABS
            if id_map:
                obj["category_id"] = id_map[obj["category_id"]]
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)

    if num_instances_without_valid_segmentation > 0:
        logger.warning(
            "Filtered out {} instances without valid segmentation. "
            "There might be issues in your dataset generation process.".format(
                num_instances_without_valid_segmentation
            )
        )
    return dataset_dicts
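Since every box is recorded in COCO's native [x, y, width, height] layout (BoxMode.XYWH_ABS above), here is a tiny, self-contained sketch of converting such a box to corner form, reusing the bbox from the sample annotation quoted in the comments (xywh_to_xyxy is a hypothetical helper name):

def xywh_to_xyxy(box):
    x, y, w, h = box
    return [x, y, x + w, y + h]

print(xywh_to_xyxy([192.81, 224.8, 74.73, 33.43]))
# -> [192.81, 224.8, 267.54, 258.23] (up to float rounding)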