Example #1
 def _do_test(b1, b2):
     # Compute IoU overlap with the cython implementation
     cython_iou = box_utils.bbox_overlaps(b1, b2)
     # Compute IoU overlap with the COCO API implementation
     # (requires converting boxes from xyxy to xywh format)
     xywh_b1 = box_utils.xyxy_to_xywh(b1)
     xywh_b2 = box_utils.xyxy_to_xywh(b2)
     not_crowd = [int(False)] * b2.shape[0]
     coco_ious = COCOmask.iou(xywh_b1, xywh_b2, not_crowd)
     # IoUs should be similar
     np.testing.assert_array_almost_equal(
         cython_iou, coco_ious, decimal=5
     )
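A minimal usage sketch for the test above, assuming box_utils is detectron.utils.boxes and COCOmask is pycocotools.mask (as in the Detectron test suite); the _random_xyxy_boxes helper and the box counts are illustrative only:

import numpy as np

def _random_xyxy_boxes(n, size=100):
    # n well-formed boxes in (x1, y1, x2, y2) order with positive extents
    x1y1 = np.random.uniform(0, size, (n, 2))
    wh = np.random.uniform(1, size, (n, 2))
    return np.hstack([x1y1, x1y1 + wh]).astype(np.float64)

_do_test(_random_xyxy_boxes(8), _random_xyxy_boxes(5))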
Example #2
def _coco_bbox_results_one_category(json_dataset, boxes, depths, cat_id):
    results = []
    image_ids = json_dataset.COCO.getImgIds()
    image_ids.sort()
    assert len(boxes) == len(image_ids)
    for i, image_id in enumerate(image_ids):
        dets = boxes[i]
        depth_dets = depths[i]
        if isinstance(dets, list) and len(dets) == 0:
            continue
        dets = dets.astype(np.float64)
        depth_dets = depth_dets.astype(np.float64)
        scores = dets[:, -1]
        xywh_dets = box_utils.xyxy_to_xywh(dets[:, 0:4])
        xs = xywh_dets[:, 0]
        ys = xywh_dets[:, 1]
        ws = xywh_dets[:, 2]
        hs = xywh_dets[:, 3]
        results.extend([{
            'image_id': image_id,
            'category_id': cat_id,
            'bbox': [xs[k], ys[k], ws[k], hs[k]],
            'depth': depth_dets[k],
            'score': scores[k]
        } for k in range(dets.shape[0])])
    return results
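For reference, a sketch of the per-image inputs this helper expects (one detections array per image, following the Detectron convention; the literal values below are made up):

import numpy as np

# boxes[i]  : (N_i, 5) array of [x1, y1, x2, y2, score] detections for image i
# depths[i] : matching (N_i,) array of per-detection depth predictions
boxes = [np.array([[10., 20., 60., 90., 0.95]]), []]
depths = [np.array([4.2]), []]
# Images with no detections carry an empty list and are skipped by the loop.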
Example #3
 def test_bbox_dataset_to_prediction_roundtrip(self):
     """Simulate the process of reading a ground-truth box from a dataset,
     make predictions from proposals, convert the predictions back to the
     dataset format, and then use the COCO API to compute IoU overlap between
     the gt box and the predictions. These should have IoU of 1.
     """
     weights = (5, 5, 10, 10)
     # 1/ "read" a box from a dataset in the default (x1, y1, w, h) format
     gt_xywh_box = [10, 20, 100, 150]
     # 2/ convert it to our internal (x1, y1, x2, y2) format
     gt_xyxy_box = box_utils.xywh_to_xyxy(gt_xywh_box)
     # 3/ consider nearby proposal boxes
     prop_xyxy_boxes = random_boxes(gt_xyxy_box, 10, 10)
     # 4/ compute proposal-to-gt transformation deltas
     deltas = box_utils.bbox_transform_inv(
         prop_xyxy_boxes, np.array([gt_xyxy_box]), weights=weights
     )
     # 5/ use deltas to transform proposals to xyxy predicted box
     pred_xyxy_boxes = box_utils.bbox_transform(
         prop_xyxy_boxes, deltas, weights=weights
     )
     # 6/ convert xyxy predicted box to xywh predicted box
     pred_xywh_boxes = box_utils.xyxy_to_xywh(pred_xyxy_boxes)
     # 7/ use COCO API to compute IoU
     not_crowd = [int(False)] * pred_xywh_boxes.shape[0]
     ious = COCOmask.iou(pred_xywh_boxes, np.array([gt_xywh_box]), not_crowd)
     np.testing.assert_array_almost_equal(ious, np.ones(ious.shape))
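The roundtrip above depends on the xywh/xyxy conversion convention used in Detectron's box_utils, where boxes are treated as inclusive pixel coordinates (so x2 = x1 + w - 1). A small illustrative sketch of that convention, not the library code itself:

import numpy as np

def xywh_to_xyxy_sketch(xywh):
    # (x1, y1, w, h) -> (x1, y1, x2, y2) with inclusive coordinates
    x1, y1, w, h = xywh
    return np.array([x1, y1, x1 + w - 1, y1 + h - 1])

def xyxy_to_xywh_sketch(xyxy):
    # (x1, y1, x2, y2) -> (x1, y1, w, h), the inverse of the above
    x1, y1, x2, y2 = xyxy
    return np.array([x1, y1, x2 - x1 + 1, y2 - y1 + 1])

# Converting back and forth recovers the original dataset-format box.
assert (xyxy_to_xywh_sketch(xywh_to_xyxy_sketch([10, 20, 100, 150]))
        == np.array([10, 20, 100, 150])).all()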
Example #4
def _filter_crowd_proposals(roidb, crowd_thresh):
    """Finds proposals that are inside crowd regions and marks them as
    overlap = -1 with each ground-truth rois, which means they will be excluded
    from training.
    """
    for entry in roidb:
        gt_overlaps = entry['gt_overlaps'].toarray()
        crowd_inds = np.where(entry['is_crowd'] == 1)[0]
        non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
        if len(crowd_inds) == 0 or len(non_gt_inds) == 0:
            continue
        crowd_boxes = box_utils.xyxy_to_xywh(entry['boxes'][crowd_inds, :])
        non_gt_boxes = box_utils.xyxy_to_xywh(entry['boxes'][non_gt_inds, :])
        iscrowd_flags = [int(True)] * len(crowd_inds)
        ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd_flags)
        bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0]
        gt_overlaps[non_gt_inds[bad_inds], :] = -1
        entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps)
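A minimal sketch of exercising this filter, assuming numpy and scipy are available and COCOmask is pycocotools.mask; the roidb entry below is made up and only illustrates the expected fields:

import numpy as np
import scipy.sparse

entry = {
    # one crowd gt box and one proposal fully inside it, in xyxy format
    'boxes': np.array([[0., 0., 50., 50.], [10., 10., 20., 20.]]),
    'is_crowd': np.array([1, 0]),
    'gt_classes': np.array([1, 0]),  # 0 marks the proposal (non-gt) row
    'gt_overlaps': scipy.sparse.csr_matrix(np.array([[0.9], [0.6]])),
}
_filter_crowd_proposals([entry], crowd_thresh=0.7)
# The proposal inside the crowd region now has all its overlaps set to -1.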
Example #5
def _coco_personmask_results_one_category(json_dataset, boxes, segms, cat_id):
    results = []
    image_ids = json_dataset.COCO.getImgIds()
    image_ids.sort()
    assert len(boxes) == len(image_ids)
    assert len(segms) == len(image_ids)

    # Note: an earlier variant skipped box_utils.xyxy_to_xywh for consistency
    # with the original implementation, casting to ints and not adding 1 when
    # computing ws and hs; that variant is preserved in the block comment
    # inside the loop below. The active code converts boxes with xyxy_to_xywh.

    for i, image_id in enumerate(image_ids):

        dets = boxes[i]
        rles = segms[i]

        if isinstance(dets, list) and len(dets) == 0:
            continue
        dets = dets.astype(np.float64)
        scores = dets[:, -1]

        xywh_dets = box_utils.xyxy_to_xywh(dets[:, 0:4])
        xs = xywh_dets[:, 0]
        ys = xywh_dets[:, 1]
        ws = xywh_dets[:, 2]
        hs = xywh_dets[:, 3]
        '''
        dets = dets.astype(np.float)
        xs = dets[:, 0]
        ys = dets[:, 1]
        ws = (dets[:, 2] - xs).astype(np.int)
        hs = (dets[:, 3] - ys).astype(np.int)
        '''
        results.extend(
            [{'image_id': image_id,
              'category_id': cat_id,
              'segmentation': rles[k],
              'bbox': [xs[k], ys[k], ws[k], hs[k]],
              'score': scores[k]}
              for k in range(dets.shape[0])])

    return results
def _coco_bbox_results_one_category(json_dataset, boxes, cat_id):
    results = []
    image_ids = json_dataset.COCO.getImgIds()
    image_ids.sort()
    assert len(boxes) == len(image_ids)
    for i, image_id in enumerate(image_ids):
        dets = boxes[i]
        if isinstance(dets, list) and len(dets) == 0:
            continue
        dets = dets.astype(np.float64)
        scores = dets[:, -1]
        xywh_dets = box_utils.xyxy_to_xywh(dets[:, 0:4])
        xs = xywh_dets[:, 0]
        ys = xywh_dets[:, 1]
        ws = xywh_dets[:, 2]
        hs = xywh_dets[:, 3]
        results.extend(
            [{'image_id': image_id,
              'category_id': cat_id,
              'bbox': [xs[k], ys[k], ws[k], hs[k]],
              'score': scores[k]} for k in range(dets.shape[0])])
    return results
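A results list built this way is normally written to JSON and scored with pycocotools. A sketch of that standard evaluation flow, assuming results is the list returned above and json_dataset.COCO is the ground-truth COCO handle (res_file is an illustrative path):

import json
from pycocotools.cocoeval import COCOeval

res_file = '/tmp/bbox_results.json'
with open(res_file, 'w') as f:
    json.dump(results, f)

coco_gt = json_dataset.COCO
coco_dt = coco_gt.loadRes(res_file)
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()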
def convert_cityscapes_instance_only(data_dir, out_dir):
    """Convert from cityscapes format to COCO instance seg format - polygons"""
    sets = [
        #'gtFine_val'
        'gtFine_train',
        # 'gtFine_test',

        # 'gtCoarse_train',
        # 'gtCoarse_val',
        # 'gtCoarse_train_extra'
    ]
    ann_dirs = [
        #'gtFine_trainvaltest/gtFine/val'
        'gtFine_trainvaltest/gtFine/train',
        # 'gtFine_trainvaltest/gtFine/test',

        # 'gtCoarse/train',
        # 'gtCoarse/train_extra',
        # 'gtCoarse/val'
    ]
    json_name = 'instancesonly_filtered_%s.json'
    ends_in = '%s_polygons.json'
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}

    category_instancesonly = [
        'person',
        'rider',
        'car',
        'truck',
        'bus',
        'caravan',
        'trailer',
        'train',
        'motorcycle',
        'bicycle',
    ]

    for data_set, ann_dir in zip(sets, ann_dirs):
        print('Starting %s' % data_set)
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)

        for root, _, files in os.walk(ann_dir):
            for filename in files:
                #print("*******", filename)
                if filename.endswith(ends_in % data_set.split('_')[0]):
                    if len(images) % 50 == 0:
                        print("Processed %s images, %s annotations" %
                              (len(images), len(annotations)))
                    json_ann = json.load(open(os.path.join(root, filename)))
                    image = {}
                    image['id'] = img_id
                    img_id += 1

                    image['width'] = json_ann['imgWidth']
                    image['height'] = json_ann['imgHeight']
                    image['file_name'] = filename[:-len(
                        ends_in % data_set.split('_')[0])] + 'leftImg8bit.png'
                    image['seg_file_name'] = filename[:-len(
                        ends_in % data_set.split('_')[0]
                    )] + '%s_instanceIds.png' % data_set.split('_')[0]
                    images.append(image)

                    fullname = os.path.join(
                        root, image['seg_file_name'])  # instance name
                    objects = cs.instances2dict_with_polygons(
                        [fullname], False)[os.path.abspath(fullname)]
                    #print(fullname)
                    #print(objects)
                    for object_cls in objects:
                        if object_cls not in category_instancesonly:
                            continue  # skip non-instance categories

                        for obj in objects[object_cls]:
                            if obj['contours'] == []:
                                print('Warning: empty contours.')
                                continue  # skip objects with empty contours
                            len_p = [len(p) for p in obj['contours']]
                            if min(len_p) <= 4:
                                print('Warning: invalid contours.')
                                continue  # skip objects with too few polygon points

                            ann = {}
                            ann['id'] = ann_id
                            ann_id += 1
                            ann['image_id'] = image['id']
                            ann['segmentation'] = obj['contours']

                            if object_cls not in category_dict:
                                category_dict[object_cls] = cat_id
                                cat_id += 1
                            ann['category_id'] = category_dict[object_cls]
                            ann['iscrowd'] = 0
                            ann['area'] = obj['pixelCount']
                            ann['bbox'] = bboxs_util.xyxy_to_xywh(
                                segms_util.polys_to_boxes(
                                    [ann['segmentation']])).tolist()[0]

                            annotations.append(ann)

        ann_dict['images'] = images
        categories = [{
            "id": category_dict[name],
            "name": name
        } for name in category_dict]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        with open(os.path.join(out_dir, json_name % data_set), 'w') as outfile:
            outfile.write(json.dumps(ann_dict))
def convert_cityscapes_instance_only(
        data_dir, out_dir):
    """Convert from cityscapes format to COCO instance seg format - polygons"""
    sets = [
        'train',
        'val'
        # 'images/100k/train',
        # 'images/100k/val'

        # 'gtFine_train',
        # 'gtCoarse_train',
        # 'gtCoarse_val',
        # 'gtCoarse_train_extra'
    ]
    ann_dirs = [
        'annotation_train',
        'annotation_val'

        # 'labels/100k/train',
        # 'labels/100k/val'

        # 'gtFine_trainvaltest/gtFine/train',
        # 'gtFine_trainvaltest/gtFine/test',

        # 'gtCoarse/train',
        # 'gtCoarse/train_extra',
        # 'gtCoarse/val'
    ]
    json_name = 'instancesonly_filtered_%s.json'
    ends_in = '.json'
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}

    category_instancesonly = ['__background__',
        "bike",
        "bus",
        "car",
        # "motor",
        "person",
        "rider",
        "traffic light",
        "traffic sign",
        # "train",
        "truck",
        "area/alternative",
        "area/drivable",
        # "lane/crosswalk",
        # "lane/double other",
        # "lane/double white",
        # "lane/double yellow",
        # "lane/road curb",
        # "lane/single other",
        # "lane/single white",
        # "lane/single yellow"
    ]
    # Write "info"
    infodata = {'info': {
        'description': 'This is stable 1.0 version of the 2014 MS COCO dataset.',
        'url': u'http://mscoco.org',
        'version': u'1.0',
        'year': 2014,
        'contributor': 'Microsoft COCO group',
        'date_created': '2015-01-27 09:11:52.357475',
    }}


    for data_set, ann_dir in zip(sets, ann_dirs):
        print('Starting %s' % data_set)
        ann_dict = {}
        ann_dict["info"] = infodata["info"]
        ann_dict["type"] = 'instances'

        annPath = os.path.join(data_dir, 'coco_ref',
                               'instances_' + data_set + '2014.json')

        with open(annPath) as annFile:
            print ("open " + str(annFile))
            cocodata = json.load(annFile)
            licdata = [i for i in cocodata['licenses']]
            ann_dict["licenses"] = licdata
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        for root, _, files in os.walk(ann_dir):
            for filename in files:

                if filename.endswith(ends_in):
                    if len(images) % 50 == 0:
                        print("Processed %s images, %s annotations" % (
                            len(images), len(annotations)))
                    json_ann = json.load(open(os.path.join(root, filename)))
                    image = {}
                    image['id'] = img_id
                    img_id += 1

                    # im = Image.open(filename)
                    # (width, height) = im.size
                    image['width'] = 1280
                    image['height'] = 720
                    outmask = np.zeros((image['height'], image['width']), np.uint8)

                    img_dir = os.path.join(data_dir, data_set)
                    # image['file_name'] = img_dir + "/" + filename.split('.')[0] + ".jpg"
                    image['file_name'] = filename.split('.')[0] + ".jpg"
                    images.append(image)

                    # fullname = os.path.join(root, image['seg_file_name'])
                    # objects = cs.instances2dict_with_polygons(
                    #     [fullname], verbose=False)[fullname]

                    objects = json_ann["frames"][0]["objects"]
                    for obj in objects:
                        if obj["category"] not in category_instancesonly:
                            continue  # skip non-instance categories
                        index = category_instancesonly.index(obj["category"])  # + 184
                        seg_points = getPointByObj(obj)  # [[[point1], [point2]]]
                        seg = []
                        for seg_poit in seg_points:
                            seg.extend(sum(seg_poit, []))
                        if len(seg) == 0:
                            print('Warning: invalid segmentation.')
                            continue
                        ann = {}
                        ann['id'] = ann_id
                        ann_id += 1
                        ann['image_id'] = image['id']

                        category_dict[obj["category"]] = index
                        ann['category_id'] = index
                        ann['iscrowd'] = 0
                        if "box2d" in obj:
                            ann['bbox'] = getBoxByObj(obj)
                        else:
                            ann['area'], ann['segmentation'] = getAreaByObj(
                                seg_points, image['height'], image['width'],
                                ann['category_id'])
                            ann['bbox'] = bboxs_util.xyxy_to_xywh(segms_util.polys_to_boxes(
                                [ann['segmentation']])).tolist()[0]

                        annotations.append(ann)
                        # break
        ann_dict['images'] = images
        # category_dict.values()
        # categories = [{"id": category_dict[name], "name": name} for name in category_dict]
        categories = []
        for index, value in enumerate(category_instancesonly):
            categories.append({"id": index, "name": value})
        categories = categories[1:]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        with open(os.path.join(out_dir, json_name % data_set), 'w') as outfile:
            outfile.write(json.dumps(ann_dict))
Example #10
def convert_cityscapes_instance_only(data_dir, out_dir):
    """Convert from cityscapes format to COCO instance seg format - polygons"""
    sets = [
        "gtFine_val",
        "gtFine_train",
        "gtFine_test",
        # 'gtCoarse_train',
        # 'gtCoarse_val',
        # 'gtCoarse_train_extra'
    ]
    ann_dirs = [
        "gtFine_trainvaltest/gtFine/val",
        "gtFine_trainvaltest/gtFine/train",
        "gtFine_trainvaltest/gtFine/test",
        # 'gtCoarse/train',
        # 'gtCoarse/train_extra',
        # 'gtCoarse/val'
    ]
    json_name = "instancesonly_filtered_%s.json"
    ends_in = "%s_polygons.json"
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}

    category_instancesonly = [
        "person",
        "rider",
        "car",
        "truck",
        "bus",
        "train",
        "motorcycle",
        "bicycle",
    ]

    for data_set, ann_dir in zip(sets, ann_dirs):
        print("Starting %s" % data_set)
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        for root, _, files in os.walk(ann_dir):
            for filename in files:
                if filename.endswith(ends_in % data_set.split("_")[0]):
                    if len(images) % 50 == 0:
                        print("Processed %s images, %s annotations" %
                              (len(images), len(annotations)))
                    json_ann = json.load(open(os.path.join(root, filename)))
                    image = {}
                    image["id"] = img_id
                    img_id += 1

                    image["width"] = json_ann["imgWidth"]
                    image["height"] = json_ann["imgHeight"]
                    image["file_name"] = (
                        filename[:-len(ends_in % data_set.split("_")[0])] +
                        "leftImg8bit.png")
                    image["seg_file_name"] = (
                        filename[:-len(ends_in % data_set.split("_")[0])] +
                        "%s_instanceIds.png" % data_set.split("_")[0])
                    images.append(image)

                    fullname = os.path.join(root, image["seg_file_name"])
                    objects = cs.instances2dict_with_polygons(
                        [fullname], verbose=False)[fullname]

                    for object_cls in objects:
                        if object_cls not in category_instancesonly:
                            continue  # skip non-instance categories

                        for obj in objects[object_cls]:
                            if obj["contours"] == []:
                                print("Warning: empty contours.")
                                continue  # skip objects with empty contours

                            len_p = [len(p) for p in obj["contours"]]
                            if min(len_p) <= 4:
                                print("Warning: invalid contours.")
                                continue  # skip objects with too few polygon points

                            ann = {}
                            ann["id"] = ann_id
                            ann_id += 1
                            ann["image_id"] = image["id"]
                            ann["segmentation"] = obj["contours"]

                            if object_cls not in category_dict:
                                category_dict[object_cls] = cat_id
                                cat_id += 1
                            ann["category_id"] = category_dict[object_cls]
                            ann["iscrowd"] = 0
                            ann["area"] = obj["pixelCount"]
                            ann["bbox"] = bboxs_util.xyxy_to_xywh(
                                segms_util.polys_to_boxes(
                                    [ann["segmentation"]])).tolist()[0]

                            annotations.append(ann)

        ann_dict["images"] = images
        categories = [{
            "id": category_dict[name],
            "name": name
        } for name in category_dict]
        ann_dict["categories"] = categories
        ann_dict["annotations"] = annotations
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        with open(os.path.join(out_dir, json_name % data_set), "w") as outfile:
            outfile.write(json.dumps(ann_dict))
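These converters are typically driven from a small command-line wrapper; a sketch of such an entry point (the flag names are illustrative, not necessarily those of the original script):

import argparse

def parse_args():
    parser = argparse.ArgumentParser(
        description='Convert Cityscapes annotations to COCO instance-segmentation JSON')
    parser.add_argument('--datadir', required=True,
                        help='root directory containing the Cityscapes annotation folders')
    parser.add_argument('--outdir', required=True,
                        help='directory that receives the generated JSON files')
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_args()
    convert_cityscapes_instance_only(args.datadir, args.outdir)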
def convert_cityscapes_instance_only(
        data_dir, out_dir):
    """Convert from cityscapes format to COCO instance seg format - polygons"""
    sets = [
        'gtFine_val',
        # 'gtFine_train',
        # 'gtFine_test',

        # 'gtCoarse_train',
        # 'gtCoarse_val',
        # 'gtCoarse_train_extra'
    ]
    ann_dirs = [
        'gtFine_trainvaltest/gtFine/val',
        # 'gtFine_trainvaltest/gtFine/train',
        # 'gtFine_trainvaltest/gtFine/test',

        # 'gtCoarse/train',
        # 'gtCoarse/train_extra',
        # 'gtCoarse/val'
    ]
    json_name = 'instancesonly_filtered_%s.json'
    ends_in = '%s_polygons.json'
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}

    category_instancesonly = [
        'person',
        'rider',
        'car',
        'truck',
        'bus',
        'train',
        'motorcycle',
        'bicycle',
    ]

    for data_set, ann_dir in zip(sets, ann_dirs):
        print('Starting %s' % data_set)
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        for root, _, files in os.walk(ann_dir):
            for filename in files:
                if filename.endswith(ends_in % data_set.split('_')[0]):
                    if len(images) % 50 == 0:
                        print("Processed %s images, %s annotations" % (
                            len(images), len(annotations)))
                    json_ann = json.load(open(os.path.join(root, filename)))
                    image = {}
                    image['id'] = img_id
                    img_id += 1

                    image['width'] = json_ann['imgWidth']
                    image['height'] = json_ann['imgHeight']
                    image['file_name'] = filename[:-len(
                        ends_in % data_set.split('_')[0])] + 'leftImg8bit.png'
                    image['seg_file_name'] = filename[:-len(
                        ends_in % data_set.split('_')[0])] + \
                        '%s_instanceIds.png' % data_set.split('_')[0]
                    images.append(image)

                    fullname = os.path.join(root, image['seg_file_name'])
                    objects = cs.instances2dict_with_polygons(
                        [fullname], verbose=False)[fullname]

                    for object_cls in objects:
                        if object_cls not in category_instancesonly:
                            continue  # skip non-instance categories

                        for obj in objects[object_cls]:
                            if obj['contours'] == []:
                                print('Warning: empty contours.')
                                continue  # skip objects with empty contours

                            len_p = [len(p) for p in obj['contours']]
                            if min(len_p) <= 4:
                                print('Warning: invalid contours.')
                                continue  # skip objects with too few polygon points

                            ann = {}
                            ann['id'] = ann_id
                            ann_id += 1
                            ann['image_id'] = image['id']
                            ann['segmentation'] = obj['contours']

                            if object_cls not in category_dict:
                                category_dict[object_cls] = cat_id
                                cat_id += 1
                            ann['category_id'] = category_dict[object_cls]
                            ann['iscrowd'] = 0
                            ann['area'] = obj['pixelCount']
                            ann['bbox'] = bboxs_util.xyxy_to_xywh(
                                segms_util.polys_to_boxes(
                                    [ann['segmentation']])).tolist()[0]

                            annotations.append(ann)

        ann_dict['images'] = images
        categories = [{"id": category_dict[name], "name": name} for name in
                      category_dict]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        with open(os.path.join(out_dir, json_name % data_set), 'w') as outfile:
            outfile.write(json.dumps(ann_dict))
Example #12
def convert_cityscapes_instance_only(data_dir, out_dir):
    """Convert from cityscapes format to COCO instance seg format - polygons"""
    sets = args.set.split(',')
    for i in sets:
        if i == 'train' or i == 'val':
            ann_dirs = ['train/images', 'val/images']
        elif i == 'test':  # NEED TEST MASK ANNOTATIONS
            ann_dirs = ['test/images']
        else:
            print('Invalid input')

    json_name = 'instancesonly_filtered_%s.json'
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}

    category_instancesonly = [
        'unlabeled', 'ship', 'storage_tank', 'baseball_diamond',
        'tennis_court', 'basketball_court', 'Ground_Track_Field', 'Bridge',
        'Large_Vehicle', 'Small_Vehicle', 'Helicopter', 'Swimming_pool',
        'Roundabout', 'Soccer_ball_field', 'plane', 'Harbor'
    ]
    for data_set, ann_dir in zip(sets, ann_dirs):
        print('Starting %s' % data_set)
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        print(ann_dir)
        img_id = 0  # for every image_id with different indexing
        c_images = 0
        for root, _, files in os.walk(ann_dir):
            for filename in natsorted(files):
                if filename.endswith(
                        '_color_RGB.png'
                ):  #if re.match(r'\w*\d+.png', filename) or filename.split('.')[0].count('_')==4:
                    #import pdb;pdb.set_trace()
                    c_images += 1
                    filename = ''.join(filename)
                    filename = filename.split('_')[:-3]
                    if len(filename) > 1:
                        filename = '_'.join(filename)
                    else:
                        filename = ''.join(filename)
                    filename = filename + '.png'
                    print("Processed %s images" % (c_images))
                    image_dim = cv2.imread(os.path.join(root, filename))
                    imgHeight, imgWidth, _ = image_dim.shape
                    image = {}
                    image['id'] = img_id
                    img_id += 1
                    image['width'] = imgWidth
                    image['height'] = imgHeight
                    print("Processing Image", filename)
                    image['file_name'] = filename.split('.')[0] + '.png'
                    print("Processing Image", image['file_name'])
                    image['ins_file_name'] = filename.split(
                        '.')[0] + '_instance_id_RGB.png'
                    image['seg_file_name'] = filename.split(
                        '.')[0] + '_instance_color_RGB.png'
                    images.append(image)

                    #import pdb;pdb.set_trace()
                    seg_fullname = os.path.join(root, image['seg_file_name'])
                    inst_fullname = os.path.join(root, image['ins_file_name'])

                    if not os.path.exists(seg_fullname):
                        print("YOU DONT HAVE TEST MASKS")
                        sys.exit(0)
                    objects = cs.instances2dict_with_polygons([seg_fullname],
                                                              [inst_fullname],
                                                              verbose=True)
                    for k, v in objects.items():
                        for object_cls in list(v.keys()):
                            # keep only the labels listed in category_instancesonly
                            if object_cls not in category_instancesonly:
                                continue
                            for obj in v[object_cls]:
                                if obj['contours'] == []:
                                    print('Warning: empty contours.')
                                    continue
                                len_p = [len(p) for p in obj['contours']]
                                if min(len_p) <= 4:
                                    print('Warning: invalid contours.')
                                    continue

                                ann = {}
                                ann['id'] = ann_id
                                ann_id += 1
                                ann['image_id'] = image['id']
                                ann['segmentation'] = obj['contours']
                                if object_cls not in category_dict:
                                    category_dict[object_cls] = label2id[
                                        object_cls]

                                ann['category_id'] = category_dict[object_cls]
                                ann['category_name'] = object_cls
                                ann['iscrowd'] = 0
                                ann['area'] = obj['pixelCount']
                                ann['bbox'] = bboxs_util.xyxy_to_xywh(
                                    segms_util.polys_to_boxes(
                                        [ann['segmentation']])).tolist()[0]

                                #annotations.append(ann)
                                if ann['area'] > 10:
                                    annotations.append(ann)

        ann_dict['images'] = images
        categories = [{
            "id": category_dict[name],
            "name": name
        } for name in category_dict]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        with open(
                os.path.join(os.path.join(out_dir, data_set),
                             json_name % data_set), "w") as outfile:
            outfile.write(json.dumps(ann_dict))
Example #13
def convert_cityscapes_instance_only(data_dir, out_dir):
    """Convert from cityscapes format to COCO instance seg format - polygons"""
    sets = [
        'gtFine_train',
        'gtFine_val',
        # 'gtFine_test',

        # 'gtCoarse_train',
        # 'gtCoarse_val',
        # 'gtCoarse_train_extra'
    ]
    ann_dirs = [
        'labels_train',
        'labels_val',
        # 'gtFine_trainvaltest/gtFine/test',

        # 'gtCoarse/train',
        # 'gtCoarse/train_extra',
        # 'gtCoarse/val'
    ]
    json_name = 'instancesonly_filtered_%s.json'
    ends_in = '.json'
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}

    category_instancesonly = [
        '__background__', 'person', 'rider', 'car', 'truck', 'bus',
        'motorcycle', 'bicycle', 'ground', 'road', 'sky'
    ]

    for data_set, ann_dir in zip(sets, ann_dirs):
        print('Starting %s' % data_set)
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        for root, _, files in os.walk(ann_dir):
            for filename in files:
                if filename.endswith(ends_in):
                    if len(images) % 50 == 0:
                        print("Processed %s images, %s annotations" %
                              (len(images), len(annotations)))
                    json_ann = json.load(open(os.path.join(root, filename)))
                    image = {}
                    image['id'] = img_id
                    img_id += 1

                    image['width'] = json_ann['imgWidth']
                    image['height'] = json_ann['imgHeight']
                    sub_name = filename.split('_')[0:3]
                    image['file_name'] = '_'.join(sub_name) + '_leftImg8bit.png'
                    images.append(image)

                    objects = json_ann["objects"]

                    for obj in objects:
                        if obj["label"] not in category_instancesonly:
                            continue  # skip non-instance categories

                        index = category_instancesonly.index(
                            obj["label"])  # + 184

                        ann = {}
                        ann['id'] = ann_id
                        ann_id += 1
                        ann['image_id'] = image['id']
                        ann['segmentation'] = [sum(obj['polygon'], [])]

                        ann['category_id'] = index
                        ann['iscrowd'] = 0

                        seg_points = obj["polygon"]
                        ann['area'] = getAreaByPoint(seg_points,
                                                     image['height'],
                                                     image['width'],
                                                     ann['category_id'])
                        ann['bbox'] = bboxs_util.xyxy_to_xywh(
                            segms_util.polys_to_boxes([ann['segmentation']
                                                       ])).tolist()[0]

                        annotations.append(ann)
                    # break
        ann_dict['images'] = images
        categories = []
        for index, value in enumerate(category_instancesonly):
            categories.append({"id": index, "name": value})
        categories = categories[1:]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        with open(os.path.join(out_dir, json_name % data_set),
                  'w') as outfile:
            outfile.write(json.dumps(ann_dict))
Example #14
def convert_cityscapes_instance_only(data_dir, out_dir):
    """Convert from cityscapes format to COCO instance seg format - polygons"""
    sets = [
        'image_train',
        'image_val',
    ]
    ann_dirs = [
        'gtFine/train',
        'gtFine/val',
    ]
    json_name = 'instancesonly_filtered_%s.json'
    ends_in = '_polygons.json'
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}

    add_instancesonly = ['__background__', 'person', 'car']
    category_instancesonly = [
        '__background__', 'guard rail', 'car', 'dashed', 'solid',
        'solid solid', 'dashed dashed', 'dashed-solid', 'solid-dashed',
        'yellow dashed', 'yellow solid', 'yellow solid solid',
        'yellow dashed dashed', 'yellow dashed-solid', 'yellow solid-dashed',
        'boundary', 'fork_line', 'fork_edge', 'arrow_s', 'arrow_r', 'arrow_l',
        'arrow_lr', 'arrow_inclined_r', 'arrow_r_s', 'arrow_l_s', 'sidewalk',
        'handrail'
    ]
    category_instancesonly = [
        '__background__',
        'person',
        'bicycle',
        'car',
        'motorcycle',
        'airplane',
        'bus',
        'train',
        'truck',
        'boat',
        'traffic light',
        'fire hydrant',
        'stop sign',
        'parking meter',
        'bench',
        'bird',
        'cat',
        'dog',
        'horse',
        'sheep',
        'cow',
        'elephant',
        'bear',
        'zebra',
        'giraffe',
        'backpack',
        'umbrella',
        'handbag',
        'tie',
        'suitcase',
        'frisbee',
        'skis',
        'snowboard',
        'sports ball',
        'kite',
        'baseball bat',
        'baseball glove',
        'skateboard',
        'surfboard',
        'tennis racket',
        'bottle',
        'wine glass',
        'cup',
        'fork',
        'knife',
        'spoon',
        'bowl',
        'banana',
        'apple',
        'sandwich',
        'orange',
        'broccoli',
        'carrot',
        'hot dog',
        'pizza',
        'donut',
        'cake',
        'chair',
        'couch',
        'potted plant',
        'bed',
        'dining table',
        'toilet',
        'tv',
        'laptop',
        'mouse',
        'remote',
        'keyboard',
        'cell phone',
        'microwave',
        'oven',
        'toaster',
        'sink',
        'refrigerator',
        'book',
        'clock',
        'vase',
        'scissors',
        'teddy bear',
        'hair drier',
        'toothbrush',
        'guard rail',
        # 'car',
        'dashed',
        'solid',
        'solid solid',
        'dashed dashed',
        'dashed-solid',
        'solid-dashed',
        'yellow dashed',
        'yellow solid',
        'yellow solid solid',
        'yellow dashed dashed',
        'yellow dashed-solid',
        'yellow solid-dashed',
        'boundary',
        'fork_line',
        'fork_edge',
        'arrow_s',
        'arrow_r',
        'arrow_l',
        'arrow_lr',
        'arrow_inclined_r',
        'arrow_r_s',
        'arrow_l_s',
        'sidewalk',
        'handrail'
    ]

    for data_set, ann_dir in zip(sets, ann_dirs):
        print('Starting %s' % data_set)
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        file_list = []
        for root, sub_dirs, files in os.walk(ann_dir):
            for filename in files:
                file_list.append([root, filename])
        random.shuffle(file_list)

        for file_item in file_list:
            root = file_item[0]
            filename = file_item[1]
            if filename.endswith(ends_in):
                if filename.startswith("shcity_000875_000002"):
                    pass
                if len(images) % 50 == 0:
                    print("Processed %s images, %s annotations" %
                          (len(images), len(annotations)))
                json_ann = json.load(open(os.path.join(root, filename)))
                image = {}
                image['id'] = img_id
                img_id += 1

                image['width'] = json_ann['imgWidth']
                image['height'] = json_ann['imgHeight']
                sub_file_name = filename.split('_')
                image['file_name'] = os.path.join(
                    sub_file_name[0],
                    '_'.join(sub_file_name[:-2]) + '_leftImg8bit.png')
                image['seg_file_name'] = '_'.join(
                    filename.split('_')[:-1]) + '_instanceIds.png'
                images.append(image)

                fullname = os.path.join(root, image['seg_file_name'])
                print("fullname:" + fullname)
                objects = cs.instances2dict_with_polygons(
                    [fullname], verbose=False)[fullname]

                for object_cls in objects:
                    # if object_cls not in add_instancesonly:
                    #     continue

                    if object_cls not in category_instancesonly:
                        continue  # skip non-instance categories

                    for obj in objects[object_cls]:
                        if obj['contours'] == []:
                            print('Warning: empty contours.')
                            continue  # skip objects with empty contours

                        index = category_instancesonly.index(
                            object_cls)  # + 184
                        good_area = [p for p in obj['contours'] if len(p) > 4]

                        if len(good_area) == 0:
                            print('Warning: invalid contours.')
                            continue  # skip objects with no valid contours

                        ann = {}
                        ann['id'] = ann_id
                        ann_id += 1
                        ann['image_id'] = image['id']
                        ann['segmentation'] = good_area

                        ann['category_id'] = index
                        ann['iscrowd'] = 0
                        ann['area'] = obj['pixelCount']
                        ann['bbox'] = bboxs_util.xyxy_to_xywh(
                            segms_util.polys_to_boxes([ann['segmentation']
                                                       ])).tolist()[0]

                        annotations.append(ann)

        ann_dict['images'] = images

        categories = []
        for index, value in enumerate(category_instancesonly):
            categories.append({"id": index, "name": value})
        categories = categories[1:]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        with open(os.path.join(out_dir, json_name % data_set),
                  'w') as outfile:
            outfile.write(json.dumps(ann_dict))