def getGtInstances(groundTruthList, args):
    gtInstances = {}
    # if there is a global statistics json, then load it
    if os.path.isfile(args.gtInstancesFile):
        if not args.quiet:
            print("Loading ground truth instances from JSON.")
        with open(args.gtInstancesFile) as json_file:
            gtInstances = json.load(json_file)
    # otherwise create it
    else:
        if not args.quiet:
            print("Creating ground truth instances from png files.")
        gtInstances = instances2dict(groundTruthList, not args.quiet)
        writeDict2JSON(gtInstances, args.gtInstancesFile)

    return gtInstances
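A minimal usage sketch for the helper above, assuming instances2dict and writeDict2JSON come from the Cityscapes helper scripts and are importable; the paths and glob pattern below are hypothetical placeholders:

from argparse import Namespace
import glob
import os

# args only needs the two attributes the helper reads
args = Namespace(gtInstancesFile="gtInstances.json", quiet=False)
# hypothetical location of the *_instanceIds.png ground-truth files
groundTruthList = sorted(glob.glob(
    os.path.join("gtFine", "val", "*", "*_gtFine_instanceIds.png")))
gtInstances = getGtInstances(groundTruthList, args)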
    def _run(self, input_folder, output_folder, FORMAT):

        train_path = os.path.join(input_folder, 'gtFine_trainvaltest',
                                  'gtFine', 'train')
        assert os.path.isdir(train_path)

        val_path = os.path.join(input_folder, 'gtFine_trainvaltest', 'gtFine',
                                'val')
        assert os.path.isdir(val_path)

        dataset_folders = [train_path, val_path]
        dataset_names = ['train', 'val']
        input_image_folders = [
            os.path.join(input_folder, 'leftImg8bit_trainvaltest',
                         'leftImg8bit', 'train'),
            os.path.join(input_folder, 'leftImg8bit_trainvaltest',
                         'leftImg8bit', 'val')
        ]

        self._ensure_folder_exists_and_is_clear(output_folder)

        label_names = [
            label.name for label in labels
            if label.hasInstances and not label.ignoreInEval
        ]

        annotation_folder = os.path.join(output_folder, 'annotations')
        self._ensure_folder_exists_and_is_clear(annotation_folder)

        for input_image_folder, dataset_folder, dataset_name in zip(
                input_image_folders, dataset_folders, dataset_names):

            self._reset_dump()

            image_folder = os.path.join(output_folder, dataset_name)
            self._ensure_folder_exists_and_is_clear(image_folder)

            label2name = {}
            for cat_id, label_name in enumerate(label_names):
                label2name[name2label[label_name].id] = label_name
                self.to_dump['categories'].append({
                    'id': cat_id + 1,
                    'name': label_name
                })

            for city_folder in os.listdir(dataset_folder):

                input_image_folder_city = os.path.join(input_image_folder,
                                                       city_folder)

                assert os.path.isdir(os.path.join(dataset_folder, city_folder))
                for file_name in os.listdir(
                        os.path.join(dataset_folder, city_folder)):

                    file_name = os.path.join(dataset_folder, city_folder,
                                             file_name)
                    assert os.path.isfile(file_name)

                    if file_name.endswith('_polygons.json'):
                        self._parse_json(file_name)

                        # instance ids are stored in a 16-bit png, hence ANYDEPTH
                        img_inst = cv2.imread(self.img_inst_name,
                                              cv2.IMREAD_ANYDEPTH)
                        instance_dict = instances2dict(
                            [self.img_inst_name],
                            verbose=False)[self.img_inst_name]

                        for instanceId in np.unique(img_inst):

                            # Cityscapes encodes instances as labelId * 1000 + index,
                            # so ids below 1000 are plain semantic labels; skip them.
                            if instanceId < 1000:
                                continue

                            label = None
                            for value in list(instance_dict.values()):
                                for list_value in value:
                                    if list_value['instID'] == instanceId:
                                        label = list_value['labelID']
                                        break

                            assert label is not None

                            if label not in label2name:
                                continue

                            label_to_dump = None
                            for category in self.to_dump['categories']:
                                if category['name'] == label2name[label]:
                                    label_to_dump = category['id']
                                    break

                            assert label_to_dump is not None

                            mask = (img_inst == instanceId).astype(np.uint8)
                            # findContours returns 3 values in OpenCV 3.x but only
                            # 2 in OpenCV 4.x; indexing from the end works in both.
                            contours = cv2.findContours(
                                mask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]

                            x_min = float("inf")
                            y_min = float("inf")
                            x_max = 0
                            y_max = 0
                            for s in contours:
                                x, y, w, h = cv2.boundingRect(s)
                                x_b = x + w
                                y_b = y + h
                                x_min = min(x_min, x)
                                y_min = min(y_min, y)
                                x_max = max(x_max, x_b)
                                y_max = max(y_max, y_b)

                            area = int(np.sum(mask == 1))
                            segm = [
                                contour.ravel().tolist()
                                for contour in contours
                            ]

                            self.to_dump['annotations'].append({
                                'segmentation': segm,
                                'area': area,
                                'iscrowd': 0,
                                'image_id': self.image_id,
                                'bbox': [x_min, y_min,
                                         x_max - x_min, y_max - y_min],
                                'category_id': label_to_dump,
                                'id': self.annotation_id,
                                'ignore': 0
                            })

                            self.annotation_id += 1

                        self.image_id += 1
                        self.copy(
                            os.path.join(input_image_folder_city,
                                         self.image_name), image_folder)

            with open(os.path.join(annotation_folder, dataset_name + '.json'),
                      'w') as f:
                json.dump(self.to_dump, f)
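The json files written above follow the COCO annotation layout, so, as a hedged sketch (the output path below is hypothetical), they can be sanity-checked with pycocotools:

from pycocotools.coco import COCO

# hypothetical path: <output_folder>/annotations/train.json
coco = COCO("output/annotations/train.json")
print(len(coco.getImgIds()), "images,", len(coco.getAnnIds()), "annotations")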
Example #4
def convert_cityscapes_car_only(data_dir, out_dir):
    """Convert from cityscapes format to COCO instance seg format - polygons"""
    sets = [
        "gtFine_val",
        "gtFine_train",
        # 'gtFine_test',
        # 'gtCoarse_train',
        # 'gtCoarse_val',
        # 'gtCoarse_train_extra'
    ]
    ann_dirs = [
        "gtFine_trainvaltest/gtFine/val",
        "gtFine_trainvaltest/gtFine/train",
        # 'gtFine_trainvaltest/gtFine/test',
        # 'gtCoarse/train',
        # 'gtCoarse/train_extra',
        # 'gtCoarse/val'
    ]
    json_name = "caronly_filtered_unlabeled_%s.json"
    ends_in = "%s_polygons.json"
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}

    category_instancesonly = [
        "car",
    ]
    category_dict["car"] = cat_id

    for data_set, ann_dir in zip(sets, ann_dirs):
        print("Starting %s" % data_set)
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        for root, _, files in os.walk(ann_dir):
            for filename in files:
                if filename.endswith(ends_in % data_set.split("_")[0]):
                    if len(images) % 50 == 0:
                        print("Processed %s images, %s annotations" %
                              (len(images), len(annotations)))
                    with open(os.path.join(root, filename)) as json_file:
                        json_ann = json.load(json_file)
                    image = {}
                    image["id"] = img_id
                    img_id += 1

                    image["width"] = json_ann["imgWidth"]
                    image["height"] = json_ann["imgHeight"]
                    image["file_name"] = (
                        filename[:-len(ends_in % data_set.split("_")[0])] +
                        "leftImg8bit.png")
                    image["seg_file_name"] = (
                        filename[:-len(ends_in % data_set.split("_")[0])] +
                        "%s_instanceIds.png" % data_set.split("_")[0])
                    images.append(image)

                    fullname = os.path.join(root, image["seg_file_name"])
                    # per-instance stats are computed but not used below, since
                    # only a single full-image annotation is written per image
                    objects = cs.instances2dict([fullname],
                                                verbose=False)[fullname]

                    bbox = []
                    # x
                    bbox.append(0)
                    # y
                    bbox.append(0)
                    # w
                    bbox.append(int(json_ann["imgWidth"]))
                    # h
                    bbox.append(int(json_ann["imgHeight"]))

                    seg = []
                    # bbox[] is x,y,w,h
                    # left_top
                    seg.append(bbox[0])
                    seg.append(bbox[1])
                    # left_bottom
                    seg.append(bbox[0])
                    seg.append(bbox[1] + bbox[3])
                    # right_bottom
                    seg.append(bbox[0] + bbox[2])
                    seg.append(bbox[1] + bbox[3])
                    # right_top
                    seg.append(bbox[0] + bbox[2])
                    seg.append(bbox[1])

                    ann = {}
                    ann["id"] = ann_id
                    ann_id += 1
                    ann["image_id"] = image["id"]
                    ann["segmentation"] = [seg]
                    ann["category_id"] = 1
                    ann["iscrowd"] = 0
                    ann["area"] = bbox[2] * bbox[3]
                    ann["bbox"] = bbox

                    annotations.append(ann)

        ann_dict["images"] = images
        categories = [{
            "id": category_dict[name],
            "name": name
        } for name in category_dict]
        ann_dict["categories"] = categories
        ann_dict["annotations"] = annotations
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        print(categories)
        with open(os.path.join(out_dir, json_name % data_set), "w") as outfile:
            outfile.write(json.dumps(ann_dict))
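A hypothetical invocation sketch; the paths are placeholders, data_dir must contain the gtFine_trainvaltest layout expected above, and out_dir must already exist:

if __name__ == "__main__":
    # placeholder paths; point them at a local Cityscapes checkout
    convert_cityscapes_car_only("/data/cityscapes", "/data/cityscapes_coco")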