Example #1
    def json2wireframe(self, image_fn):
        """Convert the roof masks in one split image's JSON annotation into
        line segments and save them as wireframe heatmap arrays."""
        basename = wwtool.get_basename(image_fn)
        json_file = os.path.join(self.splitted_label_dir, basename + '.json')

        annotations = mmcv.load(json_file)['annotations']
        image_lines = []
        for annotation in annotations:
            roof_mask = annotation['roof']
            lines = self.mask2lines(roof_mask)
            image_lines += lines

        image_lines = np.array(image_lines).reshape(-1, 2, 2)
        np_save_file_prefix = os.path.join(self.wireframe_dir, basename)
        self.save_heatmap(np_save_file_prefix, image_lines)
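The helpers wwtool.mask2lines and self.save_heatmap are not shown in these excerpts. Since the collected segments are reshaped to (-1, 2, 2), mask2lines presumably walks the flattened polygon mask [x1, y1, x2, y2, ...] and emits one segment per consecutive pair of vertices. The function below is a sketch of that assumption, not the library's actual implementation.

import numpy as np

def mask2lines(mask):
    # Hypothetical sketch: split a flattened polygon mask [x1, y1, x2, y2, ...]
    # into its ring of edge segments ((x_i, y_i), (x_{i+1}, y_{i+1})).
    points = np.array(mask, dtype=np.float32).reshape(-1, 2)
    lines = []
    for idx in range(len(points)):
        start = tuple(points[idx])
        end = tuple(points[(idx + 1) % len(points)])  # wrap around to close the polygon
        lines.append((start, end))
    return lines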
Example #2
    def get_image_annotation_pairs(self):
        images = []
        annotations = []
        index = 0
        progress_bar = mmcv.ProgressBar(len(self.imgpaths))
        imId = 0
        for imgfile, annofile in zip(self.imgpaths, self.annotpaths):
            # imgpath = os.path.join(self.imgpath, name + self.image_format)
            # annotpath = os.path.join(self.annopath, name + self.anno_format)
            name = wwtool.get_basename(imgfile)

            annotations_coco = self.__generate_coco_annotation__(
                annofile, imgfile)

            # skip this image when its annotation list is empty, unless we are
            # building a set without ground truth
            if annotations_coco != [] or not self.groundtruth:
                img = cv2.imread(imgfile)
                height, width, channels = img.shape
                images.append({
                    "date_captured": "2019",
                    "file_name": name + self.image_format,
                    "id": imId + 1,
                    "license": 1,
                    "url": "http://jwwangchn.cn",
                    "height": height,
                    "width": width
                })

                for annotation in annotations_coco:
                    index = index + 1
                    annotation["iscrowd"] = 0
                    annotation["image_id"] = imId + 1
                    annotation["id"] = index
                    annotations.append(annotation)

                imId += 1

            if imId % 500 == 0:
                print(
                    "\nImage ID: {}, Instance ID: {}, Small Object Counter: {}, Max Object Number: {}"
                    .format(imId, index, self.small_object_idx,
                            self.max_object_num_per_image))

            progress_bar.update()

        return images, annotations
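The body of __generate_coco_annotation__ is not included in these excerpts. The loop above only fills in iscrowd, image_id and id, so each returned annotation is assumed to already follow the standard COCO instance layout; the record below uses purely illustrative values, not data from the source.

# Assumed shape of one COCO annotation after get_image_annotation_pairs()
# has filled in the id fields; all values are illustrative.
example_annotation = {
    'segmentation': [[10.0, 10.0, 60.0, 10.0, 60.0, 40.0, 10.0, 40.0]],
    'area': 1500.0,
    'bbox': [10.0, 10.0, 50.0, 30.0],  # [xmin, ymin, width, height]
    'category_id': 1,
    'iscrowd': 0,   # set in the loop above
    'image_id': 1,  # set in the loop above
    'id': 1,        # set in the loop above
}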
Example #3
    def __init__(self,
                 imgpath=None,
                 annopath=None,
                 imageset_file=None,
                 image_format='.jpg',
                 anno_format='.txt',
                 data_categories=None,
                 data_info=None,
                 data_licenses=None,
                 data_type="instances",
                 groundtruth=True,
                 small_object_area=0,
                 sub_anno_fold=False,
                 cities=None):
        super(SIMPLETXT2COCO, self).__init__()

        self.imgpath = imgpath
        self.annopath = annopath
        self.image_format = image_format
        self.anno_format = anno_format

        self.categories = data_categories
        self.info = data_info
        self.licenses = data_licenses
        self.type = data_type
        self.small_object_area = small_object_area
        self.small_object_idx = 0
        self.groundtruth = groundtruth
        self.max_object_num_per_image = 0
        self.sub_anno_fold = sub_anno_fold
        self.imageset_file = imageset_file

        self.imgpaths, self.annotpaths = [], []

        # collect the paired image/label paths for every requested city
        for city in cities:
            for image_fn in os.listdir(
                    os.path.join(self.imgpath, city, 'images')):
                basename = wwtool.get_basename(image_fn)
                self.imgpaths.append(
                    os.path.join(self.imgpath, city, 'images',
                                 basename + '.png'))
                self.annotpaths.append(
                    os.path.join(self.imgpath, city, 'labels',
                                 basename + '.txt'))
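For context, a minimal usage sketch of this converter follows. It assumes the directory layout the constructor iterates over ({imgpath}/{city}/images/*.png and {imgpath}/{city}/labels/*.txt); the paths, categories and city list are placeholders rather than values taken from the source.

# Illustrative only: placeholder paths and categories.
simpletxt2coco = SIMPLETXT2COCO(
    imgpath='./data/buildchange/v2',
    annopath='./data/buildchange/v2',
    image_format='.png',
    anno_format='.txt',
    data_categories=[{'supercategory': 'none', 'id': 1, 'name': 'building'}],
    data_info={'description': 'buildchange simpletxt -> COCO'},
    data_licenses=[{'id': 1, 'name': '', 'url': ''}],
    cities=['shanghai'])

images, annotations = simpletxt2coco.get_image_annotation_pairs()

The returned lists can then be combined with self.info, self.licenses, self.type and self.categories into a full COCO dictionary and written out with json.dump.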
Example #4
    def __simpletxt_parse__(self, label_file, image_file):
        """Parse a simpletxt label file.

        Each line holds a flattened point list followed by a class label:
        "x1 y1 x2 y2 ... label". The parsed bbox is stored as
        [xmin, ymin, width, height].
        """
        image_basename = wwtool.get_basename(image_file)
        origin_image_name = image_basename.split('__')[0].split(sub_fold +
                                                                '_')[1]
        # print(image_name_list, origin_image_name)
        if origin_image_name not in image_name_list:
            # print("===========================================")
            return []

        with open(label_file, 'r') as f:
            lines = f.readlines()

        objects = []
        total_object_num = 0

        for line in lines:
            object_struct = {}
            line = line.rstrip().split(' ')
            label = line[-1]
            mask = [float(_) for _ in line[0:-1]]

            xmin, ymin, xmax, ymax = wwtool.pointobb2bbox(mask)
            bbox_w = xmax - xmin
            bbox_h = ymax - ymin

            total_object_num += 1

            object_struct['bbox'] = [xmin, ymin, bbox_w, bbox_h]
            object_struct['segmentation'] = mask
            object_struct['label'] = 1

            objects.append(object_struct)

        return objects
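wwtool.pointobb2bbox is used here but not defined in these excerpts; judging by how it is called, it reduces the flattened point list to an axis-aligned bounding box. A minimal sketch under that assumption:

def pointobb2bbox(pointobb):
    # Hypothetical sketch: [x1, y1, x2, y2, ...] -> (xmin, ymin, xmax, ymax).
    xs = pointobb[0::2]
    ys = pointobb[1::2]
    return min(xs), min(ys), max(xs), max(ys)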
Example #5
    def simpletxt2json(self, image_fn):
        # 1. open the ignore file and get the polygons
        base_name = wwtool.get_basename(image_fn)
        sub_fold = base_name.split("__")[0].split('_')[0]
        ori_image_fn = "_".join(base_name.split("__")[0].split('_')[1:])
        # if ori_image_fn in self.wrong_shp_file_dict[sub_fold]:
        #     print("Skip this wrong shape file")
        #     return
        coord_x, coord_y = base_name.split("__")[1].split(
            '_')  # top left corner
        coord_x, coord_y = int(coord_x), int(coord_y)
        print(
            f"split items: {self.city}, {sub_fold}, {ori_image_fn}, {(coord_x, coord_y)}"
        )

        ignore_file = './data/buildchange/{}/{}/{}/pixel_anno_v2/{}'.format(
            src_version, self.city, sub_fold, ori_image_fn + '.png')
        # print("ignore file name: ", ignore_file)
        roof_shp_file = './data/buildchange/{}/{}/{}/roof_shp_4326/{}'.format(
            src_version, self.city, sub_fold, ori_image_fn + '.shp')
        geo_info_file = './data/buildchange/{}/{}/{}/geo_info/{}'.format(
            src_version, self.city, sub_fold, ori_image_fn + '.png')

        objects = shp_parser(roof_shp_file, geo_info_file)
        roof_polygon_4326 = [obj['converted_polygon'] for obj in objects]
        roof_property = [obj['converted_property'] for obj in objects]

        pixel_anno = cv2.imread(ignore_file)
        if pixel_anno is None:
            return
        objects = mask_parser(pixel_anno[coord_y:coord_y + sub_img_h,
                                         coord_x:coord_x + sub_img_w, :],
                              category=255)
        if objects == []:
            return
        ignore_polygons = [obj['polygon'] for obj in objects]
        # print("ignore polygon: ", ignore_polygons)

        # 2. read the simpletxt file and convert to polygons
        objects = self.simpletxt_parse(
            os.path.join(self.splitted_label_dir, base_name + '.txt'))
        roof_polygons = [
            wwtool.mask2polygon(obj['polygon']) for obj in objects
        ]
        # print("roof polygon: ", roof_polygons)

        _, ignore_indexes = wwtool.cleaning_polygon_by_polygon(
            roof_polygons[:], ignore_polygons, show=False)
        ignore_list = len(roof_polygons) * [0]
        for ignore_index in ignore_indexes:
            ignore_list[ignore_index] = 1

        new_anno_objects = []
        for idx, roof_polygon in enumerate(roof_polygons):
            footprint_polygon, xoffset, yoffset = self.get_footprint(
                roof_polygon, [coord_x, coord_y], roof_polygon_4326,
                roof_property)
            object_struct = dict()
            ignore_flag = ignore_list[idx]
            object_struct['roof'] = wwtool.polygon2mask(roof_polygon)
            object_struct['footprint'] = wwtool.polygon2mask(footprint_polygon)
            object_struct['offset'] = [xoffset, yoffset]
            object_struct['ignore'] = ignore_flag
            new_anno_objects.append(object_struct)

        image_info = {
            "ori_filename": ori_image_fn + '.jpg',
            "subimage_filename": image_fn,
            "width": 1024,
            "height": 1024,
            "city": self.city,
            "sub_fold": sub_fold,
            "coordinate": [coord_x, coord_y]
        }

        json_data = {"image": image_info, "annotations": new_anno_objects}

        json_file = os.path.join(self.json_dir, f'{base_name}.json')
        with open(json_file, "w") as jsonfile:
            json.dump(json_data, jsonfile, indent=4)
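wwtool.mask2polygon and wwtool.polygon2mask are used throughout these scripts to move between flattened coordinate lists and polygon objects; their implementations are not shown. Below is a minimal sketch of the assumed round trip, using shapely, not the library's actual code.

import numpy as np
from shapely.geometry import Polygon

def mask2polygon(mask):
    # Hypothetical sketch: flattened [x1, y1, x2, y2, ...] -> shapely Polygon.
    points = np.array(mask, dtype=np.float64).reshape(-1, 2)
    return Polygon(points)

def polygon2mask(polygon):
    # Hypothetical sketch: shapely Polygon -> flattened exterior coordinates,
    # dropping the repeated closing vertex.
    coords = np.array(polygon.exterior.coords)[:-1]
    return coords.flatten().tolist()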
Example #6
    train_rate = 0.8
    imagename_sets = defaultdict(set)

    np.random.seed(seed)

    for imageset in imagesets:
        for sub_imageset_fold in sub_imageset_folds[imageset]:
            print('Processing {} {}'.format(imageset, sub_imageset_fold))
            image_path = './data/{}/{}/{}/{}/images'.format(core_dataset_name, src_version, imageset, sub_imageset_fold)

            for image_fn in os.listdir(image_path):
                if image_fn.endswith('.jpg') or image_fn.endswith('.png'):
                    pass
                else:
                    continue
                image_basename = wwtool.get_basename(image_fn)
                imagename_sets[imageset].add(image_basename)

        file_names = sorted(imagename_sets[imageset])
        np.random.shuffle(file_names)
        train_file_names = file_names[0 : int(len(file_names) * train_rate)]
        val_file_names = file_names[int(len(file_names) * train_rate):]

        save_trainset_fn = './data/{}/{}/{}/trainset.txt'.format(core_dataset_name, src_version, imageset)
        save_valset_fn = './data/{}/{}/{}/valset.txt'.format(core_dataset_name, src_version, imageset)

        for save_fn, file_names in zip([save_trainset_fn, save_valset_fn], [train_file_names, val_file_names]):
            with open(save_fn, 'w') as f:
                for file_name in file_names:
                    f.write('{}\n'.format(file_name))
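The split files written above contain one image basename per line. Reading them back is straightforward; the path below assumes core_dataset_name='buildchange', src_version='v2' and imageset='shanghai' as in the other examples.

def load_imageset(txt_file):
    # One basename per line, as written by the loop above.
    with open(txt_file, 'r') as f:
        return [line.strip() for line in f if line.strip()]

train_names = load_imageset('./data/buildchange/v2/shanghai/trainset.txt')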
Example #7
if __name__ == '__main__':
    # cities = ['shanghai', 'beijing', 'jinan', 'haerbin', 'chengdu']
    cities = ['shanghai']

    label_list = []
    for city in cities:
        for label_fn in os.listdir(
                './data/buildchange/v2/{}/labels_json'.format(city)):
            label_list.append([city, label_fn])

    label_list = sorted(label_list)
    np.random.shuffle(label_list)

    dst_label_dir = './data/buildchange/v2/sampling/labels_json'
    dst_image_dir = './data/buildchange/v2/sampling/images'
    wwtool.mkdir_or_exist(dst_label_dir)
    wwtool.mkdir_or_exist(dst_image_dir)

    for city_name, label_fn in label_list[0:1000]:
        basename = wwtool.get_basename(label_fn)
        src_label_file = './data/buildchange/v2/{}/labels_json/{}'.format(
            city_name, basename + '.json')
        src_image_file = './data/buildchange/v2/{}/images/{}'.format(
            city_name, basename + '.png')

        dst_label_file = os.path.join(dst_label_dir, basename + '.json')
        dst_image_file = os.path.join(dst_image_dir, basename + '.png')

        shutil.copy(src_label_file, dst_label_file)
        shutil.copy(src_image_file, dst_image_file)
Example #8
import os

import mmcv
import pandas

import wwtool

if __name__ == '__main__':

    keywords = ['roof', 'footprint']

    for key in keywords:
        csv_file = './data/buildchange/v2/xian_fine/xian_fine_{}_gt.csv'.format(
            key)
        first_in = True

        json_dir = './data/buildchange/v2/xian_fine/labels_json'
        rgb_img_dir = './data/buildchange/v2/xian_fine/images'

        for json_fn in os.listdir(json_dir):
            base_name = wwtool.get_basename(json_fn)

            rgb_img_file = os.path.join(rgb_img_dir, base_name + '.png')
            json_file = os.path.join(json_dir, json_fn)

            annotations = mmcv.load(json_file)['annotations']

            masks = [wwtool.mask2polygon(anno[key]) for anno in annotations]

            csv_image = pandas.DataFrame({
                'ImageId': base_name,
                'BuildingId': range(len(masks)),
                'PolygonWKT_Pix': masks,
                'Confidence': 1
            })
            if first_in:
                # write the csv with a header the first time, then append without it
                csv_image.to_csv(csv_file, index=False)
                first_in = False
            else:
                csv_image.to_csv(csv_file, index=False, header=False, mode='a')
Example #9
if __name__ == '__main__':
    sub_folds = ['arg', 'google', 'ms']

    csv_file = './data/buildchange/v0/xian_fine/xian_fine_gt.csv'
    first_in = True

    for sub_fold in sub_folds:
        shp_dir = f'./data/buildchange/v0/xian_fine/{sub_fold}/roof_shp_pixel'
        rgb_img_dir = f'./data/buildchange/v0/xian_fine/{sub_fold}/images'

        for shp_fn in os.listdir(shp_dir):
            if not shp_fn.endswith('shp'):
                continue
            print("Processing: ", shp_fn)
            base_name = wwtool.get_basename(shp_fn)

            rgb_img_file = os.path.join(rgb_img_dir, base_name + '.jpg')
            shp_file = os.path.join(shp_dir, shp_fn)

            rgb_img = cv2.imread(rgb_img_file)
            geo_info = rio.open(rgb_img_file)

            shp_parser = wwtool.ShpParse()
            objects = shp_parser(shp_file,
                                 geo_info,
                                 coord='pixel',
                                 merge_flag=True,
                                 connection_mode='floor')

            gt_polygons = [