Example #1
import glob
import os

import numpy as np
from PIL import Image

# Assumed project-level helpers: `labels` is the Cityscapes label table and
# `IdGenerator` / `save_json` are panopticapi utilities.
from cityscapesscripts.helpers.labels import labels
from panopticapi.utils import IdGenerator, save_json


def panoptic_converter(original_format_folder, out_folder, out_file):

    if not os.path.isdir(out_folder):
        print("Creating folder {} for panoptic segmentation PNGs".format(
            out_folder))
        os.mkdir(out_folder)

    categories = []
    for idx, el in enumerate(labels):
        if el.ignoreInEval:
            continue
        categories.append({
            'id': el.id,
            'name': el.name,
            'color': el.color,
            'supercategory': el.category,
            'isthing': 1 if el.hasInstances else 0
        })

    categories_dict = {cat['id']: cat for cat in categories}

    file_list = sorted(
        glob.glob(
            os.path.join(original_format_folder,
                         '*/*_gtFine_instanceIds.png')))

    images = []
    annotations = []
    for working_idx, f in enumerate(file_list):
        if working_idx % 10 == 0:
            print(working_idx, len(file_list))

        original_format = np.array(Image.open(f))

        file_name = os.path.basename(f)
        image_id = file_name.rsplit('_', 2)[0]
        image_filename = '{}_leftImg8bit.png'.format(image_id)
        # image entry, id for image is its filename without extension
        images.append({
            "id": image_id,
            "width": original_format.shape[1],
            "height": original_format.shape[0],
            "file_name": image_filename
        })

        pan_format = np.zeros(
            (original_format.shape[0], original_format.shape[1], 3),
            dtype=np.uint8)
        id_generator = IdGenerator(categories_dict)

        ll = np.unique(original_format)
        segm_info = []
        for el in ll:
            if el < 1000:
                semantic_id = el
                is_crowd = 1
            else:
                semantic_id = el // 1000
                is_crowd = 0
            if semantic_id not in categories_dict:
                continue
            if categories_dict[semantic_id]['isthing'] == 0:
                is_crowd = 0
            mask = original_format == el
            segment_id, color = id_generator.get_id_and_color(semantic_id)
            pan_format[mask] = color

            area = np.sum(mask)  # segment area computation

            # bbox computation for a segment
            hor = np.sum(mask, axis=0)
            hor_idx = np.nonzero(hor)[0]
            x = hor_idx[0]
            width = hor_idx[-1] - x + 1
            vert = np.sum(mask, axis=1)
            vert_idx = np.nonzero(vert)[0]
            y = vert_idx[0]
            height = vert_idx[-1] - y + 1
            # cast numpy integer types to plain int for JSON serialization
            bbox = [int(x), int(y), int(width), int(height)]

            segm_info.append({
                "id": int(segment_id),
                "category_id": int(semantic_id),
                "area": int(area),
                "bbox": bbox,
                "iscrowd": is_crowd
            })

        annotations.append({
            'image_id': image_id,
            'file_name': file_name,
            "segments_info": segm_info
        })

        Image.fromarray(pan_format).save(os.path.join(out_folder, file_name))

    d = {
        'images': images,
        'annotations': annotations,
        'categories': categories,
    }

    save_json(d, out_file)
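
For reference, here is a minimal sketch of how panoptic_converter might be invoked on a Cityscapes-style gtFine tree; the paths below are illustrative assumptions, not part of the example above.

gt_folder = '/data/cityscapes/gtFine/val'         # hypothetical input folder
png_folder = '/data/cityscapes/panoptic_val'      # output folder for panoptic PNGs
json_file = '/data/cityscapes/panoptic_val.json'  # output COCO panoptic JSON

panoptic_converter(gt_folder, png_folder, json_file)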
Example #2
def extract_semantic(input_json_file, segmentations_folder, output_json_file,
                     semantic_seg_folder, categories_json_file, things_other):
    start_time = time.time()
    with open(input_json_file, 'r') as f:
        d_coco = json.load(f)
    annotations = d_coco['annotations']

    if segmentations_folder is None:
        segmentations_folder = input_json_file.rsplit('.', 1)[0]

    print("EXTRACTING FROM...")
    print("COCO panoptic format:")
    print("\tSegmentation folder: {}".format(segmentations_folder))
    print("\tJSON file: {}".format(input_json_file))
    print("SEMANTIC SEGMENTATION")

    if output_json_file is not None and semantic_seg_folder is not None:
        raise Exception("'--output_json_file' and '--semantic_seg_folder' "
                        "options cannot be used together")

    save_as_png = False
    if output_json_file is None:
        if semantic_seg_folder is None:
            raise Exception(
                "One of '--output_json_file' and '--semantic_seg_folder' "
                "options must be specified")
        else:
            save_as_png = True
            print("in PNG format:")
            print("\tFolder with semantic segmentations: {}".format(
                semantic_seg_folder))
            if not os.path.isdir(semantic_seg_folder):
                print(
                    "Creating folder {} for semantic segmentation PNGs".format(
                        semantic_seg_folder))
                os.mkdir(semantic_seg_folder)
    else:
        print("in COCO detection format:")
        print("\tJSON file: {}".format(output_json_file))
    if things_other:
        print("Merging all things categories into 'other' category")
    print('\n')

    with open(categories_json_file, 'r') as f:
        categories_list = json.load(f)
    categories = {category['id']: category for category in categories_list}

    cpu_num = multiprocessing.cpu_count()
    annotations_split = np.array_split(annotations, cpu_num)
    print("Number of cores: {}, images per core: {}".format(
        cpu_num, len(annotations_split[0])))
    workers = multiprocessing.Pool(processes=cpu_num)
    processes = []
    for proc_id, annotations_set in enumerate(annotations_split):
        p = workers.apply_async(
            extract_semantic_single_core,
            (proc_id, annotations_set, segmentations_folder, output_json_file,
             semantic_seg_folder, categories, save_as_png, things_other))
        processes.append(p)
    annotations_coco_semantic_seg = []
    for p in processes:
        annotations_coco_semantic_seg.extend(p.get())

    if not save_as_png:
        for idx, ann in enumerate(annotations_coco_semantic_seg):
            ann['id'] = idx
        d_coco['annotations'] = annotations_coco_semantic_seg
        categories_coco_semantic_seg = []
        for category in categories_list:
            if things_other and category['isthing'] == 1:
                continue
            category.pop('isthing')
            category.pop('color')
            categories_coco_semantic_seg.append(category)
        if things_other:
            categories_coco_semantic_seg.append({
                'id': OTHER_CLASS_ID,
                'name': 'other',
                'supercategory': 'other'
            })
        d_coco['categories'] = categories_coco_semantic_seg
        save_json(d_coco, output_json_file)

    t_delta = time.time() - start_time
    print("Time elapsed: {:0.2f} seconds".format(t_delta))
    workers.close()
    workers.join()
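
As a usage sketch, extract_semantic can be driven in either of its two mutually exclusive output modes; every file and folder name below is a placeholder assumption.

# Mode 1: write one semantic segmentation PNG per image
extract_semantic(input_json_file='panoptic_val.json',
                 segmentations_folder=None,
                 output_json_file=None,
                 semantic_seg_folder='semantic_val_pngs',
                 categories_json_file='panoptic_coco_categories.json',
                 things_other=False)

# Mode 2: write a single COCO-detection-style JSON, merging all things into 'other'
extract_semantic(input_json_file='panoptic_val.json',
                 segmentations_folder=None,
                 output_json_file='semantic_val.json',
                 semantic_seg_folder=None,
                 categories_json_file='panoptic_coco_categories.json',
                 things_other=True)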
Example #3
def convert_panoptic_to_detection_coco_format(input_json_file,
                                              segmentations_folder,
                                              output_json_file,
                                              categories_json_file,
                                              things_only):
    start_time = time.time()

    if segmentations_folder is None:
        segmentations_folder = input_json_file.rsplit('.', 1)[0]

    print("CONVERTING...")
    print("COCO panoptic format:")
    print("\tSegmentation folder: {}".format(segmentations_folder))
    print("\tJSON file: {}".format(input_json_file))
    print("TO")
    print("COCO detection format")
    print("\tJSON file: {}".format(output_json_file))
    if things_only:
        print("Saving only segments of things classes.")
    print('\n')

    print("Reading annotation information from {}".format(input_json_file))
    with open(input_json_file, 'r') as f:
        d_coco = json.load(f)
    annotations_panoptic = d_coco['annotations']

    with open(categories_json_file, 'r') as f:
        categories_list = json.load(f)
    categories = {category['id']: category for category in categories_list}

    # Use all available CPU cores (set cpu_num = 1 when debugging on a single core).
    cpu_num = multiprocessing.cpu_count()
    annotations_split = np.array_split(annotations_panoptic, cpu_num)
    print("Number of cores: {}, images per core: {}".format(
        cpu_num, len(annotations_split[0])))
    workers = multiprocessing.Pool(processes=cpu_num)
    processes = []
    for proc_id, annotations_set in enumerate(annotations_split):
        p = workers.apply_async(
            convert_panoptic_to_detection_coco_format_single_core,
            (proc_id, annotations_set, categories, segmentations_folder,
             things_only))
        processes.append(p)
    annotations_coco_detection = []
    for p in processes:
        annotations_coco_detection.extend(p.get())
    for idx, ann in enumerate(annotations_coco_detection):
        ann['id'] = idx

    d_coco['annotations'] = annotations_coco_detection
    categories_coco_detection = []
    for category in d_coco['categories']:
        if things_only and category['isthing'] != 1:
            continue
        category.pop('isthing')
        category.pop('color')
        categories_coco_detection.append(category)
    d_coco['categories'] = categories_coco_detection
    save_json(d_coco, output_json_file)

    t_delta = time.time() - start_time
    print("Time elapsed: {:0.2f} seconds".format(t_delta))
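
A short usage sketch for convert_panoptic_to_detection_coco_format follows; the file names are placeholders, and segmentations_folder=None falls back to the folder derived from the input JSON name, as in the code above.

convert_panoptic_to_detection_coco_format(
    input_json_file='panoptic_val.json',
    segmentations_folder=None,
    output_json_file='detection_val.json',
    categories_json_file='panoptic_coco_categories.json',
    things_only=True)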
Example #4
def combine_predictions(semseg_json_file, instseg_json_file, images_json_file,
                        categories_json_file, segmentations_folder,
                        panoptic_json_file, confidence_thr, overlap_thr,
                        stuff_area_limit):
    start_time = time.time()

    with open(semseg_json_file, 'r') as f:
        sem_results = json.load(f)
    with open(instseg_json_file, 'r') as f:
        inst_results = json.load(f)
    with open(images_json_file, 'r') as f:
        images_d = json.load(f)
    img_id2img = {img['id']: img for img in images_d['images']}

    with open(categories_json_file, 'r') as f:
        categories_list = json.load(f)
    categories = {el['id']: el for el in categories_list}

    if segmentations_folder is None:
        segmentations_folder = panoptic_json_file.rsplit('.', 1)[0]
    if not os.path.isdir(segmentations_folder):
        print("Creating folder {} for panoptic segmentation PNGs".format(
            segmentations_folder))
        os.mkdir(segmentations_folder)

    print("Combining:")
    print("Semantic segmentation:")
    print("\tJSON file: {}".format(semseg_json_file))
    print("and")
    print("Instance segmentations:")
    print("\tJSON file: {}".format(instseg_json_file))
    print("into")
    print("Panoptic segmentations:")
    print("\tSegmentation folder: {}".format(segmentations_folder))
    print("\tJSON file: {}".format(panoptic_json_file))
    print("List of images to combine is taken from {}".format(
        images_json_file))
    print('\n')

    inst_by_image = defaultdict(list)
    for inst in inst_results:
        if inst['score'] < confidence_thr:
            continue
        inst_by_image[inst['image_id']].append(inst)
    for img_id in inst_by_image.keys():
        inst_by_image[img_id] = sorted(inst_by_image[img_id],
                                       key=lambda el: -el['score'])

    sem_by_image = defaultdict(list)
    for sem in sem_results:
        if categories[sem['category_id']]['isthing'] == 1:
            continue
        sem_by_image[sem['image_id']].append(sem)

    panoptic_json = combine_to_panoptic_multi_core(
        img_id2img, inst_by_image, sem_by_image, segmentations_folder,
        overlap_thr, stuff_area_limit, categories)

    with open(images_json_file, 'r') as f:
        coco_d = json.load(f)
    coco_d['annotations'] = panoptic_json
    coco_d['categories'] = list(categories.values())
    save_json(coco_d, panoptic_json_file)

    t_delta = time.time() - start_time
    print("Time elapsed: {:0.2f} seconds".format(t_delta))
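
# A minimal sketch of how combine_predictions might be invoked; the file names
# and threshold values are illustrative assumptions, not values taken from this
# example:
#
#   combine_predictions('semseg_predictions.json', 'instseg_predictions.json',
#                       'images_info.json', 'panoptic_coco_categories.json',
#                       None, 'panoptic_predictions.json',
#                       confidence_thr=0.5, overlap_thr=0.5,
#                       stuff_area_limit=64 * 64)
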
def panoptic_video_converter():

    original_format_folder = os.path.join(ROOT_DIR, MODE, 'panoptic_inst')
    # folder to store panoptic PNGs
    out_folder = os.path.join(ROOT_DIR, MODE, 'panoptic_video')
    out_file = os.path.join(ROOT_DIR, 'panoptic_gt_%s_city_vps.json' % (MODE))
    if not os.path.isdir(out_folder):
        os.makedirs(out_folder)

    categories = CATEGORIES
    categories_dict = {el['id']: el for el in CATEGORIES}
    file_list = sorted(glob.glob(os.path.join(original_format_folder,
                                              '*.png')))
    images = []
    annotations = []
    instid2color = {}
    videos = []
    id_generator = IdGenerator(categories_dict)
    print('==> %s/panoptic_video/ ...' % (MODE))

    for idx in trange(len(file_list)):
        f = file_list[idx]
        original_format = np.array(Image.open(f))

        file_name = os.path.basename(f)
        image_id = file_name.rsplit('_', 2)[0]
        video_id = image_id[:4]
        if video_id not in videos:
            videos.append(video_id)
            instid2color = {}

        image_filename = file_name.replace('final_mask', 'newImg8bit').replace(
            'gtFine_color', 'leftImg8bit')
        # image entry, id for image is its filename without extension
        images.append({
            "id": image_id,
            "width": original_format.shape[1],
            "height": original_format.shape[0],
            "file_name": image_filename
        })
        pan_format = np.zeros(
            (original_format.shape[0], original_format.shape[1], 3),
            dtype=np.uint8)

        unique_ids = np.unique(original_format)

        segm_info = {}
        for el in unique_ids:
            if el < 1000:
                semantic_id = el
                is_crowd = 1
            else:
                semantic_id = el // 1000
                is_crowd = 0
            if semantic_id not in categories_dict:
                continue
            if categories_dict[semantic_id]['isthing'] == 0:
                is_crowd = 0
            mask = (original_format == el)

            if el not in instid2color:
                segment_id, color = id_generator.get_id_and_color(semantic_id)
                instid2color[el] = (segment_id, color)
            else:
                segment_id, color = instid2color[el]

            pan_format[mask] = color
            # The segment area is recomputed below from the saved panoptic PNG,
            # so only the id / category / crowd flag are recorded here.
            segm_info[int(segment_id)] = {
                "id": int(segment_id),
                "category_id": int(semantic_id),
                "iscrowd": is_crowd
            }

        Image.fromarray(pan_format).save(os.path.join(out_folder, file_name))

        # segment sanity check, area recalculation
        gt_pan = np.uint32(pan_format)
        pan_gt = (gt_pan[:, :, 0] + gt_pan[:, :, 1] * 256 +
                  gt_pan[:, :, 2] * 256 * 256)
        labels, labels_cnt = np.unique(pan_gt, return_counts=True)
        gt_labels_set = set(segm_info.keys())
        for label, area in zip(labels, labels_cnt):
            if label == 0:  # unlabeled / VOID pixels
                continue
            if label not in gt_labels_set:
                print('png label not in json labels.')
                continue
            segm_info[label]["area"] = int(area)
            gt_labels_set.remove(label)
        if len(gt_labels_set) != 0:
            raise KeyError('remaining gt_labels json')

        segm_info = list(segm_info.values())
        annotations.append({
            'image_id': image_id,
            'file_name': file_name,
            "segments_info": segm_info
        })

    d = {
        'images': images,
        'annotations': annotations,
        'categories': categories,
    }

    save_json(d, out_file)
    print('==> Saved json file at %s' % (out_file))
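
Unlike the converters above, panoptic_video_converter takes no arguments and reads its configuration from module-level globals; the sketch below shows one way it might be driven, with placeholder values that stand in for the surrounding script's real ROOT_DIR, MODE, and CATEGORIES definitions.

ROOT_DIR = '/data/cityscapes_vps'  # placeholder dataset root (assumption)
MODE = 'val'                       # placeholder split name (assumption)
CATEGORIES = []                    # placeholder: the real list of category dicts
                                   # ('id', 'name', 'color', 'isthing', ...) must
                                   # be supplied by the surrounding script

panoptic_video_converter()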