Code example #1
def convert_single_core(proc_id,
                        image_set,
                        categories,
                        source_folder,
                        segmentations_folder,
                        VOID=0):
    annotations = []
    for working_idx, image_info in enumerate(image_set):
        if working_idx % 100 == 0:
            print('Core: {}, {} from {} images converted'.format(
                proc_id, working_idx, len(image_set)))

        file_name = '{}.png'.format(image_info['file_name'].rsplit('.')[0])
        try:
            original_format = np.array(
                Image.open(os.path.join(source_folder, file_name)),
                dtype=np.uint32)
        except IOError:
            raise KeyError('no prediction png file for id: {}'.format(
                image_info['id']))

        pan = OFFSET * original_format[:, :, 0] + original_format[:, :, 1]
        pan_format = np.zeros(
            (original_format.shape[0], original_format.shape[1], 3),
            dtype=np.uint8)

        id_generator = IdGenerator(categories)

        l = np.unique(pan)
        segm_info = []
        for el in l:
            sem = el // OFFSET
            if sem == VOID:
                continue
            if sem not in categories:
                raise KeyError('Unknown semantic label {}'.format(sem))
            mask = pan == el
            segment_id, color = id_generator.get_id_and_color(sem)
            pan_format[mask] = color
            segm_info.append({"id": segment_id, "category_id": sem})

        annotations.append({
            'image_id': image_info['id'],
            'file_name': file_name,
            "segments_info": segm_info
        })

        Image.fromarray(pan_format).save(
            os.path.join(segmentations_folder, file_name))
    print('Core: {}, all {} images processed'.format(proc_id, len(image_set)))
    return annotations
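All of these converters hand IdGenerator a dict keyed by category id, where each entry carries at least an isthing flag and a base color (the format of panopticapi's panoptic_coco_categories.json). A minimal, self-contained sketch of that pattern; the file name and the square segment are illustrative only:

import json

import numpy as np
from PIL import Image
from panopticapi.utils import IdGenerator

with open('panoptic_coco_categories.json', 'r') as f:
    categories = {el['id']: el for el in json.load(f)}

id_generator = IdGenerator(categories)

# Ask for a fresh segment id plus a color derived from the category's base color.
segment_id, color = id_generator.get_id_and_color(1)  # COCO id 1 is 'person'

pan_format = np.zeros((480, 640, 3), dtype=np.uint8)
pan_format[100:200, 100:200] = color  # paint the segment's mask
Image.fromarray(pan_format).save('example_panoptic.png')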
Code example #2
    def infer_panoptic_mask(self, instance_mask_only=False):
        segments = []
        meta = {
            'image_id': None,  # not needed for now
            'file_name': None,
            'segments_info': segments
        }
        id_gen = IdGenerator(self.categories)

        instance_tsr, _, sem_cats = self.get_instance_tsr(resolve_overlap=True)
        assert instance_tsr.sum(0).max() <= 1, \
            'contested pixels should not exist'
        sem_cats = sem_cats.tolist()
        sem_cats = [self.trainId_2_catId[el] for el in sem_cats]
        iids = []
        for _cat, _ins_mask in zip(sem_cats, instance_tsr):
            _id = id_gen.get_id(_cat)
            iids.append(_id)
            segments.append({
                'id': _id,
                'category_id': _cat,
                'isthing': 1,
                'bbox': [
                    int(elem)
                    for elem in get_xywh_bbox_from_binary_mask(_ins_mask)
                ],
                'area': int(_ins_mask.sum())
            })  # note numpy.int64 is not json serializable
        mask = (instance_tsr * np.array(iids).reshape(-1, 1, 1)).sum(axis=0)
        mask = mask.astype(np.uint32)
        if instance_mask_only:
            return mask, meta
        sem_remain = self.sem_decision.copy()
        sem_remain[mask > 0] = -1
        for trainId in np.unique(sem_remain):
            if trainId < 0:
                continue
            _cat = self.trainId_2_catId[trainId]
            if self.categories[_cat]['isthing']:
                continue  # abstain on ungrouped instance pixels
            _id = id_gen.get_id(_cat)
            segments.append({'id': _id, 'category_id': _cat, 'isthing': 0})
            mask[sem_remain == trainId] = _id

        for seg in meta['segments_info']:
            assert seg['id'] in mask
        return mask, meta
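get_xywh_bbox_from_binary_mask is not shown in this snippet; a plausible implementation, assuming the same nonzero-projection bounding-box computation that code examples #10 and #17 below use inline:

import numpy as np

def get_xywh_bbox_from_binary_mask(mask):
    # Project the mask onto each axis and take the span of nonzero entries.
    hor_idx = np.nonzero(mask.sum(axis=0))[0]
    vert_idx = np.nonzero(mask.sum(axis=1))[0]
    x, y = hor_idx[0], vert_idx[0]
    width = hor_idx[-1] - x + 1
    height = vert_idx[-1] - y + 1
    return [x, y, width, height]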
Code example #3
def show_semseg_result(semseg_json_file, categories_json_file, out_image_file):
    sem_by_image = defaultdict(list)

    with open(semseg_json_file, 'r') as f:
        sem_results = json.load(f)

    print("Semantic segmentation:")
    print("\tJSON file: {}".format(semseg_json_file))


    with open(categories_json_file, 'r') as f:
        categories_list = json.load(f)
    categories = {el['id']: el for el in categories_list}


    for sem in sem_results:
        img_id = sem['image_id']  # NOTE: holds the last image id after the loop
        sem_by_image[img_id].append(sem)

    id_generator = IdGenerator(categories)

    # Output resolution is hard-coded; ideally it would follow the mask shape.
    pan_segm_id = np.zeros((480, 640), dtype=np.uint32)

    # TODO: handle the multiple-annotations case properly. For now later
    # annotations simply override earlier ones; the merge logic from the
    # panoptic combine script could be reused here.


    for ann in sem_by_image[img_id]:
        mask = COCOmask.decode(ann['segmentation'])

        plt.imshow(mask)
        plt.show()
        segment_id = id_generator.get_id(ann['category_id'])
        print("category id: {}".format(ann['category_id']))
        pan_segm_id[mask == 1] = segment_id
        print("segment id: {}".format(segment_id))
    Image.fromarray(id2rgb(pan_segm_id)).save(
        os.path.join("/home/zhiliu/Documents/Panoptic_Segement/Cocopanopticapi/VanillaPanopticSeg/data/predictions/test_result_for_vox/", out_image_file) 
    )
Code example #4
        def get_gt(pan_gt_json_file=None, pan_gt_folder=None):
            if pan_gt_json_file is None:
                pan_gt_json_file = self.panoptic_json_file
            if pan_gt_folder is None:
                pan_gt_folder = self.panoptic_gt_folder
            with open(pan_gt_json_file, 'r') as f:
                pan_gt_json = json.load(f)
            files = [item['file_name'] for item in pan_gt_json['images']]
            if 'viper' in pan_gt_folder:
                files = [
                    _.split('/')[-1].replace('.jpg', '.png') for _ in files
                ]
            cpu_num = multiprocessing.cpu_count()
            files_split = np.array_split(files, cpu_num)
            workers = multiprocessing.Pool(processes=cpu_num)
            processes = []
            for proc_id, files_set in enumerate(files_split):
                p = workers.apply_async(BaseDataset._load_image_single_core,
                                        (proc_id, files_set, pan_gt_folder))
                processes.append(p)
            workers.close()
            workers.join()
            pan_gt_all = []
            for p in processes:
                pan_gt_all.extend(p.get())

            categories = pan_gt_json['categories']
            categories = {el['id']: el for el in categories}
            color_generator = IdGenerator(categories)
            return pan_gt_all, pan_gt_json, categories, color_generator
Code example #5
def convert_detection_to_panoptic_coco_format_single_core(
        proc_id, coco_detection, img_ids, categories, segmentations_folder):
    id_generator = IdGenerator(categories)

    annotations_panoptic = []
    for working_idx, img_id in enumerate(img_ids):
        if working_idx % 100 == 0:
            print('Core: {}, {} from {} images processed'.format(
                proc_id, working_idx, len(img_ids)))
        img = coco_detection.loadImgs(int(img_id))[0]
        pan_format = np.zeros((img['height'], img['width'], 3), dtype=np.uint8)
        overlaps_map = np.zeros((img['height'], img['width']), dtype=np.uint32)

        anns_ids = coco_detection.getAnnIds(img_id)
        anns = coco_detection.loadAnns(anns_ids)

        panoptic_record = {}
        panoptic_record['image_id'] = img_id
        file_name = '{}.png'.format(img['file_name'].rsplit('.')[0])
        panoptic_record['file_name'] = file_name
        segments_info = []
        for ann in anns:
            if ann['category_id'] not in categories:
                raise Exception(
                    'Panoptic coco categories file does not contain '
                    'category with id: {}'.format(ann['category_id']))
            segment_id, color = id_generator.get_id_and_color(
                ann['category_id'])
            mask = coco_detection.annToMask(ann)
            overlaps_map += mask
            pan_format[mask == 1] = color
            ann.pop('segmentation')
            ann.pop('image_id')
            ann['id'] = segment_id
            segments_info.append(ann)

        if np.sum(overlaps_map > 1) != 0:
            raise Exception(
                "Segments for image {} overlap each other.".format(img_id))
        panoptic_record['segments_info'] = segments_info
        annotations_panoptic.append(panoptic_record)

        Image.fromarray(pan_format).save(
            os.path.join(segmentations_folder, file_name))

    print('Core: {}, all {} images processed'.format(proc_id, len(img_ids)))
    return annotations_panoptic
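A *_single_core worker like this is normally fanned out over a process pool and the per-worker annotation lists concatenated afterwards, as code examples #4 and #8 do. A sketch of such a driver, assuming coco_detection, categories and segmentations_folder are prepared as above:

import multiprocessing

import numpy as np

def convert_detection_to_panoptic_coco_format(coco_detection, categories,
                                              segmentations_folder):
    img_ids = coco_detection.getImgIds()
    cpu_num = multiprocessing.cpu_count()
    img_ids_split = np.array_split(img_ids, cpu_num)
    workers = multiprocessing.Pool(processes=cpu_num)
    processes = []
    for proc_id, ids_set in enumerate(img_ids_split):
        p = workers.apply_async(
            convert_detection_to_panoptic_coco_format_single_core,
            (proc_id, coco_detection, ids_set, categories,
             segmentations_folder))
        processes.append(p)
    workers.close()
    workers.join()
    annotations = []
    for p in processes:
        annotations.extend(p.get())
    return annotations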
Code example #6
        def get_gt(pan_gt_json_file=None):
            if pan_gt_json_file is None:
                pan_gt_json_file = self.panoptic_json_file
            with open(pan_gt_json_file, 'r') as f:
                pan_gt_json = json.load(f)
            files = [item['file_name'] for item in pan_gt_json['images']]
            categories = pan_gt_json['categories']
            categories = {el['id']: el for el in categories}
            color_generator = IdGenerator(categories)

            return pan_gt_json, categories, color_generator
Code example #7
def do_panoptic_test(cfg, model):
    categories = json.load(open("../data/panoptic_coco_categories.json", 'r'))
    os.makedirs("../data/detectron2_panoptic", exist_ok=True)
    categories_dict = {category['id']: category for category in categories}
    id_generator = IdGenerator(categories_dict)
    image_dict = {}
    error_list = []
    for dataset_name in ['viroi_test']:  # optionally also 'viroi_train' or cfg.DATASETS.TEST
        thing_id_map = MetadataCatalog.get(dataset_name).get("thing_contiguous_id_to_class_id")
        stuff_id_map = MetadataCatalog.get(dataset_name).get("stuff_contiguous_id_to_class_id")
        test_images_dict = json.load(open(MetadataCatalog.get(dataset_name).get("instance_json_file"), 'r'))
        image_path = MetadataCatalog.get(dataset_name).get("image_path")

        predictor = DefaultPredictor(cfg)

        total = len(test_images_dict)
        count = 0
        for image_id in test_images_dict:
            image_info = test_images_dict[image_id]
            img = read_image(image_path + "/" + image_info['image_name'], format="BGR")
            count += 1
            print("{}/{}".format(count, total))
            try:
                predictions = predictor(img, 0)  # 'sem_seg', 'instances', 'panoptic_seg'
                panoptic_seg, segments_info = predictions["panoptic_seg"]
                panoptic_seg = panoptic_seg.data.cpu().numpy()

                panoptic_color_seg = np.zeros((panoptic_seg.shape[0], panoptic_seg.shape[1], 3))
                instance_dict = {}
                for info in segments_info:
                    info.pop('score', None)
                    info.pop('area', None)
                    # Reorder the box from (x1, y1, x2, y2) to (y1, x1, y2, x2).
                    bbox = info.pop('box')
                    info['bbox'] = [int(bbox[1]), int(bbox[0]), int(bbox[3]), int(bbox[2])]

                    class_id = info['class_id']
                    del info['category_id']

                    mask = info['mask'].data.cpu().numpy()
                    mask = np.asfortranarray(mask)
                    segmentation = maskUtils.encode(mask)
                    segmentation['counts'] = segmentation['counts'].decode('utf8')
                    info['segmentation'] = segmentation
                    instance_id, panoptic_color_seg[mask] = id_generator.get_id_and_color(
                        categories[class_id - 1]['id'])
                    info['instance_id'] = instance_id
                    del info['mask']

                    instance_dict[str(instance_id)] = info
                image_dict[image_id] = {
                    'instances': instance_dict,
                    "image_id": image_info['image_id'],
                    "height": image_info['height'],
                    "width": image_info['width'],
                    "image_name": image_info['image_name']
                }
                Image.fromarray(panoptic_color_seg.astype(np.uint8)).save(
                    "../data/detectron2_panoptic/" + image_info["image_name"].replace("jpg", "png"))
            except Exception:
                print("ERROR - " + image_info['image_name'])
                error_list.append(image_info['image_name'])
        json.dump(image_dict, open("../data/viroi_json/detectron2_" + dataset_name + "_images_dict.json", 'w'))
    json.dump(error_list, open("error_list.json", 'w'))
Code example #8
    def inference_panoptic_video(
            self,
            pred_pans_2ch,
            output_dir,
            # pan_im_json_file,
            categories,
            names,
            n_video=0):
        from panopticapi.utils import IdGenerator

        # Sample only frames with GT annotations.
        pred_pans_2ch = pred_pans_2ch[(self.labeled_fid //
                                       self.lambda_)::self.lambda_]
        categories = {el['id']: el for el in categories}
        color_generator = IdGenerator(categories)

        def get_pred_large(pan_2ch_all, vid_num, nframes_per_video=6):
            # NOTE: the vid_num argument is overwritten immediately.
            vid_num = len(pan_2ch_all) // nframes_per_video
            cpu_num = multiprocessing.cpu_count() // 2  # 32 --> 16
            nprocs = min(vid_num, cpu_num)  # 10
            max_nframes = cpu_num * nframes_per_video
            nsplits = (len(pan_2ch_all) - 1) // max_nframes + 1
            annotations, pan_all = [], []
            for i in range(0, len(pan_2ch_all), max_nframes):
                print('==> Read and convert VPS output - split %d/%d' %
                      ((i // max_nframes) + 1, nsplits))
                pan_2ch_part = pan_2ch_all[i:min(i + max_nframes,
                                                 len(pan_2ch_all))]
                pan_2ch_split = np.array_split(pan_2ch_part, nprocs)
                workers = multiprocessing.Pool(processes=nprocs)
                processes = []
                for proc_id, pan_2ch_set in enumerate(pan_2ch_split):
                    p = workers.apply_async(
                        self.converter_2ch_track_core,
                        (proc_id, pan_2ch_set, color_generator))
                    processes.append(p)
                workers.close()
                workers.join()

                for p in processes:
                    p = p.get()
                    annotations.extend(p[0])
                    pan_all.extend(p[1])

            pan_json = {'annotations': annotations}
            return pan_all, pan_json

        def save_image(images, save_folder, names, colors=None):
            os.makedirs(save_folder, exist_ok=True)

            names = [
                osp.join(
                    save_folder,
                    name.replace('_leftImg8bit',
                                 '').replace('_newImg8bit', '').replace(
                                     'jpg', 'png').replace('jpeg', 'png'))
                for name in names
            ]
            cpu_num = multiprocessing.cpu_count() // 2
            images_split = np.array_split(images, cpu_num)
            names_split = np.array_split(names, cpu_num)
            workers = multiprocessing.Pool(processes=cpu_num)
            for proc_id, (images_set, names_set) in enumerate(
                    zip(images_split, names_split)):
                workers.apply_async(BaseDataset._save_image_single_core,
                                    (proc_id, images_set, names_set, colors))
            workers.close()
            workers.join()

        # inference_panoptic_video
        pred_pans, pred_json = get_pred_large(pred_pans_2ch, vid_num=n_video)
        print('--------------------------------------')
        print('==> Saving VPS output png files')
        os.makedirs(output_dir, exist_ok=True)
        save_image(pred_pans_2ch, osp.join(output_dir, 'pan_2ch'), names)
        save_image(pred_pans, osp.join(output_dir, 'pan_pred'), names)
        print('==> Saving pred.jsons file')
        json.dump(pred_json, open(osp.join(output_dir, 'pred.json'), 'w'))
        print('--------------------------------------')

        return pred_pans, pred_json
Code example #9
def combine_to_panoptic_single_core(proc_id, img_ids, img_id2img,
                                    inst_by_image, sem_by_image,
                                    segmentations_folder, overlap_thr,
                                    stuff_area_limit, categories):
    panoptic_json = []
    id_generator = IdGenerator(categories)

    for idx, img_id in enumerate(img_ids):
        img = img_id2img[img_id]

        if idx % 100 == 0:
            print('Core: {}, {} from {} images processed.'.format(
                proc_id, idx, len(img_ids)))

        pan_segm_id = np.zeros((img['height'], img['width']), dtype=np.uint32)
        used = None
        annotation = {}
        try:
            annotation['image_id'] = int(img_id)
        except Exception:
            annotation['image_id'] = img_id

        annotation['file_name'] = img['file_name'].replace('.jpg', '.png')

        segments_info = []
        for ann in inst_by_image[img_id]:
            area = COCOmask.area(ann['segmentation'])
            if area == 0:
                continue
            if used is None:
                intersect = 0
                used = copy.deepcopy(ann['segmentation'])
            else:
                intersect = COCOmask.area(
                    COCOmask.merge([used, ann['segmentation']],
                                   intersect=True))
            if intersect / area > overlap_thr:
                continue
            used = COCOmask.merge([used, ann['segmentation']], intersect=False)

            mask = COCOmask.decode(ann['segmentation']) == 1
            if intersect != 0:
                mask = np.logical_and(pan_segm_id == 0, mask)
            segment_id = id_generator.get_id(ann['category_id'])
            panoptic_ann = {}
            panoptic_ann['id'] = segment_id
            panoptic_ann['category_id'] = ann['category_id']
            pan_segm_id[mask] = segment_id
            segments_info.append(panoptic_ann)

        for ann in sem_by_image[img_id]:
            mask = COCOmask.decode(ann['segmentation']) == 1
            mask_left = np.logical_and(pan_segm_id == 0, mask)
            if mask_left.sum() < stuff_area_limit:
                continue
            segment_id = id_generator.get_id(ann['category_id'])
            panoptic_ann = {}
            panoptic_ann['id'] = segment_id
            panoptic_ann['category_id'] = ann['category_id']
            pan_segm_id[mask_left] = segment_id
            segments_info.append(panoptic_ann)

        annotation['segments_info'] = segments_info
        panoptic_json.append(annotation)

        Image.fromarray(id2rgb(pan_segm_id)).save(
            os.path.join(segmentations_folder, annotation['file_name']))

    return panoptic_json
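COCOmask here is pycocotools.mask, so merge, area and decode all operate on run-length encodings. A small sketch of the overlap test above on toy masks:

import numpy as np
from pycocotools import mask as COCOmask

a = np.zeros((4, 4), dtype=np.uint8, order='F')
b = np.zeros((4, 4), dtype=np.uint8, order='F')
a[:2, :2] = 1
b[1:3, 1:3] = 1
rle_a, rle_b = COCOmask.encode(a), COCOmask.encode(b)

intersection = COCOmask.merge([rle_a, rle_b], intersect=True)
union = COCOmask.merge([rle_a, rle_b], intersect=False)
# area(a) = 4, area(intersection) = 1, area(union) = 7; the overlap ratio
# used above is area(intersection) / area(segment).
print(COCOmask.area(rle_a), COCOmask.area(intersection), COCOmask.area(union))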
Code example #10
def process_image(working_idx):
    global file_list, categories_dict, output_folder
    f = file_list[working_idx]
    img = Image.open(f)
    img = img.resize((1280, 720))
    original_format = np.array(img)
    file_name = f.split('/')[-1]
    image_id = file_name.rsplit('_', 2)[0]
    image_filename = '{}_{}_gtFine_panopticlevel3Ids.png'.format(
        f.split('/')[-2], image_id)
    # image entry, id for image is its filename without extension
    image = {
        "id": image_filename,
        "width": original_format.shape[1],
        "height": original_format.shape[0],
        "file_name": image_filename
    }

    pan_format = np.zeros(
        (original_format.shape[0], original_format.shape[1], 3),
        dtype=np.uint8)
    id_generator = IdGenerator(categories_dict)

    l = np.unique(original_format)
    segm_info = []
    for el in l:
        if el < 1000:
            semantic_id = el
            is_crowd = 1
        else:
            semantic_id = el // 1000
            is_crowd = 0
        if semantic_id not in categories_dict:
            continue
        if categories_dict[semantic_id]['isthing'] == 0:
            is_crowd = 0
        mask = original_format == el
        segment_id, color = id_generator.get_id_and_color(semantic_id)
        pan_format[mask] = color

        area = np.sum(mask)  # segment area computation

        # bbox computation for a segment
        hor = np.sum(mask, axis=0)
        hor_idx = np.nonzero(hor)[0]
        x = hor_idx[0]
        width = hor_idx[-1] - x + 1
        vert = np.sum(mask, axis=1)
        vert_idx = np.nonzero(vert)[0]
        y = vert_idx[0]
        height = vert_idx[-1] - y + 1
        bbox = [x, y, width, height]

        segm_info.append({
            "id": int(segment_id),
            "category_id": int(semantic_id),
            "area": int(area),
            "bbox": [int(x) for x in bbox],
            "iscrowd": is_crowd
        })

    Image.fromarray(pan_format).save(
        os.path.join(output_folder, image_filename))
    return image, segm_info
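Because process_image takes only an index and reads file_list, categories_dict and output_folder from module globals, it can be mapped directly over a multiprocessing pool. A sketch of such a driver, assuming those globals are initialized at module level before the pool is created (fork-based start method):

import multiprocessing

def convert_all():
    with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
        results = pool.map(process_image, range(len(file_list)))
    images = []
    annotations = []
    for image, segm_info in results:
        images.append(image)
        annotations.append({
            'image_id': image['id'],
            'file_name': image['file_name'],
            'segments_info': segm_info
        })
    return images, annotations

Note that each call builds its own IdGenerator, so segment ids are unique within an image but not across images, which is all the COCO panoptic format requires.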
Code example #11
def visualize(image_path, label_path):
    """
    Visualizes in a pyplot window an image and a label pair from
    provided paths. Pillow is used for reading, so all paths and formats
    must be Pillow-compatible.

    Args:
        image_path: an image path provided to Pillow.Image.open
        label_path: a label path provided to Pillow.Image.open
    """
    assert op.exists(image_path)
    assert op.exists(label_path)

    # Prepare canvases and decode the labels.
    image = np.array(Image.open(image_path), dtype=np.uint8)
    label = np.array(Image.open(label_path), dtype=np.int32)
    uids_unique_org = np.unique(label)
    semantic_segmentation = np.zeros((image.shape[0], image.shape[1], 3),
                                     dtype=np.uint8)
    instance_segmentation = np.zeros((image.shape[0], image.shape[1], 3),
                                     dtype=np.uint8)
    parts_segmentation = np.zeros((image.shape[0], image.shape[1], 3),
                                  dtype=np.uint8)
    sids, iids, _ = decode_uids(label)

    # Color at the semantic level.
    color_generator = IdGenerator(CATEGORIES)
    for sid in np.unique(sids):
        mask = np.equal(sids, sid)
        color = CATEGORIES[sid]['color']
        semantic_segmentation[mask] = color

    # Color at the semantic and instance level and find the instance-level boundaries.
    sids_only = np.where(iids < 0, sids, np.zeros_like(iids))
    for sid in np.unique(sids_only):
        mask = np.equal(sids_only, sid)
        color = color_generator.get_color(sid)
        instance_segmentation[mask] = color

    sid_iids = np.where(iids >= 0, sids * 10**3 + iids, np.zeros_like(iids))
    boundaries = np.full(sid_iids.shape, False)
    for sid_iid in np.unique(sid_iids):
        if sid_iid != 0:
            mask = np.equal(sid_iids, sid_iid)
            color = color_generator.get_color(sid_iid // 1000)
            instance_segmentation[mask] = color
            boundary_horizon = ndimage.sobel(mask, 0)
            boundary_vertical = ndimage.sobel(mask, 1)
            boundaries = np.logical_or(
                np.hypot(boundary_horizon, boundary_vertical), boundaries)

    # Color at the part level.
    # Convert the original labels into the form for visualization with IdGenerator.
    for uid in uids_unique_org:
        # If uid is sid or sid_iid, encode them as they are.
        if uid <= 99_999:
            sid_iid = uid
        # If uid is sid_iid_pid, map sid_pid to its corresponding sid and create new label as sid_iid.
        else:
            sid, iid, pid = decode_uids(uid)
            sid_pid = sid * 10**2 + pid
            if sid_pid in SID_PID2PARTS_CID:
                sid_iid = SID_PID2PARTS_CID[sid_pid] * 10**3 + iid
            else:
                sid_iid = sid * 10**3 + iid

        label[label == uid] = sid_iid

    color_generator = IdGenerator(CATEGORIES_PARTS)

    for sid_iid in np.unique(label):
        # If sid_iid is in the format of sid , use sid for color generation (things and stuff classes differentiated by IdGenerator inherently).
        if sid_iid <= 99:
            id_ = sid_iid
        # If sid_iid is in the format of sid_iid, get sid.
        else:
            id_ = sid_iid // 1000
        mask = label == sid_iid
        color = color_generator.get_color(id_)
        parts_segmentation[mask] = color

    # Depict boundaries.
    instance_segmentation[boundaries] = [255, 255, 255]
    parts_segmentation[boundaries] = [255, 255, 255]

    # plot
    # initialize figure for plotting
    _, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
    # for ax in axes:
    #   ax.set_axis_off()
    ax1.imshow(image)
    ax1.set_title('image')
    ax2.imshow(semantic_segmentation)
    ax2.set_title('labels colored on semantic level')
    ax3.imshow(instance_segmentation)
    ax3.set_title('labels colored on semantic and instance levels')
    ax4.imshow(parts_segmentation)
    ax4.set_title('labels colored on semantic, instance, and parts levels')
    plt.show()
Code example #12
def generate_occlusion_ground_truth_single_core(proc_id, coco_detection,
                                                annotations_set, categories,
                                                segmentations_folder,
                                                threshold):
    id_generator = IdGenerator(categories)

    occlusion_gt = []
    for working_idx, annotation in enumerate(annotations_set):
        if working_idx % 100 == 0:
            print('Core: {}, {} from {} images processed'.format(
                proc_id, working_idx, len(annotations_set)))
        img_id = annotation['image_id']
        img = coco_detection.loadImgs(int(img_id))[0]
        overlaps_map = np.zeros((img['height'], img['width']), dtype=np.uint32)

        anns_ids = coco_detection.getAnnIds(img_id)
        anns = coco_detection.loadAnns(anns_ids)

        occlusion_record = dict()
        occlusion_record['image_id'] = img_id
        file_name = '{}.png'.format(annotation['file_name'].rsplit('.')[0])

        # Read instance segments from panoptic segmentation gt
        try:
            pan_format = np.array(
                Image.open(os.path.join(segmentations_folder, file_name)),
                dtype=np.uint32)
        except IOError:
            raise KeyError('no prediction png file for id: {}'.format(
                annotation['image_id']))
        pan = rgb2id(pan_format)
        pan_mask = {}
        for segm_info in annotation['segments_info']:
            if categories[segm_info['category_id']]['isthing'] != 1:
                continue
            mask = (pan == segm_info['id']).astype(np.uint8)
            pan_mask[segm_info['id']] = mask

        # Read instance segments from instance segmentation gt
        segments_info = []
        ins_mask = {}
        overlap_pairs = []
        for ann in anns:
            if ann['category_id'] not in categories:
                raise Exception(
                    'Panoptic coco categories file does not contain '
                    'category with id: {}'.format(ann['category_id']))
            _, color = id_generator.get_id_and_color(ann['category_id'])
            mask = coco_detection.annToMask(ann)
            overlaps_map += mask
            pan_format[mask == 1] = color
            ann.pop('segmentation')
            ann.pop('image_id')
            # ann['id'] kept as the same
            segments_info.append(ann)

            ins_mask[ann['id']] = mask

        # match segment ID in instance segmentation and panoptic segmentation by IOU
        ins2pan = {}
        pan2ins = {}
        for pan_id in pan_mask:
            iou_max = 0
            match = None
            for ins_id in ins_mask:
                if ins_id in ins2pan:
                    continue
                mask_sum = pan_mask[pan_id] + ins_mask[ins_id]
                iou = np.sum(mask_sum > 1) / np.sum(mask_sum > 0)
                if iou > iou_max:
                    iou_max = iou
                    match = ins_id
            if not match:
                print(
                    "Inconsistent panoptic annotation and instance annotation")
            else:
                ins2pan[match] = pan_id
                pan2ins[pan_id] = match

        if np.sum(overlaps_map > 1) != 0:
            for i, _ in enumerate(segments_info):
                for j in range(i + 1, len(segments_info)):
                    id_i = segments_info[i]['id']
                    id_j = segments_info[j]['id']
                    mask_i = ins_mask[id_i]
                    mask_j = ins_mask[id_j]
                    mask_merge = mask_i + mask_j
                    r_i = np.sum(mask_merge > 1) / np.sum(mask_i)
                    r_j = np.sum(mask_merge > 1) / np.sum(mask_j)
                    if r_i >= threshold or r_j >= threshold:
                        if id_i not in ins2pan or id_j not in ins2pan:
                            continue
                        pan_id_i = ins2pan[id_i]
                        pan_id_j = ins2pan[id_j]
                        pan_id_top = None
                        max_cnt = 0
                        pan_intersection = pan[mask_merge > 1]
                        candidate_ids, candidate_cnts = np.unique(
                            pan_intersection, return_counts=True
                        )  # count the number of different segments
                        if candidate_ids.size == 0:
                            raise Exception(
                                'Wrong intersection for image {} ({}).'.format(
                                    img_id, file_name))
                        for it in range(candidate_ids.size):
                            if candidate_ids[it] == 0:  # remove background 0
                                continue
                            if candidate_cnts[it] > max_cnt:
                                max_cnt = candidate_cnts[it]
                                pan_id_top = int(candidate_ids[it])
                        if pan_id_top and pan_id_top in [pan_id_i, pan_id_j]:
                            # overlap_pairs.append((pan_id_i, pan_id_j, pan_id_top))
                            overlap_pairs.append(
                                (pan2ins[pan_id_i], pan2ins[pan_id_j],
                                 pan2ins[pan_id_top]))
        occlusion_record['overlap_pairs'] = overlap_pairs
        occlusion_gt.append(occlusion_record)

    print('Core: {}, all {} images processed'.format(proc_id,
                                                     len(annotations_set)))
    return occlusion_gt
Code example #13
def sa_pixel_to_coco_panoptic(dataset_name, export_root, thing_ids):
    os.makedirs(os.path.join(dataset_name, "annotations"), exist_ok=True)

    info = {
        'description': 'This is stable 1.0 version of the ' + dataset_name +
                       ' dataset.',
        'url': 'https://superannotate.ai',
        'version': '1.0',
        'year': 2019,
        'contributor': 'Annotator LLC',
        'date_created': '2019-11-15 11:47:32.67823'
    }

    licences = [
        {
            'url': 'https://superannotate.ai',
            'id': 1,
            'name': 'Superannotate License'
        }
    ]

    categories = []
    dbid_to_catid = {}
    classes = json.load(
        open(os.path.join(export_root, "classes", "classes.json"))
    )
    for idx, dbclass in enumerate(classes, 1):
        category = {
            "id": idx,
            "name": dbclass["name"],
            "supercategory": dbclass["name"],
            "isthing": dbclass["id"] in thing_ids,
            "color": id2rgb(int(dbclass["color"][1:], 16))
        }

        dbid_to_catid[dbclass["id"]] = category["id"]
        categories.append(category)

    print("Converting annotations for {} dataset ...".format(dataset_name))

    id_generator = IdGenerator({cat['id']: cat for cat in categories})
    panoptic_root = os.path.join(
        dataset_name, "panoptic_{}".format(dataset_name)
    )
    os.makedirs(panoptic_root, exist_ok=True)
    jsons = glob.glob(os.path.join(export_root, "*.json"))
    images = []
    annotations = []
    for idx, filepath in tqdm(enumerate(jsons, 1)):
        filename = os.path.basename(filepath)
        imagename = filename[:-len('___pixel.json')] + '___lores.jpg'

        width, height = Image.open(os.path.join(export_root, imagename)).size
        image_info = {
            "id": idx,
            "file_name": imagename,
            "height": height,
            "width": width,
            "license": 1
        }
        images.append(image_info)

        segments_info = []
        sa_ann_json = json.load(open(os.path.join(export_root, filename)))

        sa_bluemask_path = os.path.join(
            export_root, filename[:-len('___pixel.json')] + '___save.png'
        )
        sa_bluemask_rgb = np.asarray(
            Image.open(sa_bluemask_path).convert('RGB'), dtype=np.uint32
        )
        ann_mask = np.zeros((height, width), dtype=np.uint32)
        flat_mask = ((sa_bluemask_rgb[:, :, 0] << 16)
                     | (sa_bluemask_rgb[:, :, 1] << 8)
                     | sa_bluemask_rgb[:, :, 2])

        for instance in sa_ann_json:
            parts = [int(part["color"][1:], 16) for part in instance["parts"]]
            category_id = dbid_to_catid[instance["classId"]]
            instance_bitmask = np.isin(flat_mask, parts)
            segment_id = id_generator.get_id(category_id)
            ann_mask[instance_bitmask] = segment_id
            coco_instance_mask = cocomask.encode(
                np.asfortranarray(instance_bitmask)
            )
            bbox = cocomask.toBbox(coco_instance_mask).tolist()
            area = int(cocomask.area(coco_instance_mask))

            segment_info = {
                "id": segment_id,
                "category_id": category_id,
                "area": area,
                "bbox": bbox,
                "iscrowd": 0
            }
            segments_info.append(segment_info)
        panopticmask = imagename[:-len("jpg")] + "png"
        Image.fromarray(id2rgb(ann_mask)).save(
            os.path.join(panoptic_root, panopticmask)
        )

        annotation = {
            "image_id": idx,
            "file_name": panopticmask,
            "segments_info": segments_info
        }
        annotations.append(annotation)

    panoptic_data = {
        "info": info,
        "licences": licences,
        "images": images,
        "annotations": annotations,
        "categories": categories
    }

    json_data = json.dumps(panoptic_data, indent=4)
    with open(
        os.path.join(
            dataset_name, "annotations",
            "panoptic_{}.json".format(dataset_name)
        ), "w+"
    ) as coco_json:
        coco_json.write(json_data)
Code example #14
def build_panoptic_area(img_id, output_path, detection_path,
                        segmentation_path):
    """
    Build panoptic segmentation of the specified image.
    Segments are sorted by area: smaller objects are written first to avoid overlap.
    :param img_id: image identifier (for retrieving file name)
    :param output_path: path to store panoptic segmentation results (png)
    :param detection_path: input path for detection
    :param segmentation_path: input path for semantic segmentation
    :return: the json annotation with class information for each panoptic segment.
    """
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    #Read categories and create IdGenerator (official panopticapi repository)
    categories = load_panoptic_category_info()
    id_generator = IdGenerator(categories)

    #Parameters:
    overlap_thr = 0.5
    stuff_area_limit = 64 * 64

    #read segmentation data
    segm_probs = json.load(
        open(segmentation_path + '/' + img_id + '_prob.json', 'r'))
    segm_labelmap = np.array(
        Image.open(segmentation_path + '/' + img_id + '_0.png'), np.uint8)
    #read detection data
    detection = json.load(
        open(detection_path + '/' + img_id + '_prob.json', 'r'))

    pan_segm_id = np.zeros(segm_labelmap.shape, dtype=np.uint32)
    used = np.full(segm_labelmap.shape, False)

    annotation = {}
    try:
        annotation['image_id'] = int(img_id)
    except Exception:
        annotation['image_id'] = img_id

    annotation['file_name'] = img_id + '.png'

    segments_info = []

    for obj in detection:
        obj_mask = extract_mask_bool(obj['mask'])
        obj_area = np.count_nonzero(obj_mask)
        obj['area'] = obj_area
        obj['mask'] = obj_mask

    detection.sort(key=lambda x: x['area'])  # smaller segments first

    for obj in detection:
        obj_mask = obj['mask']
        obj_area = obj['area']
        if obj_area == 0:
            continue
        #Filter out objects with intersection > 50% with used area
        intersection_mask = used & obj_mask
        intersect_area = np.count_nonzero(intersection_mask)
        if 1.0 * intersect_area / obj_area > overlap_thr:
            continue
        used = used | obj_mask

        segment_id = id_generator.get_id(obj['class'])
        panoptic_ann = {}
        panoptic_ann['id'] = segment_id
        panoptic_ann['category_id'] = obj['class']
        if intersect_area > 0:
            pan_segm_id[obj_mask & (~intersection_mask)] = segment_id
        else:
            pan_segm_id[obj_mask] = segment_id
        segments_info.append(panoptic_ann)

    for segm_class in np.unique(segm_labelmap):
        segm_class = int(segm_class)
        if segm_class == 183:  #void class
            continue

        #Check class: exclude non-stuff objects
        category = categories[segm_class]
        if category['isthing'] == 1:
            continue

        segm_mask = (segm_labelmap == segm_class)
        mask_left = segm_mask & (~used)
        # Filter out segments with small area
        if np.count_nonzero(mask_left) < stuff_area_limit:
            continue
        segment_id = id_generator.get_id(segm_class)
        panoptic_ann = {}
        panoptic_ann['id'] = segment_id
        panoptic_ann['category_id'] = segm_class
        used = used | mask_left
        pan_segm_id[mask_left] = segment_id
        segments_info.append(panoptic_ann)

    annotation['segments_info'] = segments_info

    # Save annotated image
    Image.fromarray(id2rgb(pan_segm_id)).save(
        os.path.join(output_path, annotation['file_name']))

    # Remove segments whose area ended up zero (fully overwritten).
    ids = set(np.unique(pan_segm_id))
    annotation['segments_info'] = [
        seg for seg in segments_info if seg['id'] in ids
    ]

    return annotation
Code example #15
def panoptic_video_converter():

    original_format_folder = os.path.join(ROOT_DIR, MODE, 'panoptic_inst')
    # folder to store panoptic PNGs
    out_folder = os.path.join(ROOT_DIR, MODE, 'panoptic_video')
    out_file = os.path.join(ROOT_DIR, 'panoptic_gt_%s_city_vps.json' % (MODE))
    if not os.path.isdir(out_folder):
        os.makedirs(out_folder)

    categories = CATEGORIES
    categories_dict = {el['id']: el for el in CATEGORIES}
    file_list = sorted(glob.glob(os.path.join(original_format_folder,
                                              '*.png')))
    images = []
    annotations = []
    instid2color = {}
    videos = []
    id_generator = IdGenerator(categories_dict)
    print('==> %s/panoptic_video/ ...' % (MODE))

    for idx in trange(len(file_list)):
        f = file_list[idx]
        original_format = np.array(Image.open(f))

        file_name = f.split('/')[-1]
        image_id = file_name.rsplit('_', 2)[0]
        video_id = image_id[:4]
        if video_id not in videos:
            videos.append(video_id)
            instid2color = {}

        image_filename = file_name.replace('final_mask', 'newImg8bit').replace(
            'gtFine_color', 'leftImg8bit')
        # image entry, id for image is its filename without extension
        images.append({
            "id": image_id,
            "width": original_format.shape[1],
            "height": original_format.shape[0],
            "file_name": image_filename
        })
        pan_format = np.zeros(
            (original_format.shape[0], original_format.shape[1], 3),
            dtype=np.uint8)

        l = np.unique(original_format)

        segm_info = {}
        for el in l:
            if el < 1000:
                semantic_id = el
                is_crowd = 1
            else:
                semantic_id = el // 1000
                is_crowd = 0
            if semantic_id not in categories_dict:
                continue
            if categories_dict[semantic_id]['isthing'] == 0:
                is_crowd = 0
            mask = (original_format == el)

            if el not in instid2color:
                segment_id, color = id_generator.get_id_and_color(semantic_id)
                instid2color[el] = (segment_id, color)
            else:
                segment_id, color = instid2color[el]

            pan_format[mask] = color
            # area is recomputed later from the saved PNG; the bbox
            # computation from the single-image converter is omitted here.
            segm_info[int(segment_id)] = {
                "id": int(segment_id),
                "category_id": int(semantic_id),
                "iscrowd": is_crowd
            }

        Image.fromarray(pan_format).save(os.path.join(out_folder, file_name))

        # segment sanity check, area recalculation
        gt_pan = np.uint32(pan_format)
        pan_gt = (gt_pan[:, :, 0] + gt_pan[:, :, 1] * 256 +
                  gt_pan[:, :, 2] * 256 * 256)
        labels, labels_cnt = np.unique(pan_gt, return_counts=True)
        gt_labels = list(segm_info.keys())
        gt_labels_set = set(gt_labels)
        for label, area in zip(labels, labels_cnt):
            if label == 0:
                continue
            if label not in gt_labels and label > 0:
                print('png label not in json labels.')
            segm_info[label]["area"] = int(area)
            gt_labels_set.remove(label)
        if len(gt_labels_set) != 0:
            raise KeyError(
                'json segment ids missing from the png: {}'.format(
                    gt_labels_set))

        segm_info = list(segm_info.values())
        annotations.append({
            'image_id': image_id,
            'file_name': file_name,
            "segments_info": segm_info
        })

    d = {
        'images': images,
        'annotations': annotations,
        'categories': categories,
    }

    save_json(d, out_file)
    print('==> Saved json file at %s' % (out_file))
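The manual decoding above, gt_pan[:, :, 0] + gt_pan[:, :, 1] * 256 + gt_pan[:, :, 2] * 256 * 256, matches panopticapi's rgb2id helper, and id2rgb is its inverse. A quick round-trip sketch:

import numpy as np
from panopticapi.utils import id2rgb, rgb2id

ids = np.array([[0, 1], [256, 70000]], dtype=np.uint32)
rgb = id2rgb(ids)  # uint8 image; R holds the lowest-order byte
assert np.array_equal(rgb2id(rgb), ids)
print(rgb[0, 1], rgb[1, 0])  # [1 0 0] [0 1 0]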
Code example #16
File: visualization.py  Project: kemaloksuz/LRP-Error
# Excerpt from panopticapi's visualization demo: coco_d (the panoptic json),
# ann (one annotation), categories, img_folder, segmentations_folder and
# generate_new_colors are defined earlier in the file.
img = None
for image_info in coco_d['images']:
    if image_info['id'] == ann['image_id']:
        try:
            img = np.array(
                Image.open(os.path.join(img_folder, image_info['file_name'])))
        except IOError:
            print("Unable to find corresponding input image.")
        break

segmentation = np.array(
    Image.open(os.path.join(segmentations_folder, ann['file_name'])),
    dtype=np.uint8)
segmentation_id = rgb2id(segmentation)
# find segments boundaries
boundaries = find_boundaries(segmentation_id, mode='thick')

if generate_new_colors:
    segmentation[:, :, :] = 0
    color_generator = IdGenerator(categories)
    for segment_info in ann['segments_info']:
        color = color_generator.get_color(segment_info['category_id'])
        mask = segmentation_id == segment_info['id']
        segmentation[mask] = color

# depict boundaries
segmentation[boundaries] = [0, 0, 0]

if img is None:
    plt.figure()
    plt.imshow(segmentation)
    plt.axis('off')
else:
    plt.figure(figsize=(9, 5))
    plt.subplot(121)
    plt.imshow(img)
    plt.axis('off')
    plt.subplot(122)
    plt.imshow(segmentation)
    plt.axis('off')
plt.show()
Code example #17
def panoptic_converter(original_format_folder, out_folder, out_file):

    if not os.path.isdir(out_folder):
        print("Creating folder {} for panoptic segmentation PNGs".format(
            out_folder))
        os.mkdir(out_folder)

    categories = []
    for idx, el in enumerate(labels):
        if el.ignoreInEval:
            continue
        categories.append({
            'id': el.id,
            'name': el.name,
            'color': el.color,
            'supercategory': el.category,
            'isthing': 1 if el.hasInstances else 0
        })

    categories_dict = {cat['id']: cat for cat in categories}

    file_list = sorted(
        glob.glob(
            os.path.join(original_format_folder,
                         '*/*_gtFine_instanceIds.png')))

    images = []
    annotations = []
    for working_idx, f in enumerate(file_list):
        if working_idx % 10 == 0:
            print(working_idx, len(file_list))

        original_format = np.array(Image.open(f))

        file_name = f.split('/')[-1]
        image_id = file_name.rsplit('_', 2)[0]
        image_filename = '{}_leftImg8bit.png'.format(image_id)
        # image entry, id for image is its filename without extension
        images.append({
            "id": image_id,
            "width": original_format.shape[1],
            "height": original_format.shape[0],
            "file_name": image_filename
        })

        pan_format = np.zeros(
            (original_format.shape[0], original_format.shape[1], 3),
            dtype=np.uint8)
        id_generator = IdGenerator(categories_dict)

        ll = np.unique(original_format)
        segm_info = []
        for el in ll:
            if el < 1000:
                semantic_id = el
                is_crowd = 1
            else:
                semantic_id = el // 1000
                is_crowd = 0
            if semantic_id not in categories_dict:
                continue
            if categories_dict[semantic_id]['isthing'] == 0:
                is_crowd = 0
            mask = original_format == el
            segment_id, color = id_generator.get_id_and_color(semantic_id)
            pan_format[mask] = color

            area = np.sum(mask)  # segment area computation

            # bbox computation for a segment
            hor = np.sum(mask, axis=0)
            hor_idx = np.nonzero(hor)[0]
            x = hor_idx[0]
            width = hor_idx[-1] - x + 1
            vert = np.sum(mask, axis=1)
            vert_idx = np.nonzero(vert)[0]
            y = vert_idx[0]
            height = vert_idx[-1] - y + 1
            bbox = [x, y, width, height]

            segm_info.append({
                "id": int(segment_id),
                "category_id": int(semantic_id),
                "area": int(area),  # cast numpy ints for json serialization
                "bbox": [int(v) for v in bbox],
                "iscrowd": is_crowd
            })

        annotations.append({
            'image_id': image_id,
            'file_name': file_name,
            "segments_info": segm_info
        })

        Image.fromarray(pan_format).save(os.path.join(out_folder, file_name))

    d = {
        'images': images,
        'annotations': annotations,
        'categories': categories,
    }

    save_json(d, out_file)
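A converted dataset like the one written above can be spot-checked by re-reading a saved PNG and confirming that every id in segments_info actually occurs in it, essentially the sanity check from code example #15. A sketch, assuming out_folder and one annotation entry produced above:

import os

import numpy as np
from PIL import Image
from panopticapi.utils import rgb2id

def check_annotation(out_folder, annotation):
    pan_png = np.array(
        Image.open(os.path.join(out_folder, annotation['file_name'])),
        dtype=np.uint32)
    pan_ids = rgb2id(pan_png)
    for segment in annotation['segments_info']:
        assert np.any(pan_ids == segment['id']), \
            'segment {} missing from {}'.format(segment['id'],
                                                annotation['file_name'])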