# Ejemplo n.º 1 (Example no. 1)
# 0
    def infer_panoptic_mask(self, instance_mask_only=False):
        """Fuse instance and semantic predictions into one panoptic id mask.

        Paints "thing" instances first, then fills unclaimed pixels with
        "stuff" classes taken from ``self.sem_decision``.

        :param instance_mask_only: if True, return right after painting the
            instance ("thing") segments, skipping the stuff pass.
        :return: ``(mask, meta)`` — ``mask`` is an HxW uint32 array holding a
            panoptic segment id per pixel (0 = unlabeled); ``meta`` is a
            COCO-panoptic style annotation dict whose 'segments_info' lists
            one record per segment.
        """
        segments = []
        meta = {
            'image_id': None,  # not needed for now
            'file_name': None,
            'segments_info': segments  # aliased: appends below land in meta
        }
        id_gen = IdGenerator(self.categories)

        # Per-instance binary masks with overlaps already resolved, plus the
        # semantic (train-id) category of each instance.
        instance_tsr, _, sem_cats = self.get_instance_tsr(resolve_overlap=True)
        # After overlap resolution each pixel may belong to at most one
        # instance; the stacked masks therefore sum to <= 1 everywhere.
        assert instance_tsr.sum(
            0).max() <= 1, 'contested pixs should not exist'
        sem_cats = sem_cats.tolist()
        # Translate training ids into dataset category ids.
        sem_cats = [self.trainId_2_catId[el] for el in sem_cats]
        iids = []
        for _cat, _ins_mask in zip(sem_cats, instance_tsr):
            # Fresh panoptic id encoding this category (panopticapi scheme).
            _id = id_gen.get_id(_cat)
            iids.append(_id)
            segments.append({
                'id':
                _id,
                'category_id':
                _cat,
                'isthing':
                1,
                'bbox': [
                    int(elem)
                    for elem in get_xywh_bbox_from_binary_mask(_ins_mask)
                ],
                'area':
                int(_ins_mask.sum())
            })  # note numpy.int64 is not json serializable
        # Collapse the disjoint binary masks into a single id map; the sum is
        # safe because instances do not overlap (asserted above).
        mask = (instance_tsr * np.array(iids).reshape(-1, 1, 1)).sum(axis=0)
        mask = mask.astype(np.uint32)
        if instance_mask_only:
            return mask, meta
        # Stuff pass: only consider pixels not already claimed by an instance.
        sem_remain = self.sem_decision.copy()
        sem_remain[mask > 0] = -1  # mark instance pixels as consumed
        for trainId in np.unique(sem_remain):
            if trainId < 0:
                continue
            _cat = self.trainId_2_catId[trainId]
            if self.categories[_cat]['isthing']:
                continue  # abstain on ungrouped instance pixels
            _id = id_gen.get_id(_cat)
            segments.append({'id': _id, 'category_id': _cat, 'isthing': 0})
            mask[sem_remain == trainId] = _id

        # Sanity check: every reported segment id actually occurs in the mask.
        for seg in meta['segments_info']:
            assert seg['id'] in mask
        return mask, meta
def show_semseg_result(semseg_json_file, categories_json_file, out_image_file):
    """Visualize COCO semantic-segmentation results and save a colorized PNG.

    Decodes each RLE mask belonging to the last image id found in the
    results file, displays it interactively via matplotlib, paints every
    segment into a single panoptic id map, and writes the ``id2rgb``
    rendering to a fixed predictions directory.

    :param semseg_json_file: path to the semantic-seg results JSON.
    :param categories_json_file: path to the categories list JSON.
    :param out_image_file: output PNG file name (joined onto a fixed dir).
    :raises ValueError: if the results JSON contains no annotations
        (previously this crashed with an opaque NameError on ``img_id``).
    """
    sem_by_image = defaultdict(list)

    with open(semseg_json_file, 'r') as f:
        sem_results = json.load(f)

    print("Semantic segmentation:")
    print("\tJSON file: {}".format(semseg_json_file))

    with open(categories_json_file, 'r') as f:
        categories_list = json.load(f)
    categories = {el['id']: el for el in categories_list}

    # NOTE(review): only the LAST image id seen here is rendered below —
    # presumably these result files hold a single image; confirm with callers.
    img_id = None
    for sem in sem_results:
        img_id = sem['image_id']
        sem_by_image[img_id].append(sem)
    if img_id is None:
        raise ValueError(
            'no annotations found in {}'.format(semseg_json_file))

    id_generator = IdGenerator(categories)

    # Sized lazily from the first decoded mask instead of the old hard-coded
    # (480, 640), so images of any resolution render correctly.
    pan_segm_id = None

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # To Do: change logic of multiple annotations case, for now, it is override
    # but we can easily learn it from panoptic combine script
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    for ann in sem_by_image[img_id]:
        mask = COCOmask.decode(ann['segmentation'])
        if pan_segm_id is None:
            pan_segm_id = np.zeros(mask.shape[:2], dtype=np.uint32)

        # Debug visualization: blocks until the window is closed.
        plt.imshow(mask)
        plt.show()
        segment_id = id_generator.get_id(ann['category_id'])
        print("id: ")
        print(ann['category_id'])
        pan_segm_id[mask == 1] = segment_id
        print("segment_id: ")
        print(segment_id)

        print(id2rgb(pan_segm_id).shape)
    if pan_segm_id is None:
        # Image id present but without annotations: keep the old fallback size.
        pan_segm_id = np.zeros((480, 640), dtype=np.uint32)
    Image.fromarray(id2rgb(pan_segm_id)).save(
        os.path.join("/home/zhiliu/Documents/Panoptic_Segement/Cocopanopticapi/VanillaPanopticSeg/data/predictions/test_result_for_vox/", out_image_file)
    )
def combine_to_panoptic_single_core(proc_id, img_ids, img_id2img,
                                    inst_by_image, sem_by_image,
                                    segmentations_folder, overlap_thr,
                                    stuff_area_limit, categories):
    """Combine instance and semantic results into panoptic annotations.

    Worker body for one process: for each image id, paints instance
    ("thing") segments first — skipping detections that overlap already
    claimed area beyond ``overlap_thr`` — then fills remaining pixels with
    "stuff" segments larger than ``stuff_area_limit``. Each panoptic PNG is
    written to ``segmentations_folder``.

    :param proc_id: worker index, used only for progress logging.
    :param img_ids: image ids this worker handles.
    :param img_id2img: image id -> COCO image record (height/width/file_name).
    :param inst_by_image: image id -> list of instance anns (RLE, sorted by
        caller; earlier anns win contested pixels).
    :param sem_by_image: image id -> list of semantic (stuff) anns.
    :param segmentations_folder: output directory for panoptic PNGs.
    :param overlap_thr: max fraction of a detection allowed to overlap
        previously used area before it is dropped.
    :param stuff_area_limit: minimum remaining pixel area for a stuff segment.
    :param categories: category id -> category dict, for IdGenerator.
    :return: list of per-image panoptic annotation dicts.
    """
    panoptic_json = []
    id_generator = IdGenerator(categories)

    for idx, img_id in enumerate(img_ids):
        img = img_id2img[img_id]

        if idx % 100 == 0:
            print('Core: {}, {} from {} images processed.'.format(
                proc_id, idx, len(img_ids)))

        pan_segm_id = np.zeros((img['height'], img['width']), dtype=np.uint32)
        # Running RLE union of all pixels claimed by accepted instances.
        used = None
        annotation = {}
        try:
            annotation['image_id'] = int(img_id)
        except Exception:
            annotation['image_id'] = img_id

        annotation['file_name'] = img['file_name'].replace('.jpg', '.png')

        segments_info = []
        for ann in inst_by_image[img_id]:
            area = COCOmask.area(ann['segmentation'])
            if area == 0:
                continue
            if used is None:
                intersect = 0
                used = copy.deepcopy(ann['segmentation'])
            else:
                # Overlap (in pixels) between this detection and claimed area.
                intersect = COCOmask.area(
                    COCOmask.merge([used, ann['segmentation']],
                                   intersect=True))
            # Drop detections mostly covered by earlier (higher-priority) ones.
            if intersect / area > overlap_thr:
                continue
            used = COCOmask.merge([used, ann['segmentation']], intersect=False)

            mask = COCOmask.decode(ann['segmentation']) == 1
            if intersect != 0:
                # Paint only pixels not already assigned to another segment.
                mask = np.logical_and(pan_segm_id == 0, mask)
            segment_id = id_generator.get_id(ann['category_id'])
            panoptic_ann = {}
            panoptic_ann['id'] = segment_id
            panoptic_ann['category_id'] = ann['category_id']
            pan_segm_id[mask] = segment_id
            segments_info.append(panoptic_ann)

        # Stuff pass: only the still-unassigned part of each semantic mask
        # counts, and tiny leftovers are discarded.
        for ann in sem_by_image[img_id]:
            mask = COCOmask.decode(ann['segmentation']) == 1
            mask_left = np.logical_and(pan_segm_id == 0, mask)
            if mask_left.sum() < stuff_area_limit:
                continue
            segment_id = id_generator.get_id(ann['category_id'])
            panoptic_ann = {}
            panoptic_ann['id'] = segment_id
            panoptic_ann['category_id'] = ann['category_id']
            pan_segm_id[mask_left] = segment_id
            segments_info.append(panoptic_ann)

        annotation['segments_info'] = segments_info
        panoptic_json.append(annotation)

        Image.fromarray(id2rgb(pan_segm_id)).save(
            os.path.join(segmentations_folder, annotation['file_name']))

    return panoptic_json
# Ejemplo n.º 4 (Example no. 4)
# 0
def sa_pixel_to_coco_panoptic(dataset_name, export_root, thing_ids):
    """Convert a SuperAnnotate pixel-format export into a COCO-panoptic dataset.

    Writes one panoptic PNG per image under
    ``<dataset_name>/panoptic_<dataset_name>/`` and the dataset JSON at
    ``<dataset_name>/annotations/panoptic_<dataset_name>.json``.

    :param dataset_name: output directory name, also used in file names.
    :param export_root: root of the SuperAnnotate export (contains
        ``classes/classes.json``, ``*___pixel.json`` annotations,
        ``*___lores.jpg`` images and ``*___save.png`` blue masks).
    :param thing_ids: class db-ids that should be flagged as "thing".
    """
    os.makedirs(os.path.join(dataset_name, "annotations"), exist_ok=True)

    info = {
        'description':
            'This is stable 1.0 version of the ' + dataset_name + ' dataset.',
        'url':
            'https://superannotate.ai',
        'version':
            '1.0',
        'year':
            2019,
        'contributor':
            'Annotator LLC',
        'date_created':
            '2019-11-15 11:47:32.67823'
    }

    licences = [
        {
            'url': 'https://superannotate.ai',
            'id': 1,
            'name': 'Superannotate License'
        }
    ]

    categories = []
    dbid_to_catid = {}
    # Context manager closes the handle (json.load(open(...)) leaked it).
    with open(os.path.join(export_root, "classes", "classes.json")) as f:
        classes = json.load(f)
    for idx, dbclass in enumerate(classes, 1):
        category = {
            "id": idx,
            "name": dbclass["name"],
            "supercategory": dbclass["name"],
            "isthing": dbclass["id"] in thing_ids,
            # '#RRGGBB' hex string -> int -> panopticapi color triple
            "color": id2rgb(int(dbclass["color"][1:], 16))
        }

        dbid_to_catid[dbclass["id"]] = category["id"]
        categories.append(category)

    print("Converting annotations for {} dataset ...".format(dataset_name))

    id_generator = IdGenerator({cat['id']: cat for cat in categories})
    panoptic_root = os.path.join(
        dataset_name, "panoptic_{}".format(dataset_name)
    )
    os.makedirs(panoptic_root, exist_ok=True)
    jsons = glob.glob(os.path.join(export_root, "*.json"))
    images = []
    annotations = []
    for idx, filepath in tqdm(enumerate(jsons, 1)):
        filename = os.path.basename(filepath)
        imagename = filename[:-len('___pixel.json')] + '___lores.jpg'

        # ``with`` releases the image file handle as soon as the size is read.
        with Image.open(os.path.join(export_root, imagename)) as im:
            width, height = im.size
        image_info = {
            "id": idx,
            "file_name": imagename,
            "height": height,
            "width": width,
            "license": 1
        }
        images.append(image_info)

        segments_info = []
        with open(os.path.join(export_root, filename)) as f:
            sa_ann_json = json.load(f)

        sa_bluemask_path = os.path.join(
            export_root, filename[:-len('___pixel.json')] + '___save.png'
        )
        sa_bluemask_rgb = np.asarray(
            Image.open(sa_bluemask_path).convert('RGB'), dtype=np.uint32
        )
        ann_mask = np.zeros((height, width), dtype=np.uint32)
        # Pack the RGB mask into one 0xRRGGBB int per pixel so instance part
        # colors can be matched with a single np.isin call.
        flat_mask = (sa_bluemask_rgb[:, :, 0] <<
                     16) | (sa_bluemask_rgb[:, :, 1] <<
                            8) | (sa_bluemask_rgb[:, :, 2])

        for instance in sa_ann_json:
            parts = [int(part["color"][1:], 16) for part in instance["parts"]]
            category_id = dbid_to_catid[instance["classId"]]
            instance_bitmask = np.isin(flat_mask, parts)
            segment_id = id_generator.get_id(category_id)
            ann_mask[instance_bitmask] = segment_id
            # pycocotools expects Fortran-ordered arrays for RLE encoding.
            coco_instance_mask = cocomask.encode(
                np.asfortranarray(instance_bitmask)
            )
            bbox = cocomask.toBbox(coco_instance_mask).tolist()
            area = int(cocomask.area(coco_instance_mask))

            segment_info = {
                "id": segment_id,
                "category_id": category_id,
                "area": area,
                "bbox": bbox,
                "iscrowd": 0
            }
            segments_info.append(segment_info)
        panopticmask = imagename[:-len("jpg")] + "png"
        Image.fromarray(id2rgb(ann_mask)).save(
            os.path.join(panoptic_root, panopticmask)
        )

        annotation = {
            "image_id": idx,
            "file_name": panopticmask,
            "segments_info": segments_info
        }
        annotations.append(annotation)

    panoptic_data = {
        "info": info,
        "licences": licences,
        "images": images,
        "annotations": annotations,
        "categories": categories
    }

    # Stream straight to the file ("w", not "w+"; no intermediate string).
    with open(
        os.path.join(
            dataset_name, "annotations",
            "panoptic_{}.json".format(dataset_name)
        ), "w"
    ) as coco_json:
        json.dump(panoptic_data, coco_json, indent=4)
# Ejemplo n.º 5 (Example no. 5)
# 0
def build_panoptic_area(img_id, output_path, detection_path,
                        segmentation_path):
    """
    Build panoptic segmentation of the specified image.
    Sort segments by area: write first smaller objects to avoid overlapping.
    :param img_id: image identifier (for retrieving file name)
    :param output_path: path to store panoptic segmentation results (png)
    :param detection_path: input path for detection
    :param segmentation_path: input path for semantic segmentation
    :return: the json annotation with class information for each panoptic segment.
    """
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(output_path, exist_ok=True)

    # Read categories and create IdGenerator (official panopticapi repository)
    categories = load_panoptic_category_info()
    id_generator = IdGenerator(categories)

    # Parameters:
    overlap_thr = 0.5
    stuff_area_limit = 64 * 64

    # Read segmentation data; context managers close the handles that
    # json.load(open(...)) used to leak.
    with open(os.path.join(segmentation_path, img_id + '_prob.json'),
              'r') as f:
        segm_probs = json.load(f)  # loaded but unused here; kept for parity
    segm_labelmap = np.array(
        Image.open(os.path.join(segmentation_path, img_id + '_0.png')),
        np.uint8)
    # Read detection data
    with open(os.path.join(detection_path, img_id + '_prob.json'), 'r') as f:
        detection = json.load(f)

    pan_segm_id = np.zeros(segm_labelmap.shape, dtype=np.uint32)
    # Boolean map of pixels already claimed by an accepted object.
    used = np.full(segm_labelmap.shape, False)

    annotation = {}
    try:
        annotation['image_id'] = int(img_id)
    except (TypeError, ValueError):
        # Non-numeric id: keep it as-is.
        annotation['image_id'] = img_id

    annotation['file_name'] = img_id + '.png'

    segments_info = []

    # Decode each detection's boolean mask once and cache its pixel area.
    for obj in detection:
        obj_mask = extract_mask_bool(obj['mask'])
        obj['area'] = np.count_nonzero(obj_mask)
        obj['mask'] = obj_mask

    # Smaller objects first, so larger ones do not bury them.
    detection.sort(key=lambda x: x['area'], reverse=False)

    for obj in detection:
        obj_mask = obj['mask']
        obj_area = obj['area']
        if obj_area == 0:
            continue
        # Filter out objects with intersection > overlap_thr with used area.
        intersection_mask = used & obj_mask
        intersect_area = np.count_nonzero(intersection_mask)
        if 1.0 * intersect_area / obj_area > overlap_thr:
            continue
        used = used | obj_mask

        segment_id = id_generator.get_id(obj['class'])
        panoptic_ann = {'id': segment_id, 'category_id': obj['class']}
        if intersect_area > 0:
            # Paint only the part not already claimed by an earlier object.
            pan_segm_id[obj_mask & (~intersection_mask)] = segment_id
        else:
            pan_segm_id[obj_mask] = segment_id
        segments_info.append(panoptic_ann)

    # Stuff pass over the semantic label map.
    for segm_class in np.unique(segm_labelmap):
        segm_class = int(segm_class)
        if segm_class == 183:  # void class
            continue

        # Check class: exclude non-stuff objects
        category = categories[segm_class]
        if category['isthing'] == 1:
            continue

        segm_mask = (segm_labelmap == segm_class)
        mask_left = segm_mask & (~used)
        # Filter out segments with small area
        if np.count_nonzero(mask_left) < stuff_area_limit:
            continue
        segment_id = id_generator.get_id(segm_class)
        panoptic_ann = {'id': segment_id, 'category_id': segm_class}
        used = used | mask_left
        pan_segm_id[mask_left] = segment_id
        segments_info.append(panoptic_ann)

    annotation['segments_info'] = segments_info

    # Save annotated image
    Image.fromarray(id2rgb(pan_segm_id)).save(
        os.path.join(output_path, annotation['file_name']))

    # Drop segments whose pixels were completely overwritten (zero area).
    ids = set(np.unique(pan_segm_id))
    annotation['segments_info'] = [
        seg for seg in segments_info if seg['id'] in ids
    ]

    return annotation