Code example #1
def read_COCOA(ann, h, w):
    if 'visible_mask' in ann.keys():
        rle = [ann['visible_mask']]
    else:
        rles = maskUtils.frPyObjects([ann['segmentation']], h, w)
        rle = maskUtils.merge(rles)
    modal = maskUtils.decode(rle).squeeze()
    if np.all(modal != 1):
        # if the object is fully occluded by others,
        # use the amodal bbox as an approximate location;
        # note that this produces random amodal results.
        amodal = maskUtils.decode(maskUtils.merge(
            maskUtils.frPyObjects([ann['segmentation']], h, w)))
        bbox = utils.mask_to_bbox(amodal)
    else:
        bbox = utils.mask_to_bbox(modal)
    return modal, bbox, 1 # category as constant 1
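Every example on this page calls utils.mask_to_bbox, which is not reproduced here. A minimal sketch of the assumed behaviour (binary mask in, COCO-style [x, y, w, h] box out), written as a hypothetical stand-in rather than the project's actual helper:

import numpy as np

def mask_to_bbox_sketch(mask):
    # Hypothetical stand-in for utils.mask_to_bbox (not the project's code):
    # return the tight [x, y, w, h] box around the non-zero pixels,
    # or all zeros for an empty mask.
    ys, xs = np.where(mask > 0)
    if len(xs) == 0:
        return [0, 0, 0, 0]
    return [int(xs.min()), int(ys.min()),
            int(xs.max() - xs.min() + 1), int(ys.max() - ys.min() + 1)]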
Code example #2
def get_instance(self, idx, with_gt=False):
    assert not with_gt, \
        "Mapillary Vistas has no ground truth for ordering or amodal masks."
    imgidx, regidx = self.indexing[idx]
    # img
    image_id = self.annot_info[imgidx]['image_id']
    image_fn = image_id + ".jpg"
    # region
    instance_map = np.array(
        Image.open("{}/instances/{}.png".format(self.root, image_id)),
        dtype=np.uint16)
    h, w = instance_map.shape[:2]
    reg_info = self.annot_info[imgidx]['regions'][regidx]
    modal = (instance_map == reg_info['instance_id']).astype(np.uint8)
    category = reg_info['category_id']
    bbox = np.array(utils.mask_to_bbox(modal))
    return modal, bbox, category, image_fn, None
Code example #3
File: test.py Project: ywcmaike/deocclusion
def make_KINS_output(self, idx, amodal_pred, category):
    # Convert a batch of predicted amodal masks into COCO-style result records.
    results = []
    for i in range(amodal_pred.shape[0]):
        data = dict()
        # RLE-encode the i-th mask; pycocotools expects a Fortran-ordered array.
        rle = maskUtils.encode(
            np.array(amodal_pred[i, :, :, np.newaxis], order='F'))[0]
        if hasattr(self.data_reader, 'img_ids'):
            data['image_id'] = self.data_reader.img_ids[idx]
        data['category_id'] = category[i].item()
        # JSON cannot serialize bytes, so store the RLE counts as a string.
        if isinstance(rle['counts'], bytes):
            rle['counts'] = rle['counts'].decode()
        data['segmentation'] = rle
        data['bbox'] = utils.mask_to_bbox(amodal_pred[i, :, :])
        data['area'] = float(data['bbox'][2] * data['bbox'][3])
        data['iscrowd'] = 0
        data['score'] = 1.
        data['id'] = self.count
        results.append(data)
        self.count += 1
    return results
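The records returned by make_KINS_output already follow the COCO results format (RLE segmentation, [x, y, w, h] bbox, score), so they can be written to JSON and scored with pycocotools. A hedged sketch, assuming a matching KINS ground-truth file exists; the function name and file names below are placeholders, not part of the project:

import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

def evaluate_kins_results(results, gt_json="kins_amodal_gt.json",
                          res_json="amodal_results.json"):
    # `results` is the concatenation of make_KINS_output(...) over all images.
    # File names are illustrative; a real KINS ground-truth JSON is assumed.
    with open(res_json, "w") as f:
        json.dump(results, f)
    coco_gt = COCO(gt_json)
    coco_dt = coco_gt.loadRes(res_json)
    coco_eval = COCOeval(coco_gt, coco_dt, iouType="segm")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()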
Code example #4
def get_image_instances(self, idx, with_gt=False, with_anns=False, ignore_stuff=False):
    assert not with_gt
    assert not ignore_stuff
    # img
    image_id = self.annot_info[idx]['image_id']
    image_fn = image_id + ".jpg"
    # region
    instance_map = np.array(
        Image.open("{}/instances/{}.png".format(self.root, image_id)),
        dtype=np.uint16)
    h, w = instance_map.shape[:2]
    instance_ids = np.unique(instance_map)
    # the 16-bit instance id encodes the category id in its high byte
    category = instance_ids // 256
    num_instance = len(instance_ids)
    # broadcast each instance id against the map to get one binary mask per instance
    instance_ids_tensor = np.zeros((num_instance, h, w), dtype=np.uint16)
    instance_ids_tensor[...] = instance_ids[:, np.newaxis, np.newaxis]
    modal = (instance_ids_tensor == instance_map).astype(np.uint8)
    bboxes = []
    for i in range(modal.shape[0]):
        bboxes.append(utils.mask_to_bbox(modal[i, ...]))
    return modal, category, np.array(bboxes), None, image_fn
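The `// 256` and the broadcast comparison above rely on the Mapillary Vistas instance-PNG convention, where each 16-bit pixel value encodes category_id * 256 plus a per-category instance index. A toy illustration of that decoding and of the broadcasting trick (values chosen for illustration only):

import numpy as np

# Two instances: one of category 1 (value 256) and one of category 2 (value 513).
instance_map = np.array([[256, 256, 513],
                         [256, 513, 513]], dtype=np.uint16)
instance_ids = np.unique(instance_map)               # [256, 513]
category = instance_ids // 256                       # [1, 2]
masks = (instance_ids[:, None, None] == instance_map).astype(np.uint8)
print(masks.shape)                                   # (2, 2, 3): one binary mask per instance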
Code example #5
File: deocc_app.py Project: rezon99/deocclusion-demo
def objectSave(self):
    obj = self.objects[self.this_obj - 1]
    # the object is RGBA; its alpha channel is the mask, so crop to the mask's bbox
    crop_obj = utils.crop_padding(obj,
                                  utils.mask_to_bbox(obj[:, :, 3]),
                                  pad_value=(0, 0, 0, 0))
    self.window().objectSaveAs(crop_obj)
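utils.crop_padding is also not shown on this page; from the call above it appears to crop a bbox from an image and fill any part of the box that falls outside the image with pad_value. A hypothetical sketch under that assumption (not the project's implementation):

import numpy as np

def crop_padding_sketch(img, bbox, pad_value=(0,)):
    # Hypothetical stand-in for utils.crop_padding: crop bbox = [x, y, w, h]
    # from an H x W x C image, padding regions outside the image with
    # pad_value (one value per channel, or a single value).
    x, y, w, h = bbox
    out = np.empty((h, w, img.shape[2]), dtype=img.dtype)
    out[...] = np.array(pad_value, dtype=img.dtype)
    x0, y0 = max(x, 0), max(y, 0)
    x1, y1 = min(x + w, img.shape[1]), min(y + h, img.shape[0])
    if x1 > x0 and y1 > y0:
        out[y0 - y:y1 - y, x0 - x:x1 - x] = img[y0:y1, x0:x1]
    return out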