Example #1
0
def post_process_and_resize_prediction(left_prediction: BoxList,
                                       right_prediction: BoxList,
                                       dst_size=(1280, 720),
                                       threshold=0.7,
                                       padding=1,
                                       process_disparity=True):
    """Resize a stereo prediction pair to ``dst_size`` and project its masks.

    Operates on clones, so the caller's predictions are never mutated.
    When ``process_disparity`` is set and the left prediction does not yet
    carry a 'disparity' map, one is computed from the stereo pair before
    resizing. Returns the (left, right) predictions at the new size, with
    the left one holding full-image binary masks under the 'mask' field.
    """
    left = left_prediction.clone()
    right = right_prediction.clone()
    # Attach a disparity map before resizing, if requested and missing.
    if process_disparity and not left.has_map('disparity'):
        full_img_disparity = DisparityMapProcessor()(left, right)
        left.add_map('disparity', full_img_disparity)
    left = left.resize(dst_size)
    right = right.resize(dst_size)
    # Paste the per-ROI mask logits onto the full image and binarize them.
    roi_masks = left.get_field('mask')
    masker = Masker(threshold=threshold, padding=padding)
    pasted = masker([roi_masks], [left])[0].squeeze(1)
    if pasted.shape[0] == 0:
        # No detections survived: substitute one all-zero mask (H, W order).
        pasted = torch.zeros((1, *dst_size[::-1]))
    left.add_field('mask', pasted)
    return left, right
Example #2
0
 def get_ground_truth(self, index):
     """Build the ``{'left': BoxList, 'right': BoxList}`` ground truth for *index*.

     For train/val splits the left target carries full annotations (labels,
     3D boxes, disparity map, KINS masks, truncation/occlusion) plus the
     shared per-image metadata, and both targets are clipped to the image.
     For the testing split only a single fake box plus the shared metadata
     is attached, since no annotations exist there.
     """
     img_id = self.ids[index]
     info = self.get_img_info(index)
     height, width = info['height'], info['width']
     if not is_testing_split(self.split):
         left_annotation = self.annotations['left'][int(img_id)]
         right_annotation = self.annotations['right'][int(img_id)]
         # left target: 2D boxes, 3D boxes, dense maps and metadata
         left_target = BoxList(left_annotation["boxes"], (width, height), mode="xyxy")
         left_target.add_field("labels", left_annotation["labels"])
         boxes_3d = Box3DList(left_annotation["boxes_3d"], (width, height), mode='ry_lhwxyz')
         left_target.add_field("box3d", boxes_3d)
         left_target.add_map('disparity', self.get_disparity(index))
         left_target.add_field('kins_masks', self.get_kins_mask(index))
         left_target.add_field('truncation', torch.tensor(self.truncations_list[int(img_id)]))
         left_target.add_field('occlusion', torch.tensor(self.occlusions_list[int(img_id)]))
         # Shared metadata must be attached BEFORE clipping so removed
         # boxes drop their metadata rows along with them.
         self._add_common_gt_fields(left_target, index, int(img_id), width, height)
         left_target = left_target.clip_to_image(remove_empty=True)
         # right target: labels only — 3D/metadata live on the left target
         right_target = BoxList(right_annotation["boxes"], (width, height), mode="xyxy")
         right_target.add_field("labels", right_annotation["labels"])
         right_target = right_target.clip_to_image(remove_empty=True)
         return {'left': left_target, 'right': right_target}
     else:
         # testing split: no annotations available, emit placeholder boxes
         fakebox = torch.tensor([[0, 0, 0, 0]])
         left_target = BoxList(fakebox, (width, height), mode="xyxy")
         self._add_common_gt_fields(left_target, index, int(img_id), width, height)
         right_target = BoxList(fakebox, (width, height), mode="xyxy")
         return {'left': left_target, 'right': right_target}

 def _add_common_gt_fields(self, target, index, img_id, width, height):
     """Attach the per-image metadata fields shared by both dataset splits."""
     target.add_field('image_size', torch.tensor([[width, height]]).repeat(len(target), 1))
     target.add_field('calib', Calib(self.get_calibration(index), (width, height)))
     target.add_field('index', torch.full((len(target), 1), index, dtype=torch.long))
     target.add_field('imgid', torch.full((len(target), 1), img_id, dtype=torch.long))