Example #1
    def forward(self,
                x,
                char_mask,
                boxes,
                seq_outputs=None,
                seq_scores=None,
                detailed_seq_scores=None):
        """
        Arguments:
            x (Tensor): the mask logits
            char_mask (Tensor): the char mask logits
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for ech image

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra field mask
        """
        boxes_per_image = [len(box) for box in boxes]
        if x is not None:
            mask_prob = x.sigmoid()
            mask_prob = mask_prob.squeeze(dim=1)[:, None]
            if self.masker:
                mask_prob = self.masker(mask_prob, boxes)
            # regroup the flat per-box masks into one chunk per image
            mask_prob = mask_prob.split(boxes_per_image, dim=0)
        if self.cfg.MODEL.CHAR_MASK_ON:
            char_mask_prob = F.softmax(char_mask, dim=1).cpu().numpy()
        else:
            char_mask_prob = None
        char_results = {
            'char_mask': char_mask_prob,
            'boxes': boxes[0].bbox.cpu().numpy(),
            'seq_outputs': seq_outputs,
            'seq_scores': seq_scores,
            'detailed_seq_scores': detailed_seq_scores
        }
        results = []
        for i, box in enumerate(boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            if x is not None:
                bbox.add_field("mask", mask_prob[i])
            results.append(bbox)

        return [results, char_results]
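The central step above is the split: mask predictions for the whole batch come out as one flat tensor, and boxes_per_image regroups them per image. A minimal, self-contained sketch of that step (toy shapes, plain PyTorch, no repo dependencies):

    import torch

    # 5 predicted masks across a batch of 2 images (3 boxes + 2 boxes)
    mask_prob = torch.rand(5, 1, 28, 28)
    boxes_per_image = [3, 2]

    per_image = mask_prob.split(boxes_per_image, dim=0)
    print([m.shape for m in per_image])
    # -> [torch.Size([3, 1, 28, 28]), torch.Size([2, 1, 28, 28])]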
Example #2
 def forward_for_single_feature_map(self, pred, image_shapes):
     """
     Arguments:
         pred: tensor of size N, 1, H, W
     """
     device = pred.device
     bitmap = self.binarize(pred)
     N, height, width = pred.shape[0], pred.shape[2], pred.shape[3]
     # move both maps to NumPy once; box extraction below runs on the CPU
     bitmap_numpy = bitmap.cpu().numpy()
     pred_map_numpy = pred.cpu().numpy()
     boxes_batch = []
     rotated_boxes_batch = []
     polygons_batch = []
     scores_batch = []
     for batch_index in range(N):
         image_shape = image_shapes[batch_index]
         boxes, scores, rotated_boxes, polygons = self.boxes_from_bitmap(
             pred_map_numpy[batch_index], bitmap_numpy[batch_index], width,
             height)
         boxes = boxes.to(device)
         if self.training and self.cfg.MODEL.SEG.AUG_PROPOSALS:
             boxes = self.aug_tensor_proposals(boxes)
         if boxes.shape[0] > self.top_n:
             boxes = boxes[:self.top_n, :]
             # _, top_index = scores.topk(self.top_n, 0, sorted=False)
             # boxes = boxes[top_index, :]
             # scores = scores[top_index]
         # boxlist = BoxList(boxes, (width, height), mode="xyxy")
         boxlist = BoxList(boxes, (image_shape[1], image_shape[0]),
                           mode="xyxy")
         if (self.cfg.MODEL.SEG.USE_SEG_POLY
                 or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE
                 or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE):
             masks = SegmentationMask(polygons,
                                      (image_shape[1], image_shape[0]))
             boxlist.add_field('masks', masks)
         boxlist = boxlist.clip_to_image(remove_empty=False)
         # boxlist = remove_small_boxes(boxlist, self.min_size)
         boxes_batch.append(boxlist)
         rotated_boxes_batch.append(rotated_boxes)
         polygons_batch.append(polygons)
         scores_batch.append(scores)
     return boxes_batch, rotated_boxes_batch, polygons_batch, scores_batch
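self.binarize is defined elsewhere in the class; in DB-style segmentation heads it is typically just a fixed threshold on the score map, which is the assumption this sketch makes (the threshold value 0.3 is illustrative, not taken from the source):

    import torch

    def binarize(pred, thresh=0.3):
        # assumed behaviour: hard threshold on the probability map;
        # the real self.binarize may differ
        return pred > thresh

    pred = torch.rand(2, 1, 32, 32)  # N, 1, H, W, as in the docstring
    bitmap = binarize(pred)
    print(bitmap.shape, bitmap.dtype)  # torch.Size([2, 1, 32, 32]) torch.bool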
Example #3
 def __getitem__(self, item):
     im_name = os.path.basename(self.image_lists[item])
     img = Image.open(self.image_lists[item]).convert("RGB")
     width, height = img.size
     gt_path = os.path.join(self.gts_dir, im_name + ".txt")
     words, boxes, charsbbs, segmentations, labels = self.load_gt_from_txt(
         gt_path, height, width
     )
     # use character-level annotations only when they are present and enabled
     use_char_ann = words[0] != "" and self.use_charann
     target = BoxList(boxes[:, :4], img.size, mode="xyxy", use_char_ann=use_char_ann)
     if self.ignore_difficult:
         labels = torch.from_numpy(np.array(labels))
     else:
         labels = torch.ones(len(boxes))
     target.add_field("labels", labels)
     masks = SegmentationMask(segmentations, img.size)
     target.add_field("masks", masks)
     char_masks = SegmentationCharMask(
         charsbbs, words=words, use_char_ann=use_char_ann, size=img.size, char_num_classes=len(self.char_classes)
     )
     target.add_field("char_masks", char_masks)
     if self.transforms is not None:
         img, target = self.transforms(img, target)
     if self.vis:
         # undo the per-channel mean subtraction and flip CHW -> HWC for PIL
         new_im = img.numpy().copy().transpose([1, 2, 0]) + [
             102.9801,
             115.9465,
             122.7717,
         ]
         new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
         mask = target.extra_fields["masks"].polygons[0].convert("mask")
         mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB")
         if self.use_charann:
             m, _ = (
                 target.extra_fields["char_masks"]
                 .chars_boxes[0]
                 .convert("char_mask")
             )
             color = self.creat_color_map(37, 255)
             color_map = color[m.numpy().astype(np.uint8)]
             char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB")
             char = Image.blend(char, new_im, 0.5)
         else:
             char = new_im
         new = Image.blend(char, mask, 0.5)
         img_draw = ImageDraw.Draw(new)
         for box in target.bbox.numpy():
             box = list(box)
             # closed polyline through the corners: TL -> TR -> BR -> BL -> TL
             box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
             img_draw.line(box, fill=(255, 0, 0), width=2)
         new.save("./vis/char_" + im_name)
     return img, target, self.image_lists[item]
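The visualization branch adds the per-channel means (102.9801, 115.9465, 122.7717) back onto the normalized tensor and transposes CHW to HWC before handing it to PIL. A standalone sketch of that round trip, using a random array in place of img.numpy():

    import numpy as np
    from PIL import Image

    chw = np.random.randn(3, 64, 64).astype(np.float32)  # stand-in for img.numpy()
    means = [102.9801, 115.9465, 122.7717]

    hwc = chw.transpose([1, 2, 0]) + means  # HWC, means restored
    # clip before casting; a bare astype(np.uint8) would wrap negative values
    vis = Image.fromarray(hwc.clip(0, 255).astype(np.uint8)).convert("RGB")
    vis.save("vis_check.png")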
Example #4
    def forward_for_single_feature_map(self, anchors, objectness, box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            objectness: tensor of size N, A, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = objectness.device
        N, A, H, W = objectness.shape

        # put in the same format as anchors
        objectness = objectness.permute(0, 2, 3, 1).reshape(N, -1)
        objectness = objectness.sigmoid()
        box_regression = box_regression.view(N, -1, 4, H, W).permute(0, 3, 4, 1, 2)
        box_regression = box_regression.reshape(N, -1, 4)

        num_anchors = A * H * W

        pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
        objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)

        batch_idx = torch.arange(N, device=device)[:, None]
        box_regression = box_regression[batch_idx, topk_idx]

        image_shapes = [box.size for box in anchors]
        concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
        concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

        proposals = self.box_coder.decode(
            box_regression.view(-1, 4), concat_anchors.view(-1, 4)
        )

        proposals = proposals.view(N, -1, 4)

        result = []
        for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
            boxlist = BoxList(proposal, im_shape, mode="xyxy")
            boxlist.add_field("objectness", score)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            boxlist = boxlist_nms(
                boxlist,
                self.nms_thresh,
                max_proposals=self.post_nms_top_n,
                score_field="objectness",
            )
            result.append(boxlist)
        return result
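The permute/reshape pair flattens the (A, H, W) grid so that row k of objectness lines up with row k of box_regression, and the batch_idx column vector broadcasts against topk_idx to gather the top rows of every image at once. A toy-sized check of both steps:

    import torch

    N, A, H, W = 2, 3, 4, 4
    objectness = torch.rand(N, A, H, W)
    box_regression = torch.rand(N, A * 4, H, W)

    obj = objectness.permute(0, 2, 3, 1).reshape(N, -1)  # (N, A*H*W)
    reg = box_regression.view(N, -1, 4, H, W).permute(0, 3, 4, 1, 2).reshape(N, -1, 4)

    top_scores, topk_idx = obj.topk(5, dim=1, sorted=True)
    batch_idx = torch.arange(N)[:, None]  # (N, 1), broadcasts over the k dim
    top_reg = reg[batch_idx, topk_idx]    # (N, 5, 4)
    print(top_scores.shape, top_reg.shape)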
Example #5
    def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh",
                         use_char_ann=False).convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img.size)
        target.add_field("masks", masks)

        target = target.clip_to_image(remove_empty=True)

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, idx
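COCO stores boxes as xywh, so convert("xyxy") amounts to adding the width/height onto the top-left corner. A minimal equivalent without the repo (note that the library's BoxList additionally applies a one-pixel offset convention that this sketch ignores):

    import torch

    xywh = torch.tensor([[10., 20., 30., 40.]])  # x, y, w, h
    xyxy = xywh.clone()
    xyxy[:, 2:] = xywh[:, :2] + xywh[:, 2:]      # x2 = x1 + w, y2 = y1 + h
    print(xyxy)  # tensor([[10., 20., 40., 60.]])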
Example #6
    def __getitem__(self, item):
        img = Image.open(self.image_lists[item]).convert("RGB")

        # dummy target
        w, h = img.size
        target = BoxList([[0, 0, w, h]], img.size, mode="xyxy")

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target
Example #7
    def forward(self, x, boxes):
        """
        Arguments:
            x (Tensor): the mask logits
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra field mask
        """
        mask_prob = x.sigmoid()

        # select masks corresponding to the predicted classes
        num_masks = x.shape[0]
        labels = [bbox.get_field("labels") for bbox in boxes]
        labels = torch.cat(labels)
        index = torch.arange(num_masks, device=labels.device)
        mask_prob = mask_prob[index, labels][:, None]

        if self.masker:
            mask_prob = self.masker(mask_prob, boxes)

        boxes_per_image = [len(box) for box in boxes]
        mask_prob = mask_prob.split(boxes_per_image, dim=0)

        results = []
        for prob, box in zip(mask_prob, boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox.add_field("mask", prob)
            results.append(bbox)

        return results
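The line mask_prob[index, labels] pairs each row with its predicted class, picking exactly one mask channel per box. A toy check of that advanced-indexing pattern:

    import torch

    num_masks, num_classes = 4, 3
    mask_prob = torch.rand(num_masks, num_classes, 28, 28)
    labels = torch.tensor([2, 0, 1, 2])  # predicted class per box

    index = torch.arange(num_masks)
    selected = mask_prob[index, labels][:, None]  # (4, 1, 28, 28): one mask per box
    print(selected.shape)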
Example #8
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4:(j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(boxlist_for_class,
                                            self.nms,
                                            score_field="scores")
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels, ), j, dtype=torch.int64,
                           device=device))
            if (self.cfg.MODEL.SEG.USE_SEG_POLY
                    or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE
                    or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE):
                boxlist_for_class.add_field('masks',
                                            boxlist.get_field('masks'))
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(),
                number_of_detections - self.detections_per_img + 1)
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
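The kthvalue call computes the score threshold that keeps exactly detections_per_img detections (up to ties). A standalone version of that trick:

    import torch

    scores = torch.tensor([0.9, 0.1, 0.8, 0.3, 0.7])
    detections_per_img = 3

    # the (n - k + 1)-th smallest score is the k-th largest
    k = len(scores) - detections_per_img + 1
    image_thresh, _ = torch.kthvalue(scores, k)
    keep = scores >= image_thresh.item()
    print(scores[keep])  # tensor([0.9000, 0.8000, 0.7000])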
Example #9
 def forward(self, image_list, feature_maps):
     grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
     anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
     anchors = []
     for image_height, image_width in image_list.image_sizes:
         anchors_in_image = []
         for anchors_per_feature_map in anchors_over_all_feature_maps:
             boxlist = BoxList(anchors_per_feature_map,
                               (image_width, image_height),
                               mode="xyxy")
             self.add_visibility_to(boxlist)
             anchors_in_image.append(boxlist)
         anchors.append(anchors_in_image)
     return anchors
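self.grid_anchors (defined elsewhere) produces, per feature map, the base anchors replicated at every stride-spaced cell. A minimal sketch of that shifting under simplifying assumptions (one base anchor, stride 16; the real generator handles multiple sizes and aspect ratios):

    import torch

    stride, grid_h, grid_w = 16, 3, 3
    base_anchor = torch.tensor([[-8., -8., 8., 8.]])  # one centered base box

    shifts_x = torch.arange(grid_w) * stride
    shifts_y = torch.arange(grid_h) * stride
    sy, sx = torch.meshgrid(shifts_y, shifts_x, indexing="ij")
    # (x_shift, y_shift, x_shift, y_shift) per cell, applied to both corners
    shifts = torch.stack([sx.reshape(-1), sy.reshape(-1)] * 2, dim=1).float()

    anchors = (shifts[:, None] + base_anchor[None]).reshape(-1, 4)
    print(anchors.shape)  # torch.Size([9, 4]) -- one anchor per grid cell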
Example #10
 def prepare_boxlist(self, boxes, scores, image_shape, mask=None):
     """
     Returns BoxList from `boxes` and adds probability scores information
     as an extra field
     `boxes` has shape (#detections, 4 * #classes), where each row represents
     a list of predicted bounding boxes for each of the object classes in the
     dataset (including the background class). The detections in each row
     originate from the same object proposal.
     `scores` has shape (#detection, #classes), where each row represents a list
     of object detection confidence scores for each of the object classes in the
     dataset (including the background class). `scores[i, j]`` corresponds to the
     box at `boxes[i, j * 4:(j + 1) * 4]`.
     """
     if not self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
         scores = scores.reshape(-1)
         boxes.add_field("scores", scores)
         return boxes
     boxes = boxes.reshape(-1, 4)
     scores = scores.reshape(-1)
     boxlist = BoxList(boxes, image_shape, mode="xyxy")
     boxlist.add_field("scores", scores)
     if mask is not None:
         boxlist.add_field('masks', mask)
     return boxlist
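After the two reshapes, flat row i * num_classes + j holds the box and score of detection i for class j, which is exactly the correspondence the docstring states. A quick self-contained check:

    import torch

    num_dets, num_classes = 2, 3
    boxes = torch.arange(num_dets * num_classes * 4, dtype=torch.float32)
    boxes = boxes.reshape(num_dets, num_classes * 4)  # (#detections, 4 * #classes)
    scores = torch.rand(num_dets, num_classes)

    flat_boxes = boxes.reshape(-1, 4)
    flat_scores = scores.reshape(-1)
    i, j = 1, 2
    assert torch.equal(flat_boxes[i * num_classes + j], boxes[i, j * 4:(j + 1) * 4])
    assert flat_scores[i * num_classes + j] == scores[i, j]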