Example #1
    def forward(self, image, im_info, gt_boxes=None):
        # Subtract the per-channel dataset mean; the [None, :, None, None]
        # indexing gives it shape (1, C, 1, 1) so it broadcasts over NCHW.
        image = image - torch.tensor(config.image_mean[None, :, None, None],
                                     dtype=image.dtype, device=image.device)
        # Pad H and W to multiples of 64 so every FPN stride divides evenly.
        image = get_padded_tensor(image, 64)
        if self.training:
            return self._forward_train(image, im_info, gt_boxes)
        else:
            return self._forward_test(image, im_info)
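
Example #1 only subtracts the per-channel mean before padding. The helper get_padded_tensor is not defined in the snippet; below is a minimal sketch of what it could look like, assuming it zero-pads the bottom/right of an NCHW tensor so that height and width become multiples of the given stride (the name and call signature match the snippet, but the body is an assumption):

import torch
import torch.nn.functional as F

def get_padded_tensor(tensor, multiple, pad_value=0.0):
    # Assumed behavior: pad H and W of an NCHW tensor up to the next
    # multiple of `multiple`, so every FPN level downsamples cleanly.
    _, _, h, w = tensor.shape
    padded_h = ((h + multiple - 1) // multiple) * multiple
    padded_w = ((w + multiple - 1) // multiple) * multiple
    # F.pad pads the last two dims in (left, right, top, bottom) order.
    return F.pad(tensor, (0, padded_w - w, 0, padded_h - h), value=pad_value)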
Example #2
    def forward(self, image, im_info, gt_boxes=None):
        # Normalize with the per-channel dataset mean and std, both broadcast
        # as (1, C, 1, 1) over the NCHW batch; type_as matches dtype and device.
        mean = torch.tensor(config.image_mean[None, :, None, None]).type_as(image)
        std = torch.tensor(config.image_std[None, :, None, None]).type_as(image)
        image = (image - mean) / std
        # Pad H and W to multiples of 64 so every FPN stride divides evenly.
        image = get_padded_tensor(image, 64)
        if self.training:
            return self._forward_train(image, im_info, gt_boxes)
        else:
            return self._forward_test(image, im_info)
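
Example #2 differs from Example #1 only in that it also divides by the per-channel std. The [None, :, None, None] indexing turns a length-C array into shape (1, C, 1, 1) so it broadcasts over an NCHW batch; a self-contained illustration, using assumed stand-in values for config.image_mean and config.image_std (the real statistics live in the project config):

import numpy as np
import torch

# Assumed example values; the actual per-channel statistics come from config.
image_mean = np.array([103.530, 116.280, 123.675], dtype=np.float32)
image_std = np.array([57.375, 57.120, 58.395], dtype=np.float32)

image = torch.rand(2, 3, 480, 640) * 255.0                            # NCHW batch
mean = torch.tensor(image_mean[None, :, None, None]).type_as(image)   # (1, 3, 1, 1)
std = torch.tensor(image_std[None, :, None, None]).type_as(image)     # (1, 3, 1, 1)
normalized = (image - mean) / std                                     # broadcasts over N, H, W
print(normalized.shape)  # torch.Size([2, 3, 480, 640])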
Example #3
    def forward(self, image, im_info, gt_boxes=None):
        # Pre-process: normalize with the per-channel dataset mean and std.
        mean = torch.tensor(config.image_mean[None, :, None, None]).type_as(image)
        std = torch.tensor(config.image_std[None, :, None, None]).type_as(image)
        image = (image - mean) / std
        # Pad H and W to multiples of 64 so every FPN stride divides evenly.
        image = get_padded_tensor(image, 64)
        # Run the detector: FPN feature maps with strides 128, 64, 32, 16, 8
        # (levels p7 -> p3), then anchors and per-level class/regression heads.
        fpn_fms = self.FPN(image)
        anchors_list = self.R_Anchor(fpn_fms)
        pred_cls_list, pred_reg_list = self.R_Head(fpn_fms)
        # Training returns the loss dict; inference returns the raw predictions.
        if self.training:
            loss_dict = self.R_Criteria(pred_cls_list, pred_reg_list,
                                        anchors_list, gt_boxes, im_info)
            return loss_dict
        else:
            # Alternative: decode boxes here instead of returning raw outputs:
            # pred_bbox = union_inference(
            #     anchors_list, pred_cls_list, pred_reg_list, im_info)
            return anchors_list, pred_cls_list, pred_reg_list, im_info
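
Example #3 inlines both branches of the same pattern: training mode returns the loss dict produced by R_Criteria, while evaluation mode returns the raw anchors and per-level class/regression predictions for the caller to decode (the union_inference call is left commented out). A usage sketch under stated assumptions; the import, the class name, and the im_info/gt_boxes layouts are hypothetical, not taken from the snippets above:

import torch
from network import Network   # hypothetical import path and class name

net = Network()                                 # module exposing the forward() above
image = torch.rand(1, 3, 800, 1216)             # NCHW image batch
im_info = torch.tensor([[800.0, 1216.0, 1.0]])  # assumed (height, width, scale) layout
gt_boxes = torch.rand(1, 8, 5)                  # assumed (batch, num_boxes, 5) layout

# Training: gt_boxes are required and a dict of named losses comes back.
net.train()
loss_dict = net(image, im_info, gt_boxes)
total_loss = sum(loss_dict.values())
total_loss.backward()

# Inference: no gt_boxes; the raw anchors and per-level predictions come back
# and still need to be decoded and filtered (e.g. NMS) by the caller.
net.eval()
with torch.no_grad():
    anchors_list, pred_cls_list, pred_reg_list, im_info_out = net(image, im_info)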