Code Example #1
File: ops.py Project: RAIVNLab/vince
def crop_and_resize(
        img, center, size, out_size, border_type=cv2.BORDER_CONSTANT, border_value=(0, 0, 0), interp=cv2.INTER_LINEAR
):
    # convert box to corners (0-indexed)
    xyxy = bb_util.xywh_to_xyxy([center[1], center[0], size, size])
    # Mean image color, passed to get_cropped_input (presumably as out-of-image padding);
    # note that border_type and border_value are accepted but not used in this body.
    avg_color = np.mean(img, axis=(0, 1), dtype=float)

    # Use my faster version; get_cropped_input returns a tuple whose first element is the patch.
    patch = image_util.get_cropped_input(img, xyxy, 1, out_size, interp, avg_color)[0]
    return patch
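
A rough usage sketch: crop_and_resize cuts a square window of side size around center (given as (row, col)) and resizes it to out_size. The image path and the numeric values below are made up for illustration, not taken from the project.

import cv2
import numpy as np

img = cv2.imread("frame_0001.jpg")   # any BGR image; hypothetical path
center = np.array([120.0, 200.0])    # (row, col) of the target center
size = 64.0                          # side of the square crop in source pixels
patch = crop_and_resize(img, center, size, out_size=255)
print(patch.shape)                   # expected: (255, 255, 3)
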
Code Example #2
    def _crop_and_stretch(self, img, box, box_transforms):
        # Faster version of their crop-and-stretch functions which computes the output
        # image only once instead of many times.
        box_start = copy.deepcopy(box)
        box = self._get_crop_box(box, self.instance_sz)
        box = box_transforms(box)
        # Warn before clamping; after np.maximum the width/height can never be < 2.
        if np.any(np.array(box[2:4]) < 2):
            print("box is very small", box_start, box)
        box[2:4] = np.maximum(box[2:4], 2)
        xyxy = bb_util.xywh_to_xyxy(box[:4])
        avg_color = np.mean(img, axis=(0, 1), dtype=float)
        img = image_util.get_cropped_input(img, xyxy, 1, box[4],
                                           cv2.INTER_LINEAR, avg_color)[0]
        return img
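
image_util.get_cropped_input itself is not shown in these snippets. Below is a rough, stand-alone sketch of the same idea under assumed OpenCV conventions (crop an [x1, y1, x2, y2] box, pad out-of-bounds pixels with a constant color, resize to a square); the name crop_pad_resize and the exact behavior are illustrative, not the project's implementation.

import cv2
import numpy as np

def crop_pad_resize(img, xyxy, out_size, pad_color):
    # Illustrative only: assumes the box overlaps the image at least partially.
    x1, y1, x2, y2 = [int(round(v)) for v in xyxy]
    h, w = img.shape[:2]
    # How far the box hangs over each image edge.
    pad_left, pad_top = max(0, -x1), max(0, -y1)
    pad_right, pad_bottom = max(0, x2 - w), max(0, y2 - h)
    # Take the valid region, then pad with the constant color to restore the box geometry.
    crop = img[max(0, y1):min(h, y2), max(0, x1):min(w, x2)]
    crop = cv2.copyMakeBorder(crop, pad_top, pad_bottom, pad_left, pad_right,
                              cv2.BORDER_CONSTANT, value=tuple(float(c) for c in pad_color))
    return cv2.resize(crop, (out_size, out_size), interpolation=cv2.INTER_LINEAR)
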
Code Example #3
    def get_metrics(
            self,
            network_outputs: Optional[Dict]) -> Dict[str, Optional[float]]:
        with torch.no_grad():
            if network_outputs is None:
                return {"dist": None, "center_dist": None, "mean_iou": None}
            metrics = {}
            responses = network_outputs["responses"]
            labels = self._create_labels(responses.size())
            # Mean per-pixel L1 distance between the sigmoid response map and the target labels.
            responses_sigm = torch.sigmoid(responses)
            dist = torch.abs(responses_sigm - labels)
            metrics["dist"] = dist.mean()

            pred_boxes = self.prediction_to_box(responses)
            network_outputs["pred_boxes_cxcywh"] = pred_boxes

            # The ground-truth box is fixed: centered at (0.5, 0.5) with width and height 0.5
            # (cxcywh in normalized crop coordinates).
            gt_box_cxcywh = [0.5, 0.5, 0.5, 0.5]
            center_dist = torch.mean(torch.abs(pred_boxes[:2] - 0.5))
            metrics["center_dist"] = center_dist
            gt_box = bb_util.xywh_to_xyxy(gt_box_cxcywh)
            pred_boxes = bb_util.xywh_to_xyxy(pred_boxes)
            ious = bb_util.IOU_numpy(pred_boxes.T, gt_box)
            metrics["mean_iou"] = np.mean(ious)
            return metrics
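
The bb_util helpers are not shown here. Judging from the variable names (pred_boxes_cxcywh, gt_box_cxcywh), xywh_to_xyxy treats boxes as [cx, cy, w, h]. A minimal sketch of equivalent helpers, not the project's actual bb_util code, which may differ in broadcasting and edge cases:

import numpy as np

def cxcywh_to_xyxy(box):
    # [cx, cy, w, h] -> [x1, y1, x2, y2]
    cx, cy, w, h = box[0], box[1], box[2], box[3]
    return np.array([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2])

def iou_xyxy(boxes, gt):
    # IoU of an (N, 4) array of xyxy boxes against a single xyxy box.
    ix1 = np.maximum(boxes[:, 0], gt[0])
    iy1 = np.maximum(boxes[:, 1], gt[1])
    ix2 = np.minimum(boxes[:, 2], gt[2])
    iy2 = np.minimum(boxes[:, 3], gt[3])
    inter = np.clip(ix2 - ix1, 0, None) * np.clip(iy2 - iy1, 0, None)
    area_boxes = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    area_gt = (gt[2] - gt[0]) * (gt[3] - gt[1])
    return inter / (area_boxes + area_gt - inter)
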
Code Example #4
    def _crop_and_stretch(self, img, box, box_transforms, make_label):
        # Faster version of their crop-and-stretch functions which computes the output
        # image only once instead of many times.
        box = self._get_crop_box(box, self.instance_sz)
        box_start = copy.deepcopy(box)
        box = box_transforms(box)
        # Warn before clamping; after np.maximum the width/height can never be < 2.
        if np.any(np.array(box[2:4]) < 2):
            print("box is very small", box_start, box)
        box[2:4] = np.maximum(box[2:4], 2)
        xyxy = bb_util.xywh_to_xyxy(box[:4])
        avg_color = np.mean(img, axis=(0, 1), dtype=float)
        img = image_util.get_cropped_input(img, xyxy, 1, box[4], cv2.INTER_LINEAR, avg_color)[0]
        if make_label:
            # Offset of the original box center from the transformed crop center, in label-map cells.
            center_diff = (box_start[:2] - box[:2]) / box[3] * self.label_size
            # L1 distance to the shifted center; an L2 alternative would be
            # np.sqrt((self.x_grid - center_diff[0]) ** 2 + (self.y_grid - center_diff[1]) ** 2).
            dist = np.abs(self.x_grid - center_diff[0]) + np.abs(self.y_grid - center_diff[1])
            mask = dist <= (self.positive_label_width / 2)
            return img, mask
        return img
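
self.x_grid, self.y_grid, self.label_size, and self.positive_label_width come from elsewhere in the class and are not shown. A small stand-alone sketch of the same labeling idea (mark cells whose L1 distance to the shifted center is at most half the positive-label width); the concrete values and the centering of the grids are assumptions for illustration:

import numpy as np

label_size = 17                      # assumed response-map size
positive_label_width = 4.0           # assumed width of the positive region, in cells
x_grid, y_grid = np.meshgrid(np.arange(label_size), np.arange(label_size))
x_grid = x_grid - label_size // 2    # center the grids so (0, 0) is the middle cell
y_grid = y_grid - label_size // 2

center_diff = np.array([1.5, -2.0])  # example offset of the target center, in cells
dist = np.abs(x_grid - center_diff[0]) + np.abs(y_grid - center_diff[1])
mask = dist <= positive_label_width / 2
print(mask.sum(), "positive cells out of", mask.size)
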