Example #1
    def __call__(self, ori_img):
        # img to tensor
        assert isinstance(ori_img, np.ndarray), "input must be a numpy array!"

        # convert to float and scale pixel values to [0, 1]
        # (np.float is deprecated and removed in recent NumPy; use np.float32)
        img = ori_img.astype(np.float32) / 255.
        img = cv2.resize(img, self.size)

        # https://blog.csdn.net/zzw000000/article/details/80320040
        # Convert the numpy array to a tensor:
        #   permute(2, 0, 1) reorders the dimensions from HWC to CHW,
        #   unsqueeze(0) adds a batch dimension, giving NCHW,
        #   e.g. (416, 416, 3) -> (3, 416, 416) -> (1, 3, 416, 416)
        img = torch.from_numpy(img).float().permute(2, 0, 1).unsqueeze(0)
        # forward
        with torch.no_grad():
            img = img.to(self.device)
            out_boxes = self.net(img)
            boxes = get_all_boxes(out_boxes,
                                  self.conf_thresh,
                                  self.net.num_classes,
                                  use_cuda=self.use_cuda)[0]
            boxes = nms(boxes, self.nms_thresh)
            # print(boxes)
        # plot boxes
        if self.is_plot:
            return self.plot_bbox(ori_img, boxes)
        if len(boxes) == 0:
            return None, None, None

        height, width = ori_img.shape[:2]
        boxes = np.vstack(boxes)
        bbox = np.empty_like(boxes[:, :4])
        if self.is_xywh:
            # bbox: (center x, center y, w, h), scaled to pixel units
            bbox[:, 0] = boxes[:, 0] * width
            bbox[:, 1] = boxes[:, 1] * height
            bbox[:, 2] = boxes[:, 2] * width
            bbox[:, 3] = boxes[:, 3] * height
        else:
            # bbox: (xmin, ymin, xmax, ymax), converted from center format
            bbox[:, 0] = (boxes[:, 0] - boxes[:, 2] / 2.0) * width
            bbox[:, 1] = (boxes[:, 1] - boxes[:, 3] / 2.0) * height
            bbox[:, 2] = (boxes[:, 0] + boxes[:, 2] / 2.0) * width
            bbox[:, 3] = (boxes[:, 1] + boxes[:, 3] / 2.0) * height
        cls_conf = boxes[:, 5]
        cls_ids = boxes[:, 6]
        # print(cls_ids)
        return bbox, cls_conf, cls_ids
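
For context, here is a minimal usage sketch for the detector above. The wrapper class name YOLO3 and its constructor arguments are assumptions for illustration; only the __call__ contract (a numpy image in, (bbox, cls_conf, cls_ids) out) comes from the example itself.

import cv2

# hypothetical wrapper exposing the __call__ above; name and args are assumed
detector = YOLO3(cfg="yolo_v3.cfg", weights="yolo_v3.weights",
                 is_xywh=True, conf_thresh=0.5, nms_thresh=0.4)

ori_img = cv2.imread("demo.jpg")             # HxWx3 numpy array
bbox, cls_conf, cls_ids = detector(ori_img)
if bbox is not None:
    print(bbox.shape, cls_conf.shape)        # (N, 4) and (N,)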
Example #2
    def __call__(self, ori_img):
        # img to tensor
        assert isinstance(ori_img, np.ndarray), "input must be a numpy array!"
        img = ori_img.astype(np.float32) / 255.  # np.float is deprecated; use np.float32

        img = cv2.resize(img, self.size)
        img = torch.from_numpy(img).float().permute(2, 0, 1).unsqueeze(0)
        # forward
        with torch.no_grad():
            img = img.to(self.device)
            out_boxes = self.net(img)
            boxes = get_all_boxes(out_boxes, self.conf_thresh,
                                  self.net.num_classes, self.use_cuda)[0]
            boxes = nms(boxes, self.nms_thresh)
            # print(type(boxes))  # leftover debug output
            # keep the raw normalized boxes from NMS; returned below
            re_boxes = boxes
            # print(boxes)
        # plot boxes
        if self.is_plot:
            return self.plot_bbox(ori_img, boxes)
        if len(boxes) == 0:
            return None, None, None

        height, width = ori_img.shape[:2]
        boxes = np.vstack(boxes)
        bbox = np.empty_like(boxes[:, :4])
        if self.is_xywh:
            # bbox: (center x, center y, w, h), scaled to pixel units
            bbox[:, 0] = boxes[:, 0] * width
            bbox[:, 1] = boxes[:, 1] * height
            bbox[:, 2] = boxes[:, 2] * width
            bbox[:, 3] = boxes[:, 3] * height
        else:
            # bbox: (xmin, ymin, xmax, ymax), converted from center format
            bbox[:, 0] = (boxes[:, 0] - boxes[:, 2] / 2.0) * width
            bbox[:, 1] = (boxes[:, 1] - boxes[:, 3] / 2.0) * height
            bbox[:, 2] = (boxes[:, 0] + boxes[:, 2] / 2.0) * width
            bbox[:, 3] = (boxes[:, 1] + boxes[:, 3] / 2.0) * height
        cls_conf = boxes[:, 5]
        cls_ids = boxes[:, 6]
        # print(bbox[:,0], bbox[:,1], bbox[:,2], bbox[:,3])
        # Unlike Example #1, this variant returns the raw NMS boxes
        # (still normalized) instead of the pixel-space bbox:
        # return bbox, cls_conf, cls_ids
        return re_boxes, cls_conf, cls_ids
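
As a quick numeric check of the center-to-corner conversion used in both examples, take one normalized (cx, cy, w, h) detection and scale it to a 640x480 image (the values are illustrative):

import numpy as np

boxes = np.array([[0.5, 0.5, 0.2, 0.4]])  # one box: cx, cy, w, h in [0, 1]
width, height = 640, 480

xmin = (boxes[:, 0] - boxes[:, 2] / 2.0) * width    # (0.5 - 0.1) * 640 = 256.0
ymin = (boxes[:, 1] - boxes[:, 3] / 2.0) * height   # (0.5 - 0.2) * 480 = 144.0
xmax = (boxes[:, 0] + boxes[:, 2] / 2.0) * width    # (0.5 + 0.1) * 640 = 384.0
ymax = (boxes[:, 1] + boxes[:, 3] / 2.0) * height   # (0.5 + 0.2) * 480 = 336.0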
Example #3
def yolo_crop(from_mat):
    # relies on module-level globals: sess, return_tensors, input_size, num_classes
    height, width = from_mat.shape[:2]
    original_image = from_mat
    # original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]
    image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
    image_data = image_data[np.newaxis, ...]

    pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
        [return_tensors[1], return_tensors[2], return_tensors[3]],
        feed_dict={return_tensors[0]: image_data})

    # print(pred_sbbox.shape, pred_mbbox.shape, pred_lbbox.shape)
    pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),
                                np.reshape(pred_mbbox, (-1, 5 + num_classes)),
                                np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)

    # bboxes: (xmin, ymin, xmax, ymax, score, class)
    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')

    # image = utils.draw_bbox(original_image, bboxes)
    # img_arr = image
    # image = Image.fromarray(image)

    for (x, y, xm, ym, score, cls) in bboxes:

        low_res = False      # unused below
        out_bounds = False   # set when the crop has to be clamped to the image
        relaxed_fit = False  # unused below

        # BB new crop 4 (disabled; flip this branch to True to use the wider crop)
        if False:
            w = xm - x
            h = ym - y
            x_mid = (x + xm) * 0.5
            y_mid = (ym - (xm - x) * 0.5)
            s = w
            crop_x = int(x_mid - s * 1.2)
            crop_x2 = crop_x + int(s * 2.4)
            crop_y2 = int(y_mid + s * 1.1)
            crop_y = crop_y2 - int(s * 2.4)
            if crop_x < 0:
                crop_x = 0
                out_bounds = True
            if crop_y < 0:
                crop_y = 0
                out_bounds = True
            if crop_x2 > width:
                crop_x2 = width
                out_bounds = True
            if crop_y2 > height:
                crop_y2 = height
                out_bounds = True

            # cropped = from_mat[crop_y:crop_y2, crop_x:crop_x2]
        elif True:  # halfway between "BB new crop 4" and no expansion
            w = xm - x
            h = ym - y
            x_mid = (x + xm) * 0.5
            y_mid = (ym - (xm - x) * 0.5)
            s = w
            crop_x = int(x_mid - s * 0.9)
            crop_x2 = crop_x + int(s * 1.8)
            crop_y2 = int(y_mid + s * 0.825)
            crop_y = crop_y2 - int(s * 1.8)
            if crop_x < 0:
                crop_x = 0
                out_bounds = True
            if crop_y < 0:
                crop_y = 0
                out_bounds = True
            if crop_x2 > width:
                crop_x2 = width
                out_bounds = True
            if crop_y2 > height:
                crop_y2 = height
                out_bounds = True
        else:
            print("ERROR: no crop branch selected")
            exit()

        # need to return to match fa.face_detector.detect_from_image
        # [[min_x, min_y, max_x, max_y, prob]...]

        # basic crop
        # yield [x, y, xm, ym, score]
        # crop above
        yield [crop_x, crop_y, crop_x2, crop_y2, score]
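
Since yolo_crop is a generator, callers consume the crop rectangles lazily. A minimal sketch, assuming the surrounding module has already set up sess, return_tensors, input_size, and num_classes (the image path is illustrative):

import cv2

frame = cv2.imread("frame.jpg")
for (x1, y1, x2, y2, score) in yolo_crop(frame):
    crop = frame[y1:y2, x1:x2]          # expanded square crop around the detection
    print("crop", crop.shape, "score", score)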