Example #1
    def detect_onet(self, img, r_boxes):
        # Square the RNet boxes, crop and resize each region to 48x48, and
        # batch them for ONet.
        sq_r_boxes = utils.convert_to_square(r_boxes)
        img_datas = []
        for _box in sq_r_boxes:
            _x1 = int(_box[0])
            _y1 = int(_box[1])
            _x2 = int(_box[2])
            _y2 = int(_box[3])
            img_crop = img.crop((_x1, _y1, _x2, _y2))
            img_re = img_crop.resize((48, 48))
            img_data = self.transf(img_re)
            img_datas.append(img_data)
        img_datasets = torch.stack(img_datas)
        if self.isCuda:
            img_datasets = img_datasets.cuda()
        with torch.no_grad():
            cond, offset, land = self.onet(img_datasets)
        cls = cond.cpu().data.numpy()
        offset = offset.cpu().data.numpy()
        # Keep only boxes whose confidence exceeds the ONet threshold.
        indexs, _ = np.where(cls > self.cond[2])
        boxes = sq_r_boxes[indexs]
        nx1 = boxes[:, 0].astype(np.int32)
        ny1 = boxes[:, 1].astype(np.int32)
        nx2 = boxes[:, 2].astype(np.int32)
        ny2 = boxes[:, 3].astype(np.int32)
        dw = nx2 - nx1
        dh = ny2 - ny1
        # Refine each square box with the predicted regression offsets.
        x1 = nx1 + dw * offset[indexs][:, 0]
        y1 = ny1 + dh * offset[indexs][:, 1]
        x2 = nx2 + dw * offset[indexs][:, 2]
        y2 = ny2 + dh * offset[indexs][:, 3]
        boxes = np.array([x1, y1, x2, y2, cls[indexs].T[0]]).T
        # Final NMS uses intersection-over-minimum area (IsUnion=False).
        return utils.NMS(boxes, self.threshold[2], IsUnion=False)
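
All of these examples rely on utils.convert_to_square, which is not shown on this page. Below is a minimal sketch of what such a helper typically does, assuming boxes arrive as an (N, 5) array of [x1, y1, x2, y2, score]; the actual implementation in this repo may differ.

    import numpy as np

    def convert_to_square(boxes):
        # Expand each box to a square centred on the original box, using the
        # longer side, so the crop fed to RNet/ONet is not distorted by resize().
        square = boxes.copy()
        w = boxes[:, 2] - boxes[:, 0]
        h = boxes[:, 3] - boxes[:, 1]
        side = np.maximum(w, h)
        square[:, 0] = boxes[:, 0] + w * 0.5 - side * 0.5
        square[:, 1] = boxes[:, 1] + h * 0.5 - side * 0.5
        square[:, 2] = square[:, 0] + side
        square[:, 3] = square[:, 1] + side
        return square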
Example #2
    def detect_pnet(self, img):
        # Build an image pyramid and run the fully convolutional PNet on
        # every scale until the shorter side drops below 12 px.
        w, h = img.size
        min_side = min(w, h)
        scale = 1.0
        new_scale = 1.0
        p_boxes = []
        while min_side > 12:
            image_tensor = self.transf(img)
            image_tensor = image_tensor.unsqueeze(0)
            if self.isCuda:
                image_tensor = image_tensor.cuda()
            with torch.no_grad():
                cond, offset, land = self.pnet(image_tensor)

            # cond has shape (1, 1, n, m); offset has shape (1, 4, n, m).
            cls = cond[0][0].cpu().data
            reg = offset[0].cpu().data.numpy()
            # Indices of feature-map cells above the PNet confidence threshold.
            indexs = torch.nonzero(torch.gt(cls, self.cond[0])).numpy()

            # Map feature-map positions back to boxes on the original image
            # and keep the per-scale NMS survivors.
            boxes = self.box(indexs, reg, cls[indexs[:, 0], indexs[:, 1]],
                             new_scale)
            p_boxes.extend(utils.NMS(boxes, self.threshold[0]))

            # Shrink by a factor of 0.7 for the next pyramid level.
            scale *= 0.7
            nw = int(w * scale)
            nh = int(h * scale)
            new_scale = min(nw, nh) / min(w, h)
            img = img.resize((nw, nh))
            min_side = min(nw, nh)

        return utils.NMS(np.array(p_boxes), 0.7)
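
Example #2 calls self.box (and Example #6 a per-index self.__box) to map PNet feature-map positions back to 12x12 windows on the original image. Below is a vectorised sketch under the standard MTCNN assumptions of a stride of 2 and a 12x12 receptive field; the real helper in this codebase may differ.

    def box(self, indexs, offset, cls, scale, stride=2, side_len=12):
        # indexs: (K, 2) array of (row, col) positions in the feature map,
        # offset: (4, n, m) regression map, cls: K confidences, scale: the
        # pyramid scale of the image this feature map came from.
        _x1 = (indexs[:, 1] * stride) / scale
        _y1 = (indexs[:, 0] * stride) / scale
        _x2 = (indexs[:, 1] * stride + side_len) / scale
        _y2 = (indexs[:, 0] * stride + side_len) / scale
        ow = _x2 - _x1
        oh = _y2 - _y1
        _offset = offset[:, indexs[:, 0], indexs[:, 1]]  # (4, K)
        x1 = _x1 + ow * _offset[0]
        y1 = _y1 + oh * _offset[1]
        x2 = _x2 + ow * _offset[2]
        y2 = _y2 + oh * _offset[3]
        return np.array([x1, y1, x2, y2, np.asarray(cls)]).T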
Example #3
    def __rnet_detect(self, image, pnet_boxes):
        # Square the PNet boxes, crop and resize each region to 24x24, and
        # batch them for RNet.
        _img_dataset = []
        _pnet_boxes = utils.convert_to_square(pnet_boxes)
        for _box in _pnet_boxes:
            _x1 = int(_box[0])
            _y1 = int(_box[1])
            _x2 = int(_box[2])
            _y2 = int(_box[3])

            img = image.crop((_x1, _y1, _x2, _y2))
            img = img.resize((24, 24))
            img_data = self.__image_transform(img)
            _img_dataset.append(img_data)

        img_dataset = torch.stack(_img_dataset)
        if self.isCuda:
            img_dataset = img_dataset.cuda()

        _cls, _offset, land = self.rnet(img_dataset)
        cls = _cls.cpu().data.numpy()
        offset = _offset.cpu().data.numpy()

        boxes = []
        # Keep boxes whose RNet confidence exceeds 0.7 and refine them with
        # the predicted regression offsets.
        idxs, _ = np.where(cls > 0.7)
        for idx in idxs:
            _box = _pnet_boxes[idx]
            _x1 = int(_box[0])
            _y1 = int(_box[1])
            _x2 = int(_box[2])
            _y2 = int(_box[3])

            ow = _x2 - _x1
            oh = _y2 - _y1

            x1 = _x1 + ow * offset[idx][0]
            y1 = _y1 + oh * offset[idx][1]
            x2 = _x2 + ow * offset[idx][2]
            y2 = _y2 + oh * offset[idx][3]

            boxes.append([x1, y1, x2, y2, cls[idx][0]])

        return utils.NMS(np.array(boxes), 0.7)
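
self.__image_transform (and self.transf in the other implementation) is presumably a torchvision pipeline created once in __init__. Here is a sketch of what it likely looks like; the normalisation constants are an assumption, not taken from this repo, and must match whatever the networks were trained with.

    from torchvision import transforms

    # Built once in the detector's __init__ and stored as self.__image_transform.
    image_transform = transforms.Compose([
        transforms.ToTensor(),                      # PIL image -> CxHxW float tensor in [0, 1]
        transforms.Normalize(mean=[0.5, 0.5, 0.5],  # assumed stats, not taken from this repo
                             std=[0.5, 0.5, 0.5]),
    ])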
Example #4
    def __onet_detect(self, image, rnet_boxes):
        # Square the RNet boxes, crop and resize each region to 48x48, and
        # batch them for ONet.
        datasets = []
        _rnet_boxes = utils.convert_to_square(rnet_boxes)
        for _box in _rnet_boxes:
            _x1 = int(_box[0])
            _y1 = int(_box[1])
            _x2 = int(_box[2])
            _y2 = int(_box[3])

            img_crop = image.crop((_x1, _y1, _x2, _y2))
            img_re = img_crop.resize((48, 48))
            img_data = self.__image_transform(img_re)
            datasets.append(img_data)

        img_dataset = torch.stack(datasets)
        if self.isCuda:
            img_dataset = img_dataset.cuda()

        _cls, _offset, land = self.onet(img_dataset)
        cls = _cls.cpu().data.numpy()
        offset = _offset.cpu().data.numpy()

        boxes = []
        # Keep boxes whose ONet confidence exceeds 0.7 and refine them with
        # the predicted regression offsets.
        idxs, _ = np.where(cls > 0.7)
        for idx in idxs:
            _box = _rnet_boxes[idx]
            _x1 = int(_box[0])
            _y1 = int(_box[1])
            _x2 = int(_box[2])
            _y2 = int(_box[3])

            ow = _x2 - _x1
            oh = _y2 - _y1

            x1 = _x1 + ow * offset[idx][0]
            y1 = _y1 + oh * offset[idx][1]
            x2 = _x2 + ow * offset[idx][2]
            y2 = _y2 + oh * offset[idx][3]

            boxes.append([x1, y1, x2, y2, cls[idx][0]])

        # Final NMS uses intersection-over-minimum area (IsUnion=False).
        return utils.NMS(np.array(boxes), 0.7, IsUnion=False)
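
Every example ends with utils.NMS, and the ONet stage passes IsUnion=False, which conventionally switches the overlap measure from IoU to intersection-over-minimum-area so that boxes nested inside larger ones are also suppressed. Below is a hedged sketch of such a helper; the repo's version may differ in sort order or edge-case handling.

    import numpy as np

    def NMS(boxes, threshold, IsUnion=True):
        # boxes: (N, 5) array of [x1, y1, x2, y2, score]. Repeatedly keep the
        # highest-scoring box and drop neighbours whose overlap exceeds threshold.
        if boxes.shape[0] == 0:
            return np.array([])
        boxes = boxes[np.argsort(-boxes[:, 4])]  # sort by score, descending
        keep = []
        while boxes.shape[0] > 1:
            best, rest = boxes[0], boxes[1:]
            keep.append(best)
            xx1 = np.maximum(best[0], rest[:, 0])
            yy1 = np.maximum(best[1], rest[:, 1])
            xx2 = np.minimum(best[2], rest[:, 2])
            yy2 = np.minimum(best[3], rest[:, 3])
            inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
            area_best = (best[2] - best[0]) * (best[3] - best[1])
            area_rest = (rest[:, 2] - rest[:, 0]) * (rest[:, 3] - rest[:, 1])
            if IsUnion:
                overlap = inter / (area_best + area_rest - inter)   # IoU
            else:
                overlap = inter / np.minimum(area_best, area_rest)  # over min area
            boxes = rest[overlap < threshold]
        if boxes.shape[0] > 0:
            keep.append(boxes[0])
        return np.stack(keep)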
Example #5
    def detect_rnet(self, img, p_boxes):
        # Square the PNet boxes, crop and resize each region to 24x24, and
        # batch them for RNet.
        sq_p_boxes = utils.convert_to_square(p_boxes)
        img_datas = []
        for _box in sq_p_boxes:
            _x1 = int(_box[0])
            _y1 = int(_box[1])
            _x2 = int(_box[2])
            _y2 = int(_box[3])
            img_crop = img.crop((_x1, _y1, _x2, _y2))
            img_resize = img_crop.resize((24, 24))
            img_data = self.transf(img_resize)
            img_datas.append(img_data)
        img_datasets = torch.stack(img_datas)
        if self.isCuda:
            img_datasets = img_datasets.cuda()
        with torch.no_grad():
            cond, offset, land = self.rnet(img_datasets)
        cls = cond.cpu().data.numpy()
        offset = offset.cpu().data.numpy()
        # Keep only boxes whose confidence exceeds the RNet threshold.
        indexs, _ = np.where(cls > self.cond[1])
        boxes = sq_p_boxes[indexs]
        nx1 = boxes[:, 0].astype(np.int32)
        ny1 = boxes[:, 1].astype(np.int32)
        nx2 = boxes[:, 2].astype(np.int32)
        ny2 = boxes[:, 3].astype(np.int32)
        dw = nx2 - nx1
        dh = ny2 - ny1
        # Refine each square box with the predicted regression offsets.
        x1 = nx1 + dw * offset[indexs][:, 0]
        y1 = ny1 + dh * offset[indexs][:, 1]
        x2 = nx2 + dw * offset[indexs][:, 2]
        y2 = ny2 + dh * offset[indexs][:, 3]
        boxes = np.array([x1, y1, x2, y2, cls[indexs].T[0]]).T
        return utils.NMS(boxes, self.threshold[1])
Example #6
    def __pnet_detect(self, image):
        # Run PNet over an image pyramid; boxes from every scale are pooled
        # and filtered with a final NMS pass.
        img = image
        w, h = img.size
        min_side_len = min(w, h)

        scale = 1.0
        boxes = []
        while min_side_len > 12:
            img_data = self.__image_transform(img)
            if self.isCuda:
                img_data = img_data.cuda()
            img_data.unsqueeze_(0)

            _cls, _offset, land = self.pnet(img_data)

            cls, offset = _cls[0][0].cpu().data, _offset[0].cpu().data
            # Feature-map cells with confidence above 0.6 become candidates.
            idxs = torch.nonzero(torch.gt(cls, 0.6)).numpy()
            for idx in idxs:
                boxes.append(self.__box(idx, offset, cls[idx[0], idx[1]], scale))

            # Shrink by a factor of 0.7 for the next pyramid level.
            scale *= 0.7
            _w = int(w * scale)
            _h = int(h * scale)
            img = img.resize((_w, _h))
            min_side_len = min(_w, _h)

        return np.array(utils.NMS(np.array(boxes), 0.5))
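
For completeness, the three stages above are normally chained by a small driver that also stops early when a stage returns nothing. Below is a sketch of how the private methods from Examples #3, #4 and #6 would typically be called; the early-exit checks are an assumption, not code from this repo.

    def detect(self, image):
        # Cascade: PNet proposals -> RNet refinement -> ONet final boxes.
        pnet_boxes = self.__pnet_detect(image)
        if pnet_boxes.shape[0] == 0:
            return np.array([])
        rnet_boxes = self.__rnet_detect(image, pnet_boxes)
        if rnet_boxes.shape[0] == 0:
            return np.array([])
        return self.__onet_detect(image, rnet_boxes)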