Example #1
    def bbox_transform(self, bbox):
        num_priors = self.priors.shape[0]
        if bbox is None or len(bbox) == 0:
            return (np.zeros((num_priors, 4), dtype=np.float32),
                    np.zeros(num_priors, dtype=np.int64))
        elif isinstance(bbox, np.ndarray):
            height, width = self.image_size
            gt_label = None
            gt_box = bbox
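            # an odd number of columns means the last column carries class labels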
            if bbox.shape[-1] % 2 == 1:
                gt_box = bbox[:, :-1]
                gt_label = bbox[:, -1]

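            # normalize box coordinates to [0, 1] relative to the image size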
            gt_box[:, 0::2] /= width
            gt_box[:, 1::2] /= height

            # match priors (default boxes) and ground truth boxes
            if gt_box is not None and gt_label is not None and len(gt_box) > 0:
                truths = to_tensor(gt_box).float()
                labels = to_tensor(gt_label).long()
                loc_t, conf_t = match(truths, self.priors.data, (0.1, 0.2),
                                      labels, self.gt_overlap_tolerance)

                return (to_numpy(loc_t).astype(np.float32),
                        to_numpy(conf_t).astype(np.int64))
            return (np.zeros((num_priors, 4), dtype=np.float32),
                    np.zeros(num_priors, dtype=np.int64))
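The match call above comes from SSD-style training utilities and is not shown in this example. As a rough standalone numpy sketch of the assignment step it performs (the names iou_xyxy and assign_priors are illustrative only; the real match additionally forces each ground truth onto its best prior and encodes the matched boxes as offsets using the (0.1, 0.2) variances):

import numpy as np

def iou_xyxy(a, b):
    # pairwise IoU between boxes a (N, 4) and b (M, 4) in x1, y1, x2, y2 form
    lt = np.maximum(a[:, None, :2], b[None, :, :2])
    rb = np.minimum(a[:, None, 2:], b[None, :, 2:])
    wh = np.clip(rb - lt, 0, None)
    inter = wh[..., 0] * wh[..., 1]
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    return inter / (area_a[:, None] + area_b[None, :] - inter)

def assign_priors(gt_boxes, gt_labels, priors, iou_threshold=0.5):
    # every prior takes its best-overlapping ground truth (priors assumed in
    # corner form); priors whose best IoU falls below the threshold become
    # background (label 0)
    overlaps = iou_xyxy(gt_boxes, priors)   # (num_gt, num_priors)
    best_gt = overlaps.argmax(0)
    best_iou = overlaps.max(0)
    labels = np.asarray(gt_labels)[best_gt].copy()
    labels[best_iou < iou_threshold] = 0
    return gt_boxes[best_gt], labels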
Example #2
 def predict(self, width, height, confidences, boxes, detection_threshold=None, iou_threshold=0.3, top_k=-1):
      if detection_threshold is not None:
          self.detection_threshold = detection_threshold
     picked_box_probs = []
     picked_labels = []
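      # class index 0 is the background class, so scoring starts from class 1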
     for class_index in range(1, confidences.shape[1]):
         probs = confidences[:, class_index]
         mask = probs > self.detection_threshold
         probs = probs[mask]
         if probs.shape[0] == 0:
             continue
         subset_boxes = boxes[mask, :]
         box_probs = concate([subset_boxes, probs.reshape(-1, 1)], axis=1)
         box_probs = self.hard_nms(box_probs,
                                   iou_threshold=iou_threshold,
                                   top_k=top_k,
                                   )
         picked_box_probs.append(box_probs)
         picked_labels.extend([class_index] * box_probs.shape[0])
     if not picked_box_probs:
         return np.array([]), np.array([]), np.array([])
     picked_box_probs = concate(picked_box_probs)
     picked_box_probs[:, 0] *= width
     picked_box_probs[:, 1] *= height
     picked_box_probs[:, 2] *= width
     picked_box_probs[:, 3] *= height
      return (to_numpy(picked_box_probs[:, :4]).astype(np.int32),
              np.array(picked_labels),
              to_numpy(picked_box_probs[:, 4]))
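The hard_nms helper used above is not part of this snippet. The following is a minimal numpy sketch of greedy hard NMS under the same (N, 5) row layout of x1, y1, x2, y2, score; an illustration, not the library's implementation:

import numpy as np

def hard_nms(box_probs, iou_threshold=0.3, top_k=-1):
    # greedily keep the highest-scoring box, then drop every remaining box
    # that overlaps it by more than iou_threshold; repeat until done
    order = box_probs[:, 4].argsort()[::-1]
    keep = []
    while order.size > 0 and (top_k <= 0 or len(keep) < top_k):
        i = order[0]
        keep.append(i)
        cur, rest = box_probs[i, :4], box_probs[order[1:], :4]
        lt = np.maximum(rest[:, :2], cur[:2])
        rb = np.minimum(rest[:, 2:], cur[2:])
        wh = np.clip(rb - lt, 0, None)
        inter = wh[:, 0] * wh[:, 1]
        area_rest = (rest[:, 2] - rest[:, 0]) * (rest[:, 3] - rest[:, 1])
        area_cur = (cur[2] - cur[0]) * (cur[3] - cur[1])
        iou = inter / (area_rest + area_cur - inter)
        order = order[1:][iou <= iou_threshold]
    return box_probs[keep]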
Example #3
    def rerec(self, bboxA, img_shape):
        """Expand each box in bboxA to a square with the same center (img_shape is unused here)."""
        bboxA = to_numpy(bboxA)
        h = bboxA[:, 3] - bboxA[:, 1]
        w = bboxA[:, 2] - bboxA[:, 0]
        max_len = np.maximum(w, h)

        bboxA[:, 0] = bboxA[:, 0] - 0.5 * (max_len - w)
        bboxA[:, 1] = bboxA[:, 1] - 0.5 * (max_len - h)
        bboxA[:, 2] = bboxA[:, 0] + max_len
        bboxA[:, 3] = bboxA[:, 1] + max_len
        return to_tensor(bboxA)
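A quick standalone check of the squaring logic above (numpy only, dropping the to_tensor/to_numpy round-trip):

import numpy as np

def rerec(bboxA):
    # same arithmetic as above: grow the shorter side until w == h,
    # keeping the box center fixed
    bboxA = np.asarray(bboxA, dtype=np.float32)
    h = bboxA[:, 3] - bboxA[:, 1]
    w = bboxA[:, 2] - bboxA[:, 0]
    max_len = np.maximum(w, h)
    bboxA[:, 0] -= 0.5 * (max_len - w)
    bboxA[:, 1] -= 0.5 * (max_len - h)
    bboxA[:, 2] = bboxA[:, 0] + max_len
    bboxA[:, 3] = bboxA[:, 1] + max_len
    return bboxA

print(rerec([[10., 10., 30., 50.]]))  # -> [[ 0. 10. 40. 50.]]: the 20x40 box becomes 40x40, center (20, 30) preserved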
Example #4
 def bbox_transform(self, bbox):
      if bbox is None or len(bbox) == 0:
          return (np.zeros((self.priors.shape[0], 4), dtype=np.float32),
                  np.zeros(self.priors.shape[0], dtype=np.int64))
     elif isinstance(bbox, np.ndarray):
         height, width = self.image_size
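          # normalize the four corner coordinates to [0, 1]; the label column, if present, is untouched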
         bbox[:, 0] = bbox[:, 0] / width
         bbox[:, 2] = bbox[:, 2] / width
         bbox[:, 1] = bbox[:, 1] / height
         bbox[:, 3] = bbox[:, 3] / height
         if bbox.shape[-1] == 5:
             gt_box = bbox[:, :4]
             gt_label = bbox[:, 4]
             boxes, labels = self.assign_priors(gt_box, gt_label,
                                                to_numpy(self.priors), 0.3)
             boxes = xyxy2xywh(boxes)
             locations = self.convert_boxes_to_locations(
                 boxes, to_numpy(self.priors))
              # return the prior-encoded locations, not the raw center-form boxes
              return locations.astype(np.float32), labels.astype(np.int64)
         else:
             return bbox
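xyxy2xywh is not shown in this example; it presumably converts corner-form boxes to center form, along these lines:

import numpy as np

def xyxy2xywh(boxes):
    # corner form (x1, y1, x2, y2) -> center form (cx, cy, w, h)
    boxes = np.asarray(boxes, dtype=np.float32)
    wh = boxes[:, 2:4] - boxes[:, 0:2]
    cxcy = boxes[:, 0:2] + 0.5 * wh
    return np.concatenate([cxcy, wh], axis=1)

print(xyxy2xywh([[0.1, 0.2, 0.5, 0.6]]))  # -> [[0.3 0.4 0.4 0.4]]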
Example #5
    def infer_single_image(self, img, scale=1):
        if self._model.built:
            try:
                self._model.to(self.device)
                self._model.eval()
                if self._model.input_spec.object_type is None:
                    self._model.input_spec.object_type = ObjectType.rgb
                img = image2array(img)
                if img.shape[-1] == 4:
                    img = img[:, :, :3]
                img_orig = img.copy()
                rescale_scale = 1
                for func in self.preprocess_flow:
                    if (inspect.isfunction(func) or isinstance(func, Transform)) \
                            and func is not image_backend_adaption:
                        img = func(img, spec=self._model.input_spec)
                        is_resize = ((inspect.isfunction(func) and
                                      func.__qualname__ == 'resize.<locals>.img_op') or
                                     (isinstance(func, Transform) and func.name == 'resize'))
                        if is_resize:
                            rescale_scale = func.scale
                    else:
                        print(func)

                img = image_backend_adaption(img)
                weight = self._model.weights[0].data
                inp = to_tensor(np.expand_dims(img, 0)).to(
                    torch.device("cuda" if weight.is_cuda else "cpu")).to(weight.dtype)

                confidence, boxes = self._model(inp)
                boxes = boxes[0]
                confidence = confidence[0]
                probs, label = confidence.data.max(-1)

                mask = probs > self.detection_threshold
                probs = probs[mask]
                label = label[mask]
                boxes = boxes[mask, :]
                mask = label > 0
                probs = probs[mask]
                label = label[mask]
                boxes = boxes[mask, :]

                if boxes is not None and len(boxes) > 0:
                    box_probs = concate([boxes.float(),
                                         label.reshape(-1, 1).float(),
                                         probs.reshape(-1, 1).float()], axis=1)
                    if len(boxes) > 1:
                        box_probs, keep = self.hard_nms(
                            box_probs,
                            nms_threshold=self.nms_threshold,
                            top_k=-1,
                        )

                    boxes = box_probs[:, :4]
                    boxes[:, 0::2] *= self._model.input_spec.shape.dims[-1]
                    boxes[:, 1::2] *= self._model.input_spec.shape.dims[-2]
                    boxes[:, :4] /= rescale_scale

                    # boxes = boxes * (1 / scale[0])
                    return (img_orig, to_numpy(boxes),
                            to_numpy(box_probs[:, 4]).astype(np.int32),
                            to_numpy(box_probs[:, 5]))
                else:
                    return img_orig, None, None, None
            except Exception:
                PrintException()
        else:
            raise ValueError('the model is not built yet.')
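The two scaling steps near the end map normalized network outputs back to source-image pixels: multiply by the network input size, then divide by the resize scale recorded during preprocessing. A hypothetical numeric walk-through (assuming a 640x480 photo resized to a 320x320 input, so rescale_scale = 320/640 = 0.5):

import numpy as np

input_w = input_h = 320            # self._model.input_spec.shape.dims[-1] / [-2]
rescale_scale = 0.5                # scale recorded by the resize step

boxes = np.array([[0.25, 0.25, 0.75, 0.5]])  # normalized x1, y1, x2, y2
boxes[:, 0::2] *= input_w          # x in resized-input pixels: 80 .. 240
boxes[:, 1::2] *= input_h          # y in resized-input pixels: 80 .. 160
boxes /= rescale_scale             # source-image pixels: [160. 160. 480. 320.]
print(boxes)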
Example #6
    def forward(self, x, scale):
        inp = expand_dims(x, 0)
        boxes = self.pnet(inp)
        boxes_list = []
        if boxes is not None and len(boxes) > 0:
            box = boxes[:, :4] / scale
            score = boxes[:, 4:]
            boxes = concate([box.round_(), score], axis=1)
            if len(boxes) > 0:
                boxes_list.append(boxes)

        #######################################
        #########pnet finish
        #######################################
        if len(boxes_list) > 0:
            boxes = to_tensor(concate(boxes_list, axis=0))

            # print('total {0} boxes in pnet in all scale '.format(len(boxes)))
            boxes = clip_boxes_to_image(boxes, (x.shape[0], x.shape[1]))
            boxes = nms(boxes, threshold=self.detection_threshold[0])
            print('pnet:{0} boxes '.format(len(boxes)))
            # print('total {0} boxes after nms '.format(len(boxes)))
            # score = to_numpy(boxes[:, 4]).reshape(-1)
            if boxes is not None:
                # prepare rnet input

                boxes = self.rerec(boxes, x.shape)
                new_arr = np.zeros((boxes.shape[0], 3, 24, 24))

                for k in range(boxes.shape[0]):
                    box = boxes[k]
                    crop_img = x.copy()[int(box[1]):int(box[3]),
                                        int(box[0]):int(box[2]), :]
                    if crop_img.shape[0] > 0 and crop_img.shape[1] > 0:
                        new_arr[k] = Resize(
                            (24, 24))(crop_img / 255.0).transpose([2, 0, 1])
                    # else:
                    #     print(box)
                new_arr = to_tensor(new_arr)
                r_output1_list = []
                r_output2_list = []
                r_output3_list = []
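                # run rnet on chunks of 16 crops at a time to bound memory use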
                if len(new_arr) > 16:
                    for i in range(len(new_arr) // 16 + 1):
                        if i * 16 < len(new_arr):
                            r_out1, r_out2, r_out3 = self.rnet(
                                new_arr[i * 16:(i + 1) * 16, :, :, :])
                            r_output1_list.append(r_out1)
                            r_output2_list.append(r_out2)
                            r_output3_list.append(r_out3)
                    r_out1 = concate(r_output1_list, axis=0)
                    r_out2 = concate(r_output2_list, axis=0)
                    r_out3 = concate(r_output3_list, axis=0)
                else:
                    r_out1, r_out2, r_out3 = self.rnet(new_arr)

                probs = to_numpy(r_out1)
                keep = np.where(probs[:, 0] > self.detection_threshold[1])[0]
                r_out1 = r_out1[keep]
                boxes = boxes[keep]
                boxes[:, 4] = r_out1[:, 0]
                r_out2 = r_out2[keep]
                boxes = calibrate_box(boxes, r_out2)

                #######################################
                #########rnet finish
                #######################################

                boxes = nms(boxes,
                            threshold=self.detection_threshold[1],
                            image_size=(x.shape[0], x.shape[1]),
                            min_size=self.min_size)
                print('rnet:{0} boxes '.format(len(boxes)))
                # print('total {0} boxes after nms '.format(len(boxes)))
                boxes = clip_boxes_to_image(boxes, (x.shape[0], x.shape[1]))
                boxes = self.rerec(boxes, x.shape)
                new_arr = np.zeros((boxes.shape[0], 3, 48, 48))

                for k in range(boxes.shape[0]):
                    box = boxes[k]
                    crop_img = x.copy()[int(box[1]):int(box[3]),
                                        int(box[0]):int(box[2]), :]
                    if crop_img.shape[0] > 0 and crop_img.shape[1] > 0:
                        new_arr[k] = Resize(
                            (48, 48))(crop_img / 255.0).transpose([2, 0, 1])
                    # else:
                    #     print(box)

                new_arr = to_tensor(new_arr)
                o_out1, o_out2, o_out3 = self.onet(new_arr)
                probs = to_numpy(o_out1)
                keep = np.where(probs[:, 0] > self.detection_threshold[2])[0]
                o_out1 = o_out1[keep]
                boxes = boxes[keep]

                boxes[:, 4] = o_out1[:, 0]
                o_out2 = o_out2[keep]
                o_out3 = o_out3[keep]
                boxes = calibrate_box(boxes, o_out2)

                landmarks_x = boxes[:, 0:1] + o_out3[:, 0::2] * (
                    boxes[:, 2:3] - boxes[:, 0:1] + 1)
                landmarks_y = boxes[:, 1:2] + o_out3[:, 1::2] * (
                    boxes[:, 3:4] - boxes[:, 1:2] + 1)

                boxes = concate([boxes, landmarks_x, landmarks_y], axis=-1)

                #######################################
                #########onet finish
                #######################################
                # the original snippet was truncated here; this ending mirrors the
                # onet nms + return performed in infer_single_image below
                boxes = nms(boxes, threshold=self.detection_threshold[2])
                print('onet:{0} boxes '.format(len(boxes)))
                return boxes
        return None

    def infer_single_image(self, img, **kwargs):
        if self.model.built:
            self.model.to(self.device)
            self.model.eval()
            img = image2array(img)
            if img.shape[-1] == 4:
                img = img[:, :, :3]

            imgs, scales = self.get_image_pyrimid(img)
            boxes_list = []
            for i in range(len(scales)):
                scaled_img = imgs[i]
                weight = self.pnet.weights[0].data
                inp = to_tensor(expand_dims(scaled_img, 0)).to(
                    torch.device("cuda" if weight.is_cuda else "cpu")).to(weight.dtype)

                boxes = self.pnet(inp)
                if boxes is not None and len(boxes) > 0:
                    scale = scales[i]
                    box = boxes[:, :4] / scale
                    score = boxes[:, 4:]
                    boxes = torch.cat([box.round_(), score], dim=1)
                    if len(boxes) > 0:
                        boxes_list.append(boxes)

            #######################################
            #########pnet finish
            #######################################
            if len(boxes_list) > 0:
                boxes = to_tensor(torch.cat(boxes_list, dim=0))

                #print('total {0} boxes in pnet in all scale '.format(len(boxes)))
                boxes = clip_boxes_to_image(boxes,
                                            (img.shape[0], img.shape[1]))
                boxes = self.boxes_nms(
                    boxes, overlap_threshold=self.detection_threshold[0])
                if self.verbose:
                    print('pnet:{0} boxes '.format(len(boxes)))
                #print('total {0} boxes after nms '.format(len(boxes)))
                #score = to_numpy(boxes[:, 4]).reshape(-1)
                if boxes is not None:
                    #prepare rnet input

                    boxes = self.rerec(boxes, img.shape)
                    new_arr = np.zeros((boxes.shape[0], 3, 24, 24))

                    for k in range(boxes.shape[0]):
                        box = boxes[k]
                        crop_img = img.copy()[int(box[1]):int(box[3]),
                                              int(box[0]):int(box[2]), :]
                        if crop_img.shape[0] > 0 and crop_img.shape[1] > 0:
                            new_arr[k] = resize((24, 24))(crop_img).transpose(
                                [2, 0, 1]) / 255.0
                        # else:
                        #     print(box)
                    new_arr = to_tensor(new_arr)
                    r_output1_list = []
                    r_output2_list = []
                    r_output3_list = []
                    if len(new_arr) > 16:
                        for i in range(len(new_arr) // 16 + 1):
                            if i * 16 < len(new_arr):
                                r_out1, r_out2, r_out3 = self.rnet(
                                    new_arr[i * 16:(i + 1) * 16, :, :, :])
                                r_output1_list.append(r_out1)
                                r_output2_list.append(r_out2)
                                r_output3_list.append(r_out3)
                        r_out1 = torch.cat(r_output1_list, dim=0)
                        r_out2 = torch.cat(r_output2_list, dim=0)
                        r_out3 = torch.cat(r_output3_list, dim=0)
                    else:
                        r_out1, r_out2, r_out3 = self.rnet(new_arr)

                    probs = r_out1
                    keep = probs[:, 0] > self.detection_threshold[1]
                    r_out1 = r_out1[keep]

                    boxes = boxes[keep]
                    if len(boxes) == 0:
                        return boxes
                    boxes[:, 4] = r_out1[:, 0]
                    r_out2 = r_out2[keep]
                    boxes = calibrate_box(boxes, r_out2)

                    #######################################
                    #########rnet finish
                    #######################################
                    boxes = self.boxes_nms(
                        boxes, overlap_threshold=self.detection_threshold[1])
                    if self.verbose:
                        print('rnet:{0} boxes '.format(len(boxes)))
                    #print('total {0} boxes after nms '.format(len(boxes)))
                    boxes = clip_boxes_to_image(boxes,
                                                (img.shape[0], img.shape[1]))
                    boxes = self.rerec(to_tensor(boxes), img.shape)
                    new_arr = np.zeros((boxes.shape[0], 3, 48, 48))

                    for k in range(boxes.shape[0]):
                        box = boxes[k]
                        crop_img = img.copy()[int(box[1]):int(box[3]),
                                              int(box[0]):int(box[2]), :]
                        if crop_img.shape[0] > 0 and crop_img.shape[1] > 0:
                            new_arr[k] = resize((48, 48))(crop_img).transpose(
                                [2, 0, 1]) / 255.0
                        # else:
                        #     print(box)

                    new_arr = to_tensor(new_arr)
                    o_out1, o_out2, o_out3 = self.onet(new_arr)
                    probs = o_out1
                    keep = probs[:, 0] > self.detection_threshold[2]
                    o_out1 = o_out1[keep]
                    boxes = boxes[keep]
                    if len(boxes) == 0:
                        return boxes
                    boxes[:, 4] = o_out1[:, 0]
                    o_out2 = o_out2[keep]
                    o_out3 = o_out3[keep]
                    boxes = calibrate_box(boxes, o_out2)

                    landmarks_x = boxes[:, 0:1] + o_out3[:, 0::2] * (
                        boxes[:, 2:3] - boxes[:, 0:1] + 1)
                    landmarks_y = boxes[:, 1:2] + o_out3[:, 1::2] * (
                        boxes[:, 3:4] - boxes[:, 1:2] + 1)

                    boxes = torch.cat([boxes, landmarks_x, landmarks_y],
                                      dim=-1)

                    #######################################
                    #########onet finish
                    #######################################
                    boxes = self.boxes_nms(
                        boxes, overlap_threshold=self.detection_threshold[2])
                    if self.verbose:
                        print('onet:{0} boxes '.format(len(boxes)))
                    return to_numpy(boxes)
            else:
                return None

        else:
            raise ValueError('the model is not built yet.')
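calibrate_box, used after both the rnet and onet stages above, is not shown. Below is a numpy sketch of the standard MTCNN refinement it is assumed to implement, where each stage's four regression outputs shift the box corners in units of box width and height:

import numpy as np

def calibrate_box(boxes, offsets):
    # boxes: (N, 5) rows of x1, y1, x2, y2, score; offsets: (N, 4) regressions
    boxes = np.asarray(boxes, dtype=np.float32).copy()
    w = (boxes[:, 2] - boxes[:, 0] + 1).reshape(-1, 1)
    h = (boxes[:, 3] - boxes[:, 1] + 1).reshape(-1, 1)
    # shift every corner proportionally to the box's side lengths
    boxes[:, :4] += np.hstack([w, h, w, h]) * offsets
    return boxes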