def __test_img(self, image_path, save_path):
        """Run detection on one image, draw detections, and save canvas + JSON.

        Args:
            image_path: path of the input image (opened as BGR via OpenCV).
            save_path: destination path for the drawn canvas; also passed to
                ``__save_json`` to derive the JSON output location.

        Returns:
            tuple: (image_raw, lbls, scores, boxes, has_obj).
        """
        Log.info('Image Path: {}'.format(image_path))
        image_raw = ImageHelper.cv2_open_bgr(image_path)
        # Build the network input: BGR -> RGB, resize to the configured
        # input size, convert to tensor, then normalize.
        inputs = ImageHelper.bgr2rgb(image_raw)
        inputs = ImageHelper.resize(inputs, tuple(self.configer.get('data', 'input_size')), Image.CUBIC)
        inputs = ToTensor()(inputs)
        inputs = Normalize(mean=self.configer.get('trans_params', 'mean'),
                           std=self.configer.get('trans_params', 'std'))(inputs)

        # Inference only -- no gradient bookkeeping needed.
        with torch.no_grad():
            inputs = inputs.unsqueeze(0).to(self.device)
            bbox, cls = self.det_net(inputs)

        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data
        boxes, lbls, scores, has_obj = self.__decode(bbox, cls)
        if has_obj:
            # Detections found: move everything to numpy and clamp the
            # normalized box coordinates into [0, 1].
            boxes = boxes.cpu().numpy()
            boxes = np.clip(boxes, 0, 1)
            lbls = lbls.cpu().numpy()
            scores = scores.cpu().numpy()

            img_canvas = self.__draw_box(image_raw, boxes, lbls, scores)

        else:
            # print('None obj detected!')
            img_canvas = image_raw

        Log.info('Save Path: {}'.format(save_path))
        cv2.imwrite(save_path, img_canvas)
        # Boxes is within 0-1.
        # NOTE(review): when has_obj is False, boxes/lbls/scores are forwarded
        # exactly as __decode returned them (not converted to numpy, not
        # clipped) -- confirm __save_json and callers tolerate that shape.
        self.__save_json(save_path, boxes, lbls, scores, image_raw)

        return image_raw, lbls, scores, boxes, has_obj
# ---- Example #2 ----
    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Detect objects in one image and persist raw/visualization/JSON outputs.

        Args:
            image_path: path of the input image.
            json_path: where the detection result dict is written as JSON.
            raw_path: where the unmodified BGR image is written.
            vis_path: where the image with drawn boxes is written.

        Returns:
            dict: the detection info tree produced by ``__get_info_tree``.
        """
        Log.info('Image Path: {}'.format(image_path))
        img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))

        # Preprocess a resized copy into a normalized input tensor.
        net_in = ImageHelper.resize(
            img_rgb, tuple(self.configer.get('data', 'input_size')), Image.CUBIC)
        net_in = Normalize(mean=self.configer.get('trans_params', 'mean'),
                           std=self.configer.get('trans_params', 'std'))(ToTensor()(net_in))

        with torch.no_grad():
            net_in = net_in.unsqueeze(0).to(self.device)
            bbox, cls = self.det_net(net_in)

        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data
        boxes, lbls, scores = self.__decode(bbox, cls)
        json_dict = self.__get_info_tree(boxes, lbls, scores, img_rgb)

        # Draw on a BGR copy so the saved raw image stays untouched.
        img_bgr = ImageHelper.rgb2bgr(img_rgb)
        image_canvas = self.det_parser.draw_bboxes(
            img_bgr.copy(), json_dict,
            conf_threshold=self.configer.get('vis', 'conf_threshold'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict
# ---- Example #3 ----
    def make_input(self, image=None, input_size=None,
                   min_side_length=None, max_side_length=None, scale=None):
        """Convert ``image`` into a normalized, batched input tensor.

        The target size is derived from exactly one sizing mode:
        ``input_size`` (with -1 meaning "keep/derive that side"),
        ``min_side_length``, ``max_side_length``, or both side limits.
        Any other combination falls back to the image's own size.

        Args:
            image: input image understood by ``ImageHelper``.
            input_size: (width, height); -1 entries are derived from the image.
            min_side_length: target length for the shorter side.
            max_side_length: upper bound for the longer side.
            scale: extra multiplicative scale; defaults to 1.0 when omitted.

        Returns:
            torch.Tensor: normalized tensor of shape (1, C, H, W) on the
            configured device.
        """
        if input_size is not None and min_side_length is None and max_side_length is None:
            if input_size[0] == -1 and input_size[1] == -1:
                in_width, in_height = ImageHelper.get_size(image)
            elif input_size[0] != -1 and input_size[1] != -1:
                in_width, in_height = input_size
            else:
                # Exactly one side is fixed: scale the free side so the
                # aspect ratio is preserved.
                width, height = ImageHelper.get_size(image)
                if input_size[0] == -1:
                    scale_ratio = input_size[1] / height
                else:
                    scale_ratio = input_size[0] / width
                in_width = int(round(width * scale_ratio))
                in_height = int(round(height * scale_ratio))

        elif input_size is None and (min_side_length is not None or max_side_length is not None):
            # Shorter side is raised to min_side_length and/or the longer
            # side is capped at max_side_length; when both are given the
            # smaller ratio wins (original behavior).
            width, height = ImageHelper.get_size(image)
            ratios = []
            if min_side_length is not None:
                ratios.append(min_side_length / min(width, height))
            if max_side_length is not None:
                ratios.append(max_side_length / max(width, height))
            scale_ratio = min(ratios)
            in_width = int(round(width * scale_ratio))
            in_height = int(round(height * scale_ratio))

        else:
            # NOTE(review): conflicting settings (e.g. input_size together
            # with a side limit) silently fall back to the image size here;
            # the sibling make_input variant errors out instead -- confirm
            # which behavior is intended.
            in_width, in_height = ImageHelper.get_size(image)

        # BUG FIX: scale defaults to None, which previously raised a
        # TypeError at the multiplication below; treat it as 1.0.
        scale = 1.0 if scale is None else scale
        image = ImageHelper.resize(image, (int(in_width * scale), int(in_height * scale)), interpolation='cubic')
        img_tensor = ToTensor()(image)
        img_tensor = Normalize(div_value=self.configer.get('normalize', 'div_value'),
                               mean=self.configer.get('normalize', 'mean'),
                               std=self.configer.get('normalize', 'std'))(img_tensor)
        img_tensor = img_tensor.unsqueeze(0).to(torch.device('cpu' if self.configer.get('gpu') is None else 'cuda'))

        return img_tensor
# ---- Example #4 ----
    def __get_paf_and_heatmap(self, img_raw):
        """Run multi-scale pose inference and average PAF/heatmap outputs.

        Args:
            img_raw: input image as an HxWxC numpy array.

        Returns:
            tuple: (paf_avg, heatmap_avg), both resized back to the
            resolution of ``img_raw`` and averaged over all search scales.
        """
        # One test scale per entry in 'scale_search', normalized so the
        # configured input width maps onto the raw image width.
        multiplier = [scale * self.configer.get('data', 'input_size')[0] / img_raw.shape[1]
                      for scale in self.configer.get('data', 'scale_search')]

        # Accumulators at the original image resolution.
        heatmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1], self.configer.get('network', 'heatmap_out')))
        paf_avg = np.zeros((img_raw.shape[0], img_raw.shape[1], self.configer.get('network', 'paf_out')))

        for i, scale in enumerate(multiplier):
            img_test = cv2.resize(img_raw, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
            # Pad so both spatial dims are multiples of the network stride.
            img_test_pad, pad = PadImage(self.configer.get('network', 'stride'))(img_test)
            img_test_pad = ToTensor()(img_test_pad)
            img_test_pad = Normalize(mean=self.configer.get('trans_params', 'mean'),
                                     std=self.configer.get('trans_params', 'std'))(img_test_pad)
            with torch.no_grad():
                img_test_pad = img_test_pad.unsqueeze(0).to(self.device)
                paf_out, heatmap_out = self.pose_net(img_test_pad)

            # extract outputs, resize, and remove padding
            # NOTE(review): the crop below assumes pad[2] is the right padding
            # and pad[3] the bottom padding -- confirm against PadImage.
            heatmap = heatmap_out.data.squeeze().cpu().numpy().transpose(1, 2, 0)
            heatmap = cv2.resize(heatmap,  (0, 0), fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'), interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:img_test_pad.size(2) - pad[3], :img_test_pad.size(3) - pad[2], :]
            heatmap = cv2.resize(heatmap, (img_raw.shape[1], img_raw.shape[0]), interpolation=cv2.INTER_CUBIC)

            paf = paf_out.data.squeeze().cpu().numpy().transpose(1, 2, 0)
            paf = cv2.resize(paf, (0, 0), fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'), interpolation=cv2.INTER_CUBIC)
            paf = paf[:img_test_pad.size(2) - pad[3], :img_test_pad.size(3) - pad[2], :]
            paf = cv2.resize(paf, (img_raw.shape[1], img_raw.shape[0]), interpolation=cv2.INTER_CUBIC)

            # Running average across all scales.
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg = paf_avg + paf / len(multiplier)

        return paf_avg, heatmap_avg
# ---- Example #5 ----
 def forward(self, image_path):
     """Segment the image at ``image_path``.

     Args:
         image_path: path of the input image; opened and converted to RGB.

     Returns:
         numpy.ndarray: 2-D per-pixel label map (argmax over classes).
     """
     image = Image.open(image_path).convert('RGB')
     image = RandomResize(size=self.configer.get('data', 'input_size'),
                          is_base=False)(image)
     image = ToTensor()(image)
     image = Normalize(mean=[128.0, 128.0, 128.0],
                       std=[256.0, 256.0, 256.0])(image)
     # FIX: Variable(..., volatile=True) has been deprecated since
     # PyTorch 0.4 (volatile is ignored there); torch.no_grad() is the
     # supported equivalent and matches the other inference paths here.
     with torch.no_grad():
         inputs = image.unsqueeze(0).cuda()
         results = self.seg_net.forward(inputs)
     return results.data.cpu().numpy().argmax(axis=1)[0].squeeze()
# ---- Example #6 ----
    def make_input(self, image=None, input_size=None,
                   min_side_length=None, max_side_length=None, scale=None):
        """Build normalized network input tensor(s) from ``image``.

        At most one of ``input_size``, ``min_side_length`` or
        ``max_side_length`` may be given; other combinations are rejected.

        Args:
            image: input image understood by ``ImageHelper``.
            input_size: explicit (width, height) target.
            min_side_length: target length for the shorter side.
            max_side_length: target length for the longer side.
            scale: a scalar scale (default 1.0 when omitted) or a
                list/tuple of scales for multi-scale input.

        Returns:
            A single (1, C, H, W) tensor, or a list of such tensors when
            ``scale`` is a list/tuple.
        """
        in_width, in_height = None, None
        if input_size is None and min_side_length is None and max_side_length is None:
            in_width, in_height = ImageHelper.get_size(image)

        elif input_size is not None and min_side_length is None and max_side_length is None:
            in_width, in_height = input_size

        elif input_size is None and min_side_length is not None and max_side_length is None:
            width, height = ImageHelper.get_size(image)
            scale_ratio = min_side_length / min(width, height)
            in_width = int(round(width * scale_ratio))
            in_height = int(round(height * scale_ratio))

        elif input_size is None and min_side_length is None and max_side_length is not None:
            width, height = ImageHelper.get_size(image)
            scale_ratio = max_side_length / max(width, height)
            in_width = int(round(width * scale_ratio))
            in_height = int(round(height * scale_ratio))

        else:
            Log.error('Incorrect target size setting.')
            exit(1)

        def _prep(img):
            # Shared tensor -> normalize -> batch -> device chain.
            tensor = ToTensor()(img)
            tensor = Normalize(div_value=self.configer.get('normalize', 'div_value'),
                               mean=self.configer.get('normalize', 'mean'),
                               std=self.configer.get('normalize', 'std'))(tensor)
            return tensor.unsqueeze(0).to(
                torch.device('cpu' if self.configer.get('gpu') is None else 'cuda'))

        if not isinstance(scale, (list, tuple)):
            # BUG FIX: scale defaults to None, which previously raised a
            # TypeError in the size arithmetic below; treat it as 1.0.
            scale = 1.0 if scale is None else scale
            resized = ImageHelper.resize(
                image, (int(in_width * scale), int(in_height * scale)), interpolation='linear')
            return _prep(resized)

        img_tensor_list = []
        for s in scale:
            # BUG FIX: resize from the ORIGINAL image every iteration. The
            # old code reassigned `image`, so each later scale resampled the
            # previously-resized image, compounding interpolation error and
            # making the output depend on the order of scales.
            resized = ImageHelper.resize(
                image, (int(in_width * s), int(in_height * s)), interpolation='linear')
            img_tensor_list.append(_prep(resized))

        return img_tensor_list
# ---- Example #7 ----
    def inference(self, image_rgb):
        """Detect objects on an RGB image and draw them on its BGR copy.

        Args:
            image_rgb: input image as an HxWx3 RGB numpy array.

        Returns:
            tuple: (img_canvas, lbls, scores, boxes, has_obj). When
            detections exist, ``boxes`` holds absolute pixel coordinates.
        """
        image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
        inputs = cv2.resize(image_rgb,
                            tuple(self.configer.get('data', 'input_size')))
        inputs = ToTensor()(inputs)
        inputs = Normalize(mean=self.configer.get('trans_params', 'mean'),
                           std=self.configer.get('trans_params',
                                                 'std'))(inputs)

        # FIX: Variable(..., volatile=True) is deprecated (volatile is a
        # no-op on PyTorch >= 0.4); torch.no_grad() is the supported
        # equivalent and matches the other inference paths in this file.
        with torch.no_grad():
            inputs = inputs.unsqueeze(0).cuda()
            bbox, cls = self.det_net(inputs)

        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data
        boxes, lbls, scores, has_obj = self.__decode(bbox, cls)
        if has_obj:
            boxes = boxes.cpu().numpy()
            boxes = np.clip(boxes, 0, 1)
            lbls = lbls.cpu().numpy()
            scores = scores.cpu().numpy()
            img_shape = image_bgr.shape
            # Map normalized [0, 1] coords to absolute pixel coordinates.
            for i in range(len(boxes)):
                boxes[i][0] = int(boxes[i][0] * img_shape[1])
                boxes[i][2] = int(boxes[i][2] * img_shape[1])
                boxes[i][1] = int(boxes[i][1] * img_shape[0])
                boxes[i][3] = int(boxes[i][3] * img_shape[0])

            img_canvas = self.__draw_box(image_bgr, boxes, lbls, scores)

            # if is_save_txt:
            #    self.__save_txt(save_path, boxes, lbls, scores, img_size)
        else:
            # NOTE(review): with no detections, boxes/lbls/scores are
            # returned exactly as __decode produced them (not numpy, not
            # pixel-scaled) -- confirm callers handle that case.
            img_canvas = image_bgr

        return img_canvas, lbls, scores, boxes, has_obj
# ---- Example #8 ----
    def __init__(self, configer):
        """Build test-time augmentation and tensor transforms from the config.

        Args:
            configer: project configuration object; ('data', 'image_tool')
                selects the PIL or cv2 augmentation backend.
        """
        self.configer = configer

        image_tool = self.configer.get('data', 'image_tool')
        if image_tool == 'pil':
            self.aug_test_transform = pil_aug_trans.PILAugCompose(
                self.configer, split='test')
        elif image_tool == 'cv2':
            self.aug_test_transform = cv2_aug_trans.CV2AugCompose(
                self.configer, split='test')
        else:
            # Unsupported backend: report and abort.
            Log.error('Not support {} image tool.'.format(image_tool))
            exit(1)

        self.img_transform = Compose([
            ToTensor(),
            Normalize(**self.configer.get('data', 'normalize')),
        ])
# ---- Example #9 ----
    def __test_img(self, image_path, save_path):
        """Segment one image and write the predicted label map to disk.

        Args:
            image_path: path of the input image.
            save_path: path the palette ('P' mode) label image is saved to,
                restored to the original image resolution.
        """
        image = ImageHelper.pil_open_rgb(image_path)
        ori_width, ori_height = image.size
        # Preprocess: scale to the network input size, then normalize.
        image = Scale(size=self.configer.get('data', 'input_size'))(image)
        tensor = Normalize(mean=self.configer.get('trans_params', 'mean'),
                           std=self.configer.get('trans_params', 'std'))(ToTensor()(image))

        with torch.no_grad():
            inputs = tensor.unsqueeze(0).to(self.device)
            results = self.seg_net.forward(inputs)
            # Per-pixel argmax over the class dimension -> 2-D label map.
            label_map = results.data.cpu().numpy().argmax(axis=1)[0].squeeze()
            label_img = np.array(label_map, dtype=np.uint8)
            if not self.configer.is_empty('details', 'label_list'):
                label_img = self.__relabel(label_img)
            # Save as a palette image at the original resolution.
            out_img = Image.fromarray(label_img, 'P')
            out_img = out_img.resize((ori_width, ori_height), Image.NEAREST)
            out_img.save(save_path)
# ---- Example #10 ----
    def __get_paf_and_heatmap(self, img_raw):
        """Multi-scale pose inference producing averaged PAF, keypoint
        heatmap, and part-vector maps.

        Args:
            img_raw: input image as an HxWxC numpy array.

        Returns:
            tuple: (paf_avg, heatmap_avg, partmap_avg), each at the
            resolution of ``img_raw`` and averaged over all search scales.
        """
        # One test scale per entry in 'scale_search', normalized so the
        # configured input width maps onto the raw image width.
        multiplier = [
            scale * self.configer.get('data', 'input_size')[0] /
            img_raw.shape[1]
            for scale in self.configer.get('data', 'scale_search')
        ]

        # Accumulators at the original image resolution.
        heatmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('data', 'num_keypoints')))
        paf_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                            self.configer.get('network', 'paf_out')))
        partmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('network', 'heatmap_out')))

        for i, scale in enumerate(multiplier):
            img_test = cv2.resize(img_raw, (0, 0),
                                  fx=scale,
                                  fy=scale,
                                  interpolation=cv2.INTER_CUBIC)
            # Pad so both spatial dims are multiples of the network stride.
            img_test_pad, pad = PadImage(self.configer.get(
                'network', 'stride'))(img_test)
            pad_right = pad[2]
            pad_down = pad[3]
            img_test_pad = ToTensor()(img_test_pad)
            img_test_pad = Normalize(
                mean=self.configer.get('trans_params', 'mean'),
                std=self.configer.get('trans_params', 'std'))(img_test_pad)
            with torch.no_grad():
                img_test_pad = img_test_pad.unsqueeze(0).to(self.device)
                paf_out_list, partmap_out_list = self.pose_net(img_test_pad)

            # Use only the final stage's predictions.
            paf_out = paf_out_list[-1]
            partmap_out = partmap_out_list[-1]
            partmap = partmap_out.data.squeeze().cpu().numpy().transpose(
                1, 2, 0)
            paf = paf_out.data.squeeze().cpu().numpy().transpose(1, 2, 0)
            # self.pose_visualizer.vis_tensor(heatmap_out)
            # Fold the per-limb part vectors into per-keypoint confidences:
            # each limb contributes 4 partmap channels (2 per endpoint);
            # the 2-channel vector magnitude is averaged into the endpoint's
            # heatmap slice, with part_num counting contributions so the
            # running mean stays correct for keypoints shared by limbs.
            heatmap = np.zeros((partmap.shape[0], partmap.shape[1],
                                self.configer.get('data', 'num_keypoints')))
            part_num = np.zeros((self.configer.get('data', 'num_keypoints'), ))

            for index in range(len(self.configer.get('details', 'limb_seq'))):
                # limb_seq entries are 1-based keypoint indices.
                a = self.configer.get('details', 'limb_seq')[index][0] - 1
                b = self.configer.get('details', 'limb_seq')[index][1] - 1
                heatmap_a = partmap[:, :, index * 4:index * 4 + 2]**2
                heatmap_a = np.sqrt(np.sum(heatmap_a, axis=2).squeeze())
                heatmap[:, :, a] = (heatmap[:, :, a] * part_num[a] +
                                    heatmap_a) / (part_num[a] + 1)
                part_num[a] += 1
                heatmap_b = partmap[:, :, index * 4 + 2:index * 4 + 4]**2
                heatmap_b = np.sqrt(np.sum(heatmap_b, axis=2).squeeze())
                heatmap[:, :, b] = (heatmap[:, :, b] * part_num[b] +
                                    heatmap_b) / (part_num[b] + 1)
                part_num[b] += 1

            # Upsample by the network stride, strip the padding, then resize
            # back to the raw image resolution.
            heatmap = cv2.resize(heatmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:img_test_pad.size(2) -
                              pad_down, :img_test_pad.size(3) - pad_right, :]
            heatmap = cv2.resize(heatmap, (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)

            partmap = cv2.resize(partmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            partmap = partmap[:img_test_pad.size(2) -
                              pad_down, :img_test_pad.size(3) - pad_right, :]
            partmap = cv2.resize(partmap, (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)

            paf = cv2.resize(paf, (0, 0),
                             fx=self.configer.get('network', 'stride'),
                             fy=self.configer.get('network', 'stride'),
                             interpolation=cv2.INTER_CUBIC)
            paf = paf[:img_test_pad.size(2) - pad_down, :img_test_pad.size(3) -
                      pad_right, :]
            paf = cv2.resize(paf, (img_raw.shape[1], img_raw.shape[0]),
                             interpolation=cv2.INTER_CUBIC)

            # Running average across all scales.
            partmap_avg = partmap_avg + partmap / len(multiplier)
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg = paf_avg + paf / len(multiplier)

        return paf_avg, heatmap_avg, partmap_avg
# ---- Example #11 ----
    def __init__(self, configer):
        """Store the config and build the tensor transform pipeline.

        Args:
            configer: project configuration object; ('data', 'normalize')
                supplies the Normalize keyword arguments.
        """
        self.configer = configer

        # Convert images to tensors, then normalize with config-driven stats.
        self.img_transform = Compose([
            ToTensor(),
            Normalize(**self.configer.get('data', 'normalize')), ])