# Exemplo n.º 1
# 0
    def __getitem__(self, index):
        """Fetch one unpaired (imgA, imgB) sample for image-to-image training.

        imgA is taken at ``index``; imgB is drawn uniformly at random from
        the B-domain list so the two domains stay unaligned.  Labels for
        both images are returned alongside.
        """
        imgA = ImageHelper.read_image(
            self.imgA_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        # randint(0, n - 1) already yields a valid index; the original
        # trailing "% len(...)" was a no-op and has been removed.
        indexB = random.randint(0, len(self.imgB_list) - 1)
        imgB = ImageHelper.read_image(
            self.imgB_list[indexB],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        if self.aug_transform is not None:
            imgA = self.aug_transform(imgA)
            imgB = self.aug_transform(imgB)

        if self.img_transform is not None:
            imgA = self.img_transform(imgA)
            imgB = self.img_transform(imgB)

        return dict(imgA=DataContainer(imgA, stack=True),
                    imgB=DataContainer(imgB, stack=True),
                    labelA=DataContainer(self.labelA_list[index], stack=True),
                    labelB=DataContainer(self.labelB_list[indexB], stack=True))
    def __getitem__(self, index):
        """Build one pose-estimation training sample.

        Loads the image and its (optional) mask, reads keypoints/bboxes from
        the paired JSON file, applies augmentation, then produces the
        stride-downsampled maskmap plus heatmap and part-affinity-field
        targets.
        """
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        if os.path.exists(self.mask_list[index]):
            maskmap = ImageHelper.read_image(self.mask_list[index],
                                             tool=self.configer.get(
                                                 'data', 'image_tool'),
                                             mode='P')
        else:
            # No mask file on disk: treat every pixel as valid (all-ones).
            maskmap = np.ones((img.size[1], img.size[0]), dtype=np.uint8)
            if self.configer.get('data', 'image_tool') == 'pil':
                maskmap = ImageHelper.to_img(maskmap)

        kpts, bboxes = self.__read_json_file(self.json_list[index])

        # The augmentation transform's return arity differs depending on
        # whether bboxes are passed, hence the two call shapes below.
        if self.aug_transform is not None and len(bboxes) > 0:
            img, maskmap, kpts, bboxes = self.aug_transform(img,
                                                            maskmap=maskmap,
                                                            kpts=kpts,
                                                            bboxes=bboxes)

        elif self.aug_transform is not None:
            img, maskmap, kpts = self.aug_transform(img,
                                                    maskmap=maskmap,
                                                    kpts=kpts)

        # Downsample the mask to the network's output stride.
        width, height = ImageHelper.get_size(maskmap)
        maskmap = ImageHelper.resize(
            maskmap, (width // self.configer.get('network', 'stride'),
                      height // self.configer.get('network', 'stride')),
            interpolation='nearest')

        maskmap = torch.from_numpy(np.array(maskmap, dtype=np.float32))
        maskmap = maskmap.unsqueeze(0)  # add channel axis -> (1, H, W)
        # NOTE(review): [width, height] is the PRE-resize mask size —
        # presumably the generators downscale internally; confirm.
        heatmap = self.heatmap_generator(kpts, [width, height], maskmap)
        vecmap = self.paf_generator(kpts, [width, height], maskmap)
        if self.img_transform is not None:
            img = self.img_transform(img)

        meta = dict(kpts=kpts, )
        return dict(
            img=DataContainer(img, stack=True),
            heatmap=DataContainer(heatmap, stack=True),
            maskmap=DataContainer(maskmap, stack=True),
            vecmap=DataContainer(vecmap, stack=True),
            meta=DataContainer(meta, stack=False, cpu_only=True),
        )
# Exemplo n.º 3
# 0
    def __getitem__(self, index):
        """Load one test image plus the metadata needed at inference time."""
        img_path = self.item_list[index][0]
        filename = self.item_list[index][1]
        img = ImageHelper.read_image(
            img_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        # Record the size both before and after spatial augmentation so
        # predictions can be mapped back to the original resolution.
        ori_img_size = ImageHelper.get_size(img)
        if self.aug_transform is not None:
            img = self.aug_transform(img)
        border_size = ImageHelper.get_size(img)

        if self.img_transform is not None:
            img = self.img_transform(img)

        meta = dict(ori_img_size=ori_img_size,
                    border_size=border_size,
                    img_path=img_path,
                    filename=filename)
        return dict(
            img=DataContainer(img, stack=True, return_dc=True,
                              samples_per_gpu=True),
            meta=DataContainer(meta, stack=False, cpu_only=True,
                               return_dc=True, samples_per_gpu=True))
    def test(self, test_dir, out_dir):
        """Run object detection over every image in ``test_dir``.

        For each image, draws the detected boxes and saves the visualization
        under ``out_dir``/vis, and writes the detections as JSON under
        ``out_dir``/json.
        """
        for _, data_dict in enumerate(
                self.test_loader.get_testloader(test_dir=test_dir)):
            data_dict['testing'] = True
            data_dict = RunnerHelper.to_device(self, data_dict)
            out_dict = self.det_net(data_dict)
            meta_list = DCHelper.tolist(data_dict['meta'])
            # 'test_group' bundles ROIs, their regression offsets, class
            # scores and the per-image ROI counts.
            test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = out_dict[
                'test_group']
            batch_detections = self.decode(test_roi_locs, test_roi_scores,
                                           test_indices_and_rois,
                                           test_rois_num, self.configer,
                                           meta_list)
            for i in range(len(meta_list)):
                # Re-read the original image in BGR for OpenCV-style drawing.
                ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'],
                                                     tool='cv2',
                                                     mode='BGR')
                json_dict = self.__get_info_tree(batch_detections[i])
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(),
                    json_dict,
                    conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(
                                     out_dir, 'vis/{}.png'.format(
                                         meta_list[i]['filename'])))

                Log.info('Json Path: {}'.format(
                    os.path.join(
                        out_dir,
                        'json/{}.json'.format(meta_list[i]['filename']))))
                JsonHelper.save_file(json_dict,
                                     save_path=os.path.join(
                                         out_dir, 'json/{}.json'.format(
                                             meta_list[i]['filename'])))
    def __test_img(self, image_path, save_path):
        """Run multi-scale heatmap inference on one image and save the
        rendered keypoints to ``save_path``."""
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))

        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
        # Heatmaps are averaged over all configured test scales.
        heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
        for i, scale in enumerate(self.configer.get('test', 'scale_search')):
            image = self.blob_helper.make_input(ori_image,
                                                input_size=self.configer.get('test', 'input_size'),
                                                scale=scale)
            with torch.no_grad():
                heatmap_out_list = self.pose_net(image)
                heatmap_out = heatmap_out_list[-1]  # last (most refined) stage

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                heatmap = cv2.resize(heatmap, (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                heatmap_avg = heatmap_avg + heatmap / len(self.configer.get('test', 'scale_search'))

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        image_canvas = self.__draw_key_point(all_peaks, ori_img_bgr)
        ImageHelper.save(image_canvas, save_path)
# Exemplo n.º 6
# 0
    def test(self, test_dir, out_dir):
        """Run semantic-segmentation inference over every image in ``test_dir``.

        The configured test mode selects single-scale ('ss_test'),
        single-scale crop, multi-scale or multi-scale crop testing.  For each
        image a colorized overlay is written to ``out_dir``/vis and a
        palettized ('P' mode) label image to ``out_dir``/label.
        """
        for _, data_dict in enumerate(
                self.test_loader.get_testloader(test_dir=test_dir)):
            total_logits = None
            if self.configer.get('test', 'mode') == 'ss_test':
                total_logits = self.ss_test(data_dict)

            elif self.configer.get('test', 'mode') == 'sscrop_test':
                total_logits = self.sscrop_test(data_dict,
                                                params_dict=self.configer.get(
                                                    'test', 'sscrop_test'))

            elif self.configer.get('test', 'mode') == 'ms_test':
                total_logits = self.ms_test(data_dict,
                                            params_dict=self.configer.get(
                                                'test', 'ms_test'))

            elif self.configer.get('test', 'mode') == 'mscrop_test':
                total_logits = self.mscrop_test(data_dict,
                                                params_dict=self.configer.get(
                                                    'test', 'mscrop_test'))

            else:
                Log.error('Invalid test mode:{}'.format(
                    self.configer.get('test', 'mode')))
                exit(1)

            meta_list = DCHelper.tolist(data_dict['meta'])
            for i in range(len(meta_list)):
                # Per-pixel class id = argmax over the class (last) axis.
                label_map = np.argmax(total_logits[i], axis=-1)
                label_img = np.array(label_map, dtype=np.uint8)
                ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'],
                                                     tool='cv2',
                                                     mode='BGR')
                image_canvas = self.seg_parser.colorize(
                    label_img, image_canvas=ori_img_bgr)
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(
                                     out_dir, 'vis/{}.png'.format(
                                         meta_list[i]['filename'])))

                # Optionally remap training ids back to dataset label ids.
                if self.configer.get('data.label_list',
                                     default=None) is not None:
                    label_img = self.__relabel(label_img)

                # Undo the train-time zero-label reduction (+1 shift).
                if self.configer.get('data.reduce_zero_label', default=False):
                    label_img = label_img + 1
                    label_img = label_img.astype(np.uint8)

                label_img = Image.fromarray(label_img, 'P')
                label_path = os.path.join(
                    out_dir, 'label/{}.png'.format(meta_list[i]['filename']))
                Log.info('Label Path: {}'.format(label_path))
                ImageHelper.save(label_img, label_path)
# Exemplo n.º 7
# 0
    def evaluate(self, pred_dir, gt_dir):
        """Score predicted label maps against ground truth and log metrics.

        Every file in ``pred_dir`` is paired with the same-named file in
        ``gt_dir``; mIoU and pixel accuracy are accumulated and logged.
        """
        num_images = 0
        for filename in os.listdir(pred_dir):
            pred = ImageHelper.to_np(
                ImageHelper.read_image(os.path.join(pred_dir, filename),
                                       tool='pil', mode='P'))
            gt = ImageHelper.to_np(
                ImageHelper.read_image(os.path.join(gt_dir, filename),
                                       tool='pil', mode='P'))
            pred = self.relabel(pred)
            gt = self.relabel(gt)

            # The running score expects a leading batch axis.
            self.seg_running_score.update(pred[np.newaxis, :, :],
                                          gt[np.newaxis, :, :])
            num_images += 1

        Log.info('Evaluate {} images'.format(num_images))
        Log.info('mIOU: {}'.format(self.seg_running_score.get_mean_iou()))
        Log.info('Pixel ACC: {}'.format(
            self.seg_running_score.get_pixel_acc()))
    def __getitem__(self, index):
        """Return an aligned (imgA, imgB) pair taken at the same index."""
        tool = self.configer.get('data', 'image_tool')
        mode = self.configer.get('data', 'input_mode')
        imgA = ImageHelper.read_image(self.imgA_list[index],
                                      tool=tool, mode=mode)
        imgB = ImageHelper.read_image(self.imgB_list[index],
                                      tool=tool, mode=mode)

        # The pair is augmented jointly so both images get the same
        # random spatial transform.
        if self.aug_transform is not None:
            imgA, imgB = self.aug_transform([imgA, imgB])

        if self.img_transform is not None:
            imgA = self.img_transform(imgA)
            imgB = self.img_transform(imgB)

        return dict(imgA=DataContainer(imgA, stack=True),
                    imgB=DataContainer(imgB, stack=True))
# Exemplo n.º 9
# 0
    def __getitem__(self, index):
        """Produce one (image, labelmap) training sample for segmentation."""
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        img_size = ImageHelper.get_size(img)

        labelmap = ImageHelper.read_image(
            self.label_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode='P')
        # Label remapping runs BEFORE ori_target is snapshotted, so the
        # snapshot reflects the remapped ids.
        if self.configer.get('data.label_list', default=None):
            labelmap = self._encode_label(labelmap)
        if self.configer.get('data.reduce_zero_label', default=None):
            labelmap = self._reduce_zero_label(labelmap)
        ori_target = ImageHelper.to_np(labelmap)

        if self.aug_transform is not None:
            img, labelmap = self.aug_transform(img, labelmap=labelmap)
        border_size = ImageHelper.get_size(img)

        if self.img_transform is not None:
            img = self.img_transform(img)
        if self.label_transform is not None:
            labelmap = self.label_transform(labelmap)

        meta = dict(ori_img_wh=img_size,
                    border_wh=border_size,
                    ori_target=ori_target)
        return dict(img=DataContainer(img, stack=True),
                    labelmap=DataContainer(labelmap, stack=True),
                    meta=DataContainer(meta, stack=False, cpu_only=True))
# Exemplo n.º 10
# 0
    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Multi-scale pose inference with heatmaps and part-affinity fields.

        Averages network outputs over the configured scale search, extracts
        keypoint peaks, links them via PAFs into person subsets, then saves
        the visualization, the raw BGR image and the keypoint JSON.
        """

        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))

        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
        heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
        paf_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'paf_out')))
        # Scales are expressed relative to the configured input height.
        multiplier = [scale * self.configer.get('test', 'input_size')[1] / ori_height
                      for scale in self.configer.get('test', 'scale_search')]
        stride = self.configer.get('network', 'stride')
        for i, scale in enumerate(multiplier):
            image, border_hw = self._get_blob(ori_image, scale=scale)
            with torch.no_grad():
                paf_out_list, heatmap_out_list = self.pose_net(image)
                # Use the last (most refined) stage of each output list.
                paf_out = paf_out_list[-1]
                heatmap_out = heatmap_out_list[-1]

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)

                # Upsample by the network stride, crop the padded border,
                # then resize back to the original image resolution.
                heatmap = cv2.resize(heatmap, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
                heatmap = cv2.resize(heatmap[:border_hw[0], :border_hw[1]],
                                     (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                paf = paf_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                paf = cv2.resize(paf, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
                paf = cv2.resize(paf[:border_hw[0], :border_hw[1]],
                                 (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                # Running average across all scales.
                heatmap_avg = heatmap_avg + heatmap / len(multiplier)
                paf_avg = paf_avg + paf / len(multiplier)

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(ori_img_bgr, paf_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
        json_dict = self.__get_info_tree(ori_img_bgr, subset, candidate)

        image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
        image_canvas = self.pose_parser.link_points(image_canvas, json_dict)

        ImageHelper.save(image_canvas, vis_path)
        ImageHelper.save(ori_img_bgr, raw_path)
        Log.info('Json Save Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
# Exemplo n.º 11
# 0
    def __getitem__(self, index):
        """Return one (image, class-label) sample for classification."""
        label = self.label_list[index]
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        # Apply augmentation first, then the tensor/normalization transform.
        for transform in (self.aug_transform, self.img_transform):
            if transform is not None:
                img = transform(img)

        return dict(img=DataContainer(img, stack=True),
                    label=DataContainer(label, stack=True))
# Exemplo n.º 12
# 0
    def __getitem__(self, index):
        """Build one detection sample: image, boxes, labels and metadata."""
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        img_size = ImageHelper.get_size(img)

        bboxes, labels = self.__read_json_file(self.json_list[index])
        # Keep untouched copies for evaluation against the raw image.
        ori_bboxes, ori_labels = bboxes.copy(), labels.copy()

        if self.aug_transform is not None:
            img, bboxes, labels = self.aug_transform(
                img, bboxes=bboxes, labels=labels)

        # Width ratio between the augmented and the original image.
        img_scale = ImageHelper.get_size(img)[0] / img_size[0]
        labels = torch.from_numpy(labels).long()
        bboxes = torch.from_numpy(bboxes).float()

        meta = dict(ori_img_size=img_size,
                    border_size=ImageHelper.get_size(img),
                    img_scale=img_scale,
                    ori_bboxes=torch.from_numpy(ori_bboxes).float(),
                    ori_labels=torch.from_numpy(ori_labels).long())
        if self.img_transform is not None:
            img = self.img_transform(img)

        dc_kwargs = dict(return_dc=True, samples_per_gpu=True)
        return dict(img=DataContainer(img, stack=True, **dc_kwargs),
                    bboxes=DataContainer(bboxes, stack=False, **dc_kwargs),
                    labels=DataContainer(labels, stack=False, **dc_kwargs),
                    meta=DataContainer(meta, stack=False, cpu_only=True,
                                       **dc_kwargs))
# Exemplo n.º 13
# 0
    def __getitem__(self, index):
        """Return one image together with its keypoint heatmap target."""
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        kpts, bboxes = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None:
            img, kpts, bboxes = self.aug_transform(img, kpts=kpts,
                                                   bboxes=bboxes)

        # The heatmap is generated at the (possibly augmented) image size.
        heatmap = self.heatmap_generator(kpts, ImageHelper.get_size(img))
        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(img=DataContainer(img, stack=True),
                    heatmap=DataContainer(heatmap, stack=True))
# Exemplo n.º 14
# 0
    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Classify a single image and persist the results.

        Saves the labeled visualization to ``vis_path``, the raw BGR image
        to ``raw_path`` and the prediction JSON to ``json_path``.

        Returns:
            dict: the prediction JSON dict.

        Raises:
            ValueError: if no test transform is configured for the dataset.
        """
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(
            image_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        trans = None
        if self.configer.get('dataset') == 'imagenet':
            if self.configer.get('data', 'image_tool') == 'cv2':
                img = Image.fromarray(img)

            # NOTE(review): transforms.Scale is the legacy alias of
            # transforms.Resize — kept to match the project's pinned
            # torchvision; confirm before upgrading the dependency.
            trans = transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
            ])

        # Fail loudly on unsupported datasets.  The original used a bare
        # `assert`, which is silently stripped under `python -O`.
        if trans is None:
            raise ValueError('No test transform configured for dataset: {}'.format(
                self.configer.get('dataset')))

        img = trans(img)

        ori_img_bgr = ImageHelper.get_cv2_bgr(img,
                                              mode=self.configer.get(
                                                  'data', 'input_mode'))

        inputs = self.blob_helper.make_input(img,
                                             input_size=self.configer.get(
                                                 'test', 'input_size'),
                                             scale=1.0)

        with torch.no_grad():
            outputs = self.cls_net(inputs)

        json_dict = self.__get_info_tree(outputs, image_path)

        image_canvas = self.cls_parser.draw_label(ori_img_bgr.copy(),
                                                  json_dict['label'])
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict
# Exemplo n.º 15
# 0
    def __getitem__(self, index):
        """Return one instance-segmentation sample with polygon annotations."""
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        labels, bboxes, polygons = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None:
            img, bboxes, labels, polygons = self.aug_transform(
                img, bboxes=bboxes, labels=labels, polygons=polygons)

        if self.img_transform is not None:
            img = self.img_transform(img)

        # Polygons are ragged Python structures, so they stay CPU-only.
        return dict(img=DataContainer(img, stack=True),
                    bboxes=DataContainer(bboxes, stack=False),
                    labels=DataContainer(labels, stack=False),
                    polygons=DataContainer(polygons, stack=False,
                                           cpu_only=True))