def debug(self):
    """Visualize validation pose heatmaps and save key-point overlays.

    Iterates the validation loader, de-normalizes each input image,
    upsamples the ground-truth heatmap by the network stride, extracts
    peak locations and writes the annotated image to
    ``vis/results/pose/<dataset>/debug``.
    """
    base_dir = os.path.join(self.configer.get('project_dir'),
                            'vis/results/pose',
                            self.configer.get('dataset'), 'debug')

    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(base_dir, exist_ok=True)

    val_data_loader = self.pose_data_loader.get_valloader()

    # Hoisted: loop-invariant config lookup.
    stride = self.configer.get('network', 'stride')

    for i, (inputs, heatmap) in enumerate(val_data_loader):
        for j in range(inputs.size(0)):
            ori_img = DeNormalize(
                mean=self.configer.get('trans_params', 'mean'),
                std=self.configer.get('trans_params', 'std'))(inputs[j])
            image_raw = ori_img.numpy().transpose(1, 2, 0)
            # NOTE(review): unlike the sibling debug helpers this image is
            # not cast to uint8 before cvtColor -- confirm DeNormalize's
            # output dtype/range is acceptable here.
            image_raw = cv2.cvtColor(image_raw, cv2.COLOR_RGB2BGR)
            heatmap_avg = heatmap[j].numpy().transpose(1, 2, 0)
            heatmap_avg = cv2.resize(heatmap_avg, (0, 0), fx=stride,
                                     fy=stride,
                                     interpolation=cv2.INTER_CUBIC)
            all_peaks = self.__extract_heatmap_info(heatmap_avg)
            image_save = self.__draw_key_point(all_peaks, image_raw)
            cv2.imwrite(
                os.path.join(base_dir, '{}_{}_result.jpg'.format(i, j)),
                image_save)
# Example 2
    def debug(self):
        """Visualize up to 20 validation segmentation samples.

        De-normalizes each input, colorizes the ground-truth label map onto
        the image, then saves and displays the canvas under
        ``vis/results/seg/<dataset>/debug``.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/seg',
                                self.configer.get('dataset'), 'debug')

        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(base_dir, exist_ok=True)

        val_data_loader = self.seg_data_loader.get_valloader()

        count = 0
        for i, (inputs, targets) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count += 1
                if count > 20:
                    # Stop after 20 samples; `return` instead of exit(1) so
                    # the hosting process is not killed with an error status.
                    return

                ori_img = DeNormalize(
                    mean=self.configer.get('trans_params', 'mean'),
                    std=self.configer.get('trans_params', 'std'))(inputs[j])
                ori_img = ori_img.numpy().transpose(1, 2, 0).astype(np.uint8)

                image_bgr = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
                label_map = targets[j].numpy()
                image_canvas = self.seg_parser.colorize(label_map,
                                                        image_canvas=image_bgr)
                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                    image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
# Example 3
    def debug(self):
        """Visualize up to 20 validation detection samples.

        Decodes ground-truth boxes/labels, renders them with the detection
        parser, then saves and displays each canvas under
        ``vis/results/det/<dataset>/debug``.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/det', self.configer.get('dataset'), 'debug')

        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(base_dir, exist_ok=True)

        val_data_loader = self.det_data_loader.get_valloader()

        # Hoisted: loop-invariant config lookup.
        num_classes = self.configer.get('data', 'num_classes')

        count = 0
        for i, (inputs, bboxes, labels) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count += 1
                if count > 20:
                    # Stop after 20 samples; `return` instead of exit(1) so
                    # the hosting process is not killed with an error status.
                    return

                ori_img_rgb = DeNormalize(mean=self.configer.get('trans_params', 'mean'),
                                          std=self.configer.get('trans_params', 'std'))(inputs[j])
                ori_img_rgb = ori_img_rgb.numpy().transpose(1, 2, 0).astype(np.uint8)
                ori_img_bgr = cv2.cvtColor(ori_img_rgb, cv2.COLOR_RGB2BGR)
                # One-hot encode the labels so the decoder sees per-class scores.
                eye_matrix = torch.eye(num_classes)
                labels_target = eye_matrix[labels.view(-1)].view(inputs.size(0), -1,
                                                                 num_classes)
                boxes, lbls, scores = self.__decode(bboxes[j], labels_target[j])
                json_dict = self.__get_info_tree(boxes, lbls, scores, ori_img_rgb)
                image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(),
                                                           json_dict,
                                                           conf_threshold=self.configer.get('vis', 'conf_threshold'))

                cv2.imwrite(os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
# Example 4
    def tensor2bgr(self, tensor):
        """Convert a normalized CHW image tensor to an HWC uint8 BGR array.

        The tensor is de-normalized using the configured 'data.normalize'
        parameters; channel order is swapped only when the configured input
        mode is not already BGR.
        """
        assert len(tensor.size()) == 3

        denorm = DeNormalize(**self.configer.get('data', 'normalize'))
        img = denorm(tensor.cpu()).numpy().transpose(1, 2, 0).astype(np.uint8)

        # Already BGR: nothing to convert.
        if self.configer.get('data', 'input_mode') == 'BGR':
            return img
        return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# Example 5
    def debug(self):
        """Visualize up to 10 validation pose samples (heatmap + PAF).

        For each sample: de-normalize the image, upsample the ground-truth
        heatmap and part-affinity fields by the network stride, run the
        peak/PAF decoding pipeline and save/show the drawn skeleton under
        ``vis/results/pose/<dataset>/debug``.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/pose',
                                self.configer.get('dataset'), 'debug')

        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(base_dir, exist_ok=True)

        val_data_loader = self.pose_data_loader.get_valloader()

        # Hoisted: loop-invariant config lookup.
        stride = self.configer.get('network', 'stride')

        count = 0
        for i, (inputs, heatmap, maskmap,
                vecmap) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count += 1
                if count > 10:
                    # Stop after 10 samples; `return` instead of exit(1) so
                    # the hosting process is not killed with an error status.
                    return

                Log.info(heatmap.size())
                ori_img = DeNormalize(
                    mean=self.configer.get('trans_params', 'mean'),
                    std=self.configer.get('trans_params', 'std'))(inputs[j])
                ori_img = ori_img.numpy().transpose(1, 2, 0).astype(np.uint8)
                image_bgr = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
                heatmap_avg = heatmap[j].numpy().transpose(1, 2, 0)
                heatmap_avg = cv2.resize(heatmap_avg, (0, 0), fx=stride,
                                         fy=stride,
                                         interpolation=cv2.INTER_CUBIC)
                paf_avg = vecmap[j].numpy().transpose(1, 2, 0)
                paf_avg = cv2.resize(paf_avg, (0, 0), fx=stride, fy=stride,
                                     interpolation=cv2.INTER_CUBIC)
                self.pose_visualizer.vis_peaks(heatmap_avg, image_bgr)
                self.pose_visualizer.vis_paf(paf_avg, image_bgr)
                all_peaks = self.__extract_heatmap_info(heatmap_avg)
                special_k, connection_all = self.__extract_paf_info(
                    image_bgr, paf_avg, all_peaks)
                subset, candidate = self.__get_subsets(connection_all,
                                                       special_k, all_peaks)
                json_dict = self.__get_info_tree(image_bgr, subset, candidate)
                image_canvas = self.pose_parser.draw_points(
                    image_bgr, json_dict)
                image_canvas = self.pose_parser.link_points(
                    image_canvas, json_dict)
                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                    image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
    def debug(self):
        """Visualize up to 20 validation detection samples (SSD loader).

        Decodes ground-truth boxes/labels per image, draws any detected
        boxes on the de-normalized image and writes the result to
        ``vis/results/det/<dataset>/debug``.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/det', self.configer.get('dataset'), 'debug')

        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(base_dir, exist_ok=True)

        val_data_loader = self.det_data_loader.get_valloader(SSDDataLoader)

        # Hoisted: loop-invariant config lookup.
        num_classes = self.configer.get('data', 'num_classes')

        count = 0
        for i, (inputs, bboxes, labels) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count += 1
                if count > 20:
                    # Stop after 20 samples; `return` instead of exit(1) so
                    # the hosting process is not killed with an error status.
                    return

                ori_img = DeNormalize(mean=self.configer.get('trans_params', 'mean'),
                                      std=self.configer.get('trans_params', 'std'))(inputs[j])
                # NOTE(review): the sibling det debug casts to uint8 before
                # cvtColor; confirm DeNormalize's output dtype is fine here.
                ori_img = ori_img.numpy().transpose(1, 2, 0)
                image_bgr = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
                # One-hot encode the labels so the decoder sees per-class scores.
                eye_matrix = torch.eye(num_classes)
                labels_target = eye_matrix[labels.view(-1)].view(inputs.size(0), -1,
                                                                 num_classes)
                boxes, lbls, scores, has_obj = self.__decode(bboxes[j], labels_target[j])
                if has_obj:
                    # Boxes appear to be relative coordinates; clamp to [0, 1].
                    boxes = np.clip(boxes.cpu().numpy(), 0, 1)
                    img_canvas = self.__draw_box(image_bgr, boxes,
                                                 lbls.cpu().numpy(),
                                                 scores.cpu().numpy())
                else:
                    # Nothing detected: keep the plain image.
                    img_canvas = image_bgr

                cv2.imwrite(os.path.join(base_dir, '{}_{}_result.jpg'.format(i, j)), img_canvas)
# Example 7
    def debug(self):
        """Visualize up to 2 validation samples of part maps and PAFs.

        Reconstructs per-keypoint heatmaps by running-averaging the L2
        magnitudes of the limb part-map channels that touch each keypoint,
        upsamples the maps by the network stride, runs the pose decoding
        pipeline and saves/shows the drawn skeleton under
        ``vis/results/pose/<dataset>/debug``.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/pose',
                                self.configer.get('dataset'), 'debug')

        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(base_dir, exist_ok=True)

        val_data_loader = self.pose_data_loader.get_valloader()

        # Hoisted: these config lookups are loop-invariant.
        stride = self.configer.get('network', 'stride')
        num_keypoints = self.configer.get('data', 'num_keypoints')
        limb_seq = self.configer.get('details', 'limb_seq')

        count = 0
        for i, (inputs, partmap, maskmap,
                vecmap) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count += 1
                if count > 2:
                    # Stop after 2 samples; `return` instead of exit(1) so
                    # the hosting process is not killed with an error status.
                    return

                Log.info(partmap.size())
                ori_img = DeNormalize(
                    mean=self.configer.get('trans_params', 'mean'),
                    std=self.configer.get('trans_params', 'std'))(inputs[j])
                ori_img = ori_img.numpy().transpose(1, 2, 0).astype(np.uint8)
                image_bgr = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
                partmap_avg = partmap[j].numpy().transpose(1, 2, 0)

                heatmap_avg = np.zeros((partmap_avg.shape[0],
                                        partmap_avg.shape[1],
                                        num_keypoints))
                # How many limb maps were merged into each keypoint channel,
                # used for the running average below.
                part_num = np.zeros((num_keypoints,))

                for index in range(len(limb_seq)):
                    # limb_seq entries are 1-based keypoint ids.
                    a = limb_seq[index][0] - 1
                    b = limb_seq[index][1] - 1
                    # Each limb contributes a 2-channel vector per endpoint;
                    # its L2 magnitude is used as that keypoint's response.
                    heatmap_a = partmap_avg[:, :, index * 4:index * 4 + 2] ** 2
                    heatmap_a = np.sqrt(np.sum(heatmap_a, axis=2).squeeze())
                    # Running average over every limb touching keypoint `a`.
                    heatmap_avg[:, :, a] = (heatmap_avg[:, :, a] * part_num[a]
                                            + heatmap_a) / (part_num[a] + 1)
                    part_num[a] += 1
                    heatmap_b = partmap_avg[:, :, index * 4 + 2:index * 4 + 4] ** 2
                    heatmap_b = np.sqrt(np.sum(heatmap_b, axis=2).squeeze())
                    # Running average over every limb touching keypoint `b`.
                    heatmap_avg[:, :, b] = (heatmap_avg[:, :, b] * part_num[b]
                                            + heatmap_b) / (part_num[b] + 1)
                    part_num[b] += 1

                partmap_avg = cv2.resize(partmap_avg, (0, 0), fx=stride,
                                         fy=stride,
                                         interpolation=cv2.INTER_CUBIC)
                heatmap_avg = cv2.resize(heatmap_avg, (0, 0), fx=stride,
                                         fy=stride,
                                         interpolation=cv2.INTER_CUBIC)
                paf_avg = vecmap[j].numpy().transpose(1, 2, 0)
                paf_avg = cv2.resize(paf_avg, (0, 0), fx=stride, fy=stride,
                                     interpolation=cv2.INTER_CUBIC)

                self.pose_visualizer.vis_peaks(heatmap_avg, image_bgr)
                self.pose_visualizer.vis_paf(paf_avg, image_bgr)
                all_peaks = self.__extract_heatmap_info(heatmap_avg)
                special_k, connection_all = self.__extract_paf_info(
                    image_bgr, paf_avg, partmap_avg, all_peaks)
                subset, candidate = self.__get_subsets(connection_all,
                                                       special_k, all_peaks)
                json_dict = self.__get_info_tree(image_bgr, subset, candidate)
                image_canvas = self.pose_parser.draw_points(
                    image_bgr, json_dict)
                image_canvas = self.pose_parser.link_points(
                    image_canvas, json_dict)
                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                    image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()