    def __list_dirs(self, root_dir, dataset):
        imgA_list = list()
        imgB_list = list()

        imageA_dir = os.path.join(root_dir, dataset, 'imageA')
        imageB_dir = os.path.join(root_dir, dataset, 'imageB')

        for file_name in os.listdir(imageA_dir):
            image_name = '.'.join(file_name.split('.')[:-1])
            imgA_path = ImageHelper.imgpath(imageA_dir, image_name)
            imgB_path = ImageHelper.imgpath(imageB_dir, image_name)
            if not os.path.exists(imgA_path) or not os.path.exists(imgB_path):
                Log.warn('Image pair {} / {} does not exist.'.format(imgA_path, imgB_path))
                continue

            imgA_list.append(imgA_path)
            imgB_list.append(imgB_path)

        if dataset == 'train' and self.configer.get('data', 'include_val'):
            imageA_dir = os.path.join(root_dir, 'val/imageA')
            imageB_dir = os.path.join(root_dir, 'val/imageB')
            for file_name in os.listdir(imageA_dir):
                image_name = '.'.join(file_name.split('.')[:-1])
                imgA_path = ImageHelper.imgpath(imageA_dir, image_name)
                imgB_path = ImageHelper.imgpath(imageB_dir, image_name)
                if not os.path.exists(imgA_path) or not os.path.exists(imgB_path):
                    Log.warn('Image pair {} / {} does not exist.'.format(imgA_path, imgB_path))
                    continue

                imgA_list.append(imgA_path)
                imgB_list.append(imgB_path)

        return imgA_list, imgB_list
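For reference, the same basename-pairing idea can be written without the repo's ImageHelper; a minimal standalone sketch, assuming the imageA/imageB layout above (the function name and extension list are illustrative, not part of the codebase):

import os

def list_image_pairs(root_dir, dataset, exts=('.jpg', '.jpeg', '.png')):
    # Pair files from imageA/ and imageB/ by basename (extensions may differ).
    imgA_dir = os.path.join(root_dir, dataset, 'imageA')
    imgB_dir = os.path.join(root_dir, dataset, 'imageB')
    pairs = []
    for file_name in sorted(os.listdir(imgA_dir)):
        stem = os.path.splitext(file_name)[0]
        match = next((os.path.join(imgB_dir, stem + ext) for ext in exts
                      if os.path.exists(os.path.join(imgB_dir, stem + ext))), None)
        if match is None:
            continue  # skip images without a counterpart in imageB/
        pairs.append((os.path.join(imgA_dir, file_name), match))
    return pairs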
Example #2
    def __getitem__(self, index):
        imgA = ImageHelper.read_image(
            self.imgA_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        indexB = random.randint(0, len(self.imgB_list) - 1)
        imgB = ImageHelper.read_image(
            self.imgB_list[indexB],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        if self.aug_transform is not None:
            imgA = self.aug_transform(imgA)
            imgB = self.aug_transform(imgB)

        if self.img_transform is not None:
            imgA = self.img_transform(imgA)
            imgB = self.img_transform(imgB)

        return dict(imgA=DataContainer(imgA, stack=True),
                    imgB=DataContainer(imgB, stack=True),
                    labelA=DataContainer(self.labelA_list[index], stack=True),
                    labelB=DataContainer(self.labelB_list[indexB], stack=True))
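Note that imgB is drawn at a random index, so the A/B samples are unpaired. A toy sketch of the same sampling pattern with plain tensors and torch's default collation (DataContainer and the repo's collate are not used here; all names are illustrative):

import random
import torch
from torch.utils.data import DataLoader, Dataset

class UnpairedToyDataset(Dataset):
    def __init__(self, n_a=8, n_b=5):
        self.imgA_list = [torch.randn(3, 32, 32) for _ in range(n_a)]
        self.imgB_list = [torch.randn(3, 32, 32) for _ in range(n_b)]

    def __len__(self):
        return len(self.imgA_list)

    def __getitem__(self, index):
        # B is sampled independently of A, so the pairs change from epoch to epoch.
        index_b = random.randint(0, len(self.imgB_list) - 1)
        return dict(imgA=self.imgA_list[index], imgB=self.imgB_list[index_b])

loader = DataLoader(UnpairedToyDataset(), batch_size=2, shuffle=True)
batch = next(iter(loader))  # batch['imgA'].shape == torch.Size([2, 3, 32, 32])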
    def test(self, test_dir, out_dir):
        for _, data_dict in enumerate(
                self.test_loader.get_testloader(test_dir=test_dir)):
            data_dict['testing'] = True
            data_dict = RunnerHelper.to_device(self, data_dict)
            out_dict = self.det_net(data_dict)
            meta_list = DCHelper.tolist(data_dict['meta'])
            test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = out_dict[
                'test_group']
            batch_detections = self.decode(test_roi_locs, test_roi_scores,
                                           test_indices_and_rois,
                                           test_rois_num, self.configer,
                                           meta_list)
            for i in range(len(meta_list)):
                ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'],
                                                     tool='cv2',
                                                     mode='BGR')
                json_dict = self.__get_info_tree(batch_detections[i])
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(),
                    json_dict,
                    conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(
                                     out_dir, 'vis/{}.png'.format(
                                         meta_list[i]['filename'])))

                Log.info('Json Path: {}'.format(
                    os.path.join(
                        out_dir,
                        'json/{}.json'.format(meta_list[i]['filename']))))
                JsonHelper.save_file(json_dict,
                                     save_path=os.path.join(
                                         out_dir, 'json/{}.json'.format(
                                             meta_list[i]['filename'])))
    def __test_img(self, image_path, save_path):
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))

        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
        heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
        for i, scale in enumerate(self.configer.get('test', 'scale_search')):
            image = self.blob_helper.make_input(ori_image,
                                                input_size=self.configer.get('test', 'input_size'),
                                                scale=scale)
            with torch.no_grad():
                heatmap_out_list = self.pose_net(image)
                heatmap_out = heatmap_out_list[-1]

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                heatmap = cv2.resize(heatmap, (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                heatmap_avg = heatmap_avg + heatmap / len(self.configer.get('test', 'scale_search'))

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        image_canvas = self.__draw_key_point(all_peaks, ori_img_bgr)
        ImageHelper.save(image_canvas, save_path)
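The loop above is standard multi-scale test-time averaging: run the network once per scale, resize every heatmap back to the original resolution, and average. A standalone numpy/cv2 sketch with a dummy predictor standing in for the pose network (all names are illustrative):

import cv2
import numpy as np

def average_heatmaps(ori_hw, scales, predict):
    ori_h, ori_w = ori_hw
    heatmap_avg = None
    for scale in scales:
        heatmap = predict(scale)  # (h, w, C) heatmap produced at this scale
        heatmap = cv2.resize(heatmap, (ori_w, ori_h), interpolation=cv2.INTER_CUBIC)
        heatmap_avg = heatmap if heatmap_avg is None else heatmap_avg + heatmap
    return heatmap_avg / len(scales)

# Dummy predictor returning random maps whose size depends on the scale.
dummy = lambda s: np.random.rand(int(64 * s), int(64 * s), 18).astype(np.float32)
heatmap_avg = average_heatmaps((64, 64), [0.5, 1.0, 1.5], dummy)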
Example #5
    def vis_bboxes(self,
                   image_in,
                   bboxes_list,
                   name='default',
                   sub_dir='bbox'):
        """
          Visualize the bounding boxes of individual detections.
        """
        base_dir = os.path.join(self.configer.get('project_dir'), DET_DIR,
                                sub_dir)

        if isinstance(image_in, Image.Image):
            image = ImageHelper.rgb2bgr(ImageHelper.to_np(image_in))

        else:
            image = image_in.copy()

        if not os.path.exists(base_dir):
            Log.warn('Dir: {} does not exist, creating it.'.format(base_dir))
            os.makedirs(base_dir)

        img_path = os.path.join(
            base_dir,
            name if ImageHelper.is_img(name) else '{}.jpg'.format(name))

        for bbox in bboxes_list:
            image = cv2.rectangle(image, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (0, 255, 0), 2)

        cv2.imwrite(img_path, image)
Example #6
    def __list_dirs(self, root_dir, dataset):
        img_list = list()
        label_list = list()
        image_dir = os.path.join(root_dir, dataset, 'image')
        label_dir = os.path.join(root_dir, dataset, 'label')

        for file_name in os.listdir(label_dir):
            image_name = '.'.join(file_name.split('.')[:-1])
            label_path = os.path.join(label_dir, file_name)
            img_path = ImageHelper.imgpath(image_dir, image_name)
            if not os.path.exists(label_path) or img_path is None:
                Log.warn('Label Path: {} does not exist.'.format(label_path))
                continue

            img_list.append(img_path)
            label_list.append(label_path)

        if dataset == 'train' and self.configer.get('data', 'include_val'):
            image_dir = os.path.join(root_dir, 'val/image')
            label_dir = os.path.join(root_dir, 'val/label')
            for file_name in os.listdir(label_dir):
                image_name = '.'.join(file_name.split('.')[:-1])
                label_path = os.path.join(label_dir, file_name)
                img_path = ImageHelper.imgpath(image_dir, image_name)
                if not os.path.exists(label_path) or img_path is None:
                    Log.warn('Label Path: {} does not exist.'.format(label_path))
                    continue

                img_list.append(img_path)
                label_list.append(label_path)

        return img_list, label_list
Example #7
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.item_list[index][0],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        ori_img_size = ImageHelper.get_size(img)
        if self.aug_transform is not None:
            img = self.aug_transform(img)

        border_size = ImageHelper.get_size(img)
        if self.img_transform is not None:
            img = self.img_transform(img)

        meta = dict(ori_img_size=ori_img_size,
                    border_size=border_size,
                    img_path=self.item_list[index][0],
                    filename=self.item_list[index][1])
        return dict(img=DataContainer(img,
                                      stack=True,
                                      return_dc=True,
                                      samples_per_gpu=True),
                    meta=DataContainer(meta,
                                       stack=False,
                                       cpu_only=True,
                                       return_dc=True,
                                       samples_per_gpu=True))
Example #8
    def test(self, test_dir, out_dir):
        for _, data_dict in enumerate(
                self.test_loader.get_testloader(test_dir=test_dir)):
            total_logits = None
            if self.configer.get('test', 'mode') == 'ss_test':
                total_logits = self.ss_test(data_dict)

            elif self.configer.get('test', 'mode') == 'sscrop_test':
                total_logits = self.sscrop_test(data_dict,
                                                params_dict=self.configer.get(
                                                    'test', 'sscrop_test'))

            elif self.configer.get('test', 'mode') == 'ms_test':
                total_logits = self.ms_test(data_dict,
                                            params_dict=self.configer.get(
                                                'test', 'ms_test'))

            elif self.configer.get('test', 'mode') == 'mscrop_test':
                total_logits = self.mscrop_test(data_dict,
                                                params_dict=self.configer.get(
                                                    'test', 'mscrop_test'))

            else:
                Log.error('Invalid test mode: {}'.format(
                    self.configer.get('test', 'mode')))
                exit(1)

            meta_list = DCHelper.tolist(data_dict['meta'])
            for i in range(len(meta_list)):
                label_map = np.argmax(total_logits[i], axis=-1)
                label_img = np.array(label_map, dtype=np.uint8)
                ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'],
                                                     tool='cv2',
                                                     mode='BGR')
                image_canvas = self.seg_parser.colorize(
                    label_img, image_canvas=ori_img_bgr)
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(
                                     out_dir, 'vis/{}.png'.format(
                                         meta_list[i]['filename'])))

                if self.configer.get('data.label_list',
                                     default=None) is not None:
                    label_img = self.__relabel(label_img)

                if self.configer.get('data.reduce_zero_label', default=False):
                    label_img = label_img + 1
                    label_img = label_img.astype(np.uint8)

                label_img = Image.fromarray(label_img, 'P')
                label_path = os.path.join(
                    out_dir, 'label/{}.png'.format(meta_list[i]['filename']))
                Log.info('Label Path: {}'.format(label_path))
                ImageHelper.save(label_img, label_path)
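Saving the predicted label map in PIL's paletted 'P' mode, as above, keeps one byte per pixel while still rendering in color once a palette is attached; a minimal standalone sketch (the palette values are arbitrary):

import numpy as np
from PIL import Image

label_map = np.argmax(np.random.rand(4, 4, 3), axis=-1).astype(np.uint8)
label_img = Image.fromarray(label_map, 'P')
# Three example colors (classes 0, 1, 2); the rest of the palette is padded with black.
label_img.putpalette([0, 0, 0, 128, 0, 0, 0, 128, 0] + [0] * (256 * 3 - 9))
label_img.save('label_demo.png')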
    def align_face(self, img, f5pt):
        ang_tan = (f5pt[0, 1] - f5pt[1, 1]) / (f5pt[0, 0] - f5pt[1, 0])
        rotate_degree = math.degrees(math.atan(ang_tan))
        height, width, _ = img[0].shape if isinstance(img, (list, tuple)) else img.shape

        img_center = (width / 2.0, height / 2.0)

        rotate_mat = cv2.getRotationMatrix2D(img_center, rotate_degree, 1.0)
        cos_val = np.abs(rotate_mat[0, 0])
        sin_val = np.abs(rotate_mat[0, 1])
        new_width = int(height * sin_val + width * cos_val)
        new_height = int(height * cos_val + width * sin_val)
        rotate_mat[0, 2] += (new_width / 2.) - img_center[0]
        rotate_mat[1, 2] += (new_height / 2.) - img_center[1]
        if isinstance(img, (list, tuple)):
            for i in range(len(img)):
                img[i] = cv2.warpAffine(img[i], rotate_mat, (new_width, new_height), borderValue=0).astype(np.uint8)
        else:
            img = cv2.warpAffine(img, rotate_mat, (new_width, new_height), borderValue=0).astype(np.uint8)

        for i in range(len(f5pt)):
            x = f5pt[i][0]
            y = f5pt[i][1]
            p = np.array([x, y, 1])
            p = rotate_mat.dot(p)
            f5pt[i][0] = p[0]
            f5pt[i][1] = p[1]

        r_scale = self.dist_ec_mc / ((f5pt[3, 1] + f5pt[4, 1]) / 2 - (f5pt[0, 1] + f5pt[1, 1]) / 2)
        height, width, _ = img[0].shape if isinstance(img, (list, tuple)) else img.shape
        target_size = [int(width * r_scale), int(height * r_scale)]
        if r_scale < 0:
            return None, None

        if isinstance(img, (list, tuple)):
            for i in range(len(img)):
                img[i] = ImageHelper.resize(img[i], target_size, interpolation='cubic')
        else:
            img = ImageHelper.resize(img, target_size, interpolation='cubic')
        f5pt = f5pt * r_scale

        crop_y = max(int((f5pt[0, 1] + f5pt[1, 1]) / 2 - self.ec_y), 0)
        crop_x = max(int((f5pt[0, 0] + f5pt[1, 0]) / 2 - self.crop_size // 2), 0)
        f5pt[:, 0] -= crop_x
        f5pt[:, 1] -= crop_y
        if isinstance(img, (list, tuple)):
            for i in range(len(img)):
                img[i] = img[i][crop_y:crop_y+self.crop_size, crop_x:crop_x+self.crop_size]
        else:
            img = img[crop_y:crop_y+self.crop_size, crop_x:crop_x+self.crop_size]
        return img, f5pt
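The rotation step inside align_face uses the common "rotate without cropping" trick: enlarge the output canvas by |cos| and |sin| of the angle, shift the affine matrix's translation so the old center lands on the new one, then push the landmarks through the same matrix. A standalone sketch (the function name is illustrative):

import cv2
import numpy as np

def rotate_keep_all(img, degrees):
    height, width = img.shape[:2]
    center = (width / 2.0, height / 2.0)
    mat = cv2.getRotationMatrix2D(center, degrees, 1.0)
    cos_v, sin_v = abs(mat[0, 0]), abs(mat[0, 1])
    new_w = int(height * sin_v + width * cos_v)
    new_h = int(height * cos_v + width * sin_v)
    mat[0, 2] += new_w / 2.0 - center[0]
    mat[1, 2] += new_h / 2.0 - center[1]
    return cv2.warpAffine(img, mat, (new_w, new_h), borderValue=0), mat

rotated, mat = rotate_keep_all(np.zeros((100, 200, 3), dtype=np.uint8), 30)
point = mat.dot(np.array([50.0, 50.0, 1.0]))  # a landmark mapped into the rotated frame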
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        if os.path.exists(self.mask_list[index]):
            maskmap = ImageHelper.read_image(self.mask_list[index],
                                             tool=self.configer.get(
                                                 'data', 'image_tool'),
                                             mode='P')
        else:
            width, height = ImageHelper.get_size(img)
            maskmap = np.ones((height, width), dtype=np.uint8)
            if self.configer.get('data', 'image_tool') == 'pil':
                maskmap = ImageHelper.to_img(maskmap)

        kpts, bboxes = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None and len(bboxes) > 0:
            img, maskmap, kpts, bboxes = self.aug_transform(img,
                                                            maskmap=maskmap,
                                                            kpts=kpts,
                                                            bboxes=bboxes)

        elif self.aug_transform is not None:
            img, maskmap, kpts = self.aug_transform(img,
                                                    maskmap=maskmap,
                                                    kpts=kpts)

        width, height = ImageHelper.get_size(maskmap)
        maskmap = ImageHelper.resize(
            maskmap, (width // self.configer.get('network', 'stride'),
                      height // self.configer.get('network', 'stride')),
            interpolation='nearest')

        maskmap = torch.from_numpy(np.array(maskmap, dtype=np.float32))
        maskmap = maskmap.unsqueeze(0)
        heatmap = self.heatmap_generator(kpts, [width, height], maskmap)
        vecmap = self.paf_generator(kpts, [width, height], maskmap)
        if self.img_transform is not None:
            img = self.img_transform(img)

        meta = dict(kpts=kpts)
        return dict(
            img=DataContainer(img, stack=True),
            heatmap=DataContainer(heatmap, stack=True),
            maskmap=DataContainer(maskmap, stack=True),
            vecmap=DataContainer(vecmap, stack=True),
            meta=DataContainer(meta, stack=False, cpu_only=True),
        )
Example #11
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        img_size = ImageHelper.get_size(img)
        bboxes, labels = self.__read_json_file(self.json_list[index])
        ori_bboxes, ori_labels = bboxes.copy(), labels.copy()

        if self.aug_transform is not None:
            img, bboxes, labels = self.aug_transform(img,
                                                     bboxes=bboxes,
                                                     labels=labels)

        img_scale = ImageHelper.get_size(img)[0] / img_size[0]

        labels = torch.from_numpy(labels).long()
        bboxes = torch.from_numpy(bboxes).float()

        meta = dict(ori_img_size=img_size,
                    border_size=ImageHelper.get_size(img),
                    img_scale=img_scale,
                    ori_bboxes=torch.from_numpy(ori_bboxes).float(),
                    ori_labels=torch.from_numpy(ori_labels).long())
        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(img=DataContainer(img,
                                      stack=True,
                                      return_dc=True,
                                      samples_per_gpu=True),
                    bboxes=DataContainer(bboxes,
                                         stack=False,
                                         return_dc=True,
                                         samples_per_gpu=True),
                    labels=DataContainer(labels,
                                         stack=False,
                                         return_dc=True,
                                         samples_per_gpu=True),
                    meta=DataContainer(meta,
                                       stack=False,
                                       cpu_only=True,
                                       return_dc=True,
                                       samples_per_gpu=True))
    def process_3d(self, data_dir):
        new_data_dir = '{}_new'.format(data_dir.rstrip('/'))
        if os.path.exists(new_data_dir):
            shutil.rmtree(new_data_dir)

        os.makedirs(new_data_dir)

        for filename in FileHelper.list_dir(data_dir):
            if not ImageHelper.is_img(filename) or 'depth' in filename:
                Log.info('Skip non-image or depth file: {}'.format(os.path.join(data_dir, filename)))
                continue

            file_path = os.path.join(data_dir, filename)
            img = io.imread(file_path)
            kpts = self.detect_face(img)
            if kpts is None:
                Log.info('No valid face detected in {}'.format(file_path))
                continue

            depth = np.array(io.imread(os.path.join(data_dir, filename.replace('rgb', 'depth'))))
            face_depth, kpts = self.align_face([np.array(img), np.array(depth)], kpts)
            if face_depth is None:
                Log.info('Face alignment failed for {}'.format(file_path))
                continue
            ImageHelper.save(ImageHelper.rgb2bgr(face_depth[0]), os.path.join(new_data_dir, filename))
            ImageHelper.save(ImageHelper.rgb2bgr(face_depth[1]), os.path.join(new_data_dir, filename.replace('rgb', 'depth')))
Example #13
    def __getitem__(self, index):
        img = ImageHelper.read_image(self.img_list[index],
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))

        kpts, bboxes = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None:
            img, kpts, bboxes = self.aug_transform(img, kpts=kpts, bboxes=bboxes)

        heatmap = self.heatmap_generator(kpts, ImageHelper.get_size(img))
        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(
            img=DataContainer(img, stack=True),
            heatmap=DataContainer(heatmap, stack=True),
        )
Example #14
    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(test_dir, 'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(test_dir, 'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(json_path=jsonB_path) if os.path.exists(jsonB_path) else None
        else:
            # 'pix2pix' and any other mode share the imageA/imageB directory layout.
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            imgB_dir = os.path.join(test_dir, 'imageB')
            test_loader_B = self.test_loader.get_testloader(test_dir=imgB_dir) if os.path.exists(imgB_dir) else None

        if test_loader_A is not None:
            for data_dict in test_loader_A:
                new_data_dict = dict(imgA=data_dict['img'], testing=True)
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict)

                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        ImageHelper.save(img_bgr, os.path.join(out_dir, '{}_{}.jpg'.format(meta_list[i]['filename'], key)))

        if test_loader_B is not None:
            for data_dict in test_loader_B:
                new_data_dict = dict(imgB=data_dict['img'], testing=True)
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        ImageHelper.save(img_bgr, os.path.join(out_dir, '{}_{}.jpg'.format(meta_list[i]['filename'], key)))
Example #15
    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(
            image_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        trans = None
        if self.configer.get('dataset') == 'imagenet':
            if self.configer.get('data', 'image_tool') == 'cv2':
                img = Image.fromarray(img)

            trans = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
            ])

        assert trans is not None
        img = trans(img)

        ori_img_bgr = ImageHelper.get_cv2_bgr(img,
                                              mode=self.configer.get(
                                                  'data', 'input_mode'))

        inputs = self.blob_helper.make_input(img,
                                             input_size=self.configer.get(
                                                 'test', 'input_size'),
                                             scale=1.0)

        with torch.no_grad():
            outputs = self.cls_net(inputs)

        json_dict = self.__get_info_tree(outputs, image_path)

        image_canvas = self.cls_parser.draw_label(ori_img_bgr.copy(),
                                                  json_dict['label'])
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict
Example #16
    def evaluate(self, pred_dir, gt_dir):
        img_cnt = 0
        for filename in os.listdir(pred_dir):
            pred_path = os.path.join(pred_dir, filename)
            gt_path = os.path.join(gt_dir, filename)
            predmap = ImageHelper.to_np(
                ImageHelper.read_image(pred_path, tool='pil', mode='P'))
            gtmap = ImageHelper.to_np(
                ImageHelper.read_image(gt_path, tool='pil', mode='P'))
            predmap = self.relabel(predmap)
            gtmap = self.relabel(gtmap)

            self.seg_running_score.update(predmap[np.newaxis, :, :],
                                          gtmap[np.newaxis, :, :])
            img_cnt += 1

        Log.info('Evaluated {} images.'.format(img_cnt))
        Log.info('mIOU: {}'.format(self.seg_running_score.get_mean_iou()))
        Log.info('Pixel ACC: {}'.format(
            self.seg_running_score.get_pixel_acc()))
Example #17
    def __read_json(self, root_dir, json_path):
        item_list = []
        for item in JsonHelper.load_file(json_path):
            img_path = os.path.join(root_dir, item['image_path'])
            if not os.path.exists(img_path) or not ImageHelper.is_img(img_path):
                Log.error('Image Path: {} is invalid.'.format(img_path))
                exit(1)

            item_list.append((img_path, '.'.join(item['image_path'].split('.')[:-1])))

        Log.info('There are {} images.'.format(len(item_list)))
        return item_list
Example #18
    def _encode_label(self, labelmap):
        labelmap = np.array(labelmap)
        shape = labelmap.shape
        encoded_labelmap = np.ones(shape=(shape[0], shape[1]), dtype=np.float32) * 255
        for i in range(len(self.configer.get('data', 'label_list'))):
            class_id = self.configer.get('data', 'label_list')[i]
            encoded_labelmap[labelmap == class_id] = i

        if self.configer.get('data', 'image_tool') == 'pil':
            encoded_labelmap = ImageHelper.to_img(encoded_labelmap.astype(np.uint8))

        return encoded_labelmap
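The per-class loop above can also be expressed as a single lookup-table pass; a sketch under the assumption that raw label ids fit in uint8 (the function name is illustrative):

import numpy as np

def encode_with_lut(labelmap, label_list, ignore_index=255):
    lut = np.full(256, ignore_index, dtype=np.int64)
    for new_id, class_id in enumerate(label_list):
        lut[class_id] = new_id
    return lut[np.asarray(labelmap, dtype=np.uint8)]

encoded = encode_with_lut([[7, 11], [26, 0]], label_list=[7, 11, 26])
# encoded -> [[0, 1], [2, 255]]: listed classes are renumbered, everything else is ignored.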
Example #19
    def _reduce_zero_label(self, labelmap):
        if not self.configer.get('data', 'reduce_zero_label'):
            return labelmap

        labelmap = np.array(labelmap)
        labelmap[labelmap == 0] = 255
        labelmap = labelmap - 1
        labelmap[labelmap == 254] = 255
        if self.configer.get('data', 'image_tool') == 'pil':
            labelmap = ImageHelper.to_img(labelmap.astype(np.uint8))

        return labelmap
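In effect the transform sends label 0 to the ignore value 255 and shifts every remaining class down by one (existing 255s stay 255); a quick check with a toy array:

import numpy as np

labelmap = np.array([[0, 1, 2], [3, 0, 255]], dtype=np.uint8)
out = labelmap.copy()
out[out == 0] = 255
out = out - 1
out[out == 254] = 255
# out -> [[255, 0, 1], [2, 255, 255]]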
    def process(self, data_dir):
        new_data_dir = '{}_new'.format(data_dir.rstrip('/'))
        if os.path.exists(new_data_dir):
            shutil.rmtree(new_data_dir)

        os.makedirs(new_data_dir)

        for filename in FileHelper.list_dir(data_dir):
            if not ImageHelper.is_img(filename):
                Log.info('Skip non-image file: {}'.format(os.path.join(data_dir, filename)))
                continue

            file_path = os.path.join(data_dir, filename)
            img = io.imread(file_path)
            kpts = self.detect_face(img)
            if kpts is None:
                Log.info('No valid face detected in {}'.format(file_path))
                continue

            face, kpts = self.align_face(img, kpts)
            cv2.imwrite(os.path.join(new_data_dir, filename), ImageHelper.rgb2bgr(face))
    def __getitem__(self, index):
        imgA = ImageHelper.read_image(self.imgA_list[index],
                                      tool=self.configer.get('data', 'image_tool'),
                                      mode=self.configer.get('data', 'input_mode'))

        imgB = ImageHelper.read_image(self.imgB_list[index],
                                      tool=self.configer.get('data', 'image_tool'),
                                      mode=self.configer.get('data', 'input_mode'))

        if self.aug_transform is not None:
            imgA, imgB = self.aug_transform([imgA, imgB])

        if self.img_transform is not None:
            imgA = self.img_transform(imgA)
            imgB = self.img_transform(imgB)


        return dict(
            imgA=DataContainer(imgA, stack=True),
            imgB=DataContainer(imgB, stack=True),
        )
Example #22
    def __init__(self,
                 test_dir=None,
                 aug_transform=None,
                 img_transform=None,
                 configer=None):
        super(DefaultLoader, self).__init__()
        self.configer = configer
        self.aug_transform = aug_transform
        self.img_transform = img_transform
        self.item_list = [(os.path.join(test_dir, filename),
                           '.'.join(filename.split('.')[:-1]))
                          for filename in FileHelper.list_dir(test_dir)
                          if ImageHelper.is_img(filename)]
Example #23
    def __list_dirs(self, root_dir, dataset):
        img_list = list()
        json_list = list()
        mask_list = list()
        image_dir = os.path.join(root_dir, dataset, 'image')
        json_dir = os.path.join(root_dir, dataset, 'json')
        mask_dir = os.path.join(root_dir, dataset, 'mask')

        for file_name in os.listdir(json_dir):
            image_name = '.'.join(file_name.split('.')[:-1])
            mask_path = os.path.join(mask_dir, '{}.png'.format(image_name))
            img_path = ImageHelper.imgpath(image_dir, image_name)
            json_path = os.path.join(json_dir, file_name)
            if not os.path.exists(json_path) or img_path is None:
                Log.warn('Json Path: {} does not exist.'.format(json_path))
                continue

            json_list.append(json_path)
            mask_list.append(mask_path)
            img_list.append(img_path)

        if dataset == 'train' and self.configer.get('data', 'include_val'):
            image_dir = os.path.join(root_dir, 'val/image')
            json_dir = os.path.join(root_dir, 'val/json')
            mask_dir = os.path.join(root_dir, 'val/mask')
            for file_name in os.listdir(json_dir):
                image_name = '.'.join(file_name.split('.')[:-1])
                mask_path = os.path.join(mask_dir, '{}.png'.format(image_name))
                img_path = ImageHelper.imgpath(image_dir, image_name)
                json_path = os.path.join(json_dir, file_name)
                if not os.path.exists(json_path) or img_path is None:
                    Log.warn('Json Path: {} does not exist.'.format(json_path))
                    continue

                json_list.append(json_path)
                mask_list.append(mask_path)
                img_list.append(img_path)

        return img_list, json_list, mask_list
Example #24
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        img_size = ImageHelper.get_size(img)
        labelmap = ImageHelper.read_image(self.label_list[index],
                                          tool=self.configer.get(
                                              'data', 'image_tool'),
                                          mode='P')
        if self.configer.get('data.label_list', default=None):
            labelmap = self._encode_label(labelmap)

        if self.configer.get('data.reduce_zero_label', default=None):
            labelmap = self._reduce_zero_label(labelmap)

        ori_target = ImageHelper.to_np(labelmap)

        if self.aug_transform is not None:
            img, labelmap = self.aug_transform(img, labelmap=labelmap)

        border_size = ImageHelper.get_size(img)

        if self.img_transform is not None:
            img = self.img_transform(img)

        if self.label_transform is not None:
            labelmap = self.label_transform(labelmap)

        meta = dict(ori_img_wh=img_size,
                    border_wh=border_size,
                    ori_target=ori_target)
        return dict(
            img=DataContainer(img, stack=True),
            labelmap=DataContainer(labelmap, stack=True),
            meta=DataContainer(meta, stack=False, cpu_only=True),
        )
Example #25
    def __read_list(self, root_dir, list_path):
        item_list = []
        with open(list_path, 'r') as f:
            for line in f:
                filename = line.strip().split()[0]
                img_path = os.path.join(root_dir, filename)
                if not os.path.exists(img_path) or not ImageHelper.is_img(
                        img_path):
                    Log.error('Image Path: {} is invalid.'.format(img_path))
                    exit(1)

                item_list.append(
                    (img_path, '.'.join(filename.split('.')[:-1])))

        Log.info('There are {} images.'.format(len(item_list)))
        return item_list
Example #26
    def __getitem__(self, index):
        img = ImageHelper.read_image(self.img_list[index],
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        label = self.label_list[index]

        if self.aug_transform is not None:
            img = self.aug_transform(img)

        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(
            img=DataContainer(img, stack=True),
            label=DataContainer(label, stack=True),
        )
Example #27
    def __test_img(self, image_path, json_path, raw_path, vis_path):

        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))

        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
        heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
        paf_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'paf_out')))
        multiplier = [scale * self.configer.get('test', 'input_size')[1] / ori_height
                      for scale in self.configer.get('test', 'scale_search')]
        stride = self.configer.get('network', 'stride')
        for i, scale in enumerate(multiplier):
            image, border_hw = self._get_blob(ori_image, scale=scale)
            with torch.no_grad():
                paf_out_list, heatmap_out_list = self.pose_net(image)
                paf_out = paf_out_list[-1]
                heatmap_out = heatmap_out_list[-1]

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)

                heatmap = cv2.resize(heatmap, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
                heatmap = cv2.resize(heatmap[:border_hw[0], :border_hw[1]],
                                     (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                paf = paf_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                paf = cv2.resize(paf, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
                paf = cv2.resize(paf[:border_hw[0], :border_hw[1]],
                                 (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                heatmap_avg = heatmap_avg + heatmap / len(multiplier)
                paf_avg = paf_avg + paf / len(multiplier)

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(ori_img_bgr, paf_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
        json_dict = self.__get_info_tree(ori_img_bgr, subset, candidate)

        image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
        image_canvas = self.pose_parser.link_points(image_canvas, json_dict)

        ImageHelper.save(image_canvas, vis_path)
        ImageHelper.save(ori_img_bgr, raw_path)
        Log.info('Json Save Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
Example #28
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        labels, bboxes, polygons = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None:
            img, bboxes, labels, polygons = self.aug_transform(
                img, bboxes=bboxes, labels=labels, polygons=polygons)

        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(img=DataContainer(img, stack=True),
                    bboxes=DataContainer(bboxes, stack=False),
                    labels=DataContainer(labels, stack=False),
                    polygons=DataContainer(polygons,
                                           stack=False,
                                           cpu_only=True))
Example #29
    def _inner_exist_file(path):
        if ImageHelper.is_zip_path(path):
            return ZipReader.exist_file(path)
        else:
            return os.path.exists(path)
Example #30
    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(test_dir, 'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(test_dir, 'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(json_path=jsonB_path) if os.path.exists(jsonB_path) else None

        else:
            test_loader_A, test_loader_B = None, None
            Log.error('Test mode is not supported!')
            exit(1)

        assert test_loader_A is not None and test_loader_B is not None
        probe_features = []
        gallery_features = []
        probe_labels = []
        gallery_labels = []
        for data_dict in test_loader_A:
            new_data_dict = dict(imgA=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                probe_features.append(out_dict['featA'][idx].cpu().numpy())
                probe_labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get('test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(img_bgr, os.path.join(out_dir, key, meta_list[i]['filename']))

        for data_dict in test_loader_B:
            new_data_dict = dict(imgB=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                gallery_features.append(out_dict['feat'][idx].cpu().numpy())
                gallery_labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get('test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(img_bgr, os.path.join(out_dir, key, meta_list[i]['filename']))

        r_acc, tpr = self.decode(probe_features, gallery_features, probe_labels, gallery_labels)
        Log.info('Final Rank1 accuracy is {}'.format(r_acc))
        Log.info('Final VR@FAR=0.1% accuracy is {}'.format(tpr))