    def align_face(self, img, f5pt):
        # Rotate so the eye line is horizontal; f5pt rows 0/1 are the two
        # eyes. Guard against vertically stacked eye points, which would
        # raise ZeroDivisionError in the slope computation.
        dx = f5pt[0, 0] - f5pt[1, 0]
        dy = f5pt[0, 1] - f5pt[1, 1]
        if dx == 0:
            rotate_degree = math.copysign(90.0, dy)
        else:
            rotate_degree = math.degrees(math.atan(dy / dx))
        height, width, _ = img[0].shape if isinstance(img, (list, tuple)) else img.shape

        img_center = (width / 2.0, height / 2.0)

        rotate_mat = cv2.getRotationMatrix2D(img_center, rotate_degree, 1.0)
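        # Expand the canvas so the rotation clips nothing: a width x height
        # image rotated by theta fits in a box of width
        # h*|sin(theta)| + w*|cos(theta)| and height
        # h*|cos(theta)| + w*|sin(theta)| (e.g. a 100x50 image rotated by 90
        # degrees needs a 50x100 canvas); the matrix's translation column is
        # then shifted so the image stays centered on the new canvas.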
        cos_val = np.abs(rotate_mat[0, 0])
        sin_val = np.abs(rotate_mat[0, 1])
        new_width = int(height * sin_val + width * cos_val)
        new_height = int(height * cos_val + width * sin_val)
        rotate_mat[0, 2] += (new_width / 2.) - img_center[0]
        rotate_mat[1, 2] += (new_height / 2.) - img_center[1]
        if isinstance(img, (list, tuple)):
            for i in range(len(img)):
                img[i] = cv2.warpAffine(img[i], rotate_mat, (new_width, new_height), borderValue=0).astype(np.uint8)
        else:
            img = cv2.warpAffine(img, rotate_mat, (new_width, new_height), borderValue=0).astype(np.uint8)

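        # Push the landmarks through the same affine transform so they stay
        # registered with the rotated image.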
        for i in range(len(f5pt)):
            x = f5pt[i][0]
            y = f5pt[i][1]
            p = np.array([x, y, 1])
            p = rotate_mat.dot(p)
            f5pt[i][0] = p[0]
            f5pt[i][1] = p[1]

        # Scale so the eye-center-to-mouth-center distance equals
        # self.dist_ec_mc. Reject degenerate landmarks (mouth-corner row at
        # or above the eye row) before any size is computed, which also
        # avoids a division by zero.
        ec_mc_dist = (f5pt[3, 1] + f5pt[4, 1]) / 2 - (f5pt[0, 1] + f5pt[1, 1]) / 2
        if ec_mc_dist <= 0:
            return None, None

        r_scale = self.dist_ec_mc / ec_mc_dist
        height, width, _ = img[0].shape if isinstance(img, (list, tuple)) else img.shape
        target_size = [int(width * r_scale), int(height * r_scale)]

        if isinstance(img, (list, tuple)):
            for i in range(len(img)):
                img[i] = ImageHelper.resize(img[i], target_size, interpolation='cubic')
        else:
            img = ImageHelper.resize(img, target_size, interpolation='cubic')
        f5pt = f5pt * r_scale

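        # Crop a self.crop_size square positioned so the eye-center row lands
        # at self.ec_y, clamping the crop origin to the image bounds.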
        crop_y = max(int((f5pt[0, 1] + f5pt[1, 1]) / 2 - self.ec_y), 0)
        crop_x = max(int((f5pt[0, 0] + f5pt[1, 0]) / 2 - self.crop_size // 2), 0)
        f5pt[:, 0] -= crop_x
        f5pt[:, 1] -= crop_y
        if isinstance(img, (list, tuple)):
            for i in range(len(img)):
                img[i] = img[i][crop_y:crop_y+self.crop_size, crop_x:crop_x+self.crop_size]
        else:
            img = img[crop_y:crop_y+self.crop_size, crop_x:crop_x+self.crop_size]
        return img, f5pt
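    # Usage sketch (an assumption, not shown in this file): align_face expects
    # f5pt as a 5x2 array ordered [left eye, right eye, nose, left
    # mouth-corner, right mouth-corner] -- the eye indices 0/1 and
    # mouth-corner indices 3/4 above depend on that ordering -- with
    # self.dist_ec_mc, self.ec_y and self.crop_size coming from the config.
    # A caller would look something like:
    #
    #     aligned, kpts = self.align_face(img, f5pt.astype(np.float32))
    #     if aligned is None:  # degenerate landmarks, skip the sample
    #         continue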
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        if os.path.exists(self.mask_list[index]):
            maskmap = ImageHelper.read_image(self.mask_list[index],
                                             tool=self.configer.get(
                                                 'data', 'image_tool'),
                                             mode='P')
        else:
            # img.size[1]/img.size[0] only indexes like this for PIL images;
            # go through ImageHelper.get_size so the cv2 (ndarray) path
            # works as well.
            width, height = ImageHelper.get_size(img)
            maskmap = np.ones((height, width), dtype=np.uint8)
            if self.configer.get('data', 'image_tool') == 'pil':
                maskmap = ImageHelper.to_img(maskmap)

        kpts, bboxes = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None and len(bboxes) > 0:
            img, maskmap, kpts, bboxes = self.aug_transform(img,
                                                            maskmap=maskmap,
                                                            kpts=kpts,
                                                            bboxes=bboxes)

        elif self.aug_transform is not None:
            img, maskmap, kpts = self.aug_transform(img,
                                                    maskmap=maskmap,
                                                    kpts=kpts)

        width, height = ImageHelper.get_size(maskmap)
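        # Heatmap and PAF targets live at the network's output resolution, so
        # the mask is downsampled by the configured stride before it is
        # passed to the target generators below.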
        maskmap = ImageHelper.resize(
            maskmap, (width // self.configer.get('network', 'stride'),
                      height // self.configer.get('network', 'stride')),
            interpolation='nearest')

        maskmap = torch.from_numpy(np.array(maskmap, dtype=np.float32))
        maskmap = maskmap.unsqueeze(0)
        heatmap = self.heatmap_generator(kpts, [width, height], maskmap)
        vecmap = self.paf_generator(kpts, [width, height], maskmap)
        if self.img_transform is not None:
            img = self.img_transform(img)

        meta = dict(kpts=kpts)
        return dict(
            img=DataContainer(img, stack=True),
            heatmap=DataContainer(heatmap, stack=True),
            maskmap=DataContainer(maskmap, stack=True),
            vecmap=DataContainer(vecmap, stack=True),
            meta=DataContainer(meta, stack=False, cpu_only=True),
        )
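    # Usage sketch (hedged -- `dc_collate` is a stand-in name for whatever
    # DataContainer-aware collate function this repo pairs with the loader):
    # entries created with stack=True are batched into tensors, while the
    # cpu_only meta entries stay a plain list.
    #
    #     loader = torch.utils.data.DataLoader(dataset, batch_size=8,
    #                                          shuffle=True,
    #                                          collate_fn=dc_collate)
    #     for batch in loader:
    #         img, heatmap = batch['img'], batch['heatmap']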
    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(test_dir, 'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(test_dir, 'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(json_path=jsonB_path) if os.path.exists(jsonB_path) else None

        else:
            test_loader_A, test_loader_B = None, None
            Log.error('Test mode does not exist!')
            exit(1)

        assert test_loader_A is not None and test_loader_B is not None, 'val label json files not found.'
        probe_features = []
        gallery_features = []
        probe_labels = []
        gallery_labels = []
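        # Loader A fills the probe set and loader B the gallery (for the
        # 'nir2vis' mode these are presumably the NIR and VIS splits);
        # self.decode scores the cross-modal match at the end.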
        for data_dict in test_loader_A:
            new_data_dict = dict(imgA=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                probe_features.append(out_dict['featA'][idx].cpu().numpy())
                probe_labels.append(meta_list[idx]['label'])

            # Save every generated image in out_dict, skipping feature
            # entries (checked once per key instead of once per sample).
            for key, value in out_dict.items():
                if 'feat' in key:
                    continue

                for i in range(len(value)):

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get('test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(img_bgr, os.path.join(out_dir, key, meta_list[i]['filename']))

        for data_dict in test_loader_B:
            new_data_dict = dict(imgB=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                # 'featB' mirrors the 'featA' key used in the probe loop
                # above (the bare 'feat' key looks like a typo).
                gallery_features.append(out_dict['featB'][idx].cpu().numpy())
                gallery_labels.append(meta_list[idx]['label'])

            # Same image-saving pass as in the probe loop, with the feature
            # check hoisted out of the inner loop.
            for key, value in out_dict.items():
                if 'feat' in key:
                    continue

                for i in range(len(value)):

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get('test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(img_bgr, os.path.join(out_dir, key, meta_list[i]['filename']))

        r_acc, tpr = self.decode(probe_features, gallery_features, probe_labels, gallery_labels)
        Log.info('Final Rank1 accuracy is {}'.format(r_acc))
        Log.info('Final VR@FAR=0.1% accuracy is {}'.format(tpr))
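    # Sketch of the kind of computation self.decode presumably performs
    # (hedged -- the real implementation is not shown here; this assumes
    # L2-normalized features so the dot product is cosine similarity):
    #
    #     probe = np.stack(probe_features)        # (P, D)
    #     gallery = np.stack(gallery_features)    # (G, D)
    #     sim = probe @ gallery.T                 # (P, G) cosine scores
    #     nearest = np.asarray(gallery_labels)[sim.argmax(axis=1)]
    #     rank1 = float(np.mean(nearest == np.asarray(probe_labels)))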