Example #1
0
    def make_input(self, image=None, input_size=None,
                   min_side_length=None, max_side_length=None, scale=None):
        """Resize and normalize an image into one (or a list of) 4-D input tensor(s).

        At most one of ``input_size`` / ``min_side_length`` / ``max_side_length``
        may be given; with none given, the image's own size is used.

        Args:
            image: source image readable by ImageHelper.
            input_size: explicit (width, height) target.
            min_side_length: target length for the shorter side (aspect kept).
            max_side_length: target length for the longer side (aspect kept).
            scale: extra size multiplier. A scalar yields a single tensor; a
                list/tuple yields one tensor per scale. ``None`` means 1.0.

        Returns:
            A (1, C, H, W) tensor, or a list of such tensors when ``scale``
            is a list/tuple.
        """
        if input_size is None and min_side_length is None and max_side_length is None:
            in_width, in_height = ImageHelper.get_size(image)

        elif input_size is not None and min_side_length is None and max_side_length is None:
            in_width, in_height = input_size

        elif input_size is None and min_side_length is not None and max_side_length is None:
            # Uniform scale so the shorter side matches min_side_length.
            width, height = ImageHelper.get_size(image)
            scale_ratio = min_side_length / min(width, height)
            in_width, in_height = int(round(width * scale_ratio)), int(round(height * scale_ratio))

        elif input_size is None and min_side_length is None and max_side_length is not None:
            # Uniform scale so the longer side matches max_side_length.
            width, height = ImageHelper.get_size(image)
            scale_ratio = max_side_length / max(width, height)
            in_width, in_height = int(round(width * scale_ratio)), int(round(height * scale_ratio))

        else:
            Log.error('Incorrect target size setting.')
            exit(1)

        device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')

        def _make_tensor(s):
            # Shared resize -> ToTensor -> Normalize -> batchify pipeline.
            # Always resizes from the ORIGINAL image; the old list branch
            # rebound `image` and resampled an already-resized image.
            resized = ImageHelper.resize(image, (int(in_width * s), int(in_height * s)),
                                         interpolation='linear')
            tensor = ToTensor()(resized)
            tensor = Normalize(div_value=self.configer.get('normalize', 'div_value'),
                               mean=self.configer.get('normalize', 'mean'),
                               std=self.configer.get('normalize', 'std'))(tensor)
            return tensor.unsqueeze(0).to(device)

        if not isinstance(scale, (list, tuple)):
            # BUGFIX: the default scale=None used to raise a TypeError on
            # `in_width * scale`; treat None as "no extra scaling".
            return _make_tensor(1.0 if scale is None else scale)

        return [_make_tensor(s) for s in scale]
    def __getitem__(self, index):
        """Load one pose sample: image, stride-downsampled mask, heatmap and PAF targets."""
        img = ImageHelper.pil_open_rgb(self.img_list[index])
        mask_path = self.mask_list[index]
        if os.path.exists(mask_path):
            maskmap = ImageHelper.pil_open_p(mask_path)
        else:
            # No mask on disk: fall back to an all-ones (fully valid) mask.
            maskmap = ImageHelper.np2img(np.ones((img.size[1], img.size[0]), dtype=np.uint8))

        kpts, bboxes = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None:
            if len(bboxes) > 0:
                img, maskmap, kpts, bboxes = self.aug_transform(img, mask=maskmap, kpts=kpts, bboxes=bboxes)
            else:
                img, maskmap, kpts = self.aug_transform(img, mask=maskmap, kpts=kpts)

        # Downsample the mask to the network's output stride.
        stride = self.configer.get('network', 'stride')
        width, height = maskmap.size
        maskmap = ImageHelper.resize(maskmap, (width // stride, height // stride), Image.NEAREST)
        maskmap = np.expand_dims(np.array(maskmap, dtype=np.float32), axis=2)

        heatmap = self.pose_data_utilizer.generate_heatmap(kpts=kpts, mask=maskmap)
        vecmap = self.pose_data_utilizer.generate_paf(kpts=kpts, mask=maskmap)

        if self.img_transform is not None:
            img = self.img_transform(img)

        if self.label_transform is not None:
            heatmap = self.label_transform(heatmap)
            vecmap = self.label_transform(vecmap)
            maskmap = self.label_transform(maskmap)

        return img, heatmap, maskmap, vecmap
Example #3
0
    def __getitem__(self, index):
        """Load one sample: image, stride-downsampled mask, and keypoints tensor."""
        img = ImageHelper.read_image(self.img_list[index],
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        if os.path.exists(self.mask_list[index]):
            maskmap = ImageHelper.read_image(self.mask_list[index],
                                             tool=self.configer.get('data', 'image_tool'), mode='P')
        else:
            # No mask on disk: everything is valid.
            # BUGFIX: use ImageHelper.get_size() instead of img.size[...] so the
            # fallback also works with the cv2 tool (ndarray.size is a scalar).
            width, height = ImageHelper.get_size(img)
            maskmap = np.ones((height, width), dtype=np.uint8)
            if self.configer.get('data', 'image_tool') == 'pil':
                maskmap = ImageHelper.np2img(maskmap)

        kpts, bboxes = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None and len(bboxes) > 0:
            img, maskmap, kpts, bboxes = self.aug_transform(img, maskmap=maskmap, kpts=kpts, bboxes=bboxes)

        elif self.aug_transform is not None:
            img, maskmap, kpts = self.aug_transform(img, maskmap=maskmap, kpts=kpts)

        # BUGFIX: `maskmap.size` only unpacks for PIL images; get_size() handles
        # both the pil and cv2 tools.
        width, height = ImageHelper.get_size(maskmap)
        maskmap = ImageHelper.resize(maskmap,
                                     (width // self.configer.get('network', 'stride'),
                                      height // self.configer.get('network', 'stride')),
                                     interpolation='nearest')

        maskmap = torch.from_numpy(np.array(maskmap, dtype=np.float32))
        kpts = torch.from_numpy(kpts).float()

        if self.img_transform is not None:
            img = self.img_transform(img)

        return img, maskmap, kpts
    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Run pose estimation on one image; save raw/vis images and a json result."""
        Log.info('Image Path: {}'.format(image_path))
        ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
        cur_img_rgb = ImageHelper.resize(ori_img_rgb,
                                         self.configer.get('data', 'input_size'),
                                         interpolation=Image.CUBIC)

        ori_img_bgr = ImageHelper.bgr2rgb(ori_img_rgb)

        # Network inference followed by peak/PAF grouping into person subsets.
        paf_avg, heatmap_avg = self.__get_paf_and_heatmap(cur_img_rgb)
        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(cur_img_rgb, paf_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
        json_dict = self.__get_info_tree(cur_img_rgb, subset, candidate)

        # Map keypoints from network resolution back to the original image size.
        x_ratio = ori_img_rgb.shape[1] / cur_img_rgb.shape[1]
        y_ratio = ori_img_rgb.shape[0] / cur_img_rgb.shape[0]
        for obj in json_dict['objects']:
            for kpt in obj['keypoints']:
                if kpt[2] == -1:
                    # Keypoint not detected: leave it untouched.
                    continue
                kpt[0] *= x_ratio
                kpt[1] *= y_ratio

        image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
        image_canvas = self.pose_parser.link_points(image_canvas, json_dict)

        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)
        Log.info('Json Save Path: {}'.format(json_path))
        with open(json_path, 'w') as save_stream:
            save_stream.write(json.dumps(json_dict))
    def __test_img(self, image_path, save_path):
        """Detect objects in one image, save the visualization, and dump boxes to json."""
        Log.info('Image Path: {}'.format(image_path))
        image_raw = ImageHelper.cv2_open_bgr(image_path)

        # Preprocess: BGR->RGB, resize to the network input size, normalize.
        inputs = ImageHelper.bgr2rgb(image_raw)
        inputs = ImageHelper.resize(inputs, tuple(self.configer.get('data', 'input_size')), Image.CUBIC)
        inputs = ToTensor()(inputs)
        inputs = Normalize(mean=self.configer.get('trans_params', 'mean'),
                           std=self.configer.get('trans_params', 'std'))(inputs)

        with torch.no_grad():
            bbox, cls = self.det_net(inputs.unsqueeze(0).to(self.device))

        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data
        boxes, lbls, scores, has_obj = self.__decode(bbox, cls)

        if has_obj:
            boxes = np.clip(boxes.cpu().numpy(), 0, 1)
            lbls = lbls.cpu().numpy()
            scores = scores.cpu().numpy()
            img_canvas = self.__draw_box(image_raw, boxes, lbls, scores)
        else:
            # Nothing detected: keep the raw image as the canvas.
            img_canvas = image_raw

        Log.info('Save Path: {}'.format(save_path))
        cv2.imwrite(save_path, img_canvas)
        # Boxes is within 0-1.
        self.__save_json(save_path, boxes, lbls, scores, image_raw)

        return image_raw, lbls, scores, boxes, has_obj
Example #6
0
    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Run detection on one image; save vis/raw images and a json result tree."""
        Log.info('Image Path: {}'.format(image_path))
        ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
        ori_img_bgr = ImageHelper.rgb2bgr(ori_img_rgb)

        # Preprocess to the fixed network input size.
        inputs = ImageHelper.resize(ori_img_rgb, tuple(self.configer.get('data', 'input_size')), Image.CUBIC)
        inputs = ToTensor()(inputs)
        inputs = Normalize(mean=self.configer.get('trans_params', 'mean'),
                           std=self.configer.get('trans_params', 'std'))(inputs)

        with torch.no_grad():
            bbox, cls = self.det_net(inputs.unsqueeze(0).to(self.device))

        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data
        boxes, lbls, scores = self.__decode(bbox, cls)
        json_dict = self.__get_info_tree(boxes, lbls, scores, ori_img_rgb)

        conf_threshold = self.configer.get('vis', 'conf_threshold')
        image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                   conf_threshold=conf_threshold)
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict
    def test(self, test_dir, out_dir):
        """Run segmentation inference over a test directory.

        For every image, saves a colorized visualization to
        ``out_dir/vis/<name>.png`` and the raw label map to
        ``out_dir/label/<name>.png``.
        """
        # BUGFIX: the original enumerated the loader into `i` and then reused
        # `i` as the inner per-sample index, silently shadowing the batch
        # counter; the batch index was unused, so drop enumerate entirely.
        for data_dict in self.test_loader.get_testloader(test_dir=test_dir):
            test_mode = self.configer.get('test', 'mode')
            if test_mode == 'ss_test':
                total_logits = self.ss_test(data_dict)

            elif test_mode == 'sscrop_test':
                total_logits = self.sscrop_test(data_dict,
                                                params_dict=self.configer.get('test', 'sscrop_test'))

            elif test_mode == 'ms_test':
                total_logits = self.ms_test(data_dict,
                                            params_dict=self.configer.get('test', 'ms_test'))

            elif test_mode == 'mscrop_test':
                total_logits = self.mscrop_test(data_dict,
                                                params_dict=self.configer.get('test', 'mscrop_test'))

            else:
                Log.error('Invalid test mode:{}'.format(test_mode))
                exit(1)

            meta_list = DCHelper.tolist(data_dict['meta'])
            img_list = DCHelper.tolist(data_dict['img'])
            for i in range(len(meta_list)):
                filename = meta_list[i]['img_path'].split('/')[-1].split('.')[0]
                # Per-pixel argmax over class logits -> uint8 label map.
                label_map = np.argmax(total_logits[i], axis=-1)
                label_img = np.array(label_map, dtype=np.uint8)
                ori_img_bgr = self.blob_helper.tensor2bgr(img_list[i][0])
                ori_img_bgr = ImageHelper.resize(ori_img_bgr,
                                                 target_size=meta_list[i]['ori_img_size'],
                                                 interpolation='linear')
                image_canvas = self.seg_parser.colorize(label_img, image_canvas=ori_img_bgr)
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(out_dir, 'vis/{}.png'.format(filename)))

                if self.configer.exists('data', 'label_list'):
                    label_img = self.__relabel(label_img)

                if self.configer.exists('data', 'reduce_zero_label') and self.configer.get('data', 'reduce_zero_label'):
                    # Undo the zero-label reduction applied during training.
                    label_img = (label_img + 1).astype(np.uint8)

                label_img = Image.fromarray(label_img, 'P')
                label_path = os.path.join(out_dir, 'label/{}.png'.format(filename))
                Log.info('Label Path: {}'.format(label_path))
                ImageHelper.save(label_img, label_path)
Example #8
0
 def __call__(self, img):
     """Uniformly rescale `img` by the tighter of the two resize bounds.

     The scale targets resize_bound[0] for the shorter side but is capped so
     the longer side does not exceed resize_bound[1].

     Returns:
         (resized_img, scale) where scale is the factor that was applied.
     """
     size_wh = ImageHelper.get_size(img)
     scale = min(self.resize_bound[0] / min(size_wh),
                 self.resize_bound[1] / max(size_wh))
     target_size = [int(round(side * scale)) for side in size_wh]
     resized = ImageHelper.resize(img,
                                  target_size=target_size,
                                  interpolation='cubic')
     return resized, scale
Example #9
0
    def make_input(self, image=None, input_size=None,
                   min_side_length=None, max_side_length=None, scale=None):
        """Resize and normalize `image` into a (1, C, H, W) tensor.

        Target size resolution:
          * input_size given: explicit (w, h); a -1 entry means "derive from
            the other side, keeping aspect"; (-1, -1) keeps the original size.
          * min_side_length given: scale so the shorter side matches it.
          * max_side_length given: scale so the longer side matches it.
          * both min and max: min-side scale, bounded by the max-side scale.
          * none given: keep the original size.

        Args:
            scale: extra multiplier on the resolved size; ``None`` means 1.0.

        Returns:
            Normalized image tensor on CPU or CUDA per the 'gpu' config.
        """
        def _scaled(ratio):
            # Apply one uniform ratio to both sides of the original image.
            width, height = ImageHelper.get_size(image)
            return int(round(width * ratio)), int(round(height * ratio))

        if input_size is not None and min_side_length is None and max_side_length is None:
            if input_size[0] == -1 and input_size[1] == -1:
                in_width, in_height = ImageHelper.get_size(image)

            elif input_size[0] != -1 and input_size[1] != -1:
                in_width, in_height = input_size

            elif input_size[0] == -1 and input_size[1] != -1:
                _, height = ImageHelper.get_size(image)
                in_width, in_height = _scaled(input_size[1] / height)

            else:
                assert input_size[0] != -1 and input_size[1] == -1
                width, _ = ImageHelper.get_size(image)
                in_width, in_height = _scaled(input_size[0] / width)

        elif input_size is None and min_side_length is not None and max_side_length is None:
            width, height = ImageHelper.get_size(image)
            in_width, in_height = _scaled(min_side_length / min(width, height))

        elif input_size is None and min_side_length is None and max_side_length is not None:
            width, height = ImageHelper.get_size(image)
            in_width, in_height = _scaled(max_side_length / max(width, height))

        elif input_size is None and min_side_length is not None and max_side_length is not None:
            width, height = ImageHelper.get_size(image)
            scale_ratio = min(min_side_length / min(width, height),
                              max_side_length / max(width, height))
            in_width, in_height = _scaled(scale_ratio)

        else:
            in_width, in_height = ImageHelper.get_size(image)

        # BUGFIX: the default scale=None used to raise a TypeError below;
        # treat None as "no extra scaling".
        scale = 1.0 if scale is None else scale
        image = ImageHelper.resize(image, (int(in_width * scale), int(in_height * scale)), interpolation='cubic')
        img_tensor = ToTensor()(image)
        img_tensor = Normalize(div_value=self.configer.get('normalize', 'div_value'),
                               mean=self.configer.get('normalize', 'mean'),
                               std=self.configer.get('normalize', 'std'))(img_tensor)
        img_tensor = img_tensor.unsqueeze(0).to(torch.device('cpu' if self.configer.get('gpu') is None else 'cuda'))

        return img_tensor
Example #10
0
    def __get_input_size(self, image):
        # Resolve the (in_width, in_height) the test image should be resized to,
        # driven by the 'test' config: explicit input_size (-1 entries mean
        # "derive from the other side"), min_side_length, max_side_length, or
        # the original size when nothing is configured.
        if self.configer.exists('test', 'input_size'):
            input_size = self.configer.get('test', 'input_size')
            if input_size[0] == -1 and input_size[1] == -1:
                return ImageHelper.get_size(image)

            if input_size[0] != -1 and input_size[1] != -1:
                return input_size

            width, height = ImageHelper.get_size(image)
            if input_size[0] == -1 and input_size[1] != -1:
                # Fixed height, width follows the aspect ratio.
                scale_ratio = input_size[1] / height
            else:
                assert input_size[0] != -1 and input_size[1] == -1
                # Fixed width, height follows the aspect ratio.
                scale_ratio = input_size[0] / width

            return int(round(width * scale_ratio)), int(round(height * scale_ratio))

        has_min = self.configer.exists('test', 'min_side_length')
        has_max = self.configer.exists('test', 'max_side_length')
        if not has_min and not has_max:
            return ImageHelper.get_size(image)

        width, height = ImageHelper.get_size(image)
        if has_min and not has_max:
            scale_ratio = self.configer.get('test', 'min_side_length') / min(width, height)
        elif not has_min and has_max:
            scale_ratio = self.configer.get('test', 'max_side_length') / max(width, height)
        else:
            # Both bounds: min-side scale, capped so the long side stays within max.
            scale_ratio = min(
                self.configer.get('test', 'min_side_length') / min(width, height),
                self.configer.get('test', 'max_side_length') / max(width, height))

        return int(round(width * scale_ratio)), int(round(height * scale_ratio))

    def __getitem__(self, index):
        """Load one test image, resize it per the 'test' config, and wrap the
        image plus metadata in DataContainers."""
        image = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        img_size = ImageHelper.get_size(image)
        # Decomposed: the target-size branching lives in __get_input_size.
        in_width, in_height = self.__get_input_size(image)

        img = ImageHelper.resize(image, (int(in_width), int(in_height)),
                                 interpolation='linear')
        if self.img_transform is not None:
            img = self.img_transform(img)

        meta = dict(ori_img_size=img_size,
                    border_hw=[in_height, in_width],
                    img_path=self.img_list[index])
        return dict(img=DataContainer(img,
                                      stack=True,
                                      return_dc=True,
                                      samples_per_gpu=True),
                    meta=DataContainer(meta,
                                       stack=False,
                                       cpu_only=True,
                                       return_dc=True,
                                       samples_per_gpu=True))
    def __save_outputs(self, test_loader, input_key, out_dir):
        # Shared inference/saving loop for both directions: run the GAN on each
        # batch and save every output image under out_dir/<output_key>/.
        for data_dict in test_loader:
            new_data_dict = {input_key: data_dict['img']}
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])
            for key, value in out_dict.items():
                for i in range(len(value)):
                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(
                        img_bgr,
                        target_size=self.configer.get('test', 'out_size'),
                        interpolation='linear')
                    ImageHelper.save(
                        img_bgr,
                        os.path.join(out_dir, key, meta_list[i]['filename']))

    def test(self, test_dir, out_dir):
        """Run GAN testing; the loaders are chosen by the 'test.mode' config.

        Modes:
          * 'nir2vis': json-driven loaders A/B from the configured test dir.
          * 'pix2pix': images from <test_dir>/imageA only.
          * otherwise: images from <test_dir>/imageA and <test_dir>/imageB.
        """
        if self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(
                self.configer.get('test', 'test_dir'),
                'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(
                json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(
                self.configer.get('test', 'test_dir'),
                'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(
                json_path=jsonB_path) if os.path.exists(jsonB_path) else None
        elif self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'pix2pix':
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(
                test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            test_loader_B = None
        else:
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(
                test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            imgB_dir = os.path.join(test_dir, 'imageB')
            test_loader_B = self.test_loader.get_testloader(
                test_dir=imgB_dir) if os.path.exists(imgB_dir) else None

        # De-duplicated: the two loops below were identical except for the
        # input key fed to the network.
        if test_loader_A is not None:
            self.__save_outputs(test_loader_A, 'imgA', out_dir)

        if test_loader_B is not None:
            self.__save_outputs(test_loader_B, 'imgB', out_dir)
Example #12
0
    def __run_split(self, test_loader, input_key, feat_key, features, labels, out_dir):
        # Shared loop for one split: run the GAN, collect per-sample features
        # and labels into the given lists, and save each non-feature output
        # image under out_dir/<output_key>/.
        for data_dict in test_loader:
            new_data_dict = {input_key: data_dict['img']}
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                features.append(out_dict[feat_key][idx].cpu().numpy())
                labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get(
                                                     'test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(
                        img_bgr,
                        os.path.join(out_dir, key, meta_list[i]['filename']))

    def test(self, test_dir, out_dir):
        """Run nir2vis testing: extract probe (A) and gallery (B) features,
        save generated images, then report Rank-1 accuracy and VR@FAR=0.1%."""
        if self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(
                test_dir,
                'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(
                json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(
                test_dir,
                'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(
                json_path=jsonB_path) if os.path.exists(jsonB_path) else None

        else:
            test_loader_A, test_loader_B = None, None
            Log.error('Test Mode not Exists!')
            exit(1)

        assert test_loader_A is not None and test_loader_B is not None
        probe_features = []
        gallery_features = []
        probe_labels = []
        gallery_labels = []
        # De-duplicated: both splits ran the same loop; only the input key and
        # feature key differ. NOTE(review): split A reads 'featA' while split B
        # reads 'feat' — kept exactly as the original; confirm against the
        # gan_net output dict.
        self.__run_split(test_loader_A, 'imgA', 'featA', probe_features, probe_labels, out_dir)
        self.__run_split(test_loader_B, 'imgB', 'feat', gallery_features, gallery_labels, out_dir)

        r_acc, tpr = self.decode(probe_features, gallery_features,
                                 probe_labels, gallery_labels)
        Log.info('Final Rank1 accuracy is {}'.format(r_acc))
        Log.info('Final VR@FAR=0.1% accuracy is {}'.format(tpr))
Example #13
0
    def align_face(self, img, f5pt):
        """Rotate, rescale and crop a face so the eyes are level at a fixed position.

        Args:
            img: a single image array, or a list/tuple of image arrays that are
                all transformed identically (assumed to share one shape).
            f5pt: 5x2 landmark array. Rows 0/1 are treated as the two eyes;
                rows 3/4 presumably are the mouth corners (the scale target is
                named dist_ec_mc, eye-center to mouth-center) — TODO confirm
                the landmark ordering against the caller.

        Returns:
            (aligned_img, adjusted_f5pt), or (None, None) when the computed
            rescale factor is negative (landmarks not in the expected order).

        Note: both `img` (when a list) and `f5pt` are modified in place.
        """
        # Rotation angle (degrees) that levels the two eye landmarks.
        ang_tan = (f5pt[0, 1] - f5pt[1, 1]) / (f5pt[0, 0] - f5pt[1, 0])
        rotate_degree = math.atan(ang_tan) / math.pi * 180
        height, width, _ = img[0].shape if isinstance(img,
                                                      (list,
                                                       tuple)) else img.shape

        img_center = (width / 2.0, height / 2.0)

        # Expand the canvas so the rotated image is not clipped, and shift the
        # rotation matrix so the old center maps onto the new center.
        rotate_mat = cv2.getRotationMatrix2D(img_center, rotate_degree, 1.0)
        cos_val = np.abs(rotate_mat[0, 0])
        sin_val = np.abs(rotate_mat[0, 1])
        new_width = int(height * sin_val + width * cos_val)
        new_height = int(height * cos_val + width * sin_val)
        rotate_mat[0, 2] += (new_width / 2.) - img_center[0]
        rotate_mat[1, 2] += (new_height / 2.) - img_center[1]
        if isinstance(img, (list, tuple)):
            for i in range(len(img)):
                img[i] = cv2.warpAffine(img[i],
                                        rotate_mat, (new_width, new_height),
                                        borderValue=0).astype(np.uint8)
        else:
            img = cv2.warpAffine(img,
                                 rotate_mat, (new_width, new_height),
                                 borderValue=0).astype(np.uint8)

        # Apply the same affine transform to each landmark (in place).
        for i in range(len(f5pt)):
            x = f5pt[i][0]
            y = f5pt[i][1]
            p = np.array([x, y, 1])
            p = rotate_mat.dot(p)
            f5pt[i][0] = p[0]
            f5pt[i][1] = p[1]

        # Rescale so the vertical distance between the row-3/4 midpoint and the
        # eye midpoint equals self.dist_ec_mc.
        r_scale = self.dist_ec_mc / ((f5pt[3, 1] + f5pt[4, 1]) / 2 -
                                     (f5pt[0, 1] + f5pt[1, 1]) / 2)
        height, width, _ = img[0].shape if isinstance(img,
                                                      (list,
                                                       tuple)) else img.shape
        target_size = [int(width * r_scale), int(height * r_scale)]
        if r_scale < 0:
            # Row-3/4 landmarks ended up above the eyes: reject this face.
            return None, None

        if isinstance(img, (list, tuple)):
            for i in range(len(img)):
                img[i] = ImageHelper.resize(img[i],
                                            target_size,
                                            interpolation='cubic')
        else:
            img = ImageHelper.resize(img, target_size, interpolation='cubic')
        f5pt = f5pt * r_scale

        # Crop a self.crop_size square: the eye midline sits self.ec_y pixels
        # below the top edge and is horizontally centered (clamped at 0).
        crop_y = max(int((f5pt[0, 1] + f5pt[1, 1]) / 2 - self.ec_y), 0)
        crop_x = max(int((f5pt[0, 0] + f5pt[1, 0]) / 2 - self.crop_size // 2),
                     0)
        f5pt[:, 0] -= crop_x
        f5pt[:, 1] -= crop_y
        if isinstance(img, (list, tuple)):
            for i in range(len(img)):
                img[i] = img[i][crop_y:crop_y + self.crop_size,
                                crop_x:crop_x + self.crop_size]
        else:
            img = img[crop_y:crop_y + self.crop_size,
                      crop_x:crop_x + self.crop_size]
        return img, f5pt