Example #1
    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(
            image_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(img,
                                              mode=self.configer.get(
                                                  'data', 'input_mode'))

        inputs = self.blob_helper.make_input(img,
                                             input_size=self.configer.get(
                                                 'data', 'input_size'),
                                             scale=1.0)

        with torch.no_grad():
            inputs = inputs.unsqueeze(0).to(self.device)
            _, _, detections = self.det_net(inputs)

        batch_detections = self.decode(detections, self.configer)
        json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr)

        image_canvas = self.det_parser.draw_bboxes(
            ori_img_bgr.copy(),
            json_dict,
            conf_threshold=self.configer.get('res', 'vis_conf_thre'))
        ImageHelper.save(ori_img_bgr, raw_path)
        ImageHelper.save(image_canvas, vis_path)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict
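
The drawing step above is delegated to det_parser.draw_bboxes. Below is a minimal, self-contained sketch of what such a step can look like with plain OpenCV; the detection dict layout ('objects' entries with 'bbox', 'score' and 'label' keys) is an assumption made for illustration, not the project's actual schema.

    import cv2
    import numpy as np

    def draw_bboxes_sketch(img_bgr, json_dict, conf_threshold=0.5):
        canvas = img_bgr.copy()
        for obj in json_dict.get('objects', []):
            if obj['score'] < conf_threshold:
                continue  # drop low-confidence detections
            x1, y1, x2, y2 = map(int, obj['bbox'])
            cv2.rectangle(canvas, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(canvas, '{} {:.2f}'.format(obj['label'], obj['score']),
                        (x1, max(y1 - 5, 0)), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 1)
        return canvas

    # usage on a dummy image
    canvas = draw_bboxes_sketch(
        np.zeros((256, 256, 3), dtype=np.uint8),
        {'objects': [{'bbox': [20, 30, 120, 150], 'score': 0.9, 'label': 'person'}]},
        conf_threshold=0.3)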
Example #2
    def __test_img(self, image_path, save_path):
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(
            image_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image,
                                              mode=self.configer.get(
                                                  'data', 'input_mode'))
        heatmap_avg = np.zeros(
            (ori_height, ori_width, self.configer.get('network',
                                                      'heatmap_out')))
        for i, scale in enumerate(self.configer.get('test', 'scale_search')):
            image = self.blob_helper.make_input(ori_image,
                                                input_size=self.configer.get(
                                                    'test', 'input_size'),
                                                scale=scale)
            with torch.no_grad():
                heatmap_out_list = self.pose_net(image)
                heatmap_out = heatmap_out_list[-1]

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(
                    1, 2, 0)
                heatmap = cv2.resize(heatmap, (ori_width, ori_height),
                                     interpolation=cv2.INTER_CUBIC)

                heatmap_avg = heatmap_avg + heatmap / len(
                    self.configer.get('test', 'scale_search'))

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        image_canvas = self.__draw_key_point(all_peaks, ori_img_bgr)
        ImageHelper.save(image_canvas, save_path)
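
The core of this routine is multi-scale test-time averaging: the network is run at several input scales, each heatmap is resized back to the original resolution, and the results are averaged. A minimal NumPy/OpenCV sketch of that pattern, with a stand-in function in place of self.pose_net, might look like this:

    import cv2
    import numpy as np

    def fake_pose_net(image_hw3, num_channels=19):
        # stand-in for the real network: returns a stride-8 heatmap
        h, w = image_hw3.shape[:2]
        return np.random.rand(h // 8, w // 8, num_channels).astype(np.float32)

    ori_h, ori_w, num_channels = 240, 320, 19
    scales = [0.5, 1.0, 1.5]
    heatmap_avg = np.zeros((ori_h, ori_w, num_channels), dtype=np.float32)

    ori_image = np.zeros((ori_h, ori_w, 3), dtype=np.uint8)
    for scale in scales:
        scaled = cv2.resize(ori_image, None, fx=scale, fy=scale)
        heatmap = fake_pose_net(scaled, num_channels)
        # resize back to the original resolution before accumulating
        heatmap = cv2.resize(heatmap, (ori_w, ori_h), interpolation=cv2.INTER_CUBIC)
        heatmap_avg += heatmap / len(scales)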
Example #3
    def test(self, test_dir, out_dir):
        for _, data_dict in enumerate(
                self.test_loader.get_testloader(test_dir=test_dir)):
            data_dict['testing'] = True
            loc, conf = self.det_net(data_dict)
            meta_list = DCHelper.tolist(data_dict['meta'])
            batch_detections = self.decode(loc, conf, self.configer, meta_list)
            for i in range(len(meta_list)):
                ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'],
                                                     tool='cv2',
                                                     mode='BGR')
                json_dict = self.__get_info_tree(batch_detections[i])
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(),
                    json_dict,
                    conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(
                                     out_dir, 'vis/{}.png'.format(
                                         meta_list[i]['filename'])))

                Log.info('Json Path: {}'.format(
                    os.path.join(
                        out_dir,
                        'json/{}.json'.format(meta_list[i]['filename']))))
                JsonHelper.save_file(json_dict,
                                     save_path=os.path.join(
                                         out_dir, 'json/{}.json'.format(
                                             meta_list[i]['filename'])))
Example #4
    def process_3d(self, data_dir):
        new_data_dir = '{}_new'.format(data_dir.rstrip('/'))
        if os.path.exists(new_data_dir):
            shutil.rmtree(new_data_dir)

        os.makedirs(new_data_dir)

        for filename in FileHelper.list_dir(data_dir):
            if not ImageHelper.is_img(filename) or 'depth' in filename:
                Log.info('Image Path: {}'.format(
                    os.path.join(data_dir, filename)))
                continue

            file_path = os.path.join(data_dir, filename)
            img = io.imread(file_path)
            kpts = self.detect_face(img)
            if kpts is None:
                Log.info('Invalid face detected in {}'.format(file_path))
                continue

            depth = np.array(
                io.imread(
                    os.path.join(data_dir, filename.replace('rgb', 'depth'))))
            face_depth, kpts = self.align_face(
                [np.array(img), np.array(depth)], kpts)
            if face_depth is None:
                Log.info('Invalid face detected in {}'.format(file_path))
                continue
            ImageHelper.save(ImageHelper.rgb2bgr(face_depth[0]),
                             os.path.join(new_data_dir, filename))
            ImageHelper.save(
                ImageHelper.rgb2bgr(face_depth[1]),
                os.path.join(new_data_dir, filename.replace('rgb', 'depth')))
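
The RGB/depth pairing above relies purely on a filename convention ('rgb' replaced by 'depth'). A tiny sketch of that convention; the example path is hypothetical:

    import os

    def depth_path_for(rgb_path):
        # depth maps share the RGB filename with 'rgb' replaced by 'depth'
        dirname, filename = os.path.split(rgb_path)
        return os.path.join(dirname, filename.replace('rgb', 'depth'))

    print(depth_path_for('/data/faces/subject01_rgb_0001.png'))
    # -> /data/faces/subject01_depth_0001.png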
Example #5
    def test(self, test_dir, out_dir):
        imgA_dir = os.path.join(test_dir, 'imgA')
        imgB_dir = os.path.join(test_dir, 'imgB')

        if os.path.exists(imgA_dir):
            Log.info('ImageA Dir: {}'.format(imgA_dir))
            for data_dict in self.test_loader.get_testloader(test_dir=imgA_dir):
                new_data_dict = dict(imgA=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        filename = img_path.rstrip().split('/')[-1]
                        ImageHelper.save(img_bgr, os.path.join(out_dir, key, filename))

        if os.path.exists(imgB_dir):
            Log.info('ImageB Dir: {}'.format(imgB_dir))
            for data_dict in self.test_loader.get_testloader(test_dir=imgB_dir):
                new_data_dict = dict(imgB=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        filename = img_path.rstrip().split('/')[-1]
                        ImageHelper.save(img_bgr, os.path.join(out_dir, key, filename))
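
blob_helper.tensor2bgr converts the generator output back to a saveable image. A minimal sketch of what such a helper typically does, assuming the tensor holds an RGB image normalized to [-1, 1] (the project's actual normalization may differ):

    import numpy as np
    import torch

    def tensor2bgr_sketch(tensor_chw):
        img = tensor_chw.detach().cpu().numpy().transpose(1, 2, 0)  # CHW -> HWC
        img = (img + 1.0) / 2.0 * 255.0                             # [-1, 1] -> [0, 255]
        img = np.clip(img, 0, 255).astype(np.uint8)
        return img[:, :, ::-1]                                      # RGB -> BGR

    bgr = tensor2bgr_sketch(torch.rand(3, 64, 64) * 2 - 1)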
Example #6
    def test(self, test_dir, out_dir):
        for i, data_dict in enumerate(
                self.test_loader.get_testloader(test_dir=test_dir)):
            total_logits = None
            if self.configer.get('test', 'mode') == 'ss_test':
                total_logits = self.ss_test(data_dict)

            elif self.configer.get('test', 'mode') == 'sscrop_test':
                total_logits = self.sscrop_test(data_dict,
                                                params_dict=self.configer.get(
                                                    'test', 'sscrop_test'))

            elif self.configer.get('test', 'mode') == 'ms_test':
                total_logits = self.ms_test(data_dict,
                                            params_dict=self.configer.get(
                                                'test', 'ms_test'))

            elif self.configer.get('test', 'mode') == 'mscrop_test':
                total_logits = self.mscrop_test(data_dict,
                                                params_dict=self.configer.get(
                                                    'test', 'mscrop_test'))

            else:
                Log.error('Invalid test mode: {}'.format(
                    self.configer.get('test', 'mode')))
                exit(1)

            meta_list = DCHelper.tolist(data_dict['meta'])
            img_list = DCHelper.tolist(data_dict['img'])
            for i in range(len(meta_list)):
                filename = meta_list[i]['img_path'].split('/')[-1].split(
                    '.')[0]
                label_map = np.argmax(total_logits[i], axis=-1)
                label_img = np.array(label_map, dtype=np.uint8)
                ori_img_bgr = self.blob_helper.tensor2bgr(img_list[i][0])
                ori_img_bgr = ImageHelper.resize(
                    ori_img_bgr,
                    target_size=meta_list[i]['ori_img_size'],
                    interpolation='linear')
                image_canvas = self.seg_parser.colorize(
                    label_img, image_canvas=ori_img_bgr)
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(
                                     out_dir, 'vis/{}.png'.format(filename)))

                if self.configer.exists('data', 'label_list'):
                    label_img = self.__relabel(label_img)

                if self.configer.exists(
                        'data', 'reduce_zero_label') and self.configer.get(
                            'data', 'reduce_zero_label'):
                    label_img = label_img + 1
                    label_img = label_img.astype(np.uint8)

                label_img = Image.fromarray(label_img, 'P')
                label_path = os.path.join(out_dir,
                                          'label/{}.png'.format(filename))
                Log.info('Label Path: {}'.format(label_path))
                ImageHelper.save(label_img, label_path)
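
The label output follows a fixed pattern: argmax over the class dimension, cast to uint8, then save as a palette ('P') mode PNG. A self-contained sketch of that pipeline with random logits and an illustrative palette:

    import numpy as np
    from PIL import Image

    num_classes = 8
    logits = np.random.rand(120, 160, num_classes).astype(np.float32)
    label_img = np.argmax(logits, axis=-1).astype(np.uint8)

    palette_img = Image.fromarray(label_img, 'P')
    palette = np.random.randint(0, 256, (num_classes, 3), dtype=np.uint8)
    palette_img.putpalette(palette.flatten().tolist() + [0] * (768 - 3 * num_classes))
    palette_img.save('label.png')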
Example #7
    def __test_img(self, image_path, json_path, raw_path, vis_path):

        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))

        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
        heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
        paf_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'paf_out')))
        multiplier = [scale * self.configer.get('test', 'input_size')[1] / ori_height
                      for scale in self.configer.get('test', 'scale_search')]
        stride = self.configer.get('network', 'stride')
        for i, scale in enumerate(multiplier):
            image, border_hw = self._get_blob(ori_image, scale=scale)
            with torch.no_grad():
                paf_out_list, heatmap_out_list = self.pose_net(image)
                paf_out = paf_out_list[-1]
                heatmap_out = heatmap_out_list[-1]

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)

                heatmap = cv2.resize(heatmap, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
                heatmap = cv2.resize(heatmap[:border_hw[0], :border_hw[1]],
                                     (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                paf = paf_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                paf = cv2.resize(paf, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
                paf = cv2.resize(paf[:border_hw[0], :border_hw[1]],
                                 (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                heatmap_avg = heatmap_avg + heatmap / len(multiplier)
                paf_avg = paf_avg + paf / len(multiplier)

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(ori_img_bgr, paf_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
        json_dict = self.__get_info_tree(ori_img_bgr, subset, candidate)

        image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
        image_canvas = self.pose_parser.link_points(image_canvas, json_dict)

        ImageHelper.save(image_canvas, vis_path)
        ImageHelper.save(ori_img_bgr, raw_path)
        Log.info('Json Save Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
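
Unlike Example #2, this variant first up-samples the network output by the model stride, crops away the padded border (border_hw), and only then resizes to the original image size. A sketch of that three-step resize with illustrative shapes:

    import cv2
    import numpy as np

    stride = 8
    ori_w, ori_h = 320, 240
    border_hw = (250, 330)  # valid (unpadded) region after the network pass

    net_out = np.random.rand(32, 42, 19).astype(np.float32)  # (H/stride, W/stride, C)
    up = cv2.resize(net_out, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
    up = up[:border_hw[0], :border_hw[1]]  # drop the padded border
    up = cv2.resize(up, (ori_w, ori_h), interpolation=cv2.INTER_CUBIC)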
Example #8
    def __test_img(self, image_path, label_path, vis_path, raw_path):
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))

        ori_width, ori_height = ImageHelper.get_size(ori_image)

        total_logits = np.zeros((ori_height, ori_width, self.configer.get('data', 'num_classes')), np.float32)
        for scale in self.configer.get('test', 'scale_search'):
            image = self.blob_helper.make_input(image=ori_image,
                                                input_size=self.configer.get('test', 'input_size'),
                                                scale=scale)
            if self.configer.get('test', 'crop_test'):
                crop_size = self.configer.get('test', 'crop_size')
                if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
                    results = self._crop_predict(image, crop_size)
                else:
                    results = self._predict(image)
            else:
                results = self._predict(image)

            results = cv2.resize(results, (ori_width, ori_height), interpolation=cv2.INTER_LINEAR)
            total_logits += results

        if self.configer.get('test', 'mirror'):
            if self.configer.get('data', 'image_tool') == 'cv2':
                image = cv2.flip(ori_image, 1)
            else:
                image = ori_image.transpose(Image.FLIP_LEFT_RIGHT)

            image = self.blob_helper.make_input(image, input_size=self.configer.get('test', 'input_size'), scale=1.0)
            if self.configer.get('test', 'crop_test'):
                crop_size = self.configer.get('test', 'crop_size')
                if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
                    results = self._crop_predict(image, crop_size)
                else:
                    results = self._predict(image)
            else:
                results = self._predict(image)

            results = cv2.resize(results[:, ::-1], (ori_width, ori_height), interpolation=cv2.INTER_LINEAR)
            total_logits += results

        label_map = np.argmax(total_logits, axis=-1)
        label_img = np.array(label_map, dtype=np.uint8)
        image_bgr = cv2.cvtColor(np.array(ori_image), cv2.COLOR_RGB2BGR)
        image_canvas = self.seg_parser.colorize(label_img, image_canvas=image_bgr)
        ImageHelper.save(image_canvas, save_path=vis_path)
        ImageHelper.save(ori_image, save_path=raw_path)

        if not self.configer.is_empty('data', 'label_list'):
            label_img = self.__relabel(label_img)

        label_img = Image.fromarray(label_img, 'P')
        Log.info('Label Path: {}'.format(label_path))
        ImageHelper.save(label_img, label_path)
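
The mirror branch is plain horizontal-flip test-time augmentation: predict on the flipped image, flip the logits back along the width axis (results[:, ::-1]), and add them to the accumulated logits. A minimal sketch with a stand-in predictor:

    import numpy as np

    def fake_predict(image):
        # stand-in for self._predict: per-pixel class logits
        h, w = image.shape[:2]
        return np.random.rand(h, w, 21).astype(np.float32)

    image = np.random.rand(120, 160, 3).astype(np.float32)
    total_logits = fake_predict(image)
    total_logits += fake_predict(image[:, ::-1])[:, ::-1]  # flip in, flip back
    label_map = np.argmax(total_logits, axis=-1)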
Example #9
    def test_img(self, image_path, label_path, vis_path, raw_path):
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(
            image_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        total_logits = None
        if self.configer.get('test', 'mode') == 'ss_test':
            total_logits = self.ss_test(ori_image)

        elif self.configer.get('test', 'mode') == 'sscrop_test':
            total_logits = self.sscrop_test(ori_image)

        elif self.configer.get('test', 'mode') == 'ms_test':
            total_logits = self.ms_test(ori_image)

        elif self.configer.get('test', 'mode') == 'mscrop_test':
            total_logits = self.mscrop_test(ori_image)

        else:
            Log.error('Invalid test mode: {}'.format(
                self.configer.get('test', 'mode')))
            exit(1)

        label_map = np.argmax(total_logits, axis=-1)
        label_img = np.array(label_map, dtype=np.uint8)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image,
                                              mode=self.configer.get(
                                                  'data', 'input_mode'))
        image_canvas = self.seg_parser.colorize(label_img,
                                                image_canvas=ori_img_bgr)
        ImageHelper.save(image_canvas, save_path=vis_path)
        ImageHelper.save(ori_image, save_path=raw_path)

        if self.configer.exists('data', 'label_list'):
            label_img = self.__relabel(label_img)

        if self.configer.exists('data',
                                'reduce_zero_label') and self.configer.get(
                                    'data', 'reduce_zero_label'):
            label_img = label_img + 1
            label_img = label_img.astype(np.uint8)

        label_img = Image.fromarray(label_img, 'P')
        Log.info('Label Path: {}'.format(label_path))
        ImageHelper.save(label_img, label_path)
Example #10
    def test(self, test_dir, out_dir):
        gallery_file_list = '*_gallery_*.txt'
        probe_file_list = '*_probe_*.txt'
        gallery_file_list = glob.glob(test_dir + '/' + gallery_file_list)
        probe_file_list = glob.glob(test_dir + '/' + probe_file_list)
        # remove the *_dev.txt files from both lists
        gallery_file_list = sorted(gallery_file_list)
        probe_file_list = sorted(probe_file_list)
        rank1_acc = []
        vr_acc = []
        for i in range(len(gallery_file_list)):
            probe_features = []
            gallery_features = []
            probe_names = []
            gallery_names = []
            Log.info('Gallery File: {}'.format(gallery_file_list[i]))
            for data_dict in self.test_loader.get_testloader(
                    list_path=gallery_file_list[i]):
                new_data_dict = dict(gallery=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])

                for idx in range(len(out_dict['feat'])):
                    gallery_features.append(
                        out_dict['feat'][idx].cpu().numpy())
                    gallery_names.append(
                        meta_list[idx]['img_path'].split("/")[-2])

            Log.info('Probe File: {}'.format(probe_file_list[i]))
            for data_dict in self.test_loader.get_testloader(
                    list_path=probe_file_list[i]):
                new_data_dict = dict(probe=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])

                for key, value in out_dict.items():
                    if 'feat' in key:
                        for idx in range(len(value)):
                            probe_features.append(value[idx].cpu().numpy())
                            probe_names.append(
                                meta_list[idx]['img_path'].split("/")[-2])

                        continue
                    else:
                        for idx in range(len(value)):
                            img_bgr = self.blob_helper.tensor2bgr(value[idx])
                            filename = meta_list[idx]['img_path'].rstrip(
                            ).split('/')[-1]
                            ImageHelper.save(
                                img_bgr, os.path.join(out_dir, key, filename))

            probe_features = np.array(probe_features)
            gallery_features = np.array(gallery_features)
            score = cosine_similarity(gallery_features, probe_features).T
            r_acc, tpr = self.compute_metric(score, probe_names, gallery_names)
            # print('score={}, probe_names={}, gallery_names={}'.format(score, probe_names, gallery_names))
            rank1_acc.append(r_acc)
            vr_acc.append(tpr)

        avg_r_a = np.mean(np.array(rank1_acc))
        std_r_a = np.std(np.array(rank1_acc))
        avg_v_a = np.mean(np.array(vr_acc))
        std_v_a = np.std(np.array(vr_acc))
        # avg_vr_acc = sum(vr_acc)/(len(vr_acc) + 1e-5)
        print()
        print('=====================================================')
        print('Final Rank1 accuracy is', avg_r_a * 100, "% +", std_r_a)
        print('Final VR@FAR=0.1% accuracy is', avg_v_a * 100, "% +", std_v_a)
        print('=====================================================')
        print()
        return avg_r_a, std_r_a, avg_v_a, std_v_a
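
compute_metric is not shown here; a common way to obtain the rank-1 accuracy it returns is to take, for each probe, the gallery entry with the highest cosine similarity and check whether the identities match. The sketch below is such a generic formulation, not necessarily the project's implementation:

    import numpy as np
    from sklearn.metrics.pairwise import cosine_similarity

    gallery_features = np.random.rand(5, 128)
    probe_features = np.random.rand(3, 128)
    gallery_names = ['id0', 'id1', 'id2', 'id3', 'id4']
    probe_names = ['id1', 'id3', 'id4']

    score = cosine_similarity(gallery_features, probe_features).T  # probes x gallery
    top1 = np.argmax(score, axis=1)
    rank1_acc = np.mean([probe_names[i] == gallery_names[j]
                         for i, j in enumerate(top1)])
    print('Rank-1 accuracy: {:.2%}'.format(rank1_acc))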
Example #11
    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get(
                'test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(
                self.configer.get('test', 'test_dir'),
                'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(
                json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(
                self.configer.get('test', 'test_dir'),
                'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(
                json_path=jsonB_path) if os.path.exists(jsonB_path) else None
        elif self.configer.exists('test', 'mode') and self.configer.get(
                'test', 'mode') == 'pix2pix':
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(
                test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            test_loader_B = None
        else:
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(
                test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            imgB_dir = os.path.join(test_dir, 'imageB')
            test_loader_B = self.test_loader.get_testloader(
                test_dir=imgB_dir) if os.path.exists(imgB_dir) else None

        if test_loader_A is not None:
            for data_dict in test_loader_A:
                new_data_dict = dict(imgA=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)

                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        # filename = '_'.join(img_path.rstrip().split('/')[-2:])
                        img_bgr = ImageHelper.resize(
                            img_bgr,
                            target_size=self.configer.get('test', 'out_size'),
                            interpolation='linear')
                        ImageHelper.save(
                            img_bgr,
                            os.path.join(out_dir, key,
                                         meta_list[i]['filename']))

        if test_loader_B is not None:
            for data_dict in test_loader_B:
                new_data_dict = dict(imgB=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        # filename = img_path.rstrip().split('/')[-1]
                        # filename = '_'.join(img_path.rstrip().split('/')[-2:])
                        img_bgr = ImageHelper.resize(
                            img_bgr,
                            target_size=self.configer.get('test', 'out_size'),
                            interpolation='linear')
                        ImageHelper.save(
                            img_bgr,
                            os.path.join(out_dir, key,
                                         meta_list[i]['filename']))
Example #12
    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get(
                'test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(
                test_dir,
                'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(
                json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(
                test_dir,
                'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(
                json_path=jsonB_path) if os.path.exists(jsonB_path) else None

        else:
            test_loader_A, test_loader_B = None, None
            Log.error('Test Mode not Exists!')
            exit(1)

        assert test_loader_A is not None and test_loader_B is not None
        probe_features = []
        gallery_features = []
        probe_labels = []
        gallery_labels = []
        for data_dict in test_loader_A:
            new_data_dict = dict(imgA=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                probe_features.append(out_dict['featA'][idx].cpu().numpy())
                probe_labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get(
                                                     'test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(
                        img_bgr,
                        os.path.join(out_dir, key, meta_list[i]['filename']))

        for data_dict in test_loader_B:
            new_data_dict = dict(imgB=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                gallery_features.append(out_dict['feat'][idx].cpu().numpy())
                gallery_labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get(
                                                     'test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(
                        img_bgr,
                        os.path.join(out_dir, key, meta_list[i]['filename']))

        r_acc, tpr = self.decode(probe_features, gallery_features,
                                 probe_labels, gallery_labels)
        Log.info('Final Rank1 accuracy is {}'.format(r_acc))
        Log.info('Final VR@FAR=0.1% accuracy is {}'.format(tpr))
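
decode() yields rank-1 accuracy and VR@FAR=0.1%. VR@FAR is the verification (true accept) rate at a threshold chosen so that the false accept rate equals 0.1%; a generic sketch of that metric, independent of the project's decode implementation:

    import numpy as np

    def vr_at_far(scores, same_identity, far=1e-3):
        # threshold at which impostor (different-identity) scores
        # are accepted with probability `far`
        impostor = np.sort(scores[~same_identity])[::-1]
        threshold = impostor[int(far * len(impostor))]
        # verification rate: genuine pairs accepted at that threshold
        return np.mean(scores[same_identity] >= threshold)

    scores = np.random.rand(10000)
    same_identity = np.random.rand(10000) < 0.1
    print('VR@FAR=0.1%: {:.2%}'.format(vr_at_far(scores, same_identity)))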