Example #1
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.gan_net = None

        self._init_model()
Example #2
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.model_manager = ModelManager(configer)
        self.seg_data_loader = DataLoader(configer)

        self.gan_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
        self.runner_state = dict()

        self._init_model()
Example #3
class ImageTranslatorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.gan_net = None

        self._init_model()

    def _init_model(self):
        self.gan_net = self.model_manager.gan_model()
        self.gan_net = RunnerHelper.load_net(self, self.gan_net)
        self.gan_net.eval()

    def test(self, test_dir, out_dir):
        imgA_dir = os.path.join(test_dir, 'imageA')
        imgB_dir = os.path.join(test_dir, 'imageB')
        if os.path.exists(imgA_dir):
            Log.info('ImageA Dir: {}'.format(imgA_dir))
            for data_dict in self.test_loader.get_testloader(
                    test_dir=imgA_dir):
                new_data_dict = dict(imgA=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)

                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        filename = img_path.rstrip().split('/')[-1]
                        ImageHelper.save(img_bgr,
                                         os.path.join(out_dir, key, filename))

        if os.path.exists(imgB_dir):
            Log.info('ImageB Dir: {}'.format(imgB_dir))
            for data_dict in self.test_loader.get_testloader(
                    test_dir=imgB_dir):
                new_data_dict = dict(imgB=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        filename = img_path.rstrip().split('/')[-1]
                        ImageHelper.save(img_bgr,
                                         os.path.join(out_dir, key, filename))
Example #4
class FaceGAN(object):
    """
      The class for Face GAN training and validation. Includes train, val, test & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.model_manager = ModelManager(configer)
        self.seg_data_loader = DataLoader(configer)

        self.gan_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
        self.runner_state = dict()

        self._init_model()

    def _init_model(self):
        self.gan_net = self.model_manager.gan_model()
        self.gan_net = RunnerHelper.load_net(self, self.gan_net)

        self.optimizer, self.scheduler = Trainer.init(
            self._get_parameters(), self.configer.get('solver'))

        self.train_loader = self.seg_data_loader.get_trainloader()
        self.val_loader = self.seg_data_loader.get_valloader()

    def _get_parameters(self):

        return self.gan_net.parameters()

    def train(self):
        """
          Train function for each epoch during the train phase.
        """
        self.gan_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        for i, data_dict in enumerate(self.train_loader):
            Trainer.update(self, solver_dict=self.configer.get('solver'))
            inputs = data_dict['imgA']
            self.data_time.update(time.time() - start_time)

            # Forward pass.
            out_dict = self.gan_net(inputs)
            # outputs = self.module_utilizer.gather(outputs)
            loss = out_dict['loss'].mean()
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.runner_state['iters'] += 1

            # Print the log info & reset the states.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:.3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.runner_state['epoch'],
                            self.runner_state['iters'],
                            self.configer.get('solver', 'display_iter'),
                            RunnerHelper.get_lr(self.optimizer),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            if self.configer.get('solver', 'lr')['metric'] == 'iters' \
                    and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'):
                break

            # Check to val the current model.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'test_interval') == 0:
                self.val()

        self.runner_state['epoch'] += 1

    def val(self, data_loader=None):
        """
          Validation function during the train phase.
        """
        self.gan_net.eval()
        start_time = time.time()

        data_loader = self.val_loader if data_loader is None else data_loader
        for j, data_dict in enumerate(data_loader):
            inputs = data_dict['imgA']

            with torch.no_grad():
                # Forward pass.
                out_dict = self.gan_net(data_dict)
                # Compute the loss of the val batch.

            self.val_losses.update(out_dict['loss'].mean().item(),
                                   inputs.size(0))
            meta_list = DCHelper.tolist(data_dict['meta'])
            probe_features = []
            gallery_features = []
            probe_labels = []
            gallery_labels = []
            for idx in range(len(meta_list)):
                gallery_features.append(out_dict['featB'][idx].cpu().numpy())
                gallery_labels.append(meta_list[idx]['labelB'])
                probe_features.append(out_dict['featA'][idx].cpu().numpy())
                probe_labels.append(meta_list[idx]['labelA'])

            rank_1, vr_far_001 = FaceGANTest.decode(probe_features,
                                                    gallery_features,
                                                    probe_labels,
                                                    gallery_labels)
            Log.info('Rank1 accuracy is {}'.format(rank_1))
            Log.info('VR@FAR=0.1% accuracy is {}'.format(vr_far_001))
            # Update the vars of the val phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()

        RunnerHelper.save_net(self, self.gan_net, val_loss=self.val_losses.avg)

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.gan_net.train()
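
The trainer examples (#2, #4, and #7 below) rely on an AverageMeter helper to accumulate batch times and losses; its implementation is not part of these snippets. Below is a minimal sketch of the usual pattern, with the field names (val, sum, count, avg) inferred from how the examples use the object rather than taken from the torchcv source, which may differ:

class AverageMeter(object):
    """Keep the latest value plus a running sum, count, and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # val: value for the current batch, n: number of samples it covers.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

With such a helper, self.train_losses.update(loss.item(), inputs.size(0)) keeps a per-sample running average across the display interval, and reset() clears it after each log line.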
Example #5
class FaceGANTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.gan_net = None

        self._init_model()

    def _init_model(self):
        self.gan_net = self.model_manager.gan_model()
        self.gan_net = RunnerHelper.load_net(self, self.gan_net)
        self.gan_net.eval()

    def test(self, test_dir, out_dir):
        gallery_file_list = '*_gallery_*.txt'
        probe_file_list = '*_probe_*.txt'
        gallery_file_list = glob.glob(test_dir + '/' + gallery_file_list)
        probe_file_list = glob.glob(test_dir + '/' + probe_file_list)
        # remove *_dev.txt files from both lists (the code below only sorts them)
        gallery_file_list = sorted(gallery_file_list)
        probe_file_list = sorted(probe_file_list)
        rank1_acc = []
        vr_acc = []
        for i in range(len(gallery_file_list)):
            probe_features = []
            gallery_features = []
            probe_names = []
            gallery_names = []
            Log.info('Gallery File: {}'.format(gallery_file_list[i]))
            for data_dict in self.test_loader.get_testloader(
                    list_path=gallery_file_list[i]):
                new_data_dict = dict(gallery=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])

                for idx in range(len(out_dict['feat'])):
                    gallery_features.append(
                        out_dict['feat'][idx].cpu().numpy())
                    gallery_names.append(
                        meta_list[idx]['img_path'].split("/")[-2])

            Log.info('Probe File: {}'.format(probe_file_list[i]))
            for data_dict in self.test_loader.get_testloader(
                    list_path=probe_file_list[i]):
                new_data_dict = dict(probe=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])

                for key, value in out_dict.items():
                    if 'feat' in key:
                        for idx in range(len(value)):
                            probe_features.append(value[idx].cpu().numpy())
                            probe_names.append(
                                meta_list[idx]['img_path'].split("/")[-2])

                        continue
                    else:
                        for idx in range(len(value)):
                            img_bgr = self.blob_helper.tensor2bgr(value[idx])
                            filename = meta_list[idx]['img_path'].rstrip(
                            ).split('/')[-1]
                            ImageHelper.save(
                                img_bgr, os.path.join(out_dir, key, filename))

            probe_features = np.array(probe_features)
            gallery_features = np.array(gallery_features)
            score = cosine_similarity(gallery_features, probe_features).T
            r_acc, tpr = self.compute_metric(score, probe_names, gallery_names)
            # print('score={}, probe_names={}, gallery_names={}'.format(score, probe_names, gallery_names))
            rank1_acc.append(r_acc)
            vr_acc.append(tpr)

        avg_r_a = np.mean(np.array(rank1_acc))
        std_r_a = np.std(np.array(rank1_acc))
        avg_v_a = np.mean(np.array(vr_acc))
        std_v_a = np.std(np.array(vr_acc))
        # avg_vr_acc = sum(vr_acc)/(len(vr_acc) + 1e-5)
        print()
        print('=====================================================')
        print('Final Rank1 accuracy is', avg_r_a * 100, "% +", std_r_a)
        print('Final VR@FAR=0.1% accuracy is', avg_v_a * 100, "% +", std_v_a)
        print('=====================================================')
        print()
        return avg_r_a, std_r_a, avg_v_a, std_v_a

    def compute_metric(self, score, probe_names, gallery_names):
        # print('score.shape =', score.shape)
        # print('probe_names =', np.array(probe_names).shape)
        # print('gallery_names =', np.array(gallery_names).shape)
        print('===> compute metrics')
        # print(probe_names[1], type(probe_names[1]))
        # exit()
        label = np.zeros_like(score)
        maxIndex = np.argmax(score, axis=1)
        # print('len = ', len(maxIndex))
        count = 0
        for i in range(len(maxIndex)):
            probe_names_repeat = np.repeat([probe_names[i]],
                                           len(gallery_names),
                                           axis=0).T
            # compare the two string lists
            result = np.equal(probe_names_repeat, gallery_names) * 1
            # result = np.core.defchararray.equal(probe_names_repeat, gallery_names) * 1
            # find the index of the gallery image that has the same name as the probe image
            # print(result)
            # print('++++++++++++++++++++++++++++++++=')
            index = np.nonzero(result == 1)

            # if i == 10:
            #     exit()
            assert len(index[0]) == 1
            label[i][index[0][0]] = 1

            # check whether the gallery image with the max similarity score has the same name as the probe image
            if np.equal(int(probe_names[i]), int(gallery_names[maxIndex[i]])):
                count += 1
            else:
                pass
                # print(probe_img_list[i], gallery_img_list[ind])

        r_acc = count / (len(probe_names) + 1e-5)
        fpr, tpr, thresholds = roc_curve(label.flatten(), score.flatten())
        print("In sub_experiment", label.size(0), 'count of true label :',
              count)
        print('rank1 accuracy =', r_acc)
        print('VR@FAR=0.1% accuracy =', tpr[fpr <= 0.001][-1])

        # plot_roc(fpr, tpr, thresholds, g_count)
        return r_acc, tpr[fpr <= 0.001][-1]
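
compute_metric pairs every probe against every gallery entry: rank-1 accuracy checks whether each probe's highest-scoring gallery entry shares its identity, and VR@FAR=0.1% is read off an ROC curve over all (probe, gallery) scores. Here is a self-contained sketch of the same computation on made-up features; the identity names, feature sizes, and values are placeholders for illustration, not torchcv data:

import numpy as np
from sklearn.metrics import roc_curve
from sklearn.metrics.pairwise import cosine_similarity

np.random.seed(0)

# Toy probe/gallery features and identity names, one gallery entry per identity.
probe_features = np.random.randn(4, 128)
gallery_features = np.random.randn(3, 128)
probe_names = ['001', '002', '003', '001']
gallery_names = ['001', '002', '003']

# Rows: probes, columns: gallery entries.
score = cosine_similarity(gallery_features, probe_features).T

# Ground-truth matrix: 1 where the probe and the gallery entry share an identity.
label = (np.array(probe_names)[:, None] == np.array(gallery_names)[None, :]).astype(int)

# Rank-1 accuracy: fraction of probes whose best-scoring gallery entry is the right identity.
rank1 = label[np.arange(len(probe_names)), score.argmax(axis=1)].mean()

# Verification rate at FAR=0.1%: TPR at the largest threshold with FPR <= 0.001.
fpr, tpr, _ = roc_curve(label.flatten(), score.flatten())
vr_far_001 = tpr[fpr <= 0.001][-1]

print('rank1 =', rank1, 'VR@FAR=0.1% =', vr_far_001)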
Example #6
class ImageTranslatorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.gan_net = None

        self._init_model()

    def _init_model(self):
        self.gan_net = self.model_manager.gan_model()
        self.gan_net = RunnerHelper.load_net(self, self.gan_net)
        self.gan_net.eval()

    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get(
                'test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(
                self.configer.get('test', 'test_dir'),
                'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(
                json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(
                self.configer.get('test', 'test_dir'),
                'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(
                json_path=jsonB_path) if os.path.exists(jsonB_path) else None
        elif self.configer.exists('test', 'mode') and self.configer.get(
                'test', 'mode') == 'pix2pix':
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(
                test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            test_loader_B = None
        else:
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(
                test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            imgB_dir = os.path.join(test_dir, 'imageB')
            test_loader_B = self.test_loader.get_testloader(
                test_dir=imgB_dir) if os.path.exists(imgB_dir) else None

        if test_loader_A is not None:
            for data_dict in test_loader_A:
                new_data_dict = dict(imgA=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)

                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        # filename = '_'.join(img_path.rstrip().split('/')[-2:])
                        img_bgr = ImageHelper.resize(
                            img_bgr,
                            target_size=self.configer.get('test', 'out_size'),
                            interpolation='linear')
                        ImageHelper.save(
                            img_bgr,
                            os.path.join(out_dir, key,
                                         meta_list[i]['filename']))

        if test_loader_B is not None:
            for data_dict in test_loader_B:
                new_data_dict = dict(imgB=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        # filename = img_path.rstrip().split('/')[-1]
                        # filename = '_'.join(img_path.rstrip().split('/')[-2:])
                        img_bgr = ImageHelper.resize(
                            img_bgr,
                            target_size=self.configer.get('test', 'out_size'),
                            interpolation='linear')
                        ImageHelper.save(
                            img_bgr,
                            os.path.join(out_dir, key,
                                         meta_list[i]['filename']))
Example #7
class ImageTranslator(object):
    """
      The class for Image Translation. Includes train, val, test & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.model_manager = ModelManager(configer)
        self.seg_data_loader = DataLoader(configer)

        self.gan_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
        self.runner_state = dict()

        self._init_model()

    def _init_model(self):
        self.gan_net = self.model_manager.gan_model()
        self.gan_net = RunnerHelper.load_net(self, self.gan_net)

        self.optimizer_G, self.scheduler_G = Trainer.init(
            self._get_parameters()[0], self.configer.get('solver'))
        self.optimizer_D, self.scheduler_D = Trainer.init(
            self._get_parameters()[1], self.configer.get('solver'))

        self.train_loader = self.seg_data_loader.get_trainloader()
        self.val_loader = self.seg_data_loader.get_valloader()

    def _get_parameters(self):
        params_G = []
        params_D = []
        params_dict = dict(self.gan_net.named_parameters())
        for key, value in params_dict.items():
            if 'G' not in key:
                params_D.append(value)
            else:
                params_G.append(value)

        return params_G, params_D

    def train(self):
        """
          Train function for each epoch during the train phase.
        """
        self.gan_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.scheduler_G.step(self.runner_state['epoch'])
        self.scheduler_D.step(self.runner_state['epoch'])
        for i, data_dict in enumerate(self.train_loader):
            inputs = data_dict['imgA']
            self.data_time.update(time.time() - start_time)

            # Forward pass.
            out_dict = self.gan_net(data_dict)
            # outputs = self.module_utilizer.gather(outputs)
            self.optimizer_G.zero_grad()
            loss_G = out_dict['loss_G'].mean()
            loss_G.backward()
            self.optimizer_G.step()

            self.optimizer_D.zero_grad()
            loss_D = out_dict['loss_D'].mean()
            loss_D.backward()
            self.optimizer_D.step()
            loss = loss_G + loss_D
            self.train_losses.update(loss.item(), inputs.size(0))

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.runner_state['iters'] += 1

            # Print the log info & reset the states.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:.3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.runner_state['epoch'],
                            self.runner_state['iters'],
                            self.configer.get('solver', 'display_iter'), [
                                RunnerHelper.get_lr(self.optimizer_G),
                                RunnerHelper.get_lr(self.optimizer_D)
                            ],
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            if self.configer.get('solver', 'lr')['metric'] == 'iters' \
                    and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'):
                break

            # Check to val the current model.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'test_interval') == 0:
                self.val()

        self.runner_state['epoch'] += 1

    def val(self, data_loader=None):
        """
          Validation function during the train phase.
        """
        self.gan_net.eval()
        start_time = time.time()

        data_loader = self.val_loader if data_loader is None else data_loader
        for j, data_dict in enumerate(data_loader):
            inputs = data_dict['imgA']

            with torch.no_grad():
                # Forward pass.
                out_dict = self.gan_net(data_dict)
                # Compute the loss of the val batch.

            self.val_losses.update(
                out_dict['loss_G'].mean().item() +
                out_dict['loss_D'].mean().item(), inputs.size(0))
            # Update the vars of the val phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()

        RunnerHelper.save_net(self, self.gan_net, val_loss=self.val_losses.avg)

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.gan_net.train()
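
ImageTranslator splits the network's named parameters into a generator group ('G' in the parameter name) and a discriminator group, then steps two optimizers on every batch: one backward/step for loss_G and one for loss_D. The following is a minimal, self-contained sketch of that alternating update on dummy modules; the linear layers, BCE losses, and random data are placeholders, not the torchcv GAN:

import torch
import torch.nn as nn

# Stand-ins for the generator and discriminator inside the real GAN model.
G = nn.Linear(8, 8)
D = nn.Linear(8, 1)

optimizer_G = torch.optim.Adam(G.parameters(), lr=2e-4)
optimizer_D = torch.optim.Adam(D.parameters(), lr=2e-4)
bce = nn.BCEWithLogitsLoss()

real = torch.randn(4, 8)        # dummy "real" batch
fake = G(torch.randn(4, 8))     # generator output for a dummy input batch

# Generator step: push D to score the fake samples as real.
optimizer_G.zero_grad()
loss_G = bce(D(fake), torch.ones(4, 1))
loss_G.backward()
optimizer_G.step()

# Discriminator step: real samples toward 1, detached fakes toward 0.
optimizer_D.zero_grad()
loss_D = bce(D(real), torch.ones(4, 1)) + bce(D(fake.detach()), torch.zeros(4, 1))
loss_D.backward()
optimizer_D.step()

This mirrors the zero_grad/backward/step pairs in ImageTranslator.train(), which then logs the combined loss_G + loss_D through the train_losses meter.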
Example #8
class FaceGANTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.gan_net = None

        self._init_model()

    def _init_model(self):
        self.gan_net = self.model_manager.gan_model()
        self.gan_net = RunnerHelper.load_net(self, self.gan_net)
        self.gan_net.eval()

    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get(
                'test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(
                test_dir,
                'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(
                json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(
                test_dir,
                'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(
                json_path=jsonB_path) if os.path.exists(jsonB_path) else None

        else:
            test_loader_A, test_loader_B = None, None
            Log.error('Test mode does not exist!')
            exit(1)

        assert test_loader_A is not None and test_loader_B is not None
        probe_features = []
        gallery_features = []
        probe_labels = []
        gallery_labels = []
        for data_dict in test_loader_A:
            new_data_dict = dict(imgA=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                probe_features.append(out_dict['featA'][idx].cpu().numpy())
                probe_labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get(
                                                     'test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(
                        img_bgr,
                        os.path.join(out_dir, key, meta_list[i]['filename']))

        for data_dict in test_loader_B:
            new_data_dict = dict(imgB=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                gallery_features.append(out_dict['feat'][idx].cpu().numpy())
                gallery_labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get(
                                                     'test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(
                        img_bgr,
                        os.path.join(out_dir, key, meta_list[i]['filename']))

        r_acc, tpr = self.decode(probe_features, gallery_features,
                                 probe_labels, gallery_labels)
        Log.info('Final Rank1 accuracy is {}'.format(r_acc))
        Log.info('Final VR@FAR=0.1% accuracy is {}'.format(tpr))

    @staticmethod
    def decode(probe_features, gallery_features, probe_labels, gallery_labels):
        probe_features = np.array(probe_features)
        gallery_features = np.array(gallery_features)
        score = cosine_similarity(gallery_features, probe_features).T
        # print('score.shape =', score.shape)
        # print('probe_names =', np.array(probe_names).shape)
        # print('gallery_names =', np.array(gallery_names).shape)
        print('===> compute metrics')
        # print(probe_names[1], type(probe_names[1]))
        # exit()
        label = np.zeros_like(score)
        maxIndex = np.argmax(score, axis=1)
        # print('len = ', len(maxIndex))
        count = 0
        for i in range(len(maxIndex)):
            probe_names_repeat = np.repeat([probe_labels[i]],
                                           len(gallery_labels),
                                           axis=0).T
            # compare the two string lists
            result = np.equal(probe_names_repeat, gallery_labels) * 1
            # result = np.core.defchararray.equal(probe_names_repeat, gallery_names) * 1
            # find the index of the gallery image that has the same name as the probe image
            # print(result)
            # print('++++++++++++++++++++++++++++++++=')
            index = np.nonzero(result == 1)

            # if i == 10:
            #     exit()
            assert len(index[0]) == 1
            label[i][index[0][0]] = 1

            # check whether the gallery image with the max similarity score has the same name as the probe image
            if np.equal(int(probe_labels[i]),
                        int(gallery_labels[maxIndex[i]])):
                count += 1
            else:
                pass
                # print(probe_img_list[i], gallery_img_list[ind])

        r_acc = count / (len(probe_labels) + 1e-5)
        fpr, tpr, thresholds = roc_curve(label.flatten(), score.flatten())
        # print("In sub_experiment", label.size(0), 'count of true label :', count)
        # print('rank1 accuracy =', r_acc)
        # print('VR@FAR=0.1% accuracy =', tpr[fpr <= 0.001][-1])

        # plot_roc(fpr, tpr, thresholds, g_count)
        return r_acc, tpr[fpr <= 0.001][-1]