Example #1
    def mscrop_test(self, in_data_dict):
        total_logits = [
            np.zeros((meta['ori_img_size'][1], meta['ori_img_size'][0],
                      self.configer.get('data', 'num_classes')), np.float32)
            for meta in DCHelper.tolist(in_data_dict['meta'])
        ]
        for scale in self.configer.get('test', 'scale_search'):
            data_dict = self.blob_helper.get_blob(in_data_dict, scale=scale)
            crop_size = self.configer.get('test', 'crop_size')
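            # If any image in the batch is smaller than the crop size, predict on the whole image instead of cropping.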
            if any(image.size()[3] < crop_size[0]
                   or image.size()[2] < crop_size[1]
                   for image in DCHelper.tolist(data_dict['img'])):
                results = self._predict(data_dict)
            else:
                results = self._crop_predict(data_dict, crop_size)

            for i in range(len(total_logits)):
                total_logits[i] += results[i]

        for scale in self.configer.get('test', 'scale_search'):
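            # Flipped pass: get_blob mirrors the input, so the logits are flipped back along the width axis before accumulating.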
            data_dict = self.blob_helper.get_blob(in_data_dict,
                                                  scale=scale,
                                                  flip=True)
            crop_size = self.configer.get('test', 'crop_size')
            if any(image.size()[3] < crop_size[0]
                   or image.size()[2] < crop_size[1]
                   for image in DCHelper.tolist(data_dict['img'])):
                results = self._predict(data_dict)
            else:
                results = self._crop_predict(data_dict, crop_size)

            for i in range(len(total_logits)):
                total_logits[i] += results[i][:, ::-1]

        return total_logits
Example #2
    def test(self, test_dir, out_dir):
        imgA_dir = os.path.join(test_dir, 'imgA')
        imgB_dir = os.path.join(test_dir, 'imgB')

        if os.path.exists(imgA_dir):
            Log.info('ImageA Dir: {}'.format(imgA_dir))
            for data_dict in self.test_loader.get_testloader(test_dir=imgA_dir):
                new_data_dict = dict(imgA=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        filename = img_path.rstrip().split('/')[-1]
                        ImageHelper.save(img_bgr, os.path.join(out_dir, key, filename))

        if os.path.exists(imgB_dir):
            Log.info('ImageB Dir: {}'.format(imgB_dir))
            for data_dict in self.test_loader.get_testloader(test_dir=imgB_dir):
                new_data_dict = dict(imgB=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        filename = img_path.rstrip().split('/')[-1]
                        ImageHelper.save(img_bgr, os.path.join(out_dir, key, filename))
Example #3
    def get_blob(self, data_dict, scale=None, flip=False):
        assert scale is not None

        img_list, meta_list = [], []
        for image, meta in zip(DCHelper.tolist(data_dict['img']), DCHelper.tolist(data_dict['meta'])):
            c, h, w = image.size()
            border_hw = [int(h*scale), int(w*scale)]
            meta['border_hw'] = border_hw
            image = TensorHelper.resize(image, border_hw, mode='bilinear', align_corners=True)
            if flip:
                image = image.flip([2])

            if self.configer.exists('test', 'fit_stride'):
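                # Zero-pad on the right/bottom so height and width become multiples of the network stride.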
                stride = self.configer.get('test', 'fit_stride')

                pad_w = 0 if (border_hw[1] % stride == 0) else stride - (border_hw[1] % stride)  # right
                pad_h = 0 if (border_hw[0] % stride == 0) else stride - (border_hw[0] % stride)  # down

                expand_image = torch.zeros((c, border_hw[0] + pad_h, border_hw[1] + pad_w)).to(image.device)
                expand_image[:, 0:border_hw[0], 0:border_hw[1]] = image
                image = expand_image

            img_list.append(image)
            meta_list.append(meta)

        new_data_dict = dict(
            img=DCHelper.todc(img_list, stack=True, samples_per_gpu=True),
            meta=DCHelper.todc(meta_list, samples_per_gpu=True, cpu_only=True)
        )
        return new_data_dict
Example #4
    def val(self):
        """
          Validation function during the train phase.
        """
        self.cls_net.eval()
        start_time = time.time()

        with torch.no_grad():
            for j, data_dict in enumerate(self.val_loader):
                # Forward pass.
                out_dict = self.cls_net(data_dict)
                # Compute the loss of the val batch.
                loss = self.ce_loss(out_dict, data_dict, gathered=self.configer.get('network', 'gathered'))
                out_dict = RunnerHelper.gather(self, out_dict)
                self.cls_running_score.update(out_dict['out'], DCHelper.tolist(data_dict['labels']))
                self.val_losses.update(loss.item(), len(DCHelper.tolist(data_dict['meta'])))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

            RunnerHelper.save_net(self, self.cls_net, performance=self.cls_running_score.get_top1_acc())
            self.runner_state['performance'] = self.cls_running_score.get_top1_acc()
            # Print the log info & reset the states.
            Log.info('Test Time {batch_time.sum:.3f}s'.format(batch_time=self.batch_time))
            Log.info('TestLoss = {loss.avg:.8f}'.format(loss=self.val_losses))
            Log.info('Top1 ACC = {}'.format(self.cls_running_score.get_top1_acc()))
            Log.info('Top5 ACC = {}'.format(self.cls_running_score.get_top5_acc()))
            self.batch_time.reset()
            self.val_losses.reset()
            self.cls_running_score.reset()
            self.cls_net.train()
Example #5
    def test(self, test_dir, out_dir):
        for _, data_dict in enumerate(
                self.test_loader.get_testloader(test_dir=test_dir)):
            total_logits = None
            if self.configer.get('test', 'mode') == 'ss_test':
                total_logits = self.ss_test(data_dict)

            elif self.configer.get('test', 'mode') == 'sscrop_test':
                total_logits = self.sscrop_test(data_dict,
                                                params_dict=self.configer.get(
                                                    'test', 'sscrop_test'))

            elif self.configer.get('test', 'mode') == 'ms_test':
                total_logits = self.ms_test(data_dict,
                                            params_dict=self.configer.get(
                                                'test', 'ms_test'))

            elif self.configer.get('test', 'mode') == 'mscrop_test':
                total_logits = self.mscrop_test(data_dict,
                                                params_dict=self.configer.get(
                                                    'test', 'mscrop_test'))

            else:
                Log.error('Invalid test mode:{}'.format(
                    self.configer.get('test', 'mode')))
                exit(1)

            meta_list = DCHelper.tolist(data_dict['meta'])
            img_list = DCHelper.tolist(data_dict['img'])
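            # For each image: argmax over classes, colorize the prediction for visualization, then save the raw label map.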
            for i in range(len(meta_list)):
                filename = meta_list[i]['img_path'].split('/')[-1].split(
                    '.')[0]
                label_map = np.argmax(total_logits[i], axis=-1)
                label_img = np.array(label_map, dtype=np.uint8)
                ori_img_bgr = self.blob_helper.tensor2bgr(img_list[i][0])
                ori_img_bgr = ImageHelper.resize(
                    ori_img_bgr,
                    target_size=meta_list[i]['ori_img_size'],
                    interpolation='linear')
                image_canvas = self.seg_parser.colorize(
                    label_img, image_canvas=ori_img_bgr)
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(
                                     out_dir, 'vis/{}.png'.format(filename)))

                if self.configer.exists('data', 'label_list'):
                    label_img = self.__relabel(label_img)

                if self.configer.exists(
                        'data', 'reduce_zero_label') and self.configer.get(
                            'data', 'reduce_zero_label'):
                    label_img = label_img + 1
                    label_img = label_img.astype(np.uint8)

                label_img = Image.fromarray(label_img, 'P')
                label_path = os.path.join(out_dir,
                                          'label/{}.png'.format(filename))
                Log.info('Label Path: {}'.format(label_path))
                ImageHelper.save(label_img, label_path)
Example #6
    def val(self):
        """
          Validation function during the train phase.
        """
        self.det_net.eval()
        start_time = time.time()
        with torch.no_grad():
            for j, data_dict in enumerate(self.val_loader):
                inputs = data_dict['img']
                batch_gt_bboxes = data_dict['bboxes']
                batch_gt_labels = data_dict['labels']
                metas = data_dict['meta']
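                # Wrap ground-truth boxes, labels and meta in CPU-only DataContainers for the forward pass.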
                data_dict['bboxes'] = DCHelper.todc(
                    batch_gt_bboxes,
                    gpu_list=self.configer.get('gpu'),
                    cpu_only=True)
                data_dict['labels'] = DCHelper.todc(
                    batch_gt_labels,
                    gpu_list=self.configer.get('gpu'),
                    cpu_only=True)
                data_dict['meta'] = DCHelper.todc(
                    metas, gpu_list=self.configer.get('gpu'), cpu_only=True)
                # Forward pass.
                inputs = RunnerHelper.to_device(self, inputs)
                loss, test_group = self.det_net(data_dict)
                # Compute the loss of the val batch.
                loss = loss.mean()
                self.val_losses.update(loss.item(), inputs.size(0))
                test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = test_group
                batch_detections = FastRCNNTest.decode(test_roi_locs,
                                                       test_roi_scores,
                                                       test_indices_and_rois,
                                                       test_rois_num,
                                                       self.configer, metas)
                batch_pred_bboxes = self.__get_object_list(batch_detections)
                self.det_running_score.update(batch_pred_bboxes,
                                              batch_gt_bboxes, batch_gt_labels)

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

            RunnerHelper.save_net(self,
                                  self.det_net,
                                  iters=self.runner_state['iters'])
            # Print the log info & reset the states.
            Log.info(
                'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                               loss=self.val_losses))
            Log.info('Val mAP: {}\n'.format(self.det_running_score.get_mAP()))
            self.det_running_score.reset()
            self.batch_time.reset()
            self.val_losses.reset()
            self.det_net.train()
Example #7
    def val(self, data_loader=None):
        """
          Validation function during the train phase.
        """
        self.seg_net.eval()
        start_time = time.time()

        data_loader = self.val_loader if data_loader is None else data_loader
        for j, data_dict in enumerate(data_loader):

            with torch.no_grad():
                # Forward pass.
                out_dict = self.seg_net(data_dict)
                # Compute the loss of the val batch.
                loss_dict = self.pixel_loss(out_dict,
                                            data_dict,
                                            gathered=self.configer.get(
                                                'network', 'gathered'))
                out_dict = RunnerHelper.gather(self, out_dict)

            self.val_losses.update(loss_dict['loss'].item(),
                                   len(DCHelper.tolist(data_dict['meta'])))
            self._update_running_score(out_dict['out'],
                                       DCHelper.tolist(data_dict['meta']))

            # Update the vars of the val phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()

        self.runner_state['performance'] = self.seg_running_score.get_mean_iou(
        )
        self.runner_state['val_loss'] = self.val_losses.avg
        RunnerHelper.save_net(
            self,
            self.seg_net,
            performance=self.seg_running_score.get_mean_iou(),
            val_loss=self.val_losses.avg)

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        Log.info('Mean IOU: {}\n'.format(
            self.seg_running_score.get_mean_iou()))
        Log.info('Pixel ACC: {}\n'.format(
            self.seg_running_score.get_pixel_acc()))
        self.batch_time.reset()
        self.val_losses.reset()
        self.seg_running_score.reset()
        self.seg_net.train()
Example #8
    def _crop_predict(self, data_dict, crop_size, crop_stride_ratio):
        split_batch = list()
        height_starts_list = list()
        width_starts_list = list()
        hw_list = list()
        for image in DCHelper.tolist(data_dict['img']):
            height, width = image.size()[2:]
            hw_list.append([height, width])
            np_image = image.squeeze(0).permute(1, 2, 0).cpu().numpy()
            height_starts = self._decide_intersection(height, crop_size[1], crop_stride_ratio)
            width_starts = self._decide_intersection(width, crop_size[0], crop_stride_ratio)
            split_crops = []
            for h_start in height_starts:
                for w_start in width_starts:
                    image_crop = np_image[h_start:h_start + crop_size[1], w_start:w_start + crop_size[0]]
                    split_crops.append(image_crop[np.newaxis, :])

            height_starts_list.append(height_starts)
            width_starts_list.append(width_starts)
            split_crops = np.concatenate(split_crops, axis=0)  # (n, crop_image_size, crop_image_size, 3)
            inputs = torch.from_numpy(split_crops).permute(0, 3, 1, 2).to(self.device)
            split_batch.append(inputs)

        out_list = list()
        with torch.no_grad():
            results = self.seg_net.forward(DCHelper.todc(split_batch, stack=True, samples_per_gpu=1))
            for res in results:
                out_list.append(res[-1].permute(0, 2, 3, 1).cpu().numpy())

        total_logits = [np.zeros((hw[0], hw[1],
                                  self.configer.get('data', 'num_classes')), np.float32) for hw in hw_list]
        count_predictions = [np.zeros((hw[0], hw[1],
                                       self.configer.get('data', 'num_classes')), np.float32) for hw in hw_list]
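        # Paste each crop's logits back into its window and count how many crops cover every pixel.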
        for i in range(len(height_starts_list)):
            index = 0
            for h_start in height_starts_list[i]:
                for w_start in width_starts_list[i]:
                    total_logits[i][h_start:h_start+crop_size[1], w_start:w_start+crop_size[0]] += out_list[i][index]
                    count_predictions[i][h_start:h_start+crop_size[1], w_start:w_start+crop_size[0]] += 1
                    index += 1

        for i in range(len(total_logits)):
            total_logits[i] /= count_predictions[i]

        for i, meta in enumerate(DCHelper.tolist(data_dict['meta'])):
            total_logits[i] = cv2.resize(total_logits[i][:meta['border_hw'][0], :meta['border_hw'][1]],
                                         (meta['ori_img_size'][0], meta['ori_img_size'][1]),
                                         interpolation=cv2.INTER_CUBIC)

        return total_logits
Example #9
    def val(self):
        """
          Validation function during the train phase.
        """
        self.pose_net.eval()
        start_time = time.time()

        with torch.no_grad():
            for j, data_dict in enumerate(self.val_loader):
                # Forward pass.
                out_dict = self.pose_net(data_dict)

                # Compute the loss of the val batch.
                loss = self.cpm_loss(out_dict, data_dict, gathered=self.configer.get('network', 'gathered'))

                self.val_losses.update(loss.item(), len(DCHelper.tolist(data_dict['meta'])))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

            RunnerHelper.save_net(self, self.pose_net, iters=self.runner_state['iters'])
            # Print the log info & reset the states.
            Log.info(
                'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                'Loss {loss.avg:.8f}\n'.format(
                    batch_time=self.batch_time, loss=self.val_losses))
            self.batch_time.reset()
            self.val_losses.reset()
            self.pose_net.train()
Example #10
    def val(self):
        """
          Validation function during the train phase.
        """
        self.gan_net.eval()
        start_time = time.time()

        for j, data_dict in enumerate(self.val_loader):
            with torch.no_grad():
                # Forward pass.
                out_dict = self.gan_net(data_dict)
                # Compute the loss of the val batch.

            self.val_losses.update(
                out_dict['loss_G'].mean().item() +
                out_dict['loss_D'].mean().item(),
                len(DCHelper.tolist(data_dict['meta'])))
            # Update the vars of the val phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()

        RunnerHelper.save_net(self, self.gan_net, val_loss=self.val_losses.avg)

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.gan_net.train()
Example #11
    def train(self):
        """
          Train function of every epoch during train phase.
        """
        self.det_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.runner_state['epoch'] += 1

        # data_tuple: (inputs, heatmap, maskmap, vecmap)
        for i, data_dict in enumerate(self.train_loader):
            Trainer.update(self,
                           backbone_list=(0, ),
                           backbone_lr_list=(self.configer.get(
                               'solver', 'lr')['base_lr'], ),
                           solver_dict=self.configer.get('solver'))
            self.data_time.update(time.time() - start_time)
            # Forward pass.
            out_dict = self.det_net(data_dict)
            loss = out_dict['loss'].mean()
            self.train_losses.update(loss.item(),
                                     len(DCHelper.tolist(data_dict['meta'])))

            self.optimizer.zero_grad()
            loss.backward()
            RunnerHelper.clip_grad(self.det_net, 10.)
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.runner_state['iters'] += 1

            # Print the log info & reset the states.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:.3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.runner_state['epoch'],
                            self.runner_state['iters'],
                            self.configer.get('solver', 'display_iter'),
                            RunnerHelper.get_lr(self.optimizer),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            if self.configer.get('solver', 'lr')['metric'] == 'iters' \
                    and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'):
                break

            # Check to val the current model.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'test_interval') == 0:
                self.val()
Example #12
    def test(self, test_dir, out_dir):
        for _, data_dict in enumerate(
                self.test_loader.get_testloader(test_dir=test_dir)):
            data_dict['testing'] = True
            loc, conf = self.det_net(data_dict)
            meta_list = DCHelper.tolist(data_dict['meta'])
            batch_detections = self.decode(loc, conf, self.configer, meta_list)
            for i in range(len(meta_list)):
                ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'],
                                                     tool='cv2',
                                                     mode='BGR')
                json_dict = self.__get_info_tree(batch_detections[i])
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(),
                    json_dict,
                    conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(
                                     out_dir, 'vis/{}.png'.format(
                                         meta_list[i]['filename'])))

                Log.info('Json Path: {}'.format(
                    os.path.join(
                        out_dir,
                        'json/{}.json'.format(meta_list[i]['filename']))))
                JsonHelper.save_file(json_dict,
                                     save_path=os.path.join(
                                         out_dir, 'json/{}.json'.format(
                                             meta_list[i]['filename'])))
Example #13
    def val(self, data_loader=None):
        """
          Validation function during the train phase.
        """
        self.gan_net.eval()
        start_time = time.time()

        data_loader = self.val_loader if data_loader is None else data_loader
        for j, data_dict in enumerate(data_loader):

            with torch.no_grad():
                # Forward pass.
                out_dict = self.gan_net(data_dict)
                # Compute the loss of the val batch.

            self.val_losses.update(out_dict['loss'].mean().item(),
                                   len(DCHelper.tolist(data_dict['meta'])))
            meta_list = DCHelper.tolist(data_dict['meta'])
            probe_features = []
            gallery_features = []
            probe_labels = []
            gallery_labels = []
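            # Collect probe (A) and gallery (B) features and labels for the verification metrics below.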
            for idx in range(len(meta_list)):
                gallery_features.append(out_dict['featB'][idx].cpu().numpy())
                gallery_labels.append(meta_list[idx]['labelB'])
                probe_features.append(out_dict['featA'][idx].cpu().numpy())
                probe_labels.append(meta_list[idx]['labelA'])

            rank_1, vr_far_001 = FaceGANTest.decode(probe_features,
                                                    gallery_features,
                                                    probe_labels,
                                                    gallery_labels)
            Log.info('Rank1 accuracy is {}'.format(rank_1))
            Log.info('VR@FAR=0.1% accuracy is {}'.format(vr_far_001))
            # Update the vars of the val phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()

        RunnerHelper.save_net(self, self.gan_net, val_loss=self.val_losses.avg)

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.gan_net.train()
Example #14
    def val(self):
        """
          Validation function during the train phase.
        """
        self.det_net.eval()
        start_time = time.time()
        with torch.no_grad():
            for j, data_dict in enumerate(self.val_loader):
                # Forward pass.
                out_dict = self.det_net(data_dict)
                # Compute the loss of the val batch.
                loss = out_dict['loss'].mean()
                self.val_losses.update(loss.item(),
                                       len(DCHelper.tolist(data_dict['meta'])))
                test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = out_dict[
                    'test_group']
                batch_detections = FastRCNNTest.decode(
                    test_roi_locs, test_roi_scores, test_indices_and_rois,
                    test_rois_num, self.configer,
                    DCHelper.tolist(data_dict['meta']))
                batch_pred_bboxes = self.__get_object_list(batch_detections)
                self.det_running_score.update(batch_pred_bboxes, [
                    item['ori_bboxes']
                    for item in DCHelper.tolist(data_dict['meta'])
                ], [
                    item['ori_labels']
                    for item in DCHelper.tolist(data_dict['meta'])
                ])

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

            RunnerHelper.save_net(self,
                                  self.det_net,
                                  iters=self.runner_state['iters'])
            # Print the log info & reset the states.
            Log.info(
                'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                               loss=self.val_losses))
            Log.info('Val mAP: {}\n'.format(self.det_running_score.get_mAP()))
            self.det_running_score.reset()
            self.batch_time.reset()
            self.val_losses.reset()
            self.det_net.train()
Example #15
    def sscrop_test(self, in_data_dict, params_dict):
        data_dict = self.blob_helper.get_blob(in_data_dict, scale=1.0)
        if any(image.size()[3] < params_dict['crop_size'][0] or image.size()[2] < params_dict['crop_size'][1]
                   for image in DCHelper.tolist(data_dict['img'])):
            results = self._predict(data_dict)
        else:
            results = self._crop_predict(data_dict, params_dict['crop_size'], params_dict['crop_stride_ratio'])

        return results
Example #16
    def sscrop_test(self, in_data_dict):
        data_dict = self.blob_helper.get_blob(in_data_dict, scale=1.0)
        crop_size = self.configer.get('test', 'crop_size')
        if any(image.size()[3] < crop_size[0] or image.size()[2] < crop_size[1]
               for image in DCHelper.tolist(data_dict['img'])):
            results = self._predict(data_dict)
        else:
            results = self._crop_predict(data_dict, crop_size)

        return results
Example #17
    def _predict(self, data_dict):
        with torch.no_grad():
            total_logits = list()
            results = self.seg_net.forward(data_dict['img'])
            for res in results:
                total_logits.append(res[-1].squeeze(0).permute(1, 2, 0).cpu().numpy())

            for i, meta in enumerate(DCHelper.tolist(data_dict['meta'])):
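                # Crop away any fit-stride padding (border_hw) and resize the logits back to the original image size.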
                total_logits[i] = cv2.resize(total_logits[i][:meta['border_hw'][0], :meta['border_hw'][1]],
                                             (meta['ori_img_size'][0], meta['ori_img_size'][1]),
                                             interpolation=cv2.INTER_CUBIC)

        return total_logits
Example #18
    def ms_test(self, in_data_dict, params_dict):
        total_logits = [np.zeros((meta['ori_img_size'][1], meta['ori_img_size'][0],
                                  self.configer.get('data', 'num_classes')), np.float32)
                        for meta in DCHelper.tolist(in_data_dict['meta'])]
        for scale in params_dict['scale_search']:
            data_dict = self.blob_helper.get_blob(in_data_dict, scale=scale)
            results = self._predict(data_dict)
            for i in range(len(total_logits)):
                total_logits[i] += results[i]

        for scale in params_dict['scale_search']:
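            # Flipped pass: accumulate the logits after flipping them back along the width axis.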
            data_dict = self.blob_helper.get_blob(in_data_dict, scale=scale, flip=True)
            results = self._predict(data_dict)
            for i in range(len(total_logits)):
                total_logits[i] += results[i][:, ::-1]

        return total_logits
Example #19
    def test(self, test_dir, out_dir):
        gallery_file_list = '*_gallery_*.txt'
        probe_file_list = '*_probe_*.txt'
        gallery_file_list = glob.glob(test_dir + '/' + gallery_file_list)
        probe_file_list = glob.glob(test_dir + '/' + probe_file_list)
        # sort both lists so gallery and probe files pair up by index
        gallery_file_list = sorted(gallery_file_list)
        probe_file_list = sorted(probe_file_list)
        rank1_acc = []
        vr_acc = []
        for i in range(len(gallery_file_list)):
            probe_features = []
            gallery_features = []
            probe_names = []
            gallery_names = []
            Log.info('Gallery File: {}'.format(gallery_file_list[i]))
            for data_dict in self.test_loader.get_testloader(
                    list_path=gallery_file_list[i]):
                new_data_dict = dict(gallery=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])

                for idx in range(len(out_dict['feat'])):
                    gallery_features.append(
                        out_dict['feat'][idx].cpu().numpy())
                    gallery_names.append(
                        meta_list[idx]['img_path'].split("/")[-2])

            Log.info('Probe File: {}'.format(probe_file_list[i]))
            for data_dict in self.test_loader.get_testloader(
                    list_path=probe_file_list[i]):
                new_data_dict = dict(probe=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])

                for key, value in out_dict.items():
                    if 'feat' in key:
                        for idx in range(len(value)):
                            probe_features.append(value[idx].cpu().numpy())
                            probe_names.append(
                                meta_list[idx]['img_path'].split("/")[-2])

                        continue
                    else:
                        for idx in range(len(value)):
                            img_bgr = self.blob_helper.tensor2bgr(value[idx])
                            filename = meta_list[idx]['img_path'].rstrip(
                            ).split('/')[-1]
                            ImageHelper.save(
                                img_bgr, os.path.join(out_dir, key, filename))

            probe_features = np.array(probe_features)
            gallery_features = np.array(gallery_features)
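            # Cosine similarity between every gallery and probe feature, transposed so rows index probes.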
            score = cosine_similarity(gallery_features, probe_features).T
            r_acc, tpr = self.compute_metric(score, probe_names, gallery_names)
            # print('score={}, probe_names={}, gallery_names={}'.format(score, probe_names, gallery_names))
            rank1_acc.append(r_acc)
            vr_acc.append(tpr)

        avg_r_a = np.mean(np.array(rank1_acc))
        std_r_a = np.std(np.array(rank1_acc))
        avg_v_a = np.mean(np.array(vr_acc))
        std_v_a = np.std(np.array(vr_acc))
        # avg_vr_acc = sum(vr_acc)/(len(vr_acc) + 1e-5)
        print()
        print('=====================================================')
        print('Final Rank1 accuracy is', avg_r_a * 100, "% +", std_r_a)
        print('Final VR@FAR=0.1% accuracy is', avg_v_a * 100, "% +", std_v_a)
        print('=====================================================')
        print()
        return avg_r_a, std_r_a, avg_v_a, std_v_a
Example #20
    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get(
                'test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(
                self.configer.get('test', 'test_dir'),
                'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(
                json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(
                self.configer.get('test', 'test_dir'),
                'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(
                json_path=jsonB_path) if os.path.exists(jsonB_path) else None
        elif self.configer.exists('test', 'mode') and self.configer.get(
                'test', 'mode') == 'pix2pix':
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(
                test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            test_loader_B = None
        else:
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(
                test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            imgB_dir = os.path.join(test_dir, 'imageB')
            test_loader_B = self.test_loader.get_testloader(
                test_dir=imgB_dir) if os.path.exists(imgB_dir) else None

        if test_loader_A is not None:
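            # Run domain-A images through the generator and save every output image under its output key.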
            for data_dict in test_loader_A:
                new_data_dict = dict(imgA=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)

                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        # filename = '_'.join(img_path.rstrip().split('/')[-2:])
                        img_bgr = ImageHelper.resize(
                            img_bgr,
                            target_size=self.configer.get('test', 'out_size'),
                            interpolation='linear')
                        ImageHelper.save(
                            img_bgr,
                            os.path.join(out_dir, key,
                                         meta_list[i]['filename']))

        if test_loader_B is not None:
            for data_dict in test_loader_B:
                new_data_dict = dict(imgB=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        # filename = img_path.rstrip().split('/')[-1]
                        # filename = '_'.join(img_path.rstrip().split('/')[-2:])
                        img_bgr = ImageHelper.resize(
                            img_bgr,
                            target_size=self.configer.get('test', 'out_size'),
                            interpolation='linear')
                        ImageHelper.save(
                            img_bgr,
                            os.path.join(out_dir, key,
                                         meta_list[i]['filename']))
Example #21
    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        image = ImageHelper.read_image(
            image_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(image,
                                              mode=self.configer.get(
                                                  'data', 'input_mode'))
        width, height = ImageHelper.get_size(image)
        scale1 = self.configer.get('test', 'resize_bound')[0] / min(
            width, height)
        scale2 = self.configer.get('test', 'resize_bound')[1] / max(
            width, height)
        scale = min(scale1, scale2)
        inputs = self.blob_helper.make_input(image, scale=scale)
        b, c, h, w = inputs.size()
        border_wh = [w, h]
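        # Zero-pad the input on the right/bottom so both sides are multiples of fit_stride.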
        if self.configer.exists('test', 'fit_stride'):
            stride = self.configer.get('test', 'fit_stride')

            pad_w = 0 if (w % stride == 0) else stride - (w % stride)  # right
            pad_h = 0 if (h % stride == 0) else stride - (h % stride)  # down

            expand_image = torch.zeros(
                (b, c, h + pad_h, w + pad_w)).to(inputs.device)
            expand_image[:, :, 0:h, 0:w] = inputs
            inputs = expand_image

        data_dict = dict(
            img=inputs,
            meta=DataContainer([[
                dict(ori_img_size=ImageHelper.get_size(ori_img_bgr),
                     aug_img_size=border_wh,
                     img_scale=scale,
                     input_size=[inputs.size(3),
                                 inputs.size(2)])
            ]],
                               cpu_only=True))

        with torch.no_grad():
            # Forward pass.
            test_group = self.det_net(data_dict)

            test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = test_group

        batch_detections = self.decode(test_roi_locs, test_roi_scores,
                                       test_indices_and_rois, test_rois_num,
                                       self.configer,
                                       DCHelper.tolist(data_dict['meta']))
        json_dict = self.__get_info_tree(batch_detections[0],
                                         ori_img_bgr,
                                         scale=scale)

        image_canvas = self.det_parser.draw_bboxes(
            ori_img_bgr.copy(),
            json_dict,
            conf_threshold=self.configer.get('res', 'vis_conf_thre'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict
Example #22
    def train(self):
        """
          Train function of every epoch during train phase.
        """
        self.gan_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.scheduler_G.step(self.runner_state['epoch'])
        self.scheduler_D.step(self.runner_state['epoch'])
        for i, data_dict in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)

            # Forward pass.
            out_dict = self.gan_net(data_dict)
            # outputs = self.module_utilizer.gather(outputs)
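            # The generator and discriminator are updated with separate optimizers on their respective losses.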
            self.optimizer_G.zero_grad()
            loss_G = out_dict['loss_G'].mean()
            loss_G.backward()
            self.optimizer_G.step()

            self.optimizer_D.zero_grad()
            loss_D = out_dict['loss_D'].mean()
            loss_D.backward()
            self.optimizer_D.step()
            loss = loss_G + loss_D
            self.train_losses.update(loss.item(),
                                     len(DCHelper.tolist(data_dict['meta'])))

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.runner_state['iters'] += 1

            # Print the log info & reset the states.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:.3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.runner_state['epoch'],
                            self.runner_state['iters'],
                            self.configer.get('solver', 'display_iter'), [
                                RunnerHelper.get_lr(self.optimizer_G),
                                RunnerHelper.get_lr(self.optimizer_D)
                            ],
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            if self.configer.get('solver', 'lr')['metric'] == 'iters' \
                    and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'):
                break

            # Check to val the current model.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'test_interval') == 0:
                self.val()

        self.runner_state['epoch'] += 1
Example #23
    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get(
                'test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(
                test_dir,
                'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(
                json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(
                test_dir,
                'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(
                json_path=jsonB_path) if os.path.exists(jsonB_path) else None

        else:
            test_loader_A, test_loader_B = None, None
            Log.error('Test Mode not Exists!')
            exit(1)

        assert test_loader_A is not None and test_loader_B is not None
        probe_features = []
        gallery_features = []
        probe_labels = []
        gallery_labels = []
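        # Domain A: collect probe features/labels and save any non-feature outputs as images.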
        for data_dict in test_loader_A:
            new_data_dict = dict(imgA=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                probe_features.append(out_dict['featA'][idx].cpu().numpy())
                probe_labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get(
                                                     'test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(
                        img_bgr,
                        os.path.join(out_dir, key, meta_list[i]['filename']))

        for data_dict in test_loader_B:
            new_data_dict = dict(imgB=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])

            for idx in range(len(meta_list)):
                gallery_features.append(out_dict['feat'][idx].cpu().numpy())
                gallery_labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get(
                                                     'test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(
                        img_bgr,
                        os.path.join(out_dir, key, meta_list[i]['filename']))

        r_acc, tpr = self.decode(probe_features, gallery_features,
                                 probe_labels, gallery_labels)
        Log.info('Final Rank1 accuracy is {}'.format(r_acc))
        Log.info('Final VR@FAR=0.1% accuracy is {}'.format(tpr))
Example #24
    def train(self):
        """
          Train function of every epoch during train phase.
        """
        self.pose_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.runner_state['epoch'] += 1
        for i, data_dict in enumerate(self.train_loader):
            Trainer.update(self,
                           backbone_list=(0, ),
                           solver_dict=self.configer.get('solver'))
            self.data_time.update(time.time() - start_time)
            # Forward pass.
            out_dict = self.pose_net(data_dict)

            # Compute the loss of the train batch & backward.
            loss_dict = self.mse_loss(out_dict,
                                      data_dict,
                                      gathered=self.configer.get(
                                          'network', 'gathered'))

            loss = loss_dict['loss']
            self.train_losses.update(loss.item(),
                                     len(DCHelper.tolist(data_dict['meta'])))

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.runner_state['iters'] += 1

            # Print the log info & reset the states.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info('Loss Heatmap:{}, Loss Asso: {}'.format(
                    self.train_loss_heatmap.avg,
                    self.train_loss_associate.avg))
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:.3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.runner_state['epoch'],
                            self.runner_state['iters'],
                            self.configer.get('solver', 'display_iter'),
                            RunnerHelper.get_lr(self.optimizer),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))

                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()
                self.train_loss_heatmap.reset()
                self.train_loss_associate.reset()

            if self.configer.get('solver', 'lr')['metric'] == 'iters' \
                    and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'):
                break

            # Check to val the current model.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'test_interval') == 0:
                self.val()
Example #25
    def train(self):
        """
          Train function of every epoch during train phase.
        """
        self.det_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.runner_state['epoch'] += 1

        for i, data_dict in enumerate(self.train_loader):
            Trainer.update(self)
            batch_gt_bboxes = data_dict['bboxes']
            batch_gt_labels = data_dict['labels']
            metas = data_dict['meta']
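            # Wrap ground-truth boxes, labels and meta in CPU-only DataContainers before the forward pass.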
            data_dict['bboxes'] = DCHelper.todc(
                batch_gt_bboxes,
                gpu_list=self.configer.get('gpu'),
                cpu_only=True)
            data_dict['labels'] = DCHelper.todc(
                batch_gt_labels,
                gpu_list=self.configer.get('gpu'),
                cpu_only=True)
            data_dict['meta'] = DCHelper.todc(
                metas, gpu_list=self.configer.get('gpu'), cpu_only=True)
            self.data_time.update(time.time() - start_time)
            # Forward pass.
            loss = self.det_net(data_dict)
            loss = loss.mean()
            self.train_losses.update(loss.item(), data_dict['img'].size(0))

            self.optimizer.zero_grad()
            loss.backward()
            RunnerHelper.clip_grad(self.det_net, 10.)
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.runner_state['iters'] += 1

            # Print the log info & reset the states.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:.3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.runner_state['epoch'],
                            self.runner_state['iters'],
                            self.configer.get('solver', 'display_iter'),
                            RunnerHelper.get_lr(self.optimizer),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            if self.configer.get('lr', 'metric') == 'iters' \
                    and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'):
                break

            # Check to val the current model.
            if self.runner_state['iters'] % self.configer.get(
                    'solver', 'test_interval') == 0:
                self.val()