Example #1
def multi_gpu_test(model, data_loader):
    model.eval()
    func = lambda **x: model(mode='test', **x)
    rank, world_size = get_dist_info()
    results = dist_forward_collect(func, data_loader, rank,
                                   len(data_loader.dataset))
    return results
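A minimal, hypothetical driver for the function above (only multi_gpu_test and get_dist_info come from the example; build_model, build_dataloader and cfg are placeholders for whatever constructs the model and a distributed DataLoader):

import torch
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import get_dist_info

# Placeholders: build_model / build_dataloader / cfg stand in for project-specific setup.
model = MMDistributedDataParallel(build_model(cfg).cuda(),
                                  device_ids=[torch.cuda.current_device()])
results = multi_gpu_test(model, build_dataloader(cfg))  # dict{key: np.ndarray}
rank, _ = get_dist_info()
if rank == 0:
    for name, val in results.items():
        print(name, val.shape)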
Example #2
    def _get_net_acc(self, runner, choice_indices):
        if self.lookup is not None:
            # return the cached accuracy if this sub-network was evaluated before
            choice_indices_text = ''.join([str(i) for i in choice_indices])
            if choice_indices_text in self.lookup:
                return self.lookup[choice_indices_text]

        if self.eval_kwargs['bn'] == 'update':
            # temporarily unfreeze the backbone so BN running statistics can be
            # recalibrated on training data for this choice of sub-network
            frozen_stages = runner.model.module.backbone.frozen_stages
            runner.model.module.backbone.frozen_stages = -1
            update_bn_stats(self.train_data_loader,
                            runner,
                            choice_indices,
                            num_iters=49)
            runner.model.module.backbone.frozen_stages = frozen_stages

        func = lambda **x: runner.model(
            mode='test', **x, choice_indices=choice_indices)
        if self.dist_mode:
            results = dist_forward_collect(
                func, self.val_data_loader, runner.rank,
                len(self.val_dataset))  # dict{key: np.ndarray}
        else:
            results = nondist_forward_collect(func, self.val_data_loader,
                                              len(self.val_dataset))

        results = {
            name: self._evaluate(runner, torch.from_numpy(val), name)
            for name, val in results.items()
        }
        return results['head0']['head0_top1']
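_get_net_acc only reads from self.lookup; a hypothetical caller that fills the cache with the same string key format (the search method and sampled_choices below are assumptions, not part of the example):

    def _search(self, runner, sampled_choices):
        # Hypothetical search loop: evaluate each sampled sub-network once and
        # cache its top-1 accuracy under the same key used by _get_net_acc.
        for choice_indices in sampled_choices:
            key = ''.join(str(i) for i in choice_indices)
            if key not in self.lookup:
                self.lookup[key] = self._get_net_acc(runner, choice_indices)
        return self.lookup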
Example #3
    def extract(self, model, data_loader, distributed=False):
        model.eval()
        func = lambda **x: self._forward_func(model, **x)
        if distributed:
            rank, world_size = get_dist_info()
            results = dist_forward_collect(func, data_loader, rank,
                                           len(data_loader.dataset))
        else:
            results = nondist_forward_collect(func, data_loader,
                                              len(data_loader.dataset))
        return results
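Both collect helpers expect func to return a dict of per-batch tensors, which they gather over the whole dataset; a hypothetical _forward_func compatible with extract could look like this (the mode='extract' call and the flattening are assumptions modeled on the other examples):

    def _forward_func(self, model, **x):
        # Hypothetical: run one batch in feature-extraction mode, take the last
        # backbone stage, and flatten it to an N x D feature matrix.
        feat = model(mode='extract', **x)[-1]
        feat = feat.reshape(feat.size(0), -1)
        return dict(feature=feat.cpu())  # dict of per-batch tensors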
Example #4
    def __call__(self, runner):
        func = lambda **x: self._forward_func(runner, **x)
        if self.dist_mode:
            feats = dist_forward_collect(func,
                                         self.data_loader,
                                         runner.rank,
                                         len(self.dataset),
                                         ret_rank=-1)['feature']  # NxD
        else:
            feats = nondist_forward_collect(func, self.data_loader,
                                            len(self.dataset))['feature']
        return feats
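The hook above returns one feature matrix for the whole dataset; a hypothetical consumer that L2-normalizes the features before clustering or retrieval (the normalization step and the extractor/runner names are assumptions, not part of the example):

import numpy as np

# Hypothetical post-processing of the collected N x D feature array.
feats = extractor(runner)  # the __call__ above; extractor and runner are placeholders
feats = feats / (np.linalg.norm(feats, axis=1, keepdims=True) + 1e-10)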
Example #5
    def _run_validate(self, runner):
        runner.model.eval()
        func = lambda **x: runner.model(mode='test', **x)
        if self.dist_mode:
            results = dist_forward_collect(
                func, self.data_loader, runner.rank,
                len(self.dataset))  # dict{key: np.ndarray}
        else:
            results = nondist_forward_collect(func, self.data_loader,
                                              len(self.dataset))
        if runner.rank == 0:
            for name, val in results.items():
                self._evaluate(runner, torch.from_numpy(val), name)
        runner.model.train()
Example #6
    def _run_validate(self, runner):
        runner.model.eval()
        func = lambda **x: runner.model(mode='test', **x)
        if self.dist_mode:
            results = dist_forward_collect(
                func, self.data_loader, runner.rank,
                len(self.dataset))  # dict{key: np.ndarray}
        else:
            results = nondist_forward_collect(func, self.data_loader,
                                              len(self.dataset))

        # validation loss, computed in both modes so the log_buffer entry below
        # is always defined; BCEWithLogitsLoss needs float targets shaped like
        # the logits, so convert the dataset labels explicitly
        target = torch.as_tensor(self.dataset.get_labels(), dtype=torch.float32)
        criterion = nn.BCEWithLogitsLoss()
        val_loss = criterion(torch.from_numpy(results['head0']), target)
        print("val_loss: ", val_loss)

        if runner.rank == 0:
            for name, val in results.items():
                self._evaluate(runner, torch.from_numpy(val), name)
        runner.log_buffer.output["val_loss"] = str(val_loss.numpy())
        runner.log_buffer.ready = True
        runner.model.train()