Code example #1
File: gpu_evaluator.py Project: zhwzhong/vega
    def valid(self, valid_loader):
        """Validate one step of mode.

        :param valid_loader: valid data loader
        """
        self.model.eval()
        metrics = Metrics(self.cfg.metric)
        data_num = 0
        latency_sum = 0.0
        with torch.no_grad():
            for step, (data, target) in enumerate(valid_loader):
                if self.cfg.cuda:
                    data, target = data.cuda(), target.cuda()
                    self.model = self.model.cuda()
                time_start = time.time()
                logits = self.model(data)
                latency_sum += time.time() - time_start
                metrics(logits, target)
                n = data.size(0)
                data_num += n
                if self._first_rank and step % self.cfg.report_freq == 0:
                    logging.info("step [{}/{}], valid metric [{}]".format(
                        step + 1, len(valid_loader), str(metrics.results_dict)))
        latency = latency_sum / data_num
        pfms = metrics.results_dict
        # take the first configured metric as the primary performance value
        performance = [pfms[list(pfms.keys())[0]]]
        if self.cfg.evaluate_latency:
            performance.append(latency)
        logging.info("valid performance: {}".format(performance))
        return performance
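The latency that valid() reports is total forward wall-clock time divided by the number of samples, i.e. an average per-sample latency. A minimal self-contained sketch of the same measurement, with a stand-in model and loader rather than vega's classes:

import time
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Stand-in model and data; in vega these come from the evaluator's config.
model = nn.Linear(16, 10).eval()
dataset = TensorDataset(torch.randn(64, 16), torch.randint(0, 10, (64,)))
loader = DataLoader(dataset, batch_size=8)

latency_sum, data_num = 0.0, 0
with torch.no_grad():
    for data, target in loader:
        time_start = time.time()
        logits = model(data)
        latency_sum += time.time() - time_start  # wall-clock time of one forward pass
        data_num += data.size(0)

# Average per-sample latency, as computed at the end of valid() above.
print("latency per sample: {:.6f}s".format(latency_sum / data_num))

Note that on a GPU this timing pattern undercounts, because CUDA kernel launches are asynchronous; calling torch.cuda.synchronize() before each clock read would be needed for an accurate figure.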
Code example #2
File: cars_alg.py Project: zeyefkey/vega
    def search_infer_step(self, alpha):
        """Infer in search stage.

        :param valid_queue: valid dataloader
        :type valid_queue: dataloader
        :param model: The model to be trained
        :type model: nn.Module
        :param alpha: encoding of a model
        :type alpha: array
        :return: Average top1 acc and loss
        :rtype: nn.Tensor
        """
        if vega.is_torch_backend():
            metrics = Metrics()
            alpha_tensor = torch.from_numpy(alpha).cuda()
            self.trainer.model.eval()
            with torch.no_grad():
                for step, (input,
                           target) in enumerate(self.trainer.valid_loader):
                    input = input.cuda()
                    target = target.cuda(non_blocking=True)
                    logits = self.trainer.model(input, alpha_tensor)
                    metrics(logits, target)
        elif vega.is_tf_backend():
            # self.trainer.valid_alpha = tf.convert_to_tensor(alpha)
            metrics = self.trainer.valid_metrics
            setattr(self.trainer, 'valid_alpha', alpha)
            eval_results = self.trainer.estimator.evaluate(
                input_fn=self.trainer.valid_loader.input_fn,
                steps=len(self.trainer.valid_loader))
            metrics.update(eval_results)
        performance = metrics.results
        objectives = metrics.objectives
        # negate 'MIN' objectives so that higher is always better
        for key, mode in objectives.items():
            if mode == 'MIN':
                performance[key] = -1 * performance[key]
        performance.update({'kparams': self.eval_model_sizes(alpha)})
        return performance
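The block behind that comment flips the sign of any metric whose objective mode is 'MIN', so the search can treat every objective uniformly as higher-is-better. The same convention as a standalone sketch (the function name and sample values are illustrative, not vega's API):

def to_maximization(performance, objectives):
    """Negate metrics that should be minimized, so higher is always better."""
    return {key: -value if objectives.get(key) == 'MIN' else value
            for key, value in performance.items()}

# Example: accuracy is maximized, latency is minimized.
perf = {'accuracy': 0.91, 'latency': 12.5}
objs = {'accuracy': 'MAX', 'latency': 'MIN'}
print(to_maximization(perf, objs))  # {'accuracy': 0.91, 'latency': -12.5}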
Code example #3
File: sr_evaluator.py Project: zhwzhong/vega
    def valid(self, loader):
        """Validate one step of model.

        :param loader: validation dataloader
        """
        metrics = Metrics(self.cfg.metric)
        self.model.eval()
        with torch.no_grad():
            for batch in loader:
                img_lr, img_hr = batch["LR"].cuda() / 255.0, batch["HR"].cuda() / 255.0
                image_sr = self.model(img_lr)
                metrics(image_sr, img_hr)  # rounding the images first gives lower scores
        performance = metrics.results
        logging.info('Valid metric: {}'.format(performance))
        return performance
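The evaluator scales the uint8 images to [0, 1] by dividing by 255 before computing metrics. vega's Metrics object supplies whatever SR metrics are configured; purely to illustrate the kind of computation involved, here is a minimal PSNR for [0, 1] tensors (this is not vega's implementation):

import torch

def psnr(img_sr, img_hr, max_val=1.0):
    """Peak signal-to-noise ratio between two images in [0, max_val]."""
    mse = torch.mean((img_sr - img_hr) ** 2)
    return 10.0 * torch.log10(max_val ** 2 / mse)

sr = torch.rand(1, 3, 32, 32)  # stand-ins for image_sr and img_hr above
hr = torch.rand(1, 3, 32, 32)
print(psnr(sr, hr).item())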
Code example #4
    def valid(self):
        """Validate the latency in davinci or bolt."""
        test_data = "./input.bin"
        latency_sum = 0.0
        metrics = Metrics(self.config.metric)
        data_num = 0
        for step, batch in enumerate(self.valid_loader):
            if isinstance(batch, list):
                data = batch[0]
                target = batch[1]
            elif isinstance(batch, dict):
                data = batch["LR"] / 255.0
                target = batch["HR"] / 255.0
            else:
                raise ValueError("The dataset format is invalid.")
            input_shape = data.shape
            data_num += data.size(0)
            if data.size(0) != 1:
                logging.error(
                    "The batch_size should be 1, but got {}.".format(
                        data.size(0)))
            if torch.is_tensor(data):
                data = data.numpy()
            data.tofile(test_data)
            results = evaluate(self.framework, self.backend, self.remote_host,
                               self.model, None, test_data, input_shape)
            latency = float(results.get("latency"))
            latency_sum += latency
            output = results.get("out_data")
            output = torch.Tensor(output)
            metrics(output, target)
            if step % self.config.report_freq == 0:
                logging.info(
                    "step [{}/{}], latency [{}], valid metric [{}]".format(
                        step + 1, len(self.valid_loader), latency,
                        str(metrics.results)))
        latency_avg = latency_sum / data_num
        logging.info("The latency in {} is {} ms.".format(
            self.backend, latency_avg))
        pfms = metrics.results
        if self.config.evaluate_latency:
            pfms["latency"] = latency_avg
        logging.info("valid performance: {}".format(pfms))
        return pfms
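The davinci/bolt path works by dumping each batch to a raw binary file that the remote evaluate() call reads back using input_shape, which is why the batch size must be 1. A sketch of that serialization round trip (the file name mirrors the snippet; the float32 dtype is an assumption):

import numpy as np
import torch

data = torch.randn(1, 3, 32, 32)  # batch_size must be 1
input_shape = tuple(data.shape)

# Writer side: tofile() emits flat float32 bytes with no header or shape info.
data.numpy().astype(np.float32).tofile("./input.bin")

# Reader side (what the remote evaluator would do): shape must be supplied.
restored = np.fromfile("./input.bin", dtype=np.float32).reshape(input_shape)
assert np.allclose(data.numpy(), restored)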
Code example #5
    def search_infer_step(self, alpha):
        """Infer in search stage.

        :param valid_queue: valid dataloader
        :type valid_queue: dataloader
        :param model: The model to be trained
        :type model: nn.Module
        :param alpha: encoding of a model
        :type alpha: array
        :return: Average top1 acc and loss
        :rtype: nn.Tensor
        """
        metrics = Metrics(self.trainer.cfg.metric)
        self.trainer.model.eval()
        with torch.no_grad():
            for step, (input, target) in enumerate(self.trainer.valid_loader):
                input = input.cuda()
                target = target.cuda(non_blocking=True)
                logits = self.trainer.model(input, alpha)
                metrics(logits, target)
        top1 = metrics.results[0]
        return top1
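Unlike code example #2, this variant passes alpha to the model without first converting it to a CUDA tensor, so the model's forward is expected to accept the encoding as given. As a rough, generic sketch of how a forward pass can consume an architecture encoding, here is a DARTS-style mixed operation; it is illustrative only, not vega's CARS implementation:

import torch
import torch.nn as nn

class MixedOp(nn.Module):
    """Weight candidate ops by an architecture encoding, DARTS-style."""

    def __init__(self, channels):
        super().__init__()
        self.ops = nn.ModuleList([
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.Conv2d(channels, channels, 5, padding=2),
            nn.Identity(),
        ])

    def forward(self, x, alpha):
        weights = torch.softmax(alpha, dim=-1)  # one weight per candidate op
        return sum(w * op(x) for w, op in zip(weights, self.ops))

op = MixedOp(8).eval()
x = torch.randn(1, 8, 16, 16)
alpha = torch.randn(3)  # encoding with one entry per op
with torch.no_grad():
    print(op(x, alpha).shape)  # torch.Size([1, 8, 16, 16])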
Code example #6
File: gpu_evaluator.py Project: zeyefkey/vega
    def valid(self, valid_loader):
        """Validate one step of mode.

        :param valid_loader: valid data loader
        """
        self.model.eval()
        metrics = Metrics(self.config.metric)
        data_num = 0
        latency_sum = 0.0
        with torch.no_grad():
            for step, batch in enumerate(valid_loader):
                if isinstance(batch, list):
                    data = batch[0]
                    target = batch[1]
                elif isinstance(batch, dict):
                    data = batch["LR"] / 255.0
                    target = batch["HR"] / 255.0
                else:
                    raise ValueError("The dataset formart is invalid.")
                if self.config.cuda:
                    data, target = data.cuda(), target.cuda()
                    self.model = self.model.cuda()
                time_start = time.time()
                logits = self.model(data)
                latency_sum += time.time() - time_start
                metrics(logits, target)
                n = data.size(0)
                data_num += n
                if step % self.config.report_freq == 0:
                    logging.info("step [{}/{}], valid metric [{}]".format(
                        step + 1, len(valid_loader), str(metrics.results)))
        latency = latency_sum / data_num
        pfms = metrics.results
        if self.config.evaluate_latency:
            pfms["latency"] = latency
        logging.info("valid performance: {}".format(pfms))
        return pfms
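The list/dict branching at the top of the loop is the same convention as in code example #4: list batches come from classification-style loaders that yield (data, target) pairs, while dict batches come from SR loaders that yield raw "LR"/"HR" images needing the divide-by-255 scaling. Factored out as a helper, the convention reads (an illustrative refactor, not code that exists in vega):

def unpack_batch(batch):
    """Normalize list- and dict-style batches to a (data, target) pair."""
    if isinstance(batch, list):
        return batch[0], batch[1]
    if isinstance(batch, dict):
        # SR datasets yield raw images; scale them to [0, 1].
        return batch["LR"] / 255.0, batch["HR"] / 255.0
    raise ValueError("The dataset format is invalid.")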