Example #1
    def evaluate_dataloader(self,
                            data: DataLoader,
                            criterion: Callable,
                            metric: MetricDict = None,
                            output=False,
                            logger=None,
                            ratio_width=None,
                            **kwargs):

        if metric is None:
            metric = self.build_metric()  # guard: a MetricDict is required despite the `None` default
        metric.reset()
        self.model.eval()
        timer = CountdownTimer(len(data))
        total_loss = 0
        for idx, batch in enumerate(data):
            out, mask = self.feed_batch(batch)
            loss = out['loss']
            total_loss += loss.item()
            self.decode_output(out, mask, batch)
            self.update_metrics(metric, batch, out, mask)
            report = f'loss: {total_loss / (idx + 1):.4f} {metric.cstr()}'
            timer.log(report,
                      logger=logger,
                      ratio_percentage=False,
                      ratio_width=ratio_width)
            # Release per-batch tensors eagerly to keep evaluation memory flat.
            del loss
            del out
            del mask
        return total_loss / len(data), metric
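The method returns the average loss together with the populated MetricDict, which is convenient for model selection. A minimal sketch of a caller (illustrative only; `dev_loader`, `best` and `save_dir` are assumed names, not HanLP API):

# Illustrative fragment: how the (loss, metric) pair might drive early stopping.
dev_loss, dev_metric = self.evaluate_dataloader(dev_loader, criterion,
                                                metric=self.build_metric(),
                                                logger=logger)
if best is None or dev_metric > best:  # assumes the metric defines an ordering
    best = dev_metric
    self.save_weights(save_dir)  # hypothetical save hook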
Example #2
 def build_metric(self, **kwargs):
     metrics = MetricDict()
     for key, task in self.tasks.items():
         metric = task.build_metric(**kwargs)
         assert metric, f'Please implement `build_metric` of {type(task)} to return a metric.'
         metrics[key] = metric
     return metrics
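For experimenting outside HanLP, a minimal stand-in shows the contract these examples rely on, namely reset() and cstr(). This is a sketch of the idea only, not HanLP's actual MetricDict implementation:

# Minimal, self-contained sketch of the MetricDict contract (illustrative).
class MiniMetricDict(dict):
    def reset(self):
        for metric in self.values():
            metric.reset()

    @property
    def score(self):
        # Reduce the per-task scores to one number by averaging.
        return sum(m.score for m in self.values()) / max(len(self), 1)

    def cstr(self):
        # Compact string for progress logging, one entry per task.
        return ' '.join(f'({name} {m.score:.2%})' for name, m in self.items())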
Example #3
 def build_metric(self, **kwargs):
     return MetricDict({
         'lemmas': CategoricalAccuracy(),
         'upos': CategoricalAccuracy(),
         'deps': AttachmentScore(),
         'feats': CategoricalAccuracy(),
     })
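Keying the MetricDict by output field ('lemmas', 'upos', 'deps', 'feats') means the update step can route each head's predictions to its scorer by name, which is exactly what Example #4 does below.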
Example #4
 def update_metrics(self, batch: Dict[str, Any], output_dict: Dict[str, Any], metrics: MetricDict, task_name):
     task = self.tasks[task_name]
     output_per_task = output_dict.get(task_name, None)
     if output_per_task:
         output = output_per_task['output']
         prediction = output_per_task['prediction']
         metric = metrics.get(task_name, None)
         task.update_metrics(batch, output, prediction, metric)
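Each task then decides how to feed its own metric. A hedged sketch for a tagging task (the 'tag' field and the metric(predictions, gold) calling convention are assumptions used to illustrate the shape of the hook, not verified HanLP API):

# Illustrative task-side hook; field name and call signature are assumed.
def update_metrics(self, batch, output, prediction, metric):
    # Many metric objects are callable as metric(predictions, gold_labels).
    metric(prediction, batch['tag'])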
Example #5
 def build_metric(self, **kwargs):
     # noinspection PyCallByClass
     return MetricDict({
         '1st': super().build_metric(**kwargs),
         '2nd': BiaffineSemanticDependencyParser.build_metric(self, **kwargs),
     })
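Calling BiaffineSemanticDependencyParser.build_metric(self, **kwargs) through the class rather than via super() lets this joint parser collect a second metric from a specific ancestor even though Python's MRO would resolve super() to only one of them; the # noinspection PyCallByClass comment merely silences the IDE warning about that style.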
Example #6
 def fit_dataloader(self,
                    trn: DataLoader,
                    criterion,
                    optimizer,
                    metric: MetricDict,
                    logger: logging.Logger,
                    history: History,
                    gradient_accumulation=1,
                    grad_norm=None,
                    ratio_width=None,
                    eval_trn=True,
                    **kwargs):
     optimizer, scheduler = optimizer
     metric.reset()
     self.model.train()
     timer = CountdownTimer(
         history.num_training_steps(
             len(trn), gradient_accumulation=gradient_accumulation))
     total_loss = 0
     for idx, batch in enumerate(trn):
         out, mask = self.feed_batch(batch)
         loss = out['loss']
          if gradient_accumulation and gradient_accumulation > 1:
              # Scale the loss so gradients accumulated over several
              # micro-batches match those of one large batch.
              loss /= gradient_accumulation
         loss.backward()
         total_loss += loss.item()
         if eval_trn:
             self.decode_output(out, mask, batch)
             self.update_metrics(metric, batch, out, mask)
         if history.step(gradient_accumulation):
             self._step(optimizer, scheduler, grad_norm)
             report = f'loss: {total_loss / (idx + 1):.4f} {metric.cstr()}' if eval_trn \
                 else f'loss: {total_loss / (idx + 1):.4f}'
             timer.log(report,
                       logger=logger,
                       ratio_percentage=False,
                       ratio_width=ratio_width)
          # Release per-batch tensors eagerly before the next iteration.
          del loss
          del out
          del mask
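Note the two accumulation details in the loop above: the loss is divided by gradient_accumulation before backward() so the summed gradients match one large batch, and the optimizer only steps when history.step(...) reports that enough micro-batches have been consumed, which also keeps the CountdownTimer in sync with true optimizer steps.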
Example #7
 def build_metric(self, **kwargs):
     metrics = MetricDict()
     for criteria in self.config.criteria_token_map:
         metrics[criteria] = F1()
     return metrics
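Here each key in self.config.criteria_token_map, one per tokenization standard, gets its own F1, so a multi-criteria tokenizer reports a separate score for every standard it was trained on.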
Example #8
File: rank_srl.py, Project: Tirwanove/HanLP
 def build_metric(self, **kwargs):
     predicate_f1, end_to_end_f1 = SpanRankingSemanticRoleLabeler.build_metric(self, **kwargs)
     return MetricDict({'predicate': predicate_f1, 'e2e': end_to_end_f1})
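Unpacking the inherited pair into explicit 'predicate' and 'e2e' keys keeps the log lines short while still exposing both the predicate-identification F1 and the end-to-end F1.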