Example #1
def execute_training_loop(self, trn, dev, devices, epochs, logger,
                          patience, save_dir, optimizer,
                          gradient_accumulation, **kwargs):
    # The optimizer argument bundles the main and transformer
    # optimizers/schedulers into one tuple.
    optimizer, scheduler, transformer_optimizer, transformer_scheduler = optimizer
    criterion = self.build_criterion()
    best_e, best_metric = 0, self.build_metric()
    timer = CountdownTimer(epochs)
    history = History()
    # Width of the "step/total" progress ratio, used to align log output.
    ratio_width = len(
        f'{len(trn) // gradient_accumulation}/{len(trn) // gradient_accumulation}'
    )
    for epoch in range(1, epochs + 1):
        # Train one epoch and update the parameters.
        logger.info(f"[yellow]Epoch {epoch} / {epochs}:[/yellow]")
        self.fit_dataloader(trn,
                            optimizer,
                            scheduler,
                            criterion,
                            epoch,
                            logger,
                            history,
                            transformer_optimizer,
                            transformer_scheduler,
                            gradient_accumulation=gradient_accumulation,
                            eval_trn=self.config.eval_trn)
        loss, dev_metric = self.evaluate_dataloader(
            dev, criterion, ratio_width=ratio_width, logger=logger)
        timer.update()
        report = f"{timer.elapsed_human} / {timer.total_time_human} ETA: {timer.eta_human}"
        # Save the model if it is the best so far.
        if dev_metric > best_metric:
            best_e, best_metric = epoch, dev_metric
            self.save_weights(save_dir)
            report += ' ([red]saved[/red])'
        else:
            if patience != epochs:
                report += f' ({epoch - best_e}/{patience})'
            else:
                report += f' ({epoch - best_e})'
        logger.info(report)
        # Early stopping: quit once dev score has been stale for `patience` epochs.
        if patience is not None and epoch - best_e >= patience:
            logger.info(
                f'LAS has stopped improving for {patience} epochs, early stop.'
            )
            break
    timer.stop()
    # If no epoch ever improved the metric, keep the final weights;
    # otherwise restore the best checkpoint.
    if not best_e:
        self.save_weights(save_dir)
    elif best_e != epoch:
        self.load_weights(save_dir)
    logger.info(
        f"Max score of dev is {best_metric.score:.2%} at epoch {best_e}")
    logger.info(
        f"Average time of each epoch is {timer.elapsed_average_human}")
    logger.info(f"{timer.elapsed_human} elapsed")
Example #2
def execute_training_loop(self, trn: DataLoader, dev: DataLoader, epochs,
                          criterion, optimizer, metric, save_dir, logger,
                          patience, **kwargs):
    max_e, max_metric = 0, -1

    # Rebuild the criterion; the `criterion` argument is superseded here.
    criterion = self.build_criterion()
    timer = CountdownTimer(epochs)
    # Width of the "step/total" progress ratio, used to align log output.
    ratio_width = len(f'{len(trn)}/{len(trn)}')
    scheduler = self.build_scheduler(
        **merge_dict(self.config, optimizer=optimizer, overwrite=True))
    if not patience:
        patience = epochs
    for epoch in range(1, epochs + 1):
        logger.info(f"[yellow]Epoch {epoch} / {epochs}:[/yellow]")
        self.fit_dataloader(trn,
                            criterion,
                            optimizer,
                            metric,
                            logger,
                            ratio_width=ratio_width)
        loss, dev_metric = self.evaluate_dataloader(dev, criterion, logger)
        if scheduler:
            # ReduceLROnPlateau must be stepped with the monitored score;
            # other schedulers are stepped once per epoch.
            if isinstance(scheduler, ReduceLROnPlateau):
                scheduler.step(dev_metric.score)
            else:
                scheduler.step(epoch)
        report_patience = f'Patience: {epoch - max_e}/{patience}'
        # Save the model if it is the best so far.
        if dev_metric > max_metric:
            self.save_weights(save_dir)
            max_e, max_metric = epoch, dev_metric
            report_patience = '[red]Saved[/red] '
        stop = epoch - max_e >= patience
        if stop:
            timer.stop()
        timer.log(
            f'{report_patience} lr: {optimizer.param_groups[0]["lr"]:.4f}',
            ratio_percentage=False,
            newline=True,
            ratio=False)
        if stop:
            break
    timer.stop()
    # Restore the best checkpoint unless the last epoch was the best.
    if max_e != epoch:
        self.load_weights(save_dir)
    logger.info(
        f"Max score of dev is {max_metric.score:.2%} at epoch {max_e}")
    logger.info(
        f"{timer.elapsed_human} elapsed, average time of each epoch is {timer.elapsed_average_human}"
    )
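
The scheduler branch matters because `ReduceLROnPlateau` is the one PyTorch scheduler that must be stepped with the monitored value rather than once per epoch. A minimal sketch with a real optimizer, assuming a dummy parameter and made-up dev scores:

import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=0.1)
# mode='max' because a higher dev score is better, as in the loop above.
scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=2)

for epoch, dev_score in enumerate([0.80, 0.81, 0.81, 0.81, 0.81], start=1):
    if isinstance(scheduler, ReduceLROnPlateau):
        scheduler.step(dev_score)  # plateau scheduler consumes the metric
    else:
        scheduler.step()           # ordinary schedulers step once per epoch
    print(epoch, optimizer.param_groups[0]['lr'])

After the score plateaus past the scheduler's patience, the printed learning rate halves, which is exactly the behavior the training loop relies on.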
Example #3
def build_vocabs(self, dataset, logger, **kwargs):
    self.vocabs.srl_label = Vocab(pad_token=None, unk_token=None)
    # Use a null label to indicate no relationship.
    self.vocabs.srl_label.add('<null>')
    timer = CountdownTimer(len(dataset))
    max_seq_len = 0
    # Iterating the dataset triggers its transforms, which populate the
    # vocab as a side effect; the loop body only tracks sequence length.
    for each in dataset:
        max_seq_len = max(max_seq_len, len(each['token_input_ids']))
        timer.log(f'Building vocabs (max sequence length {max_seq_len}) [blink][yellow]...[/yellow][/blink]')
    timer.stop()
    timer.erase()
    self.vocabs['srl_label'].set_unk_as_safe_unk()
    self.vocabs.lock()
    self.vocabs.summary(logger)
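
The lifecycle here is: create a vocab, reserve a special label, populate it by scanning the dataset, then lock it so indices stay stable during training. A minimal self-contained sketch of that lifecycle, using a toy dict-backed class as a stand-in for HanLP's `Vocab` (the names below are illustrative, not the library's API):

class TinyVocab:
    """Toy stand-in for a lockable label vocabulary."""

    def __init__(self):
        self.token_to_idx = {}
        self.locked = False

    def add(self, token):
        if self.locked:
            raise RuntimeError('vocab is locked')
        self.token_to_idx.setdefault(token, len(self.token_to_idx))

    def lock(self):
        self.locked = True

vocab = TinyVocab()
vocab.add('<null>')  # reserved label meaning "no relationship"
for sample in [{'srl': 'ARG0'}, {'srl': 'ARG1'}, {'srl': 'ARG0'}]:
    vocab.add(sample['srl'])  # populated while scanning the dataset
vocab.lock()  # freeze before training so label indices stay stable
print(vocab.token_to_idx)  # {'<null>': 0, 'ARG0': 1, 'ARG1': 2}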