Example No. 1
def training_pipeline(params, save_dir=None, callbacks=None):
    # Make sure the data-preparation step runs in training mode.
    params['data']['train'] = True

    # Build the dataset and tokenizer from the 'data' section of the config.
    data_params = params['data']
    dataset, tokenizer = generate_training_dataset(data_params,
                                                   save_dir=save_dir)

    model = train_model(dataset, tokenizer, params, callbacks=callbacks)

    # Plot the training and validation loss curves recorded during fitting.
    plot_loss(model.history.history['loss'], model.history.history['val_loss'])

    return model
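
Every example on this page calls a project-specific plot_loss helper whose implementation is not shown. Below is a minimal sketch, assuming matplotlib, with the keyword arguments (figsize, save, save_dir, plot, plot_dir, filename) inferred from the call sites in the examples; it is an illustration, not the project's actual code.

import os
import matplotlib.pyplot as plt


def plot_loss(train_losses, eval_losses, figsize=(7, 5), save=False,
              save_dir='', plot=True, plot_dir='', filename='plot_loss.png'):
    # Draw the training and evaluation loss curves on a single figure.
    fig, ax = plt.subplots(figsize=figsize)
    ax.plot(train_losses, label='train')
    ax.plot(eval_losses, label='eval')
    ax.set_xlabel('epoch')
    ax.set_ylabel('loss')
    ax.legend()

    # Optionally write the figure to disk; `plot`/`plot_dir` are assumed to
    # mirror a copy into a static directory served by a monitoring page.
    if save:
        fig.savefig(os.path.join(save_dir, filename))
    if plot and plot_dir:
        fig.savefig(os.path.join(plot_dir, filename))

    return fig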
Example No. 2
    def train(self, epochs, patience=None):
        epochs = int(epochs)
        # patience=None disables early stopping, so only cast when it is given.
        patience = int(patience) if patience is not None else None

        start_epoch = self.epoch
        end_epoch = self.epoch + epochs

        self.curr_patience = 0

        for epoch in tqdm(range(start_epoch, end_epoch), desc='> Epochs  '):

            # TRAINING STEP
            start = time()
            train_loss = self._train_one_epoch()

            self.losses['train'].append(train_loss)

            # EVALUATION STEP
            eval_loss = self._evaluate()
            end = time()
            self.losses['eval'].append(eval_loss)

            if self.verbose:
                self.print_epoch_result(start, end, train_loss, eval_loss,
                                        epoch, end_epoch)

            if self.save:
                self.save_ckp()

            # EARLY STOPPING
            if self.early_stopping(patience) and not self.production:
                break

            self.epoch += 1

            if self.production_stop_loss(train_loss):
                break

        plot_loss(self.losses['train'],
                  self.losses['eval'],
                  save=self.save,
                  save_dir=f'{self.save_dir}/',
                  plot=self.plot,
                  plot_dir=self.static_dir)
        if self.plot_type == 'tb':
            # Close the TensorBoard writer (defined elsewhere in the module).
            writer.close()
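
train() delegates the stopping decision to self.early_stopping(patience) and self.production_stop_loss(train_loss), neither of which appears in the snippet. Here is a minimal sketch of a patience-based early_stopping method, assuming self.curr_patience and self.losses['eval'] are maintained exactly as above; the actual project logic may differ.

    def early_stopping(self, patience):
        # Stop when the evaluation loss has not improved for `patience`
        # consecutive epochs. patience=None disables early stopping.
        if patience is None or len(self.losses['eval']) < 2:
            return False

        if self.losses['eval'][-1] < min(self.losses['eval'][:-1]):
            self.curr_patience = 0       # improvement: reset the counter
        else:
            self.curr_patience += 1      # no improvement this epoch

        return self.curr_patience >= patience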
Example No. 3

    def walk_forward_train(self, epochs, patience=None):

        for i in tqdm(range(self.n_folds),
                      desc='> Folds  ',
                      file=sys.stdout):

            # Reset the loss history and epoch counter before training this fold.
            self.losses = {'train': [], 'eval': []}
            self.epoch = 0
            self.i_fold = i

            self.train(epochs, patience)

            self.fold_losses.append(self.losses)

            save_training_details(self,
                                  save_dir=f'{self.save_dir}training_details/',
                                  filename=f'4-{i}.training_details')
            plot_loss(self.losses['train'],
                      self.losses['eval'],
                      save=self.save,
                      save_dir=f'{self.save_dir}plot/',
                      filename=f'plot_loss_{i}.png')

        return self.fold_losses
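
walk_forward_train resets the loss history and epoch counter for each of self.n_folds folds and retrains on an expanding window of the data. The fold construction itself is not shown; purely as an illustration, scikit-learn's TimeSeriesSplit produces the same kind of expanding-window splits.

import numpy as np
from sklearn.model_selection import TimeSeriesSplit

series = np.arange(20)                              # toy time series
for train_idx, eval_idx in TimeSeriesSplit(n_splits=4).split(series):
    # The training window grows by one fold each iteration, and the
    # evaluation block is always the data that immediately follows it.
    print(train_idx[-1], eval_idx[0], eval_idx[-1])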
Example No. 4
def training_monitor():

    # Reload the loss history dumped to losses.json during training.
    losses_filepath = f'{STATIC_DIR}losses.json'
    losses_dict = load_json(losses_filepath)

    train_losses = losses_dict['train']
    eval_losses = losses_dict['eval']

    fig = plot_loss(train_losses,
                    eval_losses,
                    figsize=(7, 5),
                    save=False,
                    plot=False)

    return show_plot(fig)
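
training_monitor rebuilds the loss figure from the losses.json file written by print_epoch_result during training (Example No. 5 below). load_json, save_json, show_plot and STATIC_DIR are project helpers that are not shown; a minimal sketch of the JSON helpers, assuming they are thin wrappers around the standard json module:

import json

STATIC_DIR = './static/'                  # assumed location of losses.json


def load_json(filepath):
    # Read a JSON file such as {"train": [...], "eval": [...]}.
    with open(filepath, 'r') as f:
        return json.load(f)


def save_json(obj, filepath):
    # Write the loss dictionary back to disk.
    with open(filepath, 'w') as f:
        json.dump(obj, f)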
Example No. 5
    def print_epoch_result(self, start, end, train_loss, eval_loss, epoch,
                           end_epoch):
        print(f'\n> Epoch {epoch + 1}/{end_epoch}')
        print(f'> Time Spent:     {spent_time(start, end)}')
        print(f'> Training Loss:   {train_loss:.5f}')
        print(f'> Evaluation Loss: {eval_loss:.5f}')
        print('')

        # TENSORBOARD plot
        if self.plot_type == 'tb':
            tb_update_loss(train_loss, eval_loss, epoch)

        # PYPLOT plot
        if (epoch != 0 and self.plot_freq is not None
                and epoch % self.plot_freq == 0
                and self.plot_type == 'pyplot'):
            plot_loss(self.losses['train'],
                      self.losses['eval'],
                      save=self.save,
                      save_dir=f'{self.save_dir}',
                      plot_dir=self.static_dir)

        # Persist the running loss history so it can be reloaded by a monitor.
        loss_filepath = f'{self.static_dir}losses.json'
        save_json(self.losses, loss_filepath)
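
spent_time and tb_update_loss are also project helpers that do not appear in the snippets. Below is a sketch under the assumption that the elapsed time is formatted with datetime.timedelta and that the 'tb' plot type logs to a module-level torch.utils.tensorboard SummaryWriter (the writer that train() closes in Example No. 2); treat both as illustrations.

from datetime import timedelta
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter()                  # assumed module-level writer


def spent_time(start, end):
    # Format the elapsed wall-clock time, e.g. '0:01:23'.
    return str(timedelta(seconds=round(end - start)))


def tb_update_loss(train_loss, eval_loss, epoch):
    # Log both losses under one tag so TensorBoard overlays the curves.
    writer.add_scalars('loss', {'train': train_loss, 'eval': eval_loss}, epoch)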