def pre_train(self, x, y, epochs, batch_size=4, log_base_dir='./logs'):
    print(f"Pre-training for {epochs} epochs, batch size {batch_size}, "
          f"storing logs in {log_base_dir}")
    from framework import train

    # Delegate training to the framework and keep the trained model it returns.
    history, trained_model = train(model=self.model, x=x, y=y, epochs=epochs,
                                   batch_size=batch_size,
                                   log_base_dir=log_base_dir)
    self.model = trained_model
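# A minimal usage sketch, assuming the method above lives on some wrapper
# object (`trainer` below is hypothetical) and that framework.train accepts
# NumPy-style arrays; x_toy/y_toy are illustrative stand-ins for real data:
#
#   import numpy as np
#   x_toy = np.zeros((64, 10), dtype=np.float32)
#   y_toy = np.zeros((64, 1), dtype=np.float32)
#   trainer.pre_train(x_toy, y_toy, epochs=3)  # batch_size defaults to 4
#   trained = trainer.model                    # now the trained instance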
                   loss_fn, [], logger=logger, callbacks=[prec_rec])
elif args.action == 'train':
    # Cut the learning rate once the training loss stops improving.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optim, 'min', patience=2, verbose=True, cooldown=1)
    for epoch in range(start_epoch, args.epochs):
        train_loss = framework.train(network, train_loader, loss_fn, optim,
                                     epoch, writer=writer,
                                     early_stop=args.early_stop)
        # Fresh precision/recall tracker for each validation pass.
        prec_rec = PrecRec(n_thresholds=100)
        framework.test(network, val_loader, loss_fn, [prec_rec], epoch,
                       writer=writer, early_stop=args.early_stop)
        # Drive the plateau scheduler on this epoch's training loss.
        scheduler.step(train_loss)
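# Standalone sketch of how the ReduceLROnPlateau above reacts to a stalled
# loss, using a dummy parameter and optimiser (demo-only names; the 0.1
# reduction factor is the PyTorch default):
import torch

_param = torch.nn.Parameter(torch.zeros(1))
_optim = torch.optim.SGD([_param], lr=0.1)
_sched = torch.optim.lr_scheduler.ReduceLROnPlateau(_optim, 'min',
                                                    patience=2, cooldown=1)
for _loss in [1.0, 0.9, 0.9, 0.9, 0.9, 0.9]:
    _sched.step(_loss)
    # Prints 0.1 while the loss still counts as improving, then 0.01 once it
    # has failed to improve for more than `patience` epochs.
    print(_optim.param_groups[0]['lr'])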
                   val_loader, criteria, logger=logger, callbacks=[prec_rec])
    f1, thres = prec_rec.best_f1()
    print('F1', f1, 'at', thres)
elif args.action == 'train':
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optim, 'min', patience=2, verbose=True, cooldown=1)
    for epoch in range(start_epoch, args.epochs):
        train_loss = framework.train(network, train_loader, loss_fn, optim,
                                     epoch, early_stop=args.early_stop,
                                     logger=logger)
        score = framework.test(network, val_loader, criteria,
                               early_stop=args.early_stop, logger=logger)
        # Adapt the learning rate to the training loss, then persist progress.
        scheduler.step(train_loss)
        framework.save_checkpoint(epoch, score, network, optim,
                                  path=args.artifacts)
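# A minimal sketch of a PrecRec-style callback, hedged: the real PrecRec's
# interface is not shown beyond PrecRec(n_thresholds=...) and best_f1(), so
# the update() hook and NumPy internals below are assumptions. It sweeps a
# fixed grid of decision thresholds, accumulates confusion counts per batch,
# and reports the threshold with the best F1 = 2PR / (P + R).
import numpy as np

class PrecRecSketch:
    def __init__(self, n_thresholds=100):
        # Evenly spaced decision thresholds in [0, 1).
        self.thresholds = np.linspace(0.0, 1.0, n_thresholds, endpoint=False)
        self.tp = np.zeros(n_thresholds)
        self.fp = np.zeros(n_thresholds)
        self.fn = np.zeros(n_thresholds)

    def update(self, scores, targets):
        # scores: per-example probabilities; targets: binary ground truth.
        scores = np.asarray(scores).ravel()
        targets = np.asarray(targets).ravel().astype(bool)
        for i, t in enumerate(self.thresholds):
            pred = scores >= t
            self.tp[i] += np.sum(pred & targets)
            self.fp[i] += np.sum(pred & ~targets)
            self.fn[i] += np.sum(~pred & targets)

    def best_f1(self):
        # eps avoids division by zero at degenerate thresholds.
        eps = 1e-12
        precision = self.tp / (self.tp + self.fp + eps)
        recall = self.tp / (self.tp + self.fn + eps)
        f1 = 2 * precision * recall / (precision + recall + eps)
        i = int(np.argmax(f1))
        return float(f1[i]), float(self.thresholds[i])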