def _init_checkpoint_handler(self, save_dir=None) -> None:
    save_dir = self.save_dir if save_dir is None else save_dir
    if self.use_ensemble:
        checkpoint_save = {f'model_{i}': model for i, model in enumerate(self.ens_models)}
    else:
        # checkpoint_save = {'model': self.model, 'optimizer': self.optimizer}
        checkpoint_save = {'model': self.model}
    # best checkpoint: keep the single highest-scoring model according to eval_func
    best_checkpoint_handler = handlers.ModelCheckpoint(save_dir, 'best', n_saved=1,
                                                       require_empty=False,
                                                       score_function=self.eval_func,
                                                       save_as_state_dict=True)
    # final checkpoint: overwrite once per save_interval, keeping only the last one
    final_checkpoint_handler = handlers.ModelCheckpoint(save_dir, 'final', save_interval=1,
                                                        n_saved=1, require_empty=False,
                                                        save_as_state_dict=True)
    self.evaluator.add_event_handler(Events.EPOCH_COMPLETED, best_checkpoint_handler, checkpoint_save)
    self.trainer.add_event_handler(Events.COMPLETED, final_checkpoint_handler, checkpoint_save)
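# A minimal, self-contained sketch of the same best/final checkpoint pattern, assuming a
# recent pytorch-ignite (older releases also required save_interval or score_function on
# every ModelCheckpoint). The toy model, engines, and '/tmp/ckpt' directory are illustrative.
import torch.nn as nn
from ignite import handlers
from ignite.engine import Engine, Events

toy_model = nn.Linear(4, 2)
trainer = Engine(lambda e, b: 0.0)
evaluator = Engine(lambda e, b: 0.0)

def neg_val_loss(engine):
    # ModelCheckpoint keeps the highest scores, so negate the loss
    return -engine.state.output

best_handler = handlers.ModelCheckpoint('/tmp/ckpt', 'best', n_saved=1,
                                        require_empty=False, create_dir=True,
                                        score_function=neg_val_loss, score_name='neg_loss')
final_handler = handlers.ModelCheckpoint('/tmp/ckpt', 'final', n_saved=1,
                                         require_empty=False, create_dir=True)
evaluator.add_event_handler(Events.COMPLETED, best_handler, {'model': toy_model})
trainer.add_event_handler(Events.COMPLETED, final_handler, {'model': toy_model})

trainer.run([0] * 3, max_epochs=2)
evaluator.run([0] * 2)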
def attach_model_checkpoint(trainer: engine.Engine, models: Dict[str, nn.Module]) -> None:
    """Save every model in `models` at the end of each epoch, stamped with the epoch number."""
    def to_epoch(trainer: engine.Engine, event_name: str) -> int:
        return trainer.state.epoch

    handler = handlers.ModelCheckpoint(
        './models',
        'model',
        create_dir=True,
        require_empty=False,
        n_saved=None,  # keep every checkpoint
        global_step_transform=to_epoch,
    )
    trainer.add_event_handler(engine.Events.EPOCH_COMPLETED, handler, models)
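# Usage sketch for attach_model_checkpoint; the toy step function and module names are
# illustrative. Each epoch writes one file per entry in the dict (e.g. model_encoder_<epoch>;
# the exact extension depends on the ignite version).
import torch.nn as nn
from ignite import engine

toy_trainer = engine.Engine(lambda e, b: 0.0)
attach_model_checkpoint(toy_trainer, {'encoder': nn.Linear(8, 4),
                                      'decoder': nn.Linear(4, 8)})
toy_trainer.run([0] * 5, max_epochs=2)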
def _init_checkpoint_handlers(self) -> typ.Tuple[ih.ModelCheckpoint, ih.ModelCheckpoint, dict]:
    require_empty = not self.opts.resume
    ckpt_dir = ct.WORK_ROOT / self.opts.run_dir / 'ckpt'
    ckpt_args = {
        'model': self.model.module if hasattr(self.model, 'module') else self.model,  # unwrap DataParallel
        'optimizer': self.optimizer,
        'lr_scheduler': self.lr_scheduler
    }
    score_fn = self._neg_dev_total_loss if self.opts.debug else self._dev_acc_1
    best_ckpt = ih.ModelCheckpoint(dirname=ckpt_dir.as_posix(), filename_prefix='best',
                                   n_saved=1, require_empty=require_empty,
                                   save_as_state_dict=True,
                                   score_function=score_fn, score_name='dev_acc_1')
    latest_ckpt = ih.ModelCheckpoint(dirname=ckpt_dir.as_posix(), filename_prefix='latest',
                                     n_saved=1, require_empty=require_empty,
                                     save_as_state_dict=True, save_interval=1)
    if self.opts.resume:
        with open((ckpt_dir / 'trainer_state.json').as_posix(), 'r') as file:
            state = json.load(file)
        best_ckpt._iteration = state['epoch']
        latest_ckpt._iteration = state['epoch']
    return latest_ckpt, best_ckpt, ckpt_args
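# A hedged sketch of the resume side implied above: with save_as_state_dict=True the
# checkpoint files are plain state_dicts, so restoring is a single torch.load per object.
# The filename is illustrative (ModelCheckpoint embeds the step and score in the name).
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
state = torch.load('ckpt/best_model_12_dev_acc_1=0.8123.pth', map_location='cpu')
model.load_state_dict(state)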
def run_utk(model, optimizer, epochs, log_interval, dataloaders,
            dirname='resnet_models', filename_prefix='resnet', n_saved=2,
            log_dir='../../fer2013/logs', launch_tensorboard=False, patience=10,
            resume_model=None, resume_optimizer=None, backup_step=1, backup_path=None,
            n_epochs_freeze=5, n_cycle=None, lr_after_freeze=1e-3,
            lr_cycle_start=1e-4, lr_cycle_end=1e-1,
            loss_weights=(1 / 10, 1 / 0.16, 1 / 0.44), lr_plot=True):
    """Utility function that encapsulates the PyTorch model training routine.

    :param model: PyTorch model to be trained
    :param optimizer: PyTorch optimizer that updates the model's parameters
    :param epochs: maximum number of epochs to train for
    :param log_interval: print the training loss every `log_interval` iterations
    :param dataloaders: dict with 'train' and 'valid' keys mapping to the train and
        validation PyTorch `DataLoader` objects
    :param dirname: directory where model checkpoints are saved during training
    :param filename_prefix: name under which the checkpoint files are saved
    :param n_saved: keep the `n_saved` best models during training
    :param log_dir: optional directory where TensorBoard logs are written
    :param launch_tensorboard: whether to write metrics and histograms to TensorBoard
    :param patience: number of epochs without improvement before training is stopped
    :param resume_model: optional checkpoint path to load model weights from
    :param resume_optimizer: optional checkpoint path to load the optimizer state_dict from
    :param backup_step: copy checkpoints from `dirname` every `backup_step` epochs;
        useful when training on Google Colab and backing the checkpoints up to Google Drive
    :param backup_path: optional path to copy the checkpoints to every `backup_step` epochs
    :param n_epochs_freeze: unfreeze the model's frozen layers after this many epochs,
        useful when doing transfer learning
    :param n_cycle: optional cycle size, in epochs, for learning-rate scheduling
    :param lr_after_freeze: learning rate to set after unfreezing layers for fine-tuning
    :param lr_cycle_start: starting learning rate for the cyclical schedule
    :param lr_cycle_end: final learning rate for the cyclical schedule
    :param loss_weights: weights applied to the model's outputs in the loss
    :param lr_plot: whether to plot the learning-rate history after training
    """
    count_parameters(model)

    # create the TensorBoard log directory if relevant
    if launch_tensorboard:
        os.makedirs(log_dir, exist_ok=True)

    # if checkpoints from a previous run are provided, load weights and state from them
    if resume_model:
        model.load_state_dict(torch.load(resume_model))
    if resume_optimizer:
        optimizer.load_state_dict(torch.load(resume_optimizer))
        for state in optimizer.state.values():
            for k, v in state.items():
                if torch.is_tensor(v):
                    state[k] = v.cuda()

    # get the training and validation data loaders
    train_loader, val_loader = dataloaders['train'], dataloaders['valid']

    # create TensorBoard writers
    if launch_tensorboard:
        writer, val_writer = create_summary_writer(model, train_loader, log_dir)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # create the trainer and evaluator engines that handle model training and evaluation
    trainer = create_supervised_trainer_multitask(model, optimizer, loss_fn=my_multi_task_loss,
                                                  loss_weights=loss_weights, device=device)
    evaluator = create_supervised_evaluator_multitask(
        model,
        metrics={'mt_accuracy': MultiTaskAccuracy(), 'mt_loss': MutliTaskLoss()},
        device=device,
        loss_weights=loss_weights)

    # schedule the learning rate once the frozen layers have been unfrozen
    @trainer.on(Events.EPOCH_STARTED)
    def schedule_learning_rate(engine):
        if engine.state.epoch > n_epochs_freeze and n_cycle not in [None, 0] \
                and not getattr(trainer, 'scheduler_set', False):
            scheduler = LinearCyclicalScheduler(optimizer, 'lr', lr_cycle_start, lr_cycle_end,
                                                len(train_loader) * n_cycle)
            trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
            setattr(trainer, 'scheduler_set', True)

    # functions to write metrics during training
    desc = "ITERATION - loss: {:.3f}"
    pbar = tqdm.tqdm(initial=0, leave=False, total=len(train_loader), desc=desc.format(0))

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        iter_ = (engine.state.iteration - 1) % len(train_loader) + 1
        if iter_ % log_interval == 0:
            pbar.desc = desc.format(engine.state.output)
            pbar.update(log_interval)
            if launch_tensorboard:
                writer.add_scalar('training/loss', engine.state.output, engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        pbar.refresh()
        # print metrics on the training set
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        age_l1_loss, gender_acc, race_acc = metrics['mt_accuracy']
        avg_nll = metrics['mt_loss']
        tqdm.tqdm.write(
            "Training Results - Epoch: {}  Age L1-loss: {:.3f} ** Gender accuracy: {:.3f} "
            "** Race accuracy: {:.3f} ** Avg loss: {:.3f}".format(
                engine.state.epoch, age_l1_loss, gender_acc, race_acc, avg_nll))
        if launch_tensorboard:
            writer.add_scalar('avg_loss', avg_nll, engine.state.epoch)
            writer.add_scalar('age_l1_loss', age_l1_loss, engine.state.epoch)
            writer.add_scalar('gender_accuracy', gender_acc, engine.state.epoch)
            writer.add_scalar('race_accuracy', race_acc, engine.state.epoch)

        # print metrics on the validation set
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        age_l1_loss, gender_acc, race_acc = metrics['mt_accuracy']
        avg_nll = metrics['mt_loss']
        tqdm.tqdm.write(
            "Validation Results - Epoch: {}  Age L1-loss: {:.3f} ** Gender accuracy: {:.3f} **"
            " Race accuracy: {:.3f} ** Avg loss: {:.3f}".format(
                engine.state.epoch, age_l1_loss, gender_acc, race_acc, avg_nll))
        global val_loss
        val_loss.append(avg_nll)
        if launch_tensorboard:
            val_writer.add_scalar('avg_loss', avg_nll, engine.state.epoch)
            val_writer.add_scalar('age_l1_loss', age_l1_loss, engine.state.epoch)
            val_writer.add_scalar('gender_accuracy', gender_acc, engine.state.epoch)
            val_writer.add_scalar('race_accuracy', race_acc, engine.state.epoch)
        pbar.n = pbar.last_print_n = 0

    # utility handler that unfreezes frozen layers for fine-tuning
    @trainer.on(Events.EPOCH_STARTED)
    def unfreeze(engine):
        if engine.state.epoch == n_epochs_freeze:
            print('**** Unfreezing frozen layers ... ***')
            for param in model.parameters():
                if not param.requires_grad:
                    param.requires_grad = True
                    optimizer.add_param_group({'params': param, 'lr': lr_after_freeze})
            count_parameters(model)

    # negative validation loss, used to save the best checkpoint at each epoch
    def get_val_loss(_):
        global val_loss
        return -val_loss[-1]

    # callback to save the best model during training
    checkpointer = handlers.ModelCheckpoint(
        dirname=dirname,
        filename_prefix=filename_prefix,
        score_function=get_val_loss,
        score_name='val_loss',
        n_saved=n_saved,
        create_dir=True,
        require_empty=False,
        save_as_state_dict=True)

    # callback to stop training if no improvement is observed; patience is doubled
    # because the evaluator runs twice per epoch (training set and validation set)
    patience *= 2
    earlystop = handlers.EarlyStopping(patience, get_val_loss, trainer)
    evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpointer,
                                {'optimizer': optimizer, 'model': model})
    evaluator.add_event_handler(Events.COMPLETED, earlystop)

    # optimizer and model checkpoints already in backup_path, created by a previous run
    if backup_path is not None:
        original_files = glob.glob(os.path.join(backup_path, '*.pth*'))

    # utility handler that periodically copies the best checkpoints to backup_path
    @trainer.on(Events.EPOCH_COMPLETED)
    def backup_checkpoints(engine):
        if backup_path is not None and engine.state.epoch % backup_step == 0:
            # remember the old checkpoint paths so we can remove them after copying
            old_files = glob.glob(os.path.join(backup_path, '*.pth'))
            # get the new model and optimizer checkpoints
            new_files = glob.glob(os.path.join(dirname, '*.pth*'))
            # copy new checkpoints from the local checkpoint folder to backup_path
            for f_ in new_files:
                shutil.copy2(f_, backup_path)
            # remove the older checkpoints now that the new ones have been copied
            if new_files:
                for f_ in old_files:
                    if f_ not in original_files:
                        os.remove(f_)

    @trainer.on(Events.COMPLETED)
    def final_backup(_):
        if backup_path is not None:
            for f_ in glob.glob(os.path.join(dirname, '*.pth*')):
                shutil.copy2(f_, backup_path)

    # track the learning rate for plotting
    list_lr = [p['lr'] for i, p in enumerate(optimizer.param_groups) if i == 0]
    list_steps = [0]

    @trainer.on(Events.ITERATION_COMPLETED)
    def track_learning_rate(engine):
        if lr_plot:
            list_steps.append(engine.state.iteration)
            list_lr.extend([p['lr'] for i, p in enumerate(optimizer.param_groups) if i == 0])

    @trainer.on(Events.EPOCH_COMPLETED)
    def add_histograms(engine):
        if launch_tensorboard:
            for name, param in model.named_parameters():
                writer.add_histogram(name, param.clone().cpu().data.numpy(), engine.state.epoch)

    trainer.run(train_loader, max_epochs=epochs)
    pbar.close()
    if launch_tensorboard:
        writer.close()
        val_writer.close()
    if lr_plot:
        plot_lr(list_lr, list_steps)
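# The warm-up schedule above hinges on ignite's LinearCyclicalScheduler; here is a
# minimal self-contained sketch of that piece alone, with a toy model and data. The
# import path is an assumption for recent ignite releases (older versions exposed it
# from ignite.contrib.handlers.param_scheduler instead).
import torch
import torch.nn as nn
from ignite.engine import Engine, Events
from ignite.handlers import LinearCyclicalScheduler

toy_model = nn.Linear(2, 1)
toy_optimizer = torch.optim.SGD(toy_model.parameters(), lr=1e-4)
toy_trainer = Engine(lambda e, b: None)
# cycle_size plays the role of len(train_loader) * n_cycle in run_utk
lr_scheduler = LinearCyclicalScheduler(toy_optimizer, 'lr',
                                       start_value=1e-4, end_value=1e-1, cycle_size=20)
toy_trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)

@toy_trainer.on(Events.ITERATION_COMPLETED)
def show_lr(engine):
    print(engine.state.iteration, toy_optimizer.param_groups[0]['lr'])

toy_trainer.run(range(20), max_epochs=1)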
def evaluator_epoch_comp_callback(engine):
    # save masks for each batch
    batch_output = engine.state.output
    input_filenames = batch_output['input_filename']
    masks = batch_output['mask']
    for i, input_filename in enumerate(input_filenames):
        mask = cv2.resize(masks[i], dsize=(utils.cropped_width, utils.cropped_height),
                          interpolation=cv2.INTER_AREA)
        # if pad:
        #     h_start, w_start = utils.h_start, utils.w_start
        #     h, w = mask.shape
        #     # recover to original shape
        #     full_mask = np.zeros((original_height, original_width))
        #     full_mask[h_start:h_start + h, w_start:w_start + w] = t_mask
        #     mask = full_mask
        # print("Input Filename-->", input_filename)
        # instrument_folder_name = input_filename.parent.parent.name
        instrument_folder_name = os.path.basename(os.path.dirname(os.path.dirname(input_filename)))
        # print("instrument_folder_name-->", instrument_folder_name)
        # mask_folder/instrument_dataset_x/problem_type_masks/framexxx.png
        mask_folder = mask_save_dir / instrument_folder_name / utils.mask_folder[args.problem_type]
        mask_folder.mkdir(exist_ok=True, parents=True)
        mask_filename = mask_folder / os.path.basename(input_filename)
        # print("mask_filename-->", mask_filename)
        cv2.imwrite(str(mask_filename), mask)
        if 'TAPNet' in args.model:
            attmap = batch_output['attmap'][i]
            # fixed: str.join takes a single iterable, so the parts must be wrapped in a list
            attmap_folder = mask_save_dir / instrument_folder_name / '_'.join([args.problem_type, 'attmaps'])
            attmap_folder.mkdir(exist_ok=True, parents=True)
            attmap_filename = attmap_folder / input_filename.name
            cv2.imwrite(str(attmap_filename), attmap)

evaluator.run(eval_loader)
def train_fold(fold, args):
    # loggers
    logging_logger = args.logging_logger
    if args.tb_log:
        tb_logger = args.tb_logger

    num_classes = utils.problem_class[args.problem_type]

    # init model
    model = eval(args.model)(in_channels=3, num_classes=num_classes, bn=False)
    model = nn.DataParallel(model, device_ids=args.device_ids).cuda()

    # transforms for train/valid data
    train_transform, valid_transform = get_transform(args.model)

    # loss function
    loss_func = LossMulti(num_classes, args.jaccard_weight)
    if args.semi:
        loss_func_semi = LossMultiSemi(num_classes, args.jaccard_weight,
                                       args.semi_loss_alpha, args.semi_method)

    # train/valid filenames
    train_filenames, valid_filenames = utils.trainval_split(args.train_dir, fold)

    # DataLoader and Dataset args
    train_shuffle = True
    train_ds_kwargs = {
        'filenames': train_filenames,
        'problem_type': args.problem_type,
        'transform': train_transform,
        'model': args.model,
        'mode': 'train',
        'semi': args.semi,
    }
    valid_num_workers = args.num_workers
    valid_batch_size = args.batch_size
    if 'TAPNet' in args.model:
        # for TAPNet, cancel the default shuffle and use the self-defined
        # shuffle in torch.Dataset instead
        train_shuffle = False
        train_ds_kwargs['batch_size'] = args.batch_size
        train_ds_kwargs['mf'] = args.mf
    if args.semi:
        train_ds_kwargs['semi_method'] = args.semi_method
        train_ds_kwargs['semi_percentage'] = args.semi_percentage

    # additional valid dataset kwargs
    valid_ds_kwargs = {
        'filenames': valid_filenames,
        'problem_type': args.problem_type,
        'transform': valid_transform,
        'model': args.model,
        'mode': 'valid',
    }
    if 'TAPNet' in args.model:
        # in validation, num_workers should be set to 0 for sequences
        valid_num_workers = 0
        # in validation, batch_size should be set to 1 for sequences
        valid_batch_size = 1
        valid_ds_kwargs['mf'] = args.mf

    # train dataloader
    train_loader = DataLoader(
        dataset=RobotSegDataset(**train_ds_kwargs),
        shuffle=train_shuffle,  # False disables the PyTorch dataset shuffle
        num_workers=args.num_workers,
        batch_size=args.batch_size,
        pin_memory=True
    )
    # valid dataloader
    valid_loader = DataLoader(
        dataset=RobotSegDataset(**valid_ds_kwargs),
        shuffle=False,  # no need to shuffle in validation
        num_workers=valid_num_workers,
        batch_size=valid_batch_size,  # sequences must be validated one image at a time
        pin_memory=True
    )

    # optimizer
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    # optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9,
    #                       weight_decay=args.weight_decay, nesterov=True)

    # ignite trainer process function
    def train_step(engine, batch):
        # set model to train mode and clear gradients
        model.train()
        optimizer.zero_grad()
        # additional params to feed into the model
        add_params = {}
        inputs = batch['input'].cuda(non_blocking=True)
        with torch.no_grad():
            targets = batch['target'].cuda(non_blocking=True)
        # for TAPNet, add attention maps
        if 'TAPNet' in args.model:
            add_params['attmap'] = batch['attmap'].cuda(non_blocking=True)
        outputs = model(inputs, **add_params)
        loss_kwargs = {}
        if args.semi:
            loss_kwargs['labeled'] = batch['labeled']
            if args.semi_method == 'rev_flow':
                loss_kwargs['optflow'] = batch['optflow']
            loss = loss_func_semi(outputs, targets, **loss_kwargs)
        else:
            loss = loss_func(outputs, targets, **loss_kwargs)
        loss.backward()
        optimizer.step()
        return_dict = {
            'output': outputs,
            'target': targets,
            'loss_kwargs': loss_kwargs,
            'loss': loss.item(),
        }
        # for TAPNet, update attention maps after each iteration
        if 'TAPNet' in args.model:
            # output_classes and target_classes: <b, h, w>
            output_softmax_np = torch.softmax(outputs, dim=1).detach().cpu().numpy()
            # update attention maps
            train_loader.dataset.update_attmaps(output_softmax_np, batch['abs_idx'].numpy())
            return_dict['attmap'] = add_params['attmap']
        return return_dict

    # init trainer
    trainer = engine.Engine(train_step)

    # lr scheduler and handler
    # cyc_scheduler = optim.lr_scheduler.CyclicLR(optimizer, args.lr / 100, args.lr)
    # lr_scheduler = c_handlers.param_scheduler.LRScheduler(cyc_scheduler)
    # trainer.add_event_handler(engine.Events.ITERATION_COMPLETED, lr_scheduler)
    step_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_decay_epochs,
                                               gamma=args.lr_decay)
    lr_scheduler = c_handlers.param_scheduler.LRScheduler(step_scheduler)
    trainer.add_event_handler(engine.Events.EPOCH_STARTED, lr_scheduler)

    @trainer.on(engine.Events.STARTED)
    def trainer_start_callback(engine):
        logging_logger.info('training fold {}, {} train / {} valid files'
                            .format(fold, len(train_filenames), len(valid_filenames)))
        # resume training
        if args.resume:
            # checkpoint for the current fold: fold_<fold>_model_<epoch>.pth
            # (fixed: Path.glob returns a generator, and re.match needs a string)
            ckpt_dir = Path(args.ckpt_dir)
            ckpt_filename = next(ckpt_dir.glob('fold_%d_model_[0-9]*.pth' % fold))
            res = re.match(r'fold_%d_model_(\d+).pth' % fold, ckpt_filename.name)
            # restore epoch
            engine.state.epoch = int(res.groups()[0])
            # load model state dict
            model.load_state_dict(torch.load(str(ckpt_filename)))
            logging_logger.info('restore model [{}] from epoch {}.'.format(args.model, engine.state.epoch))
        else:
            logging_logger.info('train model [{}] from scratch'.format(args.model))
        # record metrics history every epoch
        engine.state.metrics_records = {}

    @trainer.on(engine.Events.EPOCH_STARTED)
    def trainer_epoch_start_callback(engine):
        # log learning rate on the progress bar
        train_pbar.log_message('model: %s, problem type: %s, fold: %d, lr: %.5f, batch size: %d' %
                               (args.model, args.problem_type, fold,
                                lr_scheduler.get_param(), args.batch_size))
        # for TAPNet, change the dataset schedule to random after the first epoch
        if 'TAPNet' in args.model and engine.state.epoch > 1:
            train_loader.dataset.set_dataset_schedule('shuffle')

    @trainer.on(engine.Events.ITERATION_COMPLETED)
    def trainer_iter_comp_callback(engine):
        # logging_logger.info(engine.state.metrics)
        pass

    # monitor loss (running average)
    train_ra_loss = imetrics.RunningAverage(output_transform=lambda x: x['loss'], alpha=0.98)
    train_ra_loss.attach(trainer, 'train_ra_loss')

    # monitor train loss over the epoch
    if args.semi:
        train_loss = imetrics.Loss(loss_func_semi,
                                   output_transform=lambda x: (x['output'], x['target'], x['loss_kwargs']))
    else:
        train_loss = imetrics.Loss(loss_func,
                                   output_transform=lambda x: (x['output'], x['target']))
    train_loss.attach(trainer, 'train_loss')

    # progress bar
    train_pbar = c_handlers.ProgressBar(persist=True, dynamic_ncols=True)
    train_metric_names = ['train_ra_loss']
    train_pbar.attach(trainer, metric_names=train_metric_names)

    # tensorboardX: log train info
    if args.tb_log:
        tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer, 'lr'),
                         event_name=engine.Events.EPOCH_STARTED)
        tb_logger.attach(trainer, log_handler=OutputHandler('train_iter', train_metric_names),
                         event_name=engine.Events.ITERATION_COMPLETED)
        tb_logger.attach(trainer, log_handler=OutputHandler('train_epoch', ['train_loss']),
                         event_name=engine.Events.EPOCH_COMPLETED)
        tb_logger.attach(trainer, log_handler=WeightsScalarHandler(model, reduction=torch.norm),
                         event_name=engine.Events.ITERATION_COMPLETED)
        # tb_logger.attach(trainer, log_handler=tb_log_train_vars,
        #                  event_name=engine.Events.ITERATION_COMPLETED)

    # ignite validator process function
    def valid_step(engine, batch):
        with torch.no_grad():
            model.eval()
            inputs = batch['input'].cuda(non_blocking=True)
            targets = batch['target'].cuda(non_blocking=True)
            # additional arguments
            add_params = {}
            # for TAPNet, add attention maps
            if 'TAPNet' in args.model:
                add_params['attmap'] = batch['attmap'].cuda(non_blocking=True)
            # output logits
            outputs = model(inputs, **add_params)
            # loss
            loss = loss_func(outputs, targets)
            output_softmaxs = torch.softmax(outputs, dim=1)
            output_argmaxs = output_softmaxs.argmax(dim=1)
            # output_classes and target_classes: <b, h, w>
            output_classes = output_argmaxs.cpu().numpy()
            target_classes = targets.cpu().numpy()
            # record current batch metrics
            iou_mRecords = MetricRecord()
            dice_mRecords = MetricRecord()
            cm_b = np.zeros((num_classes, num_classes), dtype=np.uint32)
            for output_class, target_class in zip(output_classes, target_classes):
                # calculate metrics for each frame, either via the confusion matrix
                # or directly from the definition
                cm = calculate_confusion_matrix_from_arrays(output_class, target_class, num_classes)
                iou_mRecords.update_record(calculate_iou(cm))
                dice_mRecords.update_record(calculate_dice(cm))
                cm_b += cm
                # ####### calculate directly using the definition ##########
                # iou_mRecords.update_record(iou_multi_np(target_class, output_class))
                # dice_mRecords.update_record(dice_multi_np(target_class, output_class))
            # accumulate batch metrics into the engine state
            engine.state.epoch_metrics['confusion_matrix'] += cm_b
            engine.state.epoch_metrics['iou'].merge(iou_mRecords)
            engine.state.epoch_metrics['dice'].merge(dice_mRecords)
            return_dict = {
                'loss': loss.item(),
                'output': outputs,
                'output_argmax': output_argmaxs,
                'target': targets,
                # for monitoring
                'iou': iou_mRecords,
                'dice': dice_mRecords,
            }
            if 'TAPNet' in args.model:
                # for TAPNet, update attention maps after each iteration
                valid_loader.dataset.update_attmaps(output_softmaxs.cpu().numpy(),
                                                    batch['abs_idx'].numpy())
                # for TAPNet, return extra internal values
                return_dict['attmap'] = add_params['attmap']
                # TODO: for TAPNet, return internal self-learned attention maps
            return return_dict

    # validator engine
    validator = engine.Engine(valid_step)

    # monitor loss (running average)
    valid_ra_loss = imetrics.RunningAverage(output_transform=lambda x: x['loss'], alpha=0.98)
    valid_ra_loss.attach(validator, 'valid_ra_loss')

    # monitor validation loss over the epoch
    valid_loss = imetrics.Loss(loss_func, output_transform=lambda x: (x['output'], x['target']))
    valid_loss.attach(validator, 'valid_loss')

    # monitor per-data mean metrics
    valid_data_miou = imetrics.RunningAverage(
        output_transform=lambda x: x['iou'].data_mean()['mean'], alpha=0.98)
    valid_data_miou.attach(validator, 'mIoU')
    valid_data_mdice = imetrics.RunningAverage(
        output_transform=lambda x: x['dice'].data_mean()['mean'], alpha=0.98)
    valid_data_mdice.attach(validator, 'mDice')

    # show metrics on the progress bar (after every iteration)
    valid_pbar = c_handlers.ProgressBar(persist=True, dynamic_ncols=True)
    valid_metric_names = ['valid_ra_loss', 'mIoU', 'mDice']
    valid_pbar.attach(validator, metric_names=valid_metric_names)

    # ## monitor ignite IoU (the same as the IoU we are using) ###
    # cm = imetrics.ConfusionMatrix(num_classes,
    #                               output_transform=lambda x: (x['output'], x['target']))
    # imetrics.IoU(cm, ignore_index=0).attach(validator, 'iou')
    # # monitor ignite mean IoU (over all classes, even those absent from the ground truth)
    # imetrics.mIoU(cm, ignore_index=0).attach(validator, 'mean_iou')

    @validator.on(engine.Events.STARTED)
    def validator_start_callback(engine):
        pass

    @validator.on(engine.Events.EPOCH_STARTED)
    def validator_epoch_start_callback(engine):
        engine.state.epoch_metrics = {
            # calculated directly from the definition
            'iou': MetricRecord(),
            'dice': MetricRecord(),
            'confusion_matrix': np.zeros((num_classes, num_classes), dtype=np.uint32),
        }

    # evaluate after each iteration finishes
    @validator.on(engine.Events.ITERATION_COMPLETED)
    def validator_iter_comp_callback(engine):
        pass

    # evaluate after the epoch finishes
    @validator.on(engine.Events.EPOCH_COMPLETED)
    def validator_epoch_comp_callback(engine):
        # log ignite metrics
        # logging_logger.info(engine.state.metrics)
        # ious = engine.state.metrics['iou']
        # msg = 'IoU: '
        # for ins_id, iou in enumerate(ious):
        #     msg += '{:d}: {:.3f}, '.format(ins_id + 1, iou)
        # logging_logger.info(msg)
        # logging_logger.info('nonzero mean IoU for all data: {:.3f}'.format(ious[ious > 0].mean()))

        # log monitored epoch metrics
        epoch_metrics = engine.state.epoch_metrics

        # NOTICE: two different metrics are available.
        # 1. mean metrics for all data, calculated from the confusion matrix.
        #    Compared with using confusion_matrix[1:, 1:] as in the original code, we use
        #    the full confusion matrix and only present the non-background result.
        confusion_matrix = epoch_metrics['confusion_matrix']  # [1:, 1:]
        ious = calculate_iou(confusion_matrix)
        dices = calculate_dice(confusion_matrix)
        mean_ious = np.mean(list(ious.values()))
        mean_dices = np.mean(list(dices.values()))
        std_ious = np.std(list(ious.values()))
        std_dices = np.std(list(dices.values()))
        logging_logger.info('mean IoU: %.3f, std: %.3f, for each class: %s' %
                            (mean_ious, std_ious, ious))
        logging_logger.info('mean Dice: %.3f, std: %.3f, for each class: %s' %
                            (mean_dices, std_dices, dices))

        # 2. mean metrics for all data, calculated directly from the definition
        iou_data_mean = epoch_metrics['iou'].data_mean()
        dice_data_mean = epoch_metrics['dice'].data_mean()
        logging_logger.info('data (%d) mean IoU: %.3f, std: %.3f' %
                            (len(iou_data_mean['items']), iou_data_mean['mean'], iou_data_mean['std']))
        logging_logger.info('data (%d) mean Dice: %.3f, std: %.3f' %
                            (len(dice_data_mean['items']), dice_data_mean['mean'], dice_data_mean['std']))

        # record metrics in the trainer every epoch
        # trainer.state.metrics_records[trainer.state.epoch] = \
        #     {'miou': mean_ious, 'std_miou': std_ious,
        #      'mdice': mean_dices, 'std_mdice': std_dices}
        trainer.state.metrics_records[trainer.state.epoch] = \
            {'miou': iou_data_mean['mean'], 'std_miou': iou_data_mean['std'],
             'mdice': dice_data_mean['mean'], 'std_mdice': dice_data_mean['std']}

    # log internal variables (attention maps, outputs, etc.) on validation
    def tb_log_valid_iter_vars(engine, logger, event_name):
        log_tag = 'valid_iter'
        output = engine.state.output
        batch_size = output['output'].shape[0]
        res_grid = tvutils.make_grid(torch.cat([
            output['output_argmax'].unsqueeze(1),
            output['target'].unsqueeze(1),
        ]), padding=2, normalize=False, nrow=batch_size).cpu()  # show the original image
        logger.writer.add_image(tag='%s (outputs, targets)' % log_tag, img_tensor=res_grid)
        if 'TAPNet' in args.model:
            # log attention maps and other internal values
            inter_vals_grid = tvutils.make_grid(torch.cat([output['attmap']]),
                                                padding=2, normalize=True, nrow=batch_size).cpu()
            logger.writer.add_image(tag='%s internal vals' % log_tag, img_tensor=inter_vals_grid)

    def tb_log_valid_epoch_vars(engine, logger, event_name):
        log_tag = 'valid_iter'
        # log monitored epoch metrics
        epoch_metrics = engine.state.epoch_metrics
        confusion_matrix = epoch_metrics['confusion_matrix']  # [1:, 1:]
        ious = calculate_iou(confusion_matrix)
        dices = calculate_dice(confusion_matrix)
        mean_ious = np.mean(list(ious.values()))
        mean_dices = np.mean(list(dices.values()))
        logger.writer.add_scalar('mIoU', mean_ious, engine.state.epoch)
        # fixed: the original logged the mean Dice under the 'mIoU' tag as well
        logger.writer.add_scalar('mDice', mean_dices, engine.state.epoch)

    if args.tb_log:
        # log internal values
        tb_logger.attach(validator, log_handler=tb_log_valid_iter_vars,
                         event_name=engine.Events.ITERATION_COMPLETED)
        tb_logger.attach(validator, log_handler=tb_log_valid_epoch_vars,
                         event_name=engine.Events.EPOCH_COMPLETED)
        # tb_logger.attach(validator, log_handler=OutputHandler('valid_iter', valid_metric_names),
        #                  event_name=engine.Events.ITERATION_COMPLETED)
        tb_logger.attach(validator, log_handler=OutputHandler('valid_epoch', ['valid_loss']),
                         event_name=engine.Events.EPOCH_COMPLETED)

    # score function for model saving
    ckpt_score_function = lambda engine: \
        np.mean(list(calculate_iou(engine.state.epoch_metrics['confusion_matrix']).values()))
    # ckpt_score_function = lambda engine: engine.state.epoch_metrics['iou'].data_mean()['mean']

    ckpt_filename_prefix = 'fold_%d' % fold

    # model saving handler
    model_ckpt_handler = handlers.ModelCheckpoint(
        dirname=args.model_save_dir,
        filename_prefix=ckpt_filename_prefix,
        score_function=ckpt_score_function,
        create_dir=True,
        require_empty=False,
        save_as_state_dict=True,
        atomic=True)
    validator.add_event_handler(event_name=engine.Events.EPOCH_COMPLETED,
                                handler=model_ckpt_handler,
                                to_save={'model': model})

    # early stopping: trainer=trainer, but the events are handled by the validator
    early_stopping = handlers.EarlyStopping(patience=args.es_patience,
                                            score_function=ckpt_score_function,
                                            trainer=trainer)
    validator.add_event_handler(event_name=engine.Events.EPOCH_COMPLETED,
                                handler=early_stopping)

    # validate after each training epoch finishes
    @trainer.on(engine.Events.EPOCH_COMPLETED)
    def trainer_epoch_comp_callback(engine):
        validator.run(valid_loader)

    trainer.run(train_loader, max_epochs=args.max_epochs)

    if args.tb_log:
        # close tb_logger
        tb_logger.close()

    return trainer.state.metrics_records
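# A hedged driver sketch for train_fold: the function returns per-epoch metric records,
# so a cross-validation run can simply collect them per fold. The fold count and `args`
# contents are assumptions based on the code above, not part of the original script.
def train_all_folds(args, num_folds=4):
    fold_records = {}
    for fold in range(num_folds):
        fold_records[fold] = train_fold(fold, args)
    return fold_records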
# Handlers
if main_proc and args.visdom:
    LOG.info('Logging into visdom...')
    visdom = Visdom(args.config.model.name)
if main_proc and args.tensorboard:
    LOG.info('Logging into Tensorboard')
    tensorboard = TensorboardX(
        os.path.join(args.save_folder, args.config.model.name, 'tensorboard'))

# epoch checkpoint
ckpt_handler = handlers.ModelCheckpoint(
    os.path.join(args.save_folder, args.config.model.name),
    'model',
    save_interval=1,
    n_saved=args.config.training.num_epochs,
    require_empty=False)

# best checkpoint, scored by the 'cer' metric
best_ckpt_handler = handlers.ModelCheckpoint(
    os.path.join(args.save_folder, args.config.model.name),
    'model',
    score_function=lambda engine: engine.state.metrics['cer'],
    n_saved=5,
    require_empty=False)

if args.checkpoint_per_batch:
    # batch checkpoint
    batch_ckpt_handler = handlers.ModelCheckpoint(
        os.path.join(args.save_folder, args.config.model.name),
def run(experiment_name: str, visdom_host: str, visdom_port: int, visdom_env_path: str,
        model_class: str, model_args: Dict[str, Any],
        optimizer_class: str, optimizer_args: Dict[str, Any],
        dataset_class: str, dataset_args: Dict[str, Any],
        batch_train: int, batch_test: int, workers_train: int, workers_test: int,
        transforms: List[Dict[str, Union[str, Dict[str, Any]]]],
        epochs: int, log_interval: int, saved_models_path: str,
        performance_metrics: Optional = None,
        scheduler_class: Optional[str] = None,
        scheduler_args: Optional[Dict[str, Any]] = None,
        model_suffix: Optional[str] = None,
        setup_suffix: Optional[str] = None,
        orig_stdout: Optional[io.TextIOBase] = None):
    with _utils.tqdm_stdout(orig_stdout) as orig_stdout:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # build the train/test transform pipelines
        transforms_train = list()
        transforms_test = list()
        for idx, transform in enumerate(transforms):
            use_train = transform.get('train', True)
            use_test = transform.get('test', True)
            transform = _utils.load_class(transform['class'])(**transform['args'])
            if use_train:
                transforms_train.append(transform)
            if use_test:
                transforms_test.append(transform)
            transforms[idx]['train'] = use_train
            transforms[idx]['test'] = use_test
        transforms_train = tv.transforms.Compose(transforms_train)
        transforms_test = tv.transforms.Compose(transforms_test)

        Dataset: Type = _utils.load_class(dataset_class)
        train_loader, eval_loader = _utils.get_data_loaders(
            Dataset, dataset_args, batch_train, batch_test,
            workers_train, workers_test, transforms_train, transforms_test)

        Network: Type = _utils.load_class(model_class)
        model: _interfaces.AbstractNet = Network(**model_args)
        model = model.to(device)

        Optimizer: Type = _utils.load_class(optimizer_class)
        optimizer: torch.optim.Optimizer = Optimizer(model.parameters(), **optimizer_args)

        if scheduler_class is not None:
            Scheduler: Type = _utils.load_class(scheduler_class)
            if scheduler_args is None:
                scheduler_args = dict()
            scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = \
                Scheduler(optimizer, **scheduler_args)
        else:
            scheduler = None

        # short model name built from the capital letters of the class name
        model_short_name = ''.join([c for c in Network.__name__ if c == c.upper()])
        model_name = '{}{}'.format(
            model_short_name,
            '-{}'.format(model_suffix) if model_suffix is not None else '')
        visdom_env_name = '{}_{}_{}{}'.format(
            Dataset.__name__, experiment_name, model_name,
            '-{}'.format(setup_suffix) if setup_suffix is not None else '')
        vis, vis_pid = _visdom.get_visdom_instance(visdom_host, visdom_port,
                                                   visdom_env_name, visdom_env_path)

        prog_bar_epochs = tqdm.tqdm(total=epochs, desc='Epochs', file=orig_stdout,
                                    dynamic_ncols=True, unit='epoch')
        prog_bar_iters = tqdm.tqdm(desc='Batches', file=orig_stdout, dynamic_ncols=True)

        tqdm.tqdm.write(f'\n{repr(model)}\n')
        tqdm.tqdm.write('Total number of parameters: {:.2f}M'.format(
            sum(p.numel() for p in model.parameters()) / 1e6))

        # fixed: the step returns loss.item(), a float, not a torch.Tensor;
        # optimizer.step(None) simplified to optimizer.step()
        def training_step(_: ieng.Engine, batch: _interfaces.TensorPair) -> float:
            model.train()
            optimizer.zero_grad()
            x, y = batch
            x = x.to(device)
            y = y.to(device)
            _, loss = model(x, y)
            loss.backward()
            optimizer.step()
            return loss.item()

        def eval_step(_: ieng.Engine, batch: _interfaces.TensorPair) -> _interfaces.TensorPair:
            model.eval()
            with torch.no_grad():
                x, y = batch
                x = x.to(device)
                y = y.to(device)
                y_pred = model(x)
                return y_pred, y

        trainer = ieng.Engine(training_step)
        validator_train = ieng.Engine(eval_step)
        validator_eval = ieng.Engine(eval_step)

        # placeholder for the summary window
        vis.text(text='', win=experiment_name, env=visdom_env_name,
                 opts={'title': 'Summary', 'width': 940, 'height': 416},
                 append=vis.win_exists(experiment_name, visdom_env_name))

        default_metrics = {
            'Loss': {
                'window_name': None,
                'x_label': '#Epochs',
                'y_label': model.loss_fn_name,
                'width': 940,
                'height': 416,
                'lines': [{
                    'line_label': 'SMA',
                    'object': imet.RunningAverage(output_transform=lambda x: x),
                    'test': False,
                    'update_rate': 'iteration'
                }, {
                    'line_label': 'Val.',
                    'object': imet.Loss(model.loss_fn)
                }]
            }
        }
        performance_metrics = {**default_metrics, **performance_metrics}

        checkpoint_metrics = list()
        for scope_name, scope in performance_metrics.items():
            scope['window_name'] = scope.get('window_name', scope_name) or scope_name
            for line in scope['lines']:
                if 'object' not in line:
                    line['object'] = _utils.load_class(line['class'])(**line['args'])
                line['metric_label'] = '{}: {}'.format(scope['window_name'], line['line_label'])
                line['update_rate'] = line.get('update_rate', 'epoch')
                line_suffixes = list()
                if line['update_rate'] == 'iteration':
                    line['object'].attach(trainer, line['metric_label'])
                    line['train'] = False
                    line['test'] = False
                    line_suffixes.append(' Train.')
                if line.get('train', True):
                    line['object'].attach(validator_train, line['metric_label'])
                    line_suffixes.append(' Train.')
                if line.get('test', True):
                    line['object'].attach(validator_eval, line['metric_label'])
                    line_suffixes.append(' Eval.')
                if line.get('is_checkpoint', False):
                    checkpoint_metrics.append(line['metric_label'])
                for line_suffix in line_suffixes:
                    _visdom.plot_line(
                        vis=vis,
                        window_name=scope['window_name'],
                        env=visdom_env_name,
                        line_label=line['line_label'] + line_suffix,
                        x_label=scope['x_label'],
                        y_label=scope['y_label'],
                        width=scope['width'],
                        height=scope['height'],
                        draw_marker=(line['update_rate'] == 'epoch'))

        if checkpoint_metrics:
            score_name = 'performance'

            def get_score(engine: ieng.Engine) -> float:
                current_mode = getattr(engine.state.dataloader.iterable.dataset,
                                       dataset_args['training']['key'])
                val_mode = dataset_args['training']['no']
                score = 0.0
                if current_mode == val_mode:
                    for metric_name in checkpoint_metrics:
                        try:
                            score += engine.state.metrics[metric_name]
                        except KeyError:
                            pass
                return score

            model_saver = ihan.ModelCheckpoint(
                os.path.join(saved_models_path, visdom_env_name),
                filename_prefix=visdom_env_name,
                score_name=score_name,
                score_function=get_score,
                n_saved=3,
                save_as_state_dict=True,
                require_empty=False,
                create_dir=True)
            validator_eval.add_event_handler(ieng.Events.EPOCH_COMPLETED,
                                             model_saver, {model_name: model})

        @trainer.on(ieng.Events.EPOCH_STARTED)
        def reset_progress_iterations(engine: ieng.Engine):
            prog_bar_iters.clear()
            prog_bar_iters.n = 0
            prog_bar_iters.last_print_n = 0
            prog_bar_iters.start_t = time.time()
            prog_bar_iters.last_print_t = time.time()
            prog_bar_iters.total = len(engine.state.dataloader)

        @trainer.on(ieng.Events.ITERATION_COMPLETED)
        def log_training(engine: ieng.Engine):
            prog_bar_iters.update(1)
            num_iter = (engine.state.iteration - 1) % len(train_loader) + 1
            early_stop = np.isnan(engine.state.output) or np.isinf(engine.state.output)
            if num_iter % log_interval == 0 or num_iter == len(train_loader) or early_stop:
                tqdm.tqdm.write('Epoch[{}] Iteration[{}/{}] Loss: {:.4f}'.format(
                    engine.state.epoch, num_iter, len(train_loader), engine.state.output))
                x_pos = engine.state.epoch + num_iter / len(train_loader) - 1
                for scope_name, scope in performance_metrics.items():
                    for line in scope['lines']:
                        if line['update_rate'] == 'iteration':
                            line_label = '{} Train.'.format(line['line_label'])
                            line_value = engine.state.metrics[line['metric_label']]
                            if engine.state.epoch > 1:
                                _visdom.plot_line(
                                    vis=vis,
                                    window_name=scope['window_name'],
                                    env=visdom_env_name,
                                    line_label=line_label,
                                    x_label=scope['x_label'],
                                    y_label=scope['y_label'],
                                    x=np.full(1, x_pos),
                                    y=np.full(1, line_value))
            if early_stop:
                tqdm.tqdm.write(colored('Early stopping due to invalid loss value.', 'red'))
                trainer.terminate()

        def log_validation(engine: ieng.Engine, train: bool = True):
            if train:
                run_type = 'Train.'
                data_loader = train_loader
                validator = validator_train
            else:
                run_type = 'Eval.'
                data_loader = eval_loader
                validator = validator_eval
            prog_bar_validation = tqdm.tqdm(data_loader, desc=f'Validation {run_type}',
                                            file=orig_stdout, dynamic_ncols=True, leave=False)
            validator.run(prog_bar_validation)
            prog_bar_validation.clear()
            prog_bar_validation.close()
            tqdm_info = ['Epoch: {}'.format(engine.state.epoch)]
            for scope_name, scope in performance_metrics.items():
                for line in scope['lines']:
                    if line['update_rate'] == 'epoch':
                        try:
                            line_label = '{} {}'.format(line['line_label'], run_type)
                            line_value = validator.state.metrics[line['metric_label']]
                            _visdom.plot_line(vis=vis,
                                              window_name=scope['window_name'],
                                              env=visdom_env_name,
                                              line_label=line_label,
                                              x_label=scope['x_label'],
                                              y_label=scope['y_label'],
                                              x=np.full(1, engine.state.epoch),
                                              y=np.full(1, line_value),
                                              draw_marker=True)
                            tqdm_info.append('{}: {:.4f}'.format(line_label, line_value))
                        except KeyError:
                            pass
            tqdm.tqdm.write('{} results - {}'.format(run_type, '; '.join(tqdm_info)))

        @trainer.on(ieng.Events.EPOCH_COMPLETED)
        def log_validation_train(engine: ieng.Engine):
            log_validation(engine, True)

        @trainer.on(ieng.Events.EPOCH_COMPLETED)
        def log_validation_eval(engine: ieng.Engine):
            log_validation(engine, False)
            if engine.state.epoch == 1:
                summary = _utils.build_summary_str(
                    experiment_name=experiment_name,
                    model_short_name=model_name,
                    model_class=model_class,
                    model_args=model_args,
                    optimizer_class=optimizer_class,
                    optimizer_args=optimizer_args,
                    dataset_class=dataset_class,
                    dataset_args=dataset_args,
                    transforms=transforms,
                    epochs=epochs,
                    batch_train=batch_train,
                    log_interval=log_interval,
                    saved_models_path=saved_models_path,
                    scheduler_class=scheduler_class,
                    scheduler_args=scheduler_args)
                _visdom.create_summary_window(vis=vis,
                                              visdom_env_name=visdom_env_name,
                                              experiment_name=experiment_name,
                                              summary=summary)
            vis.save([visdom_env_name])
            prog_bar_epochs.update(1)
            if scheduler is not None:
                scheduler.step(engine.state.epoch)

        trainer.run(train_loader, max_epochs=epochs)

        if vis_pid is not None:
            tqdm.tqdm.write('Stopping visdom')
            os.kill(vis_pid, signal.SIGTERM)
        del vis
        del train_loader
        del eval_loader
        prog_bar_iters.clear()
        prog_bar_iters.close()
        prog_bar_epochs.clear()
        prog_bar_epochs.close()
        tqdm.tqdm.write('\n')
def fit(self, train_loader: _data.DataLoader, val_loader: _data.DataLoader,
        epochs: int = 1, batches: int = None, learning_rate: float = 1e-3) -> None:
    if batches is None:
        batches = VocalExtractor.get_number_of_batches(train_loader)

    loss_fn = nn.BCELoss()
    optimizer = _optim.Adam(self.model.parameters(), lr=learning_rate)
    trainer = _engine.create_supervised_trainer(self.model, optimizer, loss_fn,
                                                device=self.device)
    _metrics.RunningAverage(output_transform=lambda x: x,
                            device=self.device).attach(trainer, 'loss')

    progressbar = _chandlers.ProgressBar(
        bar_format="{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar:20}| "
                   "[{elapsed}<{remaining}]{postfix}",
        persist=True, ascii=" #")
    progressbar.attach(trainer, ['loss'])

    def get_metrics_fn() -> Dict[str, _metrics.Metric]:
        def rounded_transform(output):
            y_pred, y = output
            return torch.round(y_pred), y

        transform = rounded_transform
        accuracy = _metrics.Accuracy(transform, device=self.device)
        precision = _metrics.Precision(transform, device=self.device)
        recall = _metrics.Recall(transform, device=self.device)
        # F1 built with ignite's metric arithmetic; epsilon avoids division by zero
        f1 = precision * recall * 2 / (precision + recall + 1e-20)
        return {'loss': _metrics.Loss(loss_fn), 'accuracy': accuracy,
                'precision': precision, 'recall': recall, 'f1': f1}

    evaluator = _engine.create_supervised_evaluator(self.model, metrics=get_metrics_fn(),
                                                    device=self.device)

    score_fn_name = "f1"

    def score_function(engine: _engine.Engine):
        return engine.state.metrics[score_fn_name]

    # keep the five best models by F1 score
    best_model_saver = _handlers.ModelCheckpoint(dirname="best_models",
                                                 filename_prefix="vocal_extractor",
                                                 score_name=score_fn_name,
                                                 score_function=score_function,
                                                 n_saved=5,
                                                 create_dir=True)
    evaluator.add_event_handler(_engine.Events.COMPLETED, best_model_saver,
                                {"model": self.model})

    # also keep every model checkpoint
    each_model_saver = _handlers.ModelCheckpoint(dirname="all_models",
                                                 filename_prefix="vocal_extractor",
                                                 score_name=score_fn_name,
                                                 score_function=score_function,
                                                 n_saved=None,
                                                 create_dir=True)
    evaluator.add_event_handler(_engine.Events.COMPLETED, each_model_saver,
                                {"model": self.model})

    @trainer.on(_engine.Events.EPOCH_COMPLETED)
    def on_epoch_completed(engine: _engine.Engine) -> None:
        metrics = VocalExtractor.compute_metrics(val_loader, evaluator)
        string = ", ".join(f"val_{k}: {v:.4f}" for k, v in metrics.items())
        progressbar.log_message(string + "\n")

    with _tb_logger.TensorboardLogger(log_dir="tb_logs") as tb_logger:
        global_step = _tb_logger.global_step_from_engine(trainer)
        train_running_loss_log_handler = _tb_logger.OutputHandler(
            tag="training", output_transform=lambda x: {'running_loss': x})
        tb_logger.attach(trainer, log_handler=train_running_loss_log_handler,
                         event_name=_engine.Events.ITERATION_COMPLETED)
        val_metrics_log_handler = _tb_logger.OutputHandler(
            tag="validation",
            metric_names=[name for name, _ in get_metrics_fn().items()],
            global_step_transform=global_step)
        tb_logger.attach(evaluator, log_handler=val_metrics_log_handler,
                         event_name=_engine.Events.EPOCH_COMPLETED)
        tb_logger.attach(trainer,
                         log_handler=_tb_logger.OptimizerParamsHandler(optimizer),
                         event_name=_engine.Events.ITERATION_STARTED)
        tb_logger.attach(trainer,
                         log_handler=_tb_logger.WeightsScalarHandler(self.model),
                         event_name=_engine.Events.ITERATION_COMPLETED)
        tb_logger.attach(trainer,
                         log_handler=_tb_logger.WeightsHistHandler(self.model),
                         event_name=_engine.Events.EPOCH_COMPLETED)
        tb_logger.attach(trainer,
                         log_handler=_tb_logger.GradsScalarHandler(self.model),
                         event_name=_engine.Events.ITERATION_COMPLETED)
        tb_logger.attach(trainer,
                         log_handler=_tb_logger.GradsHistHandler(self.model),
                         event_name=_engine.Events.EPOCH_COMPLETED)

        torchsummary.summary(self.model,
                             input_size=(1, self.freq_bins, self.time_bins),
                             batch_size=train_loader.batch_size,
                             device=self.device)
        trainer.run(data=train_loader, epoch_length=batches, max_epochs=epochs)
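# The F1 line above relies on ignite's metric arithmetic: Precision and Recall objects
# combine into a new lazily-evaluated metric. A minimal self-contained sketch of just
# that mechanism (toy predictions; the identity step simply passes (y_pred, y) through):
import torch
from ignite.engine import Engine
from ignite.metrics import Precision, Recall

evaluator = Engine(lambda engine, batch: batch)
precision = Precision()
recall = Recall()
f1 = precision * recall * 2 / (precision + recall + 1e-20)
f1.attach(evaluator, 'f1')

y_pred = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])
y = torch.tensor([0, 1, 1])
state = evaluator.run([(y_pred, y)])
print(state.metrics['f1'])  # per-class F1 tensor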
def run(path_to_model_script, epochs, log_interval, dataloaders,
        dirname='resnet_models', filename_prefix='resnet', n_saved=2,
        log_dir='../../fer2013/logs', launch_tensorboard=False, patience=10,
        resume_model=None, resume_optimizer=None, backup_step=1, backup_path=''):
    if launch_tensorboard:
        os.makedirs(log_dir, exist_ok=True)
        # os.system('pkill tensorboard')
        # os.system('tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'.format(log_dir))
        # os.system("npm install -g localtunnel")
        # os.system('lt --port 6006 >> /content/url.txt 2>&1 &')
        # os.system('cat /content/url.txt')

    # get the model and optimizer from the model script
    model_script = dict()
    with open(path_to_model_script) as f:
        exec(f.read(), model_script)
    model = model_script['my_model']
    optimizer = model_script['optimizer']

    if resume_model:
        model.load_state_dict(torch.load(resume_model))
    if resume_optimizer:
        # fixed: the original loaded the model checkpoint into the optimizer
        optimizer.load_state_dict(torch.load(resume_optimizer))

    train_loader, val_loader = dataloaders['Training'], dataloaders['PublicTest']
    if launch_tensorboard:
        writer, val_writer = create_summary_writer(model, train_loader, log_dir)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    trainer = create_supervised_trainer(model, optimizer, F.cross_entropy, device=device)
    evaluator = create_supervised_evaluator(model,
                                            metrics={'accuracy': Accuracy(),
                                                     'nll': Loss(F.cross_entropy)},
                                            device=device)

    def get_val_loss(engine):
        return -engine.state.metrics['nll']

    checkpointer = handlers.ModelCheckpoint(dirname=dirname,
                                            filename_prefix=filename_prefix,
                                            score_function=get_val_loss,
                                            score_name='val_loss',
                                            n_saved=n_saved,
                                            create_dir=True,
                                            require_empty=False,
                                            save_as_state_dict=True)
    earlystop = handlers.EarlyStopping(patience, get_val_loss, trainer)
    evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpointer,
                                {'optimizer': optimizer, 'model': model})
    evaluator.add_event_handler(Events.EPOCH_COMPLETED, earlystop)

    desc = "ITERATION - loss: {:.3f}"
    pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=desc.format(0))

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        iter_ = (engine.state.iteration - 1) % len(train_loader) + 1
        if iter_ % log_interval == 0:
            pbar.desc = desc.format(engine.state.output)
            pbar.update(log_interval)
            if launch_tensorboard:
                writer.add_scalar('training/loss', engine.state.output, engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        pbar.refresh()
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        tqdm.write("Training Results - Epoch: {}  Avg accuracy: {:.3f}  Avg loss: {:.3f}"
                   .format(engine.state.epoch, avg_accuracy, avg_nll))
        if launch_tensorboard:
            writer.add_scalar('avg_loss', avg_nll, engine.state.epoch)
            writer.add_scalar('avg_accuracy', avg_accuracy, engine.state.epoch)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        tqdm.write("Validation Results - Epoch: {}  Avg accuracy: {:.3f}  Avg loss: {:.3f}"
                   .format(engine.state.epoch, avg_accuracy, avg_nll))
        pbar.n = pbar.last_print_n = 0
        if launch_tensorboard:
            val_writer.add_scalar('avg_loss', avg_nll, engine.state.epoch)
            val_writer.add_scalar('avg_accuracy', avg_accuracy, engine.state.epoch)

    # optimizer and model checkpoints already in the gdrive, created by a previous run
    original_files = glob.glob(os.path.join(backup_path, '*.pth*'))

    @trainer.on(Events.EPOCH_COMPLETED)
    def backup_checkpoints(engine):
        if engine.state.epoch % backup_step == 0:
            # remember the old checkpoint paths so we can remove them after copying
            old_files = glob.glob(os.path.join(backup_path, '*.pth'))
            # get the new model and optimizer checkpoints
            new_files = glob.glob(os.path.join(dirname, '*.pth*'))
            # copy new checkpoints from the local checkpoint folder to backup_path
            for f_ in new_files:
                shutil.copy2(f_, backup_path)
            # remove the older checkpoints now that the new ones have been copied
            for f_ in old_files:
                if f_ not in original_files:
                    os.remove(f_)

    if launch_tensorboard:
        @trainer.on(Events.EPOCH_COMPLETED)
        def add_histograms(engine):
            for name, param in model.named_parameters():
                writer.add_histogram(name, param.clone().cpu().data.numpy(), engine.state.epoch)

    trainer.run(train_loader, max_epochs=epochs)
    pbar.close()
    if launch_tensorboard:
        writer.close()
        val_writer.close()
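# The copy-then-prune backup pattern above appears in several of these scripts; it can be
# factored into a standalone helper. This sketch restates that logic outside the original
# scripts; the function name and `keep` parameter are illustrative.
import glob
import os
import shutil

def sync_checkpoints(src_dir: str, backup_dir: str, keep: set) -> None:
    """Copy *.pth* files from src_dir to backup_dir, then remove stale backups not in `keep`."""
    old_files = glob.glob(os.path.join(backup_dir, '*.pth'))
    new_files = glob.glob(os.path.join(src_dir, '*.pth*'))
    for f in new_files:
        shutil.copy2(f, backup_dir)
    if new_files:
        for f in old_files:
            if f not in keep:
                os.remove(f)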
def run(path_to_model_script, epochs, log_interval, dataloaders,
        dirname='resnet_models', filename_prefix='resnet', n_saved=2,
        log_dir='../../fer2013/logs', launch_tensorboard=False, patience=10):
    # if launch_tensorboard:
    #     os.system('pkill tensorboard')
    #     os.system('tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'.format(log_dir))
    #     os.system("npm install -g localtunnel")
    #     os.system('lt --port 6006 >> /content/url.txt 2>&1 &')
    #     os.system('cat /content/url.txt')

    # get the model and optimizer from the model script
    model_script = dict()
    with open(path_to_model_script) as f:
        exec(f.read(), model_script)
    model = model_script['my_model']
    optimizer = model_script['optimizer']

    train_loader, val_loader = dataloaders['train'], dataloaders['valid']
    if launch_tensorboard:
        writer, val_writer = create_summary_writer(model, train_loader, log_dir)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    trainer = create_supervised_trainer_multitask(model, optimizer,
                                                  loss_fn=my_multi_task_loss, device=device)
    evaluator = create_supervised_evaluator_multitask(
        model,
        metrics={'mt_accuracy': MultiTaskAccuracy(), 'mt_loss': MutliTaskLoss()},
        device=device)

    def get_val_loss(engine):
        return -engine.state.metrics['mt_loss']

    checkpointer = handlers.ModelCheckpoint(dirname=dirname,
                                            filename_prefix=filename_prefix,
                                            score_function=get_val_loss,
                                            score_name='val_loss',
                                            n_saved=n_saved,
                                            create_dir=True,
                                            require_empty=False,
                                            save_as_state_dict=True)
    earlystop = handlers.EarlyStopping(patience, get_val_loss, trainer)
    evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpointer,
                                {'optimizer': optimizer, 'model': model})
    evaluator.add_event_handler(Events.EPOCH_COMPLETED, earlystop)

    desc = "ITERATION - loss: {:.3f}"
    pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=desc.format(0))

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        iter_ = (engine.state.iteration - 1) % len(train_loader) + 1
        if iter_ % log_interval == 0:
            pbar.desc = desc.format(engine.state.output)
            pbar.update(log_interval)
            if launch_tensorboard:
                writer.add_scalar('training/loss', engine.state.output, engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        pbar.refresh()
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        age_l1_loss, gender_acc, race_acc = metrics['mt_accuracy']
        avg_nll = metrics['mt_loss']
        tqdm.write(
            "Training Results - Epoch: {}  Age L1-loss: {:.3f} ** Gender accuracy: {:.3f} "
            "** Race accuracy: {:.3f} ** Avg loss: {:.3f}".format(
                engine.state.epoch, age_l1_loss, gender_acc, race_acc, avg_nll))
        # if launch_tensorboard:
        #     writer.add_scalar('avg_loss', avg_nll, engine.state.epoch)
        #     writer.add_scalar('avg_accuracy', avg_accuracy, engine.state.epoch)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        age_l1_loss, gender_acc, race_acc = metrics['mt_accuracy']
        avg_nll = metrics['mt_loss']
        tqdm.write(
            "Validation Results - Epoch: {}  Age L1-loss: {:.3f} ** Gender accuracy: {:.3f} **"
            " Race accuracy: {:.3f} ** Avg loss: {:.3f}".format(
                engine.state.epoch, age_l1_loss, gender_acc, race_acc, avg_nll))
        pbar.n = pbar.last_print_n = 0
        # if launch_tensorboard:
        #     val_writer.add_scalar('avg_loss', avg_nll, engine.state.epoch)
        #     val_writer.add_scalar('avg_accuracy', avg_accuracy, engine.state.epoch)

    # if launch_tensorboard:
    #     @trainer.on(Events.EPOCH_COMPLETED)
    #     def add_histograms(engine):
    #         for name, param in model.named_parameters():
    #             writer.add_histogram(name, param.clone().cpu().data.numpy(), engine.state.epoch)

    trainer.run(train_loader, max_epochs=epochs)
    pbar.close()
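# Common to several snippets above: a best-checkpoint saver and EarlyStopping driven by
# the same score function, both attached to the evaluator. A minimal runnable sketch of
# that wiring with toy engines and a constant score; names and directory are illustrative.
import torch.nn as nn
from ignite import handlers
from ignite.engine import Engine, Events

model = nn.Linear(4, 2)
trainer = Engine(lambda e, b: 0.0)
evaluator = Engine(lambda e, b: 0.0)

def score(engine):
    return -engine.state.output  # higher is better, so negate the loss

checkpointer = handlers.ModelCheckpoint(dirname='checkpoints', filename_prefix='demo',
                                        score_function=score, score_name='neg_loss',
                                        n_saved=2, create_dir=True, require_empty=False)
earlystop = handlers.EarlyStopping(patience=5, score_function=score, trainer=trainer)
evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {'model': model})
evaluator.add_event_handler(Events.EPOCH_COMPLETED, earlystop)

@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
    evaluator.run([0] * 2)

trainer.run([0] * 3, max_epochs=2)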