def train(config, compression_algo, model, criterion, is_inception, lr_scheduler, model_name, optimizer,
          train_loader, train_sampler, val_loader):
    global best_acc1
    for epoch in range(config.start_epoch, config.epochs):
        config.cur_epoch = epoch
        if config.distributed:
            train_sampler.set_epoch(epoch)
        lr_scheduler.step(epoch if not isinstance(lr_scheduler, ReduceLROnPlateau) else best_acc1)

        # train for one epoch
        train_epoch(train_loader, model, criterion, optimizer, compression_algo, epoch, config, is_inception)

        # compute compression algo statistics
        stats = compression_algo.statistics()

        acc1 = best_acc1
        if epoch % config.test_every_n_epochs == 0:
            # evaluate on validation set
            acc1, _ = validate(val_loader, model, criterion, config)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        # update compression scheduler state at the end of the epoch
        compression_algo.scheduler.epoch_step()

        if is_main_process():
            print_statistics(stats)

            checkpoint_path = osp.join(config.checkpoint_save_dir, get_name(config) + '_last.pth')
            checkpoint = {
                'epoch': epoch + 1,
                'arch': model_name,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
                'scheduler': compression_algo.scheduler.state_dict()
            }

            torch.save(checkpoint, checkpoint_path)
            make_additional_checkpoints(checkpoint_path, is_best, epoch + 1, config)

            for key, value in stats.items():
                if isinstance(value, (int, float)):
                    config.tb.add_scalar("compression/statistics/{0}".format(key), value,
                                         len(train_loader) * epoch)
def train_epoch_end(config, compression_algo, net, epoch, iteration, epoch_size, lr_scheduler, optimizer,
                    test_data_loader, best_mAp):
    is_best = False
    test_freq_in_epochs = max(config.test_interval // epoch_size, 1)
    compression_algo.scheduler.epoch_step(epoch)
    if not isinstance(lr_scheduler, ReduceLROnPlateau):
        lr_scheduler.step(epoch)
    if epoch % test_freq_in_epochs == 0 and iteration != 0:
        if is_on_first_rank(config):
            print_statistics(compression_algo.statistics())
        with torch.no_grad():
            net.eval()
            mAP = test_net(net, config.device, test_data_loader, distributed=config.multiprocessing_distributed)
            if mAP > best_mAp:
                is_best = True
                best_mAp = mAP
            if config.metrics_dump is not None:
                write_metrics(mAP, config)
            if isinstance(lr_scheduler, ReduceLROnPlateau):
                lr_scheduler.step(mAP)
            net.train()
    if is_on_first_rank(config):
        checkpoint_file_path = osp.join(config.checkpoint_save_dir, "{}_last.pth".format(get_name(config)))
        torch.save({
            'state_dict': net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'iter': iteration,
            'scheduler': compression_algo.scheduler.state_dict()
        }, str(checkpoint_file_path))
        make_additional_checkpoints(checkpoint_file_path, is_best=is_best, epoch=epoch + 1, config=config)
    return best_mAp
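# Illustrative note (not part of the original sample code): train_epoch_end() above returns the
# updated best mAP, so a caller is expected to thread that value through its outer iteration loop,
# along the lines of the hypothetical sketch below:
#
#     best_mAp = 0
#     for iteration in range(config.start_iter, config['max_iter']):
#         ...
#         if iteration % epoch_size == 0:
#             best_mAp = train_epoch_end(config, compression_algo, net, epoch, iteration, epoch_size,
#                                        lr_scheduler, optimizer, test_data_loader, best_mAp)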
def train(model, model_without_dp, compression_ctrl, train_loader, val_loader, criterion, class_encoding, config,
          resuming_checkpoint):
    logger.info("\nTraining...\n")

    # Check if the network architecture is correct
    logger.info(model)

    optim_config = config.get('optimizer', {})
    optim_params = optim_config.get('optimizer_params', {})
    lr = optim_params.get("lr", 1e-4)

    params_to_optimize = get_params_to_optimize(model_without_dp, lr * 10, config)
    optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)

    # Evaluation metric
    ignore_index = None
    ignore_unlabeled = config.get("ignore_unlabeled", True)
    if ignore_unlabeled and ('unlabeled' in class_encoding):
        ignore_index = list(class_encoding).index('unlabeled')

    metric = IoU(len(class_encoding), ignore_index=ignore_index)

    best_miou = -1
    best_compression_level = CompressionLevel.NONE
    # Optionally resume from a checkpoint
    if resuming_checkpoint is not None:
        if optimizer is not None:
            optimizer.load_state_dict(resuming_checkpoint['optimizer'])
        start_epoch = resuming_checkpoint['epoch']
        best_miou = resuming_checkpoint['miou']
        if "scheduler" in resuming_checkpoint and compression_ctrl.scheduler is not None:
            compression_ctrl.scheduler.load_state_dict(resuming_checkpoint['scheduler'])
        logger.info("Resuming from model: Start epoch = {0} "
                    "| Best mean IoU = {1:.4f}".format(start_epoch, best_miou))
        config.start_epoch = start_epoch

    # Start Training
    train_obj = Train(model, train_loader, optimizer, criterion, compression_ctrl, metric, config.device,
                      config.model)
    val_obj = Test(model, val_loader, criterion, metric, config.device, config.model)

    for epoch in range(config.start_epoch, config.epochs):
        compression_ctrl.scheduler.epoch_step()
        logger.info(">>>> [Epoch: {0:d}] Training".format(epoch))

        if config.distributed:
            train_loader.sampler.set_epoch(epoch)

        epoch_loss, (iou, miou) = train_obj.run_epoch(config.print_step)
        if not isinstance(lr_scheduler, ReduceLROnPlateau):
            # Learning rate scheduling should be applied after optimizer’s update
            lr_scheduler.step(epoch)

        logger.info(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".
                    format(epoch, epoch_loss, miou))

        if is_main_process():
            config.tb.add_scalar("train/loss", epoch_loss, epoch)
            config.tb.add_scalar("train/mIoU", miou, epoch)
            config.tb.add_scalar("train/learning_rate", optimizer.param_groups[0]['lr'], epoch)
            config.tb.add_scalar("train/compression_loss", compression_ctrl.loss(), epoch)

            for key, value in compression_ctrl.statistics(quickly_collected_only=True).items():
                if isinstance(value, (int, float)):
                    config.tb.add_scalar("compression/statistics/{0}".format(key), value, epoch)

        if (epoch + 1) % config.save_freq == 0 or epoch + 1 == config.epochs:
            logger.info(">>>> [Epoch: {0:d}] Validation".format(epoch))

            loss, (iou, miou) = val_obj.run_epoch(config.print_step)

            logger.info(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".
                        format(epoch, loss, miou))

            if is_main_process():
                config.tb.add_scalar("val/mIoU", miou, epoch)
                config.tb.add_scalar("val/loss", loss, epoch)
                for i, (key, class_iou) in enumerate(zip(class_encoding.keys(), iou)):
                    config.tb.add_scalar("{}/mIoU_Cls{}_{}".format(config.dataset, i, key), class_iou, epoch)

            compression_level = compression_ctrl.compression_level()
            is_best_by_miou = miou > best_miou and compression_level == best_compression_level
            is_best = is_best_by_miou or compression_level > best_compression_level
            if is_best:
                best_miou = miou
            best_compression_level = max(compression_level, best_compression_level)

            if config.metrics_dump is not None:
                write_metrics(best_miou, config.metrics_dump)

            if isinstance(lr_scheduler, ReduceLROnPlateau):
                # Learning rate scheduling should be applied after optimizer’s update
                lr_scheduler.step(best_miou)

            # Print per class IoU on last epoch or if best iou
            if epoch + 1 == config.epochs or is_best:
                for key, class_iou in zip(class_encoding.keys(), iou):
                    logger.info("{0}: {1:.4f}".format(key, class_iou))

            # Save the model if it's the best thus far
            if is_main_process():
                checkpoint_path = save_checkpoint(model, optimizer, epoch, best_miou, compression_level,
                                                  compression_ctrl.scheduler, config)

                make_additional_checkpoints(checkpoint_path, is_best, epoch, config)
                print_statistics(compression_ctrl.statistics())

    return model
def train(model, model_without_dp, compression_algo, train_loader, val_loader, class_weights, class_encoding,
          config):
    print("\nTraining...\n")

    # Check if the network architecture is correct
    print(model)

    optim_config = config.get('optimizer', {})
    optim_params = optim_config.get('optimizer_params', {})
    lr = optim_params.get("lr", 1e-4)

    params_to_optimize, criterion = get_aux_loss_dependent_params(model_without_dp, class_weights, lr * 10, config)
    optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)

    # Evaluation metric
    ignore_index = None
    ignore_unlabeled = config.get("ignore_unlabeled", True)
    if ignore_unlabeled and ('unlabeled' in class_encoding):
        ignore_index = list(class_encoding).index('unlabeled')

    metric = IoU(len(class_encoding), ignore_index=ignore_index)

    best_miou = -1
    resuming_checkpoint = config.resuming_checkpoint
    # Optionally resume from a checkpoint
    if resuming_checkpoint is not None:
        model, optimizer, start_epoch, best_miou, _ = \
            load_checkpoint(model, resuming_checkpoint, config.device, optimizer, compression_algo.scheduler)
        print("Resuming from model: Start epoch = {0} "
              "| Best mean IoU = {1:.4f}".format(start_epoch, best_miou))
        config.start_epoch = start_epoch

    # Start Training
    train_obj = Train(model, train_loader, optimizer, criterion, compression_algo, metric, config.device,
                      config.model)
    val_obj = Test(model, val_loader, criterion, metric, config.device, config.model)

    for epoch in range(config.start_epoch, config.epochs):
        print(">>>> [Epoch: {0:d}] Training".format(epoch))

        if config.distributed:
            train_loader.sampler.set_epoch(epoch)

        if not isinstance(lr_scheduler, ReduceLROnPlateau):
            lr_scheduler.step(epoch)

        epoch_loss, (iou, miou) = train_obj.run_epoch(config.print_step)
        compression_algo.scheduler.epoch_step()

        print(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".
              format(epoch, epoch_loss, miou))

        if is_main_process():
            config.tb.add_scalar("train/loss", epoch_loss, epoch)
            config.tb.add_scalar("train/mIoU", miou, epoch)
            config.tb.add_scalar("train/learning_rate", optimizer.param_groups[0]['lr'], epoch)
            config.tb.add_scalar("train/compression_loss", compression_algo.loss(), epoch)

            for key, value in compression_algo.statistics().items():
                if isinstance(value, (int, float)):
                    config.tb.add_scalar("compression/statistics/{0}".format(key), value, epoch)

        if (epoch + 1) % config.save_freq == 0 or epoch + 1 == config.epochs:
            print(">>>> [Epoch: {0:d}] Validation".format(epoch))

            loss, (iou, miou) = val_obj.run_epoch(config.print_step)

            print(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".
                  format(epoch, loss, miou))

            if is_main_process():
                config.tb.add_scalar("val/mIoU", miou, epoch)
                config.tb.add_scalar("val/loss", loss, epoch)
                for i, (key, class_iou) in enumerate(zip(class_encoding.keys(), iou)):
                    config.tb.add_scalar("{}/mIoU_Cls{}_{}".format(config.dataset, i, key), class_iou, epoch)

            is_best = miou > best_miou
            best_miou = max(miou, best_miou)

            if isinstance(lr_scheduler, ReduceLROnPlateau):
                lr_scheduler.step(best_miou)

            # Print per class IoU on last epoch or if best iou
            if epoch + 1 == config.epochs or is_best:
                for key, class_iou in zip(class_encoding.keys(), iou):
                    print("{0}: {1:.4f}".format(key, class_iou))

            # Save the model if it's the best thus far
            if is_main_process():
                checkpoint_path = save_checkpoint(model, optimizer, epoch + 1, best_miou,
                                                  compression_algo.scheduler, config)

                make_additional_checkpoints(checkpoint_path, is_best, epoch + 1, config)
                print_statistics(compression_algo.statistics())

    return model
def train(net, compression_algo, train_data_loader, test_data_loader, criterion, optimizer, config, lr_scheduler):
    net.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0
    # best mAP seen so far; train_epoch_end() returns the updated value
    best_mAp = 0

    epoch_size = len(train_data_loader)
    print('Training ', config.model, ' on ', train_data_loader.dataset.name, ' dataset...')
    batch_iterator = None

    t_start = time.time()
    print_statistics(compression_algo.statistics())

    for iteration in range(config.start_iter, config['max_iter']):
        if (not batch_iterator) or (iteration % epoch_size == 0):
            # create batch iterator
            batch_iterator = iter(train_data_loader)

        epoch = iteration // epoch_size

        if iteration % epoch_size == 0:
            best_mAp = train_epoch_end(config, compression_algo, net, epoch, iteration, epoch_size,
                                       lr_scheduler, optimizer, test_data_loader, best_mAp)

        compression_algo.scheduler.step(iteration - config.start_iter)

        optimizer.zero_grad()
        batch_iterator, batch_loss, batch_loss_c, batch_loss_l, loss_comp = train_step(
            batch_iterator, compression_algo, config, criterion, net, train_data_loader
        )
        optimizer.step()

        batch_loss_l = batch_loss_l / config.iter_size
        batch_loss_c = batch_loss_c / config.iter_size
        model_loss = (batch_loss_l + batch_loss_c) / config.iter_size
        batch_loss = batch_loss / config.iter_size

        loc_loss += batch_loss_l.item()
        conf_loss += batch_loss_c.item()

        ###########################
        # Logging
        ###########################

        if is_on_first_rank(config):
            config.tb.add_scalar("train/loss_l", batch_loss_l.item(), iteration)
            config.tb.add_scalar("train/loss_c", batch_loss_c.item(), iteration)
            config.tb.add_scalar("train/loss", batch_loss.item(), iteration)

            checkpoint_file_path = osp.join(config.checkpoint_save_dir, "{}_last.pth".format(get_name(config)))
            torch.save({
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict(),
                'iter': config['max_iter'],
                'scheduler': compression_algo.scheduler.state_dict()
            }, str(checkpoint_file_path))
            make_additional_checkpoints(checkpoint_file_path, is_best=True, epoch=epoch + 1, config=config)

        if iteration % config.print_freq == 0:
            t_finish = time.time()
            t_elapsed = t_finish - t_start
            t_start = time.time()
            print('{}: iter {} epoch {} || Loss: {:.4} || Time {:.4}s || lr: {} || CR loss: {}'.format(
                config.rank, iteration, epoch, model_loss.item(), t_elapsed,
                optimizer.param_groups[0]['lr'],
                loss_comp.item() if isinstance(loss_comp, torch.Tensor) else loss_comp
            ))
def train(config, compression_ctrl, model, criterion, is_inception, lr_scheduler, model_name, optimizer,
          train_loader, train_sampler, val_loader, best_acc1=0):
    best_compression_level = CompressionLevel.NONE
    for epoch in range(config.start_epoch, config.epochs):
        config.cur_epoch = epoch
        if config.distributed:
            train_sampler.set_epoch(epoch)

        # train for one epoch
        train_epoch(train_loader, model, criterion, optimizer, compression_ctrl, epoch, config, is_inception)

        # Learning rate scheduling should be applied after optimizer’s update
        lr_scheduler.step(epoch if not isinstance(lr_scheduler, ReduceLROnPlateau) else best_acc1)

        # update compression scheduler state at the end of the epoch
        compression_ctrl.scheduler.epoch_step()

        # compute compression algo statistics
        stats = compression_ctrl.statistics()

        acc1 = best_acc1
        if epoch % config.test_every_n_epochs == 0:
            # evaluate on validation set
            acc1, _ = validate(val_loader, model, criterion, config)

        compression_level = compression_ctrl.compression_level()
        # remember the best acc@1, considering the compression level. If the current acc@1 is less than the best
        # acc@1, the checkpoint can still be best if the current compression level is higher than the best one.
        # Compression levels in ascending order: NONE, PARTIAL, FULL.
        is_best_by_accuracy = acc1 > best_acc1 and compression_level == best_compression_level
        is_best = is_best_by_accuracy or compression_level > best_compression_level
        if is_best:
            best_acc1 = acc1
        best_compression_level = max(compression_level, best_compression_level)

        acc = best_acc1 / 100
        if config.metrics_dump is not None:
            write_metrics(acc, config.metrics_dump)

        if is_main_process():
            print_statistics(stats)

            checkpoint_path = osp.join(config.checkpoint_save_dir, get_name(config) + '_last.pth')
            checkpoint = {
                'epoch': epoch + 1,
                'arch': model_name,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'compression_level': compression_level,
                'acc1': acc1,
                'optimizer': optimizer.state_dict(),
                'scheduler': compression_ctrl.scheduler.state_dict()
            }

            torch.save(checkpoint, checkpoint_path)
            make_additional_checkpoints(checkpoint_path, is_best, epoch + 1, config)

            for key, value in stats.items():
                if isinstance(value, (int, float)):
                    config.tb.add_scalar("compression/statistics/{0}".format(key), value,
                                         len(train_loader) * epoch)
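# Illustrative sketch (not part of the sample above): the "best checkpoint" rule used in train()
# and train_staged() can be isolated as below. The enum values and helper name here are
# hypothetical stand-ins assumed only to make the rule explicit; the real samples obtain the level
# from compression_ctrl.compression_level().
from enum import IntEnum


class _CompressionLevelSketch(IntEnum):
    NONE = 0
    PARTIAL = 1
    FULL = 2


def _is_better_checkpoint(acc1, best_acc1, level, best_level):
    # A checkpoint wins either by improving the metric at the same compression level,
    # or by reaching a higher compression level regardless of the metric.
    is_best_by_accuracy = acc1 > best_acc1 and level == best_level
    return is_best_by_accuracy or level > best_level


# e.g. _is_better_checkpoint(75.8, 76.1, _CompressionLevelSketch.FULL, _CompressionLevelSketch.PARTIAL) -> True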
def train(net, compression_ctrl, train_data_loader, test_data_loader, criterion, optimizer, config, lr_scheduler):
    net.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0

    epoch_size = len(train_data_loader)
    logger.info('Training {} on {} dataset...'.format(config.model, train_data_loader.dataset.name))
    batch_iterator = None

    t_start = time.time()
    print_statistics(compression_ctrl.statistics())

    best_mAp = 0
    best_compression_level = CompressionLevel.NONE
    test_freq_in_epochs = max(config.test_interval // epoch_size, 1)

    for iteration in range(config.start_iter, config['max_iter']):
        if (not batch_iterator) or (iteration % epoch_size == 0):
            # create batch iterator
            batch_iterator = iter(train_data_loader)

        epoch = iteration // epoch_size

        if (iteration + 1) % epoch_size == 0:
            compression_ctrl.scheduler.epoch_step(epoch)
            compression_level = compression_ctrl.compression_level()
            is_best = False

            if (epoch + 1) % test_freq_in_epochs == 0:
                if is_on_first_rank(config):
                    print_statistics(compression_ctrl.statistics())
                with torch.no_grad():
                    net.eval()
                    mAP = test_net(net, config.device, test_data_loader,
                                   distributed=config.multiprocessing_distributed)
                    is_best_by_mAP = mAP > best_mAp and compression_level == best_compression_level
                    is_best = is_best_by_mAP or compression_level > best_compression_level
                    if is_best:
                        best_mAp = mAP
                    best_compression_level = max(compression_level, best_compression_level)
                    net.train()

            # Learning rate scheduling should be applied after optimizer’s update
            if not isinstance(lr_scheduler, ReduceLROnPlateau):
                lr_scheduler.step(epoch)
            else:
                lr_scheduler.step(mAP)

            if is_on_first_rank(config):
                logger.info('Saving state, iter: {}'.format(iteration))
                checkpoint_file_path = osp.join(config.checkpoint_save_dir,
                                                "{}_last.pth".format(get_name(config)))
                torch.save({
                    'state_dict': net.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'iter': config['max_iter'],
                    'scheduler': compression_ctrl.scheduler.state_dict(),
                    'compression_level': compression_level,
                }, str(checkpoint_file_path))
                make_additional_checkpoints(checkpoint_file_path, is_best=is_best, epoch=epoch + 1, config=config)

        compression_ctrl.scheduler.step(iteration - config.start_iter)

        optimizer.zero_grad()
        batch_iterator, batch_loss, batch_loss_c, batch_loss_l, loss_comp = train_step(
            batch_iterator, compression_ctrl, config, criterion, net, train_data_loader)
        optimizer.step()

        batch_loss_l = batch_loss_l / config.iter_size
        batch_loss_c = batch_loss_c / config.iter_size
        model_loss = (batch_loss_l + batch_loss_c) / config.iter_size
        batch_loss = batch_loss / config.iter_size

        loc_loss += batch_loss_l.item()
        conf_loss += batch_loss_c.item()

        ###########################
        # Logging
        ###########################

        if is_on_first_rank(config):
            config.tb.add_scalar("train/loss_l", batch_loss_l.item(), iteration)
            config.tb.add_scalar("train/loss_c", batch_loss_c.item(), iteration)
            config.tb.add_scalar("train/loss", batch_loss.item(), iteration)

        if iteration % config.print_freq == 0:
            t_finish = time.time()
            t_elapsed = t_finish - t_start
            t_start = time.time()
            logger.info('{}: iter {} epoch {} || Loss: {:.4} || Time {:.4}s || lr: {} || CR loss: {}'.format(
                config.rank, iteration, epoch, model_loss.item(), t_elapsed,
                optimizer.param_groups[0]['lr'],
                loss_comp.item() if isinstance(loss_comp, torch.Tensor) else loss_comp))

    if config.metrics_dump is not None:
        write_metrics(best_mAp, config.metrics_dump)
def train_staged(config, compression_ctrl, model, criterion, is_inception, optimizer_scheduler, model_name,
                 optimizer, train_loader, train_sampler, val_loader, kd_loss_calculator, batch_multiplier,
                 best_acc1=0):
    best_compression_level = CompressionLevel.NONE
    for epoch in range(config.start_epoch, config.epochs):
        config.cur_epoch = epoch
        if config.distributed:
            train_sampler.set_epoch(epoch)

        # train for one epoch
        train_epoch_staged(train_loader, batch_multiplier, model, criterion, optimizer, optimizer_scheduler,
                           kd_loss_calculator, compression_ctrl, epoch, config, is_inception)

        # compute compression algo statistics
        stats = compression_ctrl.statistics()

        acc1 = best_acc1
        if epoch % config.test_every_n_epochs == 0:
            # evaluate on validation set
            acc1, _ = validate(val_loader, model, criterion, config)

        compression_level = compression_ctrl.compression_level()
        # remember the best acc@1, considering the compression level. If the current acc@1 is less than the best
        # acc@1, the checkpoint can still be best if the current compression level is higher than the best one.
        # Compression levels in ascending order: NONE, PARTIAL, FULL.
        is_best_by_accuracy = acc1 > best_acc1 and compression_level == best_compression_level
        is_best = is_best_by_accuracy or compression_level > best_compression_level
        best_acc1 = max(acc1, best_acc1)
        best_compression_level = max(compression_level, best_compression_level)

        # statistics (e.g. the portion of enabled quantizers) are related to the finished epoch,
        # hence printing should happen before epoch_step, which may inform about the state of the next epoch
        # (e.g. the next portion of enabled quantizers)
        if is_main_process():
            print_statistics(stats)

        # update compression scheduler state at the end of the epoch
        compression_ctrl.scheduler.epoch_step()
        optimizer_scheduler.epoch_step()

        if is_main_process():
            checkpoint_path = osp.join(config.checkpoint_save_dir, get_name(config) + '_last.pth')
            checkpoint = {
                'epoch': epoch + 1,
                'arch': model_name,
                'state_dict': model.state_dict(),
                'original_model_state_dict': kd_loss_calculator.original_model.state_dict(),
                'best_acc1': best_acc1,
                'compression_level': compression_level,
                'optimizer': optimizer.state_dict(),
                'compression_scheduler': compression_ctrl.scheduler.state_dict(),
                'optimizer_scheduler': optimizer_scheduler.state_dict()
            }

            torch.save(checkpoint, checkpoint_path)
            make_additional_checkpoints(checkpoint_path, is_best, epoch + 1, config)

            for key, value in stats.items():
                if isinstance(value, (int, float)):
                    config.tb.add_scalar("compression/statistics/{0}".format(key), value,
                                         len(train_loader) * epoch)