def train(model, train_sampler, train_data_loader, val_data_loader, device, distributed, config, args, ckpt_file_path):
    train_config = config['train']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    best_val_map = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)

    num_epochs = train_config['num_epochs']
    log_freq = train_config['log_freq']
    start_time = time.time()
    for epoch in range(num_epochs):
        if distributed:
            train_sampler.set_epoch(epoch)

        train_model(model, optimizer, train_data_loader, device, epoch, log_freq)
        lr_scheduler.step()

        # evaluate after every epoch
        coco_evaluator = main_util.evaluate(model, val_data_loader, device=device)
        # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ]
        val_map = coco_evaluator.coco_eval['bbox'].stats[0]
        if val_map > best_val_map:
            print('Updating ckpt (Best BBox mAP: {:.4f} -> {:.4f})'.format(best_val_map, val_map))
            best_val_map = val_map
            save_ckpt(model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)

    dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
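
# The inner loop `train_model` is not shown in this section. A minimal sketch of what such a
# one-epoch loop could look like for a torchvision-style detection model (which returns a dict
# of losses in train mode) is given below; the body is an assumption that only mirrors the call
# site above, not the repository's implementation.
def train_model(model, optimizer, data_loader, device, epoch, log_freq):
    model.train()
    for batch_idx, (images, targets) in enumerate(data_loader):
        images = [image.to(device) for image in images]
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        # torchvision detection models return a dict of losses when given targets
        loss_dict = model(images, targets)
        loss = sum(loss for loss in loss_dict.values())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % log_freq == 0:
            print('Epoch {} [{}/{}]\tLoss: {:.4f}'.format(epoch, batch_idx, len(data_loader), loss.item()))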

def train(train_loader, valid_loader, input_shape, config, device, distributed, device_ids):
    ae_without_ddp, ae_type = ae_util.get_autoencoder(config, device)
    head_model = ae_util.get_head_model(config, input_shape, device)
    module_util.freeze_module_params(head_model)
    ckpt_file_path = config['autoencoder']['ckpt']
    start_epoch, best_valid_acc = resume_from_ckpt(ckpt_file_path, ae_without_ddp)
    if best_valid_acc is None:
        best_valid_acc = 0.0

    train_config = config['train']
    criterion_config = train_config['criterion']
    criterion = func_util.get_loss(criterion_config['type'], criterion_config['params'])
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(ae_without_ddp, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    interval = train_config['interval']
    if interval <= 0:
        num_batches = len(train_loader)
        interval = num_batches // 20 if num_batches >= 20 else 1

    autoencoder = ae_without_ddp
    if distributed:
        autoencoder = DistributedDataParallel(ae_without_ddp, device_ids=device_ids)
        head_model = DataParallel(head_model, device_ids=device_ids)
    elif device.type == 'cuda':
        autoencoder = DataParallel(ae_without_ddp)
        head_model = DataParallel(head_model)

    end_epoch = start_epoch + train_config['epoch']
    start_time = time.time()
    for epoch in range(start_epoch, end_epoch):
        if distributed:
            train_loader.sampler.set_epoch(epoch)

        train_epoch(autoencoder, head_model, train_loader, optimizer, criterion, epoch, device, interval)
        valid_acc = validate(ae_without_ddp, valid_loader, config, device, distributed, device_ids)
        if valid_acc > best_valid_acc and main_util.is_main_process():
            print('Updating ckpt (Best top1 accuracy: {:.4f} -> {:.4f})'.format(best_valid_acc, valid_acc))
            best_valid_acc = valid_acc
            save_ckpt(ae_without_ddp, epoch, best_valid_acc, ckpt_file_path, ae_type)
        scheduler.step()

    dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    del head_model
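
# `train_epoch` and `validate` are defined elsewhere. The sketch below shows one plausible
# reading of the pipeline implied by the call site (a trainable autoencoder feeding a frozen
# head model, with top-1 accuracy used for validation); the data flow and names are
# assumptions, not the repository's implementation.
def train_epoch(autoencoder, head_model, train_loader, optimizer, criterion, epoch, device, interval):
    autoencoder.train()
    head_model.eval()
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        inputs, targets = inputs.to(device), targets.to(device)
        compressed = autoencoder(inputs)   # trainable compression / reconstruction
        outputs = head_model(compressed)   # frozen downstream head
        loss = criterion(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % interval == 0:
            print('Epoch {} [{}/{}]\tLoss: {:.4f}'.format(epoch, batch_idx, len(train_loader), loss.item()))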

def distill(teacher_model, student_model, train_sampler, train_data_loader, val_data_loader, device,
            distributed, distill_backbone_only, config, args):
    train_config = config['train']
    distillation_box = DistillationBox(teacher_model, student_model, train_config['criterion'])
    ckpt_file_path = config['student_model']['ckpt']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    use_bottleneck_transformer = args.transform_bottleneck
    best_val_map = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)

    num_epochs = train_config['num_epochs']
    log_freq = train_config['log_freq']
    teacher_model_without_dp = teacher_model.module if isinstance(teacher_model, DataParallel) else teacher_model
    student_model_without_ddp = \
        student_model.module if isinstance(student_model, DistributedDataParallel) else student_model
    start_time = time.time()
    for epoch in range(num_epochs):
        if distributed:
            train_sampler.set_epoch(epoch)

        teacher_model.eval()
        student_model.train()
        teacher_model_without_dp.distill_backbone_only = distill_backbone_only
        student_model_without_ddp.distill_backbone_only = distill_backbone_only
        student_model_without_ddp.backbone.body.layer1.use_bottleneck_transformer = False
        distill_model(distillation_box, train_data_loader, optimizer, log_freq, device, epoch)

        student_model_without_ddp.distill_backbone_only = False
        student_model_without_ddp.backbone.body.layer1.use_bottleneck_transformer = use_bottleneck_transformer
        coco_evaluator = main_util.evaluate(student_model, val_data_loader, device=device)
        # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ]
        val_map = coco_evaluator.coco_eval['bbox'].stats[0]
        if val_map > best_val_map and misc_util.is_main_process():
            print('Updating ckpt (Best BBox mAP: {:.4f} -> {:.4f})'.format(best_val_map, val_map))
            best_val_map = val_map
            save_ckpt(student_model_without_ddp, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)
        lr_scheduler.step()

    dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
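
# `DistillationBox` and `distill_model` are defined elsewhere in the repository. As a rough,
# hedged illustration of the idea only (not the actual implementation), a distillation box can
# be thought of as a module that runs teacher and student on the same batch and returns a loss
# between their outputs:
import torch
from torch import nn


class SimpleDistillationBox(nn.Module):
    """Illustrative sketch: pairs a frozen teacher with a student and a criterion."""
    def __init__(self, teacher_model, student_model, criterion):
        super().__init__()
        self.teacher_model = teacher_model
        self.student_model = student_model
        self.criterion = criterion

    def forward(self, images):
        with torch.no_grad():
            teacher_outputs = self.teacher_model(images)
        student_outputs = self.student_model(images)
        return self.criterion(student_outputs, teacher_outputs)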

def distill(teacher_model, student_model, train_data_loader, val_data_loader, device, distributed,
            start_epoch, config, args):
    print('Start knowledge distillation')
    train_config = config['train']
    distillation_box = DistillationBox(teacher_model, student_model, train_config['criterion'])
    ckpt_file_path = config['mimic_model']['ckpt']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    best_val_top1_accuracy = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_top1_accuracy, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)

    interval = train_config['interval']
    if interval <= 0:
        num_batches = len(train_data_loader)
        interval = num_batches // 20 if num_batches >= 20 else 1

    student_model_without_ddp = \
        student_model.module if isinstance(student_model, DistributedDataParallel) else student_model
    start_time = time.time()
    for epoch in range(start_epoch, train_config['epoch']):
        if distributed:
            train_data_loader.sampler.set_epoch(epoch)

        teacher_model.eval()
        student_model.train()
        distill_one_epoch(distillation_box, train_data_loader, optimizer, device, epoch, interval, args.apex)
        val_top1_accuracy = \
            evaluate(student_model, val_data_loader, device=device, interval=interval, split_name='Validation')
        if val_top1_accuracy > best_val_top1_accuracy and main_util.is_main_process():
            print('Updating ckpt (Best top1 accuracy: {:.4f} -> {:.4f})'.format(best_val_top1_accuracy,
                                                                                val_top1_accuracy))
            best_val_top1_accuracy = val_top1_accuracy
            save_ckpt(student_model_without_ddp, optimizer, lr_scheduler, best_val_top1_accuracy,
                      config, args, ckpt_file_path)
        lr_scheduler.step()

    dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
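
# `distill_one_epoch` is not shown here. The sketch below illustrates how the `args.apex` flag
# passed above is typically consumed via NVIDIA Apex's amp.scale_loss; the body is an assumption
# that only mirrors the call signature used above, not the repository's implementation.
def distill_one_epoch(distillation_box, train_data_loader, optimizer, device, epoch, interval, use_apex):
    for batch_idx, (inputs, _) in enumerate(train_data_loader):
        inputs = inputs.to(device)
        loss = distillation_box(inputs)
        optimizer.zero_grad()
        if use_apex:
            # mixed-precision path: scale the loss so FP16 gradients do not underflow
            from apex import amp
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
        if batch_idx % interval == 0:
            print('Epoch {} [{}/{}]\tLoss: {:.4f}'.format(epoch, batch_idx, len(train_data_loader), loss.item()))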

def distill(teacher_model, student_model, train_sampler, train_data_loader, val_data_loader, device,
            distributed, distill_backbone_only, config, args):
    train_config = config['train']
    student_config = config['student_model']
    distillation_box = DistillationBox(teacher_model, student_model, train_config['criterion'], student_config)
    ckpt_file_path = config['student_model']['ckpt']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    best_val_map = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)
    save_ckpt(student_model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)

def train(model, train_loader, valid_loader, best_valid_acc, criterion, device, distributed, device_ids,
          train_config, num_epochs, start_epoch, init_lr, ckpt_file_path, model_type):
    model_without_ddp = model
    if distributed:
        model = DistributedDataParallel(model_without_ddp, device_ids=device_ids)
    elif device.type == 'cuda':
        model = DataParallel(model_without_ddp)

    optim_config = train_config['optimizer']
    if init_lr is not None:
        optim_config['params']['lr'] = init_lr

    optimizer = func_util.get_optimizer(model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    interval = train_config['interval']
    if interval <= 0:
        num_batches = len(train_loader)
        interval = num_batches // 20 if num_batches >= 20 else 1

    end_epoch = start_epoch + train_config['epoch'] if num_epochs is None else start_epoch + num_epochs
    start_time = time.time()
    for epoch in range(start_epoch, end_epoch):
        if distributed:
            train_loader.sampler.set_epoch(epoch)

        train_epoch(model, train_loader, optimizer, criterion, epoch, device, interval)
        valid_acc = validate(model, valid_loader, device)
        if valid_acc > best_valid_acc and main_util.is_main_process():
            print('Updating ckpt (Best top1 accuracy: {:.4f} -> {:.4f})'.format(best_valid_acc, valid_acc))
            best_valid_acc = valid_acc
            save_ckpt(model_without_ddp, best_valid_acc, epoch, ckpt_file_path, model_type)
        scheduler.step()

    dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
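
# `validate` above returns a top-1 accuracy. A minimal, generic sketch of such a routine for a
# classification model is shown below; it is an assumption for illustration, not the
# repository's implementation.
import torch


def validate(model, valid_loader, device):
    model.eval()
    num_correct, num_samples = 0, 0
    with torch.no_grad():
        for inputs, targets in valid_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            preds = model(inputs).argmax(dim=1)
            num_correct += (preds == targets).sum().item()
            num_samples += targets.size(0)
    return 100.0 * num_correct / num_samples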

def train(model, ext_classifier, train_sampler, train_data_loader, val_data_loader, device, distributed,
          config, args, ckpt_file_path):
    train_config = config['train']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(ext_classifier, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    best_val_roc_auc = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_roc_auc, _, _ = \
            load_ckpt(ckpt_file_path, model=ext_classifier, optimizer=optimizer, lr_scheduler=lr_scheduler)

    num_epochs = train_config['num_epochs']
    log_freq = train_config['log_freq']
    start_time = time.time()
    for epoch in range(num_epochs):
        if distributed:
            train_sampler.set_epoch(epoch)

        train_model(model, optimizer, train_data_loader, device, epoch, log_freq)
        lr_scheduler.step()

        # evaluate after every epoch
        val_roc_auc = evaluate(model, val_data_loader, device, min_recall=args.min_recall, split_name='Validation')
        if val_roc_auc > best_val_roc_auc:
            print('Updating ckpt (Best ROC-AUC: {:.4f} -> {:.4f})'.format(best_val_roc_auc, val_roc_auc))
            best_val_roc_auc = val_roc_auc
            save_ckpt(ext_classifier, optimizer, lr_scheduler, best_val_roc_auc, config, args, ckpt_file_path)

    dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
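
# The `evaluate` call above returns a validation ROC-AUC. A hedged sketch of such an evaluation
# for a binary ext_classifier is shown below; sklearn's roc_auc_score is used purely for
# illustration, and the function name and omission of the min_recall handling are assumptions
# about the original helper.
import torch
from sklearn.metrics import roc_auc_score


def evaluate_roc_auc(model, data_loader, device, split_name='Validation'):
    model.eval()
    all_scores, all_labels = [], []
    with torch.no_grad():
        for inputs, targets in data_loader:
            scores = torch.sigmoid(model(inputs.to(device))).squeeze(-1).cpu()
            all_scores.extend(scores.tolist())
            all_labels.extend(targets.tolist())
    roc_auc = roc_auc_score(all_labels, all_scores)
    print('[{}] ROC-AUC: {:.4f}'.format(split_name, roc_auc))
    return roc_auc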

def main(args):
    config = yaml_util.load_yaml_file(args.config)
    if args.json is not None:
        main_util.overwrite_config(config, args.json)

    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    teacher_model = get_model(config['teacher_model'], device)
    module_util.freeze_module_params(teacher_model)
    student_model_config = config['student_model']
    student_model = get_model(student_model_config, device)
    freeze_modules(student_model, student_model_config)
    ckpt_file_path = config['student_model']['ckpt']
    train_config = config['train']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    best_val_map = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)
    save_ckpt(student_model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)

def distill(train_loader, valid_loader, input_shape, aux_weight, config, device, distributed, device_ids):
    teacher_model_config = config['teacher_model']
    teacher_model, teacher_model_type = mimic_util.get_teacher_model(teacher_model_config, input_shape, device)
    module_util.freeze_module_params(teacher_model)
    student_model_config = config['student_model']
    student_model = mimic_util.get_student_model(teacher_model_type, student_model_config, config['dataset']['name'])
    student_model = student_model.to(device)
    start_epoch, best_valid_acc = mimic_util.resume_from_ckpt(student_model_config['ckpt'], student_model,
                                                              is_student=True)
    if best_valid_acc is None:
        best_valid_acc = 0.0

    train_config = config['train']
    criterion_config = train_config['criterion']
    criterion = func_util.get_loss(criterion_config['type'], criterion_config['params'])
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    interval = train_config['interval']
    if interval <= 0:
        num_batches = len(train_loader)
        interval = num_batches // 20 if num_batches >= 20 else 1

    student_model_without_ddp = student_model
    if distributed:
        teacher_model = DataParallel(teacher_model, device_ids=device_ids)
        student_model = DistributedDataParallel(student_model, device_ids=device_ids)
        student_model_without_ddp = student_model.module

    ckpt_file_path = student_model_config['ckpt']
    end_epoch = start_epoch + train_config['epoch']
    start_time = time.time()
    for epoch in range(start_epoch, end_epoch):
        if distributed:
            train_loader.sampler.set_epoch(epoch)

        distill_one_epoch(student_model, teacher_model, train_loader, optimizer, criterion, epoch, device,
                          interval, aux_weight)
        valid_acc = validate(student_model, valid_loader, config, device, distributed, device_ids)
        if valid_acc > best_valid_acc and main_util.is_main_process():
            print('Updating ckpt (Best top1 accuracy: {:.4f} -> {:.4f})'.format(best_valid_acc, valid_acc))
            best_valid_acc = valid_acc
            save_ckpt(student_model_without_ddp, epoch, best_valid_acc, ckpt_file_path, teacher_model_type)
        scheduler.step()

    dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    del teacher_model
    del student_model
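
# `distill_one_epoch` here takes both a criterion and an `aux_weight`. One common pattern in
# mimic/feature-distillation setups is to match the student's output against the frozen
# teacher's output for the same batch; the sketch below is an assumption along those lines
# (the auxiliary term that `aux_weight` presumably scales is only noted in a comment), not the
# repository's implementation.
import torch


def distill_one_epoch_sketch(student_model, teacher_model, train_loader, optimizer, criterion,
                             epoch, device, interval, aux_weight):
    student_model.train()
    teacher_model.eval()
    for batch_idx, (inputs, _) in enumerate(train_loader):
        inputs = inputs.to(device)
        with torch.no_grad():
            teacher_outputs = teacher_model(inputs)
        student_outputs = student_model(inputs)
        # In the real code, aux_weight presumably scales an additional auxiliary loss term;
        # only the plain output-mimicking loss is shown here.
        loss = criterion(student_outputs, teacher_outputs)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % interval == 0:
            print('Epoch {} [{}/{}]\tLoss: {:.4f}'.format(epoch, batch_idx, len(train_loader), loss.item()))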

def distill(teacher_model, student_model, train_sampler, train_data_loader, val_data_loader, device,
            distributed, distill_backbone_only, config, args):
    train_config = config['train']
    student_config = config['student_model']
    distillation_box = DistillationBox(teacher_model, student_model, train_config['criterion'], student_config)
    ckpt_file_path = config['student_model']['ckpt']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    use_bottleneck_transformer = args.transform_bottleneck
    best_val_map = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        if args.ignore_optimizer:
            best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=None, lr_scheduler=None)
        else:
            best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)

    num_epochs = train_config['num_epochs']
    log_freq = train_config['log_freq']
    teacher_model_without_dp = teacher_model.module if isinstance(teacher_model, DataParallel) else teacher_model
    student_model_without_ddp = \
        student_model.module if isinstance(student_model, DistributedDataParallel) else student_model
    start_time = time.time()
    post_bn = config['train'].get('post_batch_norm', False)
    for epoch in range(lr_scheduler.last_epoch, num_epochs):
        if distributed:
            train_sampler.set_epoch(epoch)

        teacher_model.eval()
        student_model.train()
        teacher_model_without_dp.distill_backbone_only = distill_backbone_only
        student_model_without_ddp.distill_backbone_only = distill_backbone_only
        set_bottleneck_transformer(student_model_without_ddp, False)
        distill_model(distillation_box, train_data_loader, optimizer, log_freq, device, epoch)

        student_model_without_ddp.distill_backbone_only = False
        set_bottleneck_transformer(student_model_without_ddp, use_bottleneck_transformer)
        val_map = 0
        width_list = [1.0]
        if 'slimmable' in student_config['backbone']['params']:
            width_list = [0.25, 0.5, 0.75, 1.0]
            width_list = [w for w in width_list
                          if w in student_config['backbone']['params']['width_mult_list']]

        for width in width_list:
            set_width(student_model, width)
            if post_bn:
                ComputeBN(student_model, train_data_loader)
            print('\n[Student model@width={}]'.format(width))
            coco_evaluator = main_util.evaluate(student_model, val_data_loader, device=device)
            val_map += coco_evaluator.coco_eval['bbox'].stats[0]

        val_map = val_map / len(width_list)
        print('BBox mAP: {:.4f}'.format(val_map))
        if val_map > best_val_map and misc_util.is_main_process():
            print('Updating ckpt (Best BBox mAP: {:.4f} -> {:.4f})'.format(best_val_map, val_map))
            best_val_map = val_map
            save_ckpt(student_model_without_ddp, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)
        lr_scheduler.step()

    if distributed:
        dist.barrier()

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
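
# `set_width`, `set_bottleneck_transformer`, and `ComputeBN` come from the slimmable-network
# utilities this function relies on and are not shown here. As a hedged illustration only,
# post-hoc batch-norm recalibration (what a ComputeBN-style helper typically does) resets the
# BatchNorm running statistics and re-estimates them with forward passes at the current width:
import torch
from torch import nn


def recalibrate_batch_norm(model, data_loader, device, num_batches=100):
    """Illustrative sketch: re-estimate BatchNorm running stats (e.g. after switching width).
    Assumes the model's forward accepts the batches produced by data_loader as-is."""
    for module in model.modules():
        if isinstance(module, nn.modules.batchnorm._BatchNorm):
            module.reset_running_stats()
            module.momentum = None  # accumulate stats as a cumulative moving average
    was_training = model.training
    model.train()
    with torch.no_grad():
        for batch_idx, (inputs, _) in enumerate(data_loader):
            if batch_idx >= num_batches:
                break
            model(inputs.to(device) if torch.is_tensor(inputs) else [x.to(device) for x in inputs])
    model.train(was_training)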

def main(args):
    if args.apex:
        if sys.version_info < (3, 0):
            raise RuntimeError('Apex currently only supports Python 3. Aborting.')
        if amp is None:
            raise RuntimeError('Failed to import apex. Please install apex from '
                               'https://www.github.com/nvidia/apex to enable mixed-precision training.')

    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    print(args)
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True

    config = yaml_util.load_yaml_file(args.config)
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    dataset_config = config['dataset']
    input_shape = config['input_shape']
    train_config = config['train']
    test_config = config['test']
    train_data_loader, val_data_loader, test_data_loader = \
        dataset_util.get_data_loaders(dataset_config, batch_size=train_config['batch_size'],
                                      rough_size=train_config['rough_size'], reshape_size=input_shape[1:3],
                                      jpeg_quality=-1, test_batch_size=test_config['batch_size'],
                                      distributed=distributed)
    teacher_model_config = config['teacher_model']
    teacher_model, teacher_model_type = mimic_util.get_org_model(teacher_model_config, device)
    module_util.freeze_module_params(teacher_model)
    student_model = mimic_util.get_mimic_model_easily(config, device)
    student_model_config = config['mimic_model']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    use_apex = args.apex
    if use_apex:
        student_model, optimizer = amp.initialize(student_model, optimizer, opt_level=args.apex_opt_level)

    if distributed:
        teacher_model = DataParallel(teacher_model, device_ids=device_ids)
        student_model = DistributedDataParallel(student_model, device_ids=device_ids)

    start_epoch = args.start_epoch
    if not args.test_only:
        distill(teacher_model, student_model, train_data_loader, val_data_loader, device, distributed,
                start_epoch, config, args)
        student_model_without_ddp = \
            student_model.module if isinstance(student_model, DistributedDataParallel) else student_model
        load_ckpt(student_model_config['ckpt'], model=student_model_without_ddp, strict=True)

    if not args.student_only:
        evaluate(teacher_model, test_data_loader, device, title='[Teacher: {}]'.format(teacher_model_type))
    evaluate(student_model, test_data_loader, device, title='[Student: {}]'.format(student_model_config['type']))
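
# The `args` namespace consumed by main() touches a specific set of attributes (config, device,
# world_size, dist_url, start_epoch, apex, apex_opt_level, test_only, student_only). A minimal
# argparse setup covering them could look like the following; only the attribute names are taken
# from the code above, while default values and help strings are assumptions.
import argparse


def get_argparser():
    parser = argparse.ArgumentParser(description='Mimic/knowledge distillation runner')
    parser.add_argument('--config', required=True, help='yaml config file path')
    parser.add_argument('--device', default='cuda', help='device to use')
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--start_epoch', default=0, type=int, help='epoch to resume from')
    parser.add_argument('--apex', action='store_true', help='use NVIDIA Apex mixed-precision training')
    parser.add_argument('--apex_opt_level', default='O1', help='apex opt level (O0/O1/O2/O3)')
    parser.add_argument('--test_only', action='store_true', help='skip training and only evaluate')
    parser.add_argument('--student_only', action='store_true', help='evaluate the student model only')
    return parser


if __name__ == '__main__':
    main(get_argparser().parse_args())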