def main(args):
    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    config = yaml_util.load_yaml_file(args.config)
    if args.json is not None:
        main_util.overwrite_config(config, args.json)

    device = torch.device(args.device)
    print(args)
    print('Loading data')
    train_config = config['train']
    train_sampler, train_data_loader, val_data_loader, test_data_loader =\
        data_util.get_coco_data_loaders(config['dataset'], train_config['batch_size'], distributed)
    print('Creating model')
    model_config = config['model']
    model = get_model(model_config, device)
    print('Model created')
    if distributed:
        model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)

    if args.train:
        print('Start training')
        start_time = time.time()
        train(model, train_sampler, train_data_loader, val_data_loader, device, distributed,
              config, args, model_config['ckpt'])
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('Training time {}'.format(total_time_str))

    main_util.evaluate(model, test_data_loader, device=device)
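# A minimal sketch of the argument parser this entry point assumes. The flag
# names below are inferred from the `args` attributes referenced in main()
# above; the actual parser (defaults, help strings) may differ.
import argparse

def get_argparser():
    parser = argparse.ArgumentParser(description='Detector training and evaluation')
    parser.add_argument('--config', required=True, help='YAML config file path')
    parser.add_argument('--json', help='JSON string to overwrite config entries')
    parser.add_argument('--device', default='cuda', help='device for training/testing')
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='URL used to set up distributed training')
    parser.add_argument('--train', action='store_true', help='train the model before evaluation')
    return parser


if __name__ == '__main__':
    main(get_argparser().parse_args())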
def run(args):
    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    if device.type == 'cuda':
        cudnn.benchmark = True

    print(args)
    config = yaml_util.load_yaml_file(args.config)
    input_shape = config['input_shape']
    ckpt_file_path = config['autoencoder']['ckpt']
    train_loader, valid_loader, test_loader = main_util.get_data_loaders(config, distributed)
    if not args.test_only:
        train(train_loader, valid_loader, input_shape, config, device, distributed, device_ids)

    autoencoder, _ = ae_util.get_autoencoder(config, device)
    resume_from_ckpt(ckpt_file_path, autoencoder)
    extended_model, model = ae_util.get_extended_model(autoencoder, config, input_shape, device)
    if not args.extended_only:
        if device.type == 'cuda':
            model = DistributedDataParallel(model, device_ids=device_ids) if distributed\
                else DataParallel(model)
        evaluate(model, test_loader, device, title='[Original model]')

    if device.type == 'cuda':
        extended_model = DistributedDataParallel(extended_model, device_ids=device_ids) if distributed\
            else DataParallel(extended_model)
    evaluate(extended_model, test_loader, device, title='[Mimic model]')
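# resume_from_ckpt() is called above but not defined in this file. A minimal
# sketch, assuming the autoencoder checkpoint stores a raw state_dict; the
# real helper may also restore optimizer state and training metadata.
import os
import torch

def resume_from_ckpt(ckpt_file_path, autoencoder):
    if os.path.exists(ckpt_file_path):
        state_dict = torch.load(ckpt_file_path, map_location='cpu')
        autoencoder.load_state_dict(state_dict)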
def main(args):
    config = yaml_util.load_yaml_file(args.config)
    if args.json is not None:
        main_util.overwrite_config(config, args.json)

    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    teacher_model = get_model(config['teacher_model'], device)
    module_util.freeze_module_params(teacher_model)
    student_model_config = config['student_model']
    student_model = get_model(student_model_config, device)
    freeze_modules(student_model, student_model_config)
    print('Updatable parameters: {}'.format(module_util.get_updatable_param_names(student_model)))
    distill_backbone_only = student_model_config['distill_backbone_only']
    train_config = config['train']
    train_sampler, train_data_loader, val_data_loader, test_data_loader =\
        data_util.get_coco_data_loaders(config['dataset'], train_config['batch_size'], distributed)
    if distributed:
        teacher_model = DataParallel(teacher_model, device_ids=device_ids)
        student_model = DistributedDataParallel(student_model, device_ids=device_ids)

    if args.distill:
        distill(teacher_model, student_model, train_sampler, train_data_loader, val_data_loader,
                device, distributed, distill_backbone_only, config, args)

    load_ckpt(config['student_model']['ckpt'],
              model=student_model.module if isinstance(student_model, DistributedDataParallel)
              else student_model)
    evaluate(teacher_model, student_model, test_data_loader, device,
             args.skip_teacher_eval, args.transform_bottleneck)
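# freeze_modules() is defined elsewhere in the repo. A plausible sketch,
# assuming the student config lists module paths to freeze under a
# 'frozen_modules' key and that module_util exposes a get_module() lookup
# helper; both names are assumptions for illustration only.
def freeze_modules(student_model, student_model_config):
    for module_path in student_model_config.get('frozen_modules', []):
        target_module = module_util.get_module(student_model, module_path)
        module_util.freeze_module_params(target_module)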
def run(args):
    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if device.type == 'cuda':
        cudnn.benchmark = True

    print(args)
    config = yaml_util.load_yaml_file(args.config)
    train_loader, valid_loader, test_loader = main_util.get_data_loaders(config, distributed)
    if 'mimic_model' in config:
        model = mimic_util.get_mimic_model_easily(config, device)
        model_config = config['mimic_model']
    else:
        model = module_util.get_model(config, device)
        model_config = config['model']

    model_type, best_valid_acc, start_epoch, ckpt_file_path =\
        module_util.resume_from_ckpt(model, model_config, args.init)
    train_config = config['train']
    criterion_config = train_config['criterion']
    criterion = func_util.get_loss(criterion_config['type'], criterion_config['params'])
    if not args.evaluate:
        train(model, train_loader, valid_loader, best_valid_acc, criterion, device, distributed,
              device_ids, train_config, args.epoch, start_epoch, args.lr, ckpt_file_path, model_type)
    test(model, test_loader, device)
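# func_util.get_loss() resolves the training criterion by name from the YAML
# config. A minimal sketch, assuming the type names a class in torch.nn
# (e.g. 'CrossEntropyLoss'); the real helper may also register custom losses.
from torch import nn

def get_loss(loss_type, params):
    # e.g. get_loss('CrossEntropyLoss', {'reduction': 'mean'})
    return getattr(nn, loss_type)(**params)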
def main(args):
    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    config = yaml_util.load_yaml_file(args.config)
    if args.json is not None:
        main_util.overwrite_config(config, args.json)

    device = torch.device(args.device)
    print(args)
    print('Loading data')
    train_config = config['train']
    train_sampler, train_data_loader, val_data_loader, test_data_loader =\
        data_util.get_coco_data_loaders(config['dataset'], train_config['batch_size'], distributed)
    print('Creating model')
    model_config = config['model']
    model = get_model(model_config, device, strict=False)
    module_util.freeze_module_params(model)
    ext_classifier = model.get_ext_classifier()
    module_util.unfreeze_module_params(ext_classifier)
    print('Updatable parameters: {}'.format(module_util.get_updatable_param_names(model)))
    model.train_ext()
    if distributed:
        model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)

    # Resolved unconditionally: the checkpoint path is also needed for the
    # load_ckpt() call below when training is skipped
    ckpt_file_path = model_config['backbone']['ext_config']['ckpt']
    if args.train:
        print('Start training')
        start_time = time.time()
        train(model, ext_classifier, train_sampler, train_data_loader, val_data_loader,
              device, distributed, config, args, ckpt_file_path)
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('Training time {}'.format(total_time_str))

    load_ckpt(ckpt_file_path, model=ext_classifier)
    evaluate(model, test_data_loader, device=device, min_recall=args.min_recall, split_name='Test')
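# freeze_module_params()/unfreeze_module_params() toggle gradient tracking so
# that only the external classifier is updated above. A minimal sketch of the
# module_util helpers, consistent with how they are called in this file.
def freeze_module_params(module):
    for param in module.parameters():
        param.requires_grad = False

def unfreeze_module_params(module):
    for param in module.parameters():
        param.requires_grad = True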
def run(args):
    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    if torch.cuda.is_available():
        cudnn.benchmark = True

    print(args)
    config = yaml_util.load_yaml_file(args.config)
    dataset_config = config['dataset']
    input_shape = config['input_shape']
    train_config = config['train']
    test_config = config['test']
    train_loader, valid_loader, test_loader =\
        dataset_util.get_data_loaders(dataset_config, batch_size=train_config['batch_size'],
                                      rough_size=train_config['rough_size'],
                                      reshape_size=input_shape[1:3],
                                      test_batch_size=test_config['batch_size'],
                                      jpeg_quality=-1, distributed=distributed)
    teacher_model_config = config['teacher_model']
    if not args.test_only:
        distill(train_loader, valid_loader, input_shape, args.aux, config,
                device, distributed, device_ids)

    org_model, teacher_model_type = mimic_util.get_org_model(teacher_model_config, device)
    if not args.student_only:
        if distributed:
            org_model = DataParallel(org_model, device_ids=device_ids)
        evaluate(org_model, test_loader, device, title='[Original model]')

    mimic_model = mimic_util.get_mimic_model(config, org_model, teacher_model_type,
                                             teacher_model_config, device)
    mimic_model_without_dp =\
        mimic_model.module if isinstance(mimic_model, DataParallel) else mimic_model
    file_util.save_pickle(mimic_model_without_dp, config['mimic_model']['ckpt'])
    if distributed:
        mimic_model = DistributedDataParallel(mimic_model_without_dp, device_ids=device_ids)
    evaluate(mimic_model, test_loader, device, title='[Mimic model]')
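# file_util.save_pickle() persists the unwrapped mimic model above. A minimal
# sketch, assuming it creates parent directories before pickling; the real
# helper lives elsewhere in the repo.
import os
import pickle

def save_pickle(obj, file_path):
    dir_path = os.path.dirname(file_path)
    if dir_path:
        os.makedirs(dir_path, exist_ok=True)
    with open(file_path, 'wb') as fp:
        pickle.dump(obj, fp)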
def main(args):
    config = yaml_util.load_yaml_file(args.config)
    if args.json is not None:
        main_util.overwrite_config(config, args.json)

    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    teacher_model = get_model(config['teacher_model'], device)
    module_util.freeze_module_params(teacher_model)
    student_model_config = config['student_model']
    student_model = get_model(student_model_config, device)
    freeze_modules(student_model, student_model_config)
    ckpt_file_path = config['student_model']['ckpt']
    train_config = config['train']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'],
                                           scheduler_config['params'])
    # Default to 0.0 so save_ckpt() below does not hit a NameError when no
    # checkpoint exists yet
    best_val_map = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)
    save_ckpt(student_model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)
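# Sketch of the checkpoint contract implied by the call sites in this file:
# load_ckpt() restores whichever components are passed and returns
# (best_val_map, config, args); save_ckpt() writes them all. The dictionary
# keys below are assumptions for illustration.
import torch

def load_ckpt(ckpt_file_path, model=None, optimizer=None, lr_scheduler=None, strict=True):
    ckpt = torch.load(ckpt_file_path, map_location='cpu')
    if model is not None:
        model.load_state_dict(ckpt['model'], strict=strict)
    if optimizer is not None:
        optimizer.load_state_dict(ckpt['optimizer'])
    if lr_scheduler is not None:
        lr_scheduler.load_state_dict(ckpt['lr_scheduler'])
    return ckpt.get('best_val_map', 0.0), ckpt.get('config'), ckpt.get('args')

def save_ckpt(model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path):
    torch.save({'model': model.state_dict(), 'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(), 'best_val_map': best_val_map,
                'config': config, 'args': args}, ckpt_file_path)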
def main(args):
    if args.apex:
        if sys.version_info < (3, 0):
            raise RuntimeError('Apex currently only supports Python 3. Aborting.')
        if amp is None:
            raise RuntimeError('Failed to import apex. Please install apex from '
                               'https://www.github.com/nvidia/apex '
                               'to enable mixed-precision training.')

    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    print(args)
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True

    config = yaml_util.load_yaml_file(args.config)
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    dataset_config = config['dataset']
    input_shape = config['input_shape']
    train_config = config['train']
    test_config = config['test']
    train_data_loader, val_data_loader, test_data_loader =\
        dataset_util.get_data_loaders(dataset_config, batch_size=train_config['batch_size'],
                                      rough_size=train_config['rough_size'],
                                      reshape_size=input_shape[1:3], jpeg_quality=-1,
                                      test_batch_size=test_config['batch_size'],
                                      distributed=distributed)
    teacher_model_config = config['teacher_model']
    teacher_model, teacher_model_type = mimic_util.get_org_model(teacher_model_config, device)
    module_util.freeze_module_params(teacher_model)
    student_model = mimic_util.get_mimic_model_easily(config, device)
    student_model_config = config['mimic_model']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    use_apex = args.apex
    if use_apex:
        student_model, optimizer = amp.initialize(student_model, optimizer,
                                                  opt_level=args.apex_opt_level)

    if distributed:
        teacher_model = DataParallel(teacher_model, device_ids=device_ids)
        student_model = DistributedDataParallel(student_model, device_ids=device_ids)

    start_epoch = args.start_epoch
    if not args.test_only:
        distill(teacher_model, student_model, train_data_loader, val_data_loader, device,
                distributed, start_epoch, config, args)

    student_model_without_ddp =\
        student_model.module if isinstance(student_model, DistributedDataParallel) else student_model
    load_ckpt(student_model_config['ckpt'], model=student_model_without_ddp, strict=True)
    if not args.student_only:
        evaluate(teacher_model, test_data_loader, device,
                 title='[Teacher: {}]'.format(teacher_model_type))
    evaluate(student_model, test_data_loader, device,
             title='[Student: {}]'.format(student_model_config['type']))
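# The module-level `amp` checked in main() above is expected to come from a
# guarded import, as in torchvision's reference training scripts:
try:
    from apex import amp
except ImportError:
    amp = None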