def load_checkpoint(model, model_path, device_name, optimizer=None, compression_scheduler=None):
    """Loads the model state from the specified checkpoint file.

    Keyword arguments:
    - model (``nn.Module``): The stored model state is copied to this model instance.
    - model_path: The checkpoint filename.
    - device_name: Device name for the model to be loaded into.
    - optimizer (``torch.optim``): The stored optimizer state is copied to this optimizer instance.
    - compression_scheduler: The compression scheduler for the saved state to be loaded into.

    Returns:
    The ``model``, ``optimizer``, epoch, mean IoU and ``compression_scheduler``,
    loaded from the checkpoint.
    """
    assert os.path.isfile(
        model_path), "The model file \"{0}\" doesn't exist.".format(model_path)

    # Load the stored model parameters to the model instance
    checkpoint = torch.load(model_path, map_location=device_name)
    load_state(model, checkpoint['state_dict'], is_resume=True)
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer'])
    epoch = checkpoint['epoch']
    miou = checkpoint['miou']
    if "scheduler" in checkpoint and compression_scheduler is not None:
        compression_scheduler.load_state_dict(checkpoint['scheduler'])

    return model, optimizer, epoch, miou, compression_scheduler

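# A minimal usage sketch for load_checkpoint (hypothetical: `model`, `optimizer` and
# `compression_algo` are the instances built by the training script, and
# 'checkpoint.pth' is a path saved by that script; only the signature above is assumed):
#
#     model, optimizer, epoch, miou, scheduler = load_checkpoint(
#         model, 'checkpoint.pth', 'cuda',
#         optimizer=optimizer,
#         compression_scheduler=compression_algo.scheduler)
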
def test_load_state_interoperability(_algos, _models, is_resume):
    config_save = get_empty_config()
    config_save['compression'] = [{'algorithm': algo, 'params': {}} for algo in _algos['save_algos']]
    algo_save = create_test_compression_algo(config_save, _models['save_model'])
    model_save = algo_save.model
    saved_model_state = model_save.state_dict()
    ref_num_loaded = len(saved_model_state)

    config_resume = get_empty_config()
    config_resume['compression'] = [{'algorithm': algo, 'params': {}} for algo in _algos['load_algos']]
    algo_resume = create_test_compression_algo(config_resume, _models['resume_model'])
    model_resume = algo_resume.model

    if not is_resume or _algos['is_resume_ok']:
        act_num_loaded = load_state(model_resume, saved_model_state, is_resume)

        if ('magnitude_sparsity' in _algos['load_algos'] or 'const_sparsity' in _algos['load_algos']) \
                and 'rb_sparsity' in _algos['save_algos']:
            # No need to load _mask and _uniform
            ref_num_loaded -= 2
        assert act_num_loaded == ref_num_loaded
    else:
        with pytest.raises(RuntimeError):
            load_state(model_resume, saved_model_state, is_resume)

def test_can_restore_binary_mask_on_magnitude_quant_algo_resume():
    config = get_empty_config()
    config["compression"] = [
        {"algorithm": "magnitude_sparsity", "weight_importance": "abs",
         "params": {"schedule": "multistep", "sparsity_levels": [0.3, 0.5]}},
        {"algorithm": "quantization"}]
    reset_context('orig')
    reset_context('quantized_graphs')
    magnitude_quant_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    # load_state doesn't support CPU + quantization
    sparse_model = torch.nn.DataParallel(magnitude_quant_algo.model)
    sparse_model.cuda()
    with torch.no_grad():
        sparse_model(torch.ones([1, 1, 10, 10]))

    reset_context('orig')
    reset_context('quantized_graphs')
    config = get_empty_config()
    config["compression"] = [{"algorithm": "const_sparsity"}, {"algorithm": "quantization"}]
    const_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    const_sparse_model = const_algo.model

    load_state(const_sparse_model, sparse_model.state_dict())

    op = const_sparse_model.module.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)

    op = const_sparse_model.module.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)

def create_model(config):
    ssd_net = build_ssd(config.model, config.ssd_params, config.input_sample_size[-1],
                        config.num_classes, config)
    ssd_net.to(config.device)
    compression_algo = create_compression_algorithm(ssd_net, config)
    ssd_net = compression_algo.model

    weights = config.get('weights')
    if weights:
        sd = torch.load(weights, map_location='cpu')
        load_state(ssd_net, sd)

    ssd_net.train()
    model, _ = prepare_model_for_execution(ssd_net, config)
    return compression_algo, model

def load_torch_model(config, cuda=False):
    weights = config.get('weights')
    model = load_model(config.model,
                       pretrained=config.get('pretrained', True) if weights is None else False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params', {}))
    compression_algo, model = create_compressed_model(model, config)
    if weights:
        sd = torch.load(weights, map_location='cpu')
        load_state(model, sd)
    if cuda:
        model = model.cuda()
        model = torch.nn.DataParallel(model)
    print_statistics(compression_algo.statistics())
    return model

def build_ssd_mobilenet(cfg, size, num_classes, config):
    if size != 300:
        raise ValueError("Only MobileNet-SSD with input size 300 is supported")
    mobilenet_ssd = MobileNetSSD(num_classes, cfg)

    if config.basenet:
        print('Loading base network...')
        basenet_weights = torch.load(config.basenet)['state_dict']
        # Strip the 'model.' prefix so the keys match the bare basenet module
        new_weights = {}
        for wn, wv in basenet_weights.items():
            wn = wn.replace('model.', '')
            new_weights[wn] = wv
        load_state(mobilenet_ssd.basenet, new_weights, strict=False)
    return mobilenet_ssd

def build_ssd_vgg(cfg, size, num_classes, config):
    ssd_vgg = SSD_VGG(cfg, size, num_classes, batch_norm=config.get('batchnorm', False))
    print('Initializing weights...')
    # ssd_vgg.apply(weights_init)

    if config.basenet:
        print('Loading base network...')
        basenet_weights = torch.load(config.basenet)
        # Strip the 'features.' prefix so the keys match the bare basenet module
        new_weights = {}
        for wn, wv in basenet_weights.items():
            wn = wn.replace('features.', '')
            new_weights[wn] = wv
        load_state(ssd_vgg.basenet, new_weights, strict=False)
    return ssd_vgg

def resume_from_checkpoint(resuming_checkpoint, model, config, optimizer, compression_algo):
    global best_acc1
    if osp.isfile(resuming_checkpoint):
        print("=> loading checkpoint '{}'".format(resuming_checkpoint))
        checkpoint = torch.load(resuming_checkpoint, map_location='cpu')
        load_state(model, checkpoint['state_dict'], is_resume=True)
        if config.mode.lower() == 'train' and config.to_onnx is None:
            config.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            compression_algo.scheduler.load_state_dict(checkpoint['scheduler'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch: {}, best_acc1: {:.3f})"
                  .format(resuming_checkpoint, checkpoint['epoch'], best_acc1))
        else:
            print("=> loaded checkpoint '{}'".format(resuming_checkpoint))
    else:
        raise FileNotFoundError(
            "no checkpoint found at '{}'".format(resuming_checkpoint))
    return model, config, optimizer, compression_algo

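# The save side implied by this resume logic, as a hedged sketch: the key names are
# inferred from the reads above, and the actual saving code is not shown in this file.
#
#     torch.save({'epoch': epoch,
#                 'best_acc1': best_acc1,
#                 'state_dict': model.state_dict(),
#                 'scheduler': compression_algo.scheduler.state_dict(),
#                 'optimizer': optimizer.state_dict()}, checkpoint_path)
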
def test_can_restore_binary_mask_on_magnitude_algo_resume():
    config = get_empty_config()
    config['compression'] = {"algorithm": "magnitude_sparsity", "weight_importance": "abs",
                             "params": {"schedule": "multistep", "sparsity_levels": [0.3, 0.5]}}
    magnitude_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    sparse_model = magnitude_algo.model
    with torch.no_grad():
        sparse_model(torch.ones([1, 1, 10, 10]))

    config = get_empty_config()
    config["compression"] = {"algorithm": "const_sparsity"}
    const_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    const_sparse_model = const_algo.model

    load_state(const_sparse_model, sparse_model.state_dict())

    op = const_sparse_model.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)

    op = const_sparse_model.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)

def test_load_state_skips_not_matched_params__from_smaller_to_larger():
    ref_weights = torch.tensor([[[[3, 2], [2, 3]]]])
    ref_bias = torch.tensor([2.])
    model_save = BasicConvTestModel(out_channels=2)
    model_load = BasicConvTestModel(out_channels=1, weight_init=2, bias_init=2)

    num_loaded = load_state(model_load, model_save.state_dict())

    assert num_loaded == 0
    act_bias = model_load.conv.bias.data
    act_weights = model_load.conv.weight.data
    check_equal(act_bias, ref_bias)
    check_equal(act_weights, ref_weights)

def test_load_state_skips_not_matched_params__from_larger_to_smaller():
    ref_weights = BasicConvTestModel.default_weight()
    ref_bias = BasicConvTestModel.default_bias()
    model_save = BasicConvTestModel(out_channels=1, weight_init=2, bias_init=2)
    model_load = BasicConvTestModel(out_channels=2)

    num_loaded = load_state(model_load, model_save.state_dict())

    assert num_loaded == 0
    act_bias = model_load.conv.bias.data
    act_weights = model_load.conv.weight.data
    check_equal(act_bias, ref_bias)
    check_equal(act_weights, ref_weights)

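# Together, the two tests above pin down load_state's matching rule: a parameter is
# copied only when both its key and its shape match the destination model; anything
# else is silently skipped, so the destination keeps its initial values. A minimal
# sketch of that rule (hypothetical helper, not the actual load_state implementation):
def _load_state_sketch(model, state_dict):
    own_state = model.state_dict()
    num_loaded = 0
    for name, saved in state_dict.items():
        # Copy only on an exact key and shape match; skip silently otherwise.
        if name in own_state and own_state[name].shape == saved.shape:
            own_state[name].copy_(saved)
            num_loaded += 1
    return num_loaded
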
def test_ordinary_load(algo, _models, is_resume):
    config = get_empty_config()
    if algo:
        config['compression'] = {'algorithm': algo, 'params': {}}

    algo_save = create_test_compression_algo(config, _models['save_model'])
    model_save = algo_save.model

    algo_resume = create_test_compression_algo(config, _models['resume_model'])
    model_resume = algo_resume.model

    num_loaded = load_state(model_resume, model_save.state_dict(), is_resume)

    assert num_loaded == len(model_save.state_dict())

def main_worker(current_gpu, config):
    config.current_gpu = current_gpu
    config.distributed = config.execution_mode in (ExecutionMode.DISTRIBUTED,
                                                   ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    if config.distributed:
        configure_distributed(config)

    if is_main_process():
        configure_logging(config)
        print_args(config)

    print(config)

    config.device = get_device(config)
    dataset = get_dataset(config.dataset)
    color_encoding = dataset.color_encoding
    num_classes = len(color_encoding)

    weights = config.get('weights')
    model = load_model(config.model,
                       pretrained=config.get('pretrained', True) if weights is None else False,
                       num_classes=num_classes,
                       model_params=config.get('model_params', {}))
    compression_algo, model = create_compressed_model(model, config)
    if weights:
        sd = torch.load(weights, map_location='cpu')
        load_state(model, sd)

    model, model_without_dp = prepare_model_for_execution(model, config)

    if config.distributed:
        compression_algo.distributed()

    resuming_checkpoint = config.resuming_checkpoint

    if resuming_checkpoint is not None:
        if not config.pretrained:
            # Load the previously saved model state
            model, _, _, _, _ = \
                load_checkpoint(model, resuming_checkpoint, config.device,
                                compression_scheduler=compression_algo.scheduler)

    if config.to_onnx is not None:
        compression_algo.export_model(config.to_onnx)
        print("Saved to", config.to_onnx)
        return

    if config.mode.lower() == 'test':
        print(model)
        model_parameters = filter(lambda p: p.requires_grad, model.parameters())
        params = sum([np.prod(p.size()) for p in model_parameters])
        print("Trainable parameter count: {params}".format(params=params))

        model = model.to(config.device)
        loaders, w_class = load_dataset(dataset, config)
        _, val_loader = loaders
        test(model, val_loader, w_class, color_encoding, config)
        print_statistics(compression_algo.statistics())
    elif config.mode.lower() == 'train':
        loaders, w_class = load_dataset(dataset, config)
        train_loader, val_loader = loaders
        if not resuming_checkpoint:
            compression_algo.initialize(train_loader)
        model = \
            train(model, model_without_dp, compression_algo, train_loader, val_loader,
                  w_class, color_encoding, config)
    else:
        # Should never happen...but just in case it does
        raise RuntimeError(
            "\"{0}\" is not a valid choice for execution mode.".format(config.mode))

def main_worker(current_gpu, config):
    #################################
    # Setup experiment environment
    #################################
    config.current_gpu = current_gpu
    config.distributed = config.execution_mode in (
        ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    if config.distributed:
        configure_distributed(config)
    if is_on_first_rank(config):
        configure_logging(config)
        print_args(config)
    config.device = get_device(config)
    config.start_iter = 0

    ##################
    # Prepare model
    ##################
    compression_algo, net = create_model(config)
    if config.distributed:
        config.batch_size //= config.ngpus_per_node
        config.workers //= config.ngpus_per_node
        compression_algo.distributed()

    ###########################
    # Criterion and optimizer
    ###########################
    params_to_optimize = get_parameter_groups(net, config)
    optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)

    criterion = MultiBoxLoss(
        config,
        config['num_classes'],
        overlap_thresh=0.5,
        prior_for_matching=True,
        bkg_label=0,
        neg_mining=True,
        neg_pos=3,
        neg_overlap=0.5,
        encode_target=False,
        device=config.device
    )

    ###########################
    # Prepare data
    ###########################
    test_data_loader, train_data_loader = create_dataloaders(config)

    ###########################
    # Load checkpoint
    ###########################
    resuming_checkpoint = config.resuming_checkpoint
    if resuming_checkpoint:
        print('Resuming training, loading {}...'.format(resuming_checkpoint))
        checkpoint = torch.load(resuming_checkpoint, map_location='cpu')
        # Use the checkpoint itself if only a state dict was saved,
        # i.e. the checkpoint was created with `torch.save(module.state_dict())`
        state_dict = checkpoint.get('state_dict', checkpoint)
        load_state(net, state_dict, is_resume=True, strict=True)
        if config.mode.lower() == 'train' and config.to_onnx is None:
            compression_algo.scheduler.load_state_dict(checkpoint['scheduler'])
            optimizer.load_state_dict(
                checkpoint.get('optimizer', optimizer.state_dict()))
            config.start_iter = checkpoint.get('iter', 0) + 1

    if config.to_onnx:
        compression_algo.export_model(config.to_onnx)
        print("Saved to {}".format(config.to_onnx))
        return

    if config.mode.lower() == 'test':
        with torch.no_grad():
            print_statistics(compression_algo.statistics())
            net.eval()
            test_net(net, config.device, test_data_loader, distributed=config.distributed)
            return

    if not resuming_checkpoint:
        compression_algo.initialize(train_data_loader)
    train(net, compression_algo, train_data_loader, test_data_loader, criterion,
          optimizer, config, lr_scheduler)

def main_worker(current_gpu, config):
    global best_acc1
    config.current_gpu = current_gpu
    config.distributed = config.execution_mode in (
        ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    if config.distributed:
        configure_distributed(config)

    config.device = get_device(config)

    if is_main_process():
        configure_logging(config)
        print_args(config)

    if config.seed is not None:
        manual_seed(config.seed)
        cudnn.deterministic = True
        cudnn.benchmark = False

    # Create model
    model_name = config['model']
    weights = config.get('weights')
    model = load_model(model_name,
                       pretrained=config.get('pretrained', True) if weights is None else False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params'))
    compression_algo, model = create_compressed_model(model, config)
    if weights:
        load_state(model, torch.load(weights, map_location='cpu'))

    model, _ = prepare_model_for_execution(model, config)
    if config.distributed:
        compression_algo.distributed()

    is_inception = 'inception' in model_name

    # Define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(config.device)

    params_to_optimize = get_parameter_groups(model, config)
    optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)

    resuming_checkpoint = config.resuming_checkpoint
    # Optionally resume from a checkpoint
    if resuming_checkpoint is not None:
        model, config, optimizer, compression_algo = \
            resume_from_checkpoint(resuming_checkpoint, model, config,
                                   optimizer, compression_algo)

    if config.to_onnx is not None:
        compression_algo.export_model(config.to_onnx)
        print("Saved to", config.to_onnx)
        return

    if config.execution_mode != ExecutionMode.CPU_ONLY:
        cudnn.benchmark = True

    # Data loading code
    train_loader, train_sampler, val_loader = create_dataloaders(config)

    if config.mode.lower() == 'test':
        print_statistics(compression_algo.statistics())
        validate(val_loader, model, criterion, config)

    if config.mode.lower() == 'train':
        if not resuming_checkpoint:
            compression_algo.initialize(train_loader)
        train(config, compression_algo, model, criterion, is_inception, lr_scheduler,
              model_name, optimizer, train_loader, train_sampler, val_loader)