def load_from_metrics(cls, weights_path, tags_csv, on_gpu, map_location=None):
    """
    Primary way of loading model from csv weights path

    :param weights_path: Path to a PyTorch checkpoint
    :param tags_csv: Path to the meta_tags.csv file
    :param on_gpu: Whether to load the weights onto the GPU
    :param map_location: dict for mapping storage, e.g. {'cuda:1': 'cuda:0'}
    :return: The pretrained LightningModule
    """
    hparams = load_hparams_from_tags_csv(tags_csv)
    hparams.__setattr__('on_gpu', on_gpu)

    if on_gpu:
        if map_location is not None:
            checkpoint = torch.load(weights_path, map_location=map_location)
        else:
            checkpoint = torch.load(weights_path)
    else:
        checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)

    # load the state_dict on the model automatically
    model = cls(hparams)
    model.load_state_dict(checkpoint['state_dict'])

    # give model a chance to load something
    model.on_load_checkpoint(checkpoint)

    return model
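# Usage sketch for the variant above (MyLightningModule and the paths are
# hypothetical): remapping weights that were saved from cuda:1 onto cuda:0 via
# map_location, as the docstring describes.
pretrained_model = MyLightningModule.load_from_metrics(
    weights_path='/path/to/pytorch_checkpoint.ckpt',
    tags_csv='/path/to/meta_tags.csv',
    on_gpu=True,
    map_location={'cuda:1': 'cuda:0'}
)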
def load_from_metrics(cls, weights_path, tags_csv, on_gpu):
    """
    Primary way of loading model from csv weights path

    :param weights_path: Path to a PyTorch checkpoint
    :param tags_csv: Path to the meta_tags.csv file
    :param on_gpu: Whether the model will be run on a GPU
    :return: The pretrained LightningModule
    """
    hparams = load_hparams_from_tags_csv(tags_csv)
    hparams.__setattr__('on_gpu', on_gpu)

    # load on CPU only to avoid OOM issues
    # then it's up to the user to put the model back on the GPUs
    checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)

    # load the state_dict on the model automatically
    model = cls(hparams)
    model.load_state_dict(checkpoint['state_dict'])

    # give model a chance to load something
    model.on_load_checkpoint(checkpoint)

    return model
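# Usage sketch for this variant (MyLightningModule is hypothetical): the weights
# are always mapped to CPU inside load_from_metrics, so the caller is the one who
# moves the model back onto the GPU afterwards.
pretrained_model = MyLightningModule.load_from_metrics(
    weights_path='/path/to/pytorch_checkpoint.ckpt',
    tags_csv='/path/to/meta_tags.csv',
    on_gpu=True
)
if torch.cuda.is_available():
    pretrained_model.cuda()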
def load_from_metrics(cls, weights_path, tags_csv, map_location=None):
    """Primary way of loading a model from a csv weights path.

    :param str weights_path: Path to a PyTorch checkpoint
    :param str tags_csv: Path to the meta_tags.csv file generated by the test-tube Experiment
    :param dict map_location: A dictionary mapping saved weight GPU devices to new
        GPU devices, e.g. {'cuda:1': 'cuda:0'}
    :return: The pretrained LightningModule

    If you're using test-tube, this method rebuilds the model from the
    meta_tags.csv file generated by the test-tube Experiment. The meta_tags.csv
    file can be found in the test-tube experiment save_dir.

    .. code-block:: python

        pretrained_model = MyLightningModule.load_from_metrics(
            weights_path='/path/to/pytorch_checkpoint.ckpt',
            tags_csv='/path/to/test_tube/experiment/version/meta_tags.csv',
            map_location=None
        )

        # predict
        pretrained_model.eval()
        pretrained_model.freeze()
        y_hat = pretrained_model(x)

    Alternatively, `load_from_checkpoint` is the easiest/fastest way to load both
    hyperparameters and weights from a checkpoint, such as the one saved by the
    `ModelCheckpoint` callback:

    .. code-block:: python

        pretrained_model = MyLightningModule.load_from_checkpoint(
            checkpoint_path='/path/to/pytorch_checkpoint.ckpt'
        )

        # predict
        pretrained_model.eval()
        pretrained_model.freeze()
        y_hat = pretrained_model(x)
    """
    hparams = load_hparams_from_tags_csv(tags_csv)
    hparams.__setattr__('on_gpu', False)

    if map_location is not None:
        checkpoint = torch.load(weights_path, map_location=map_location)
    else:
        checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)

    # load the state_dict on the model automatically
    model = cls(hparams)
    model.load_state_dict(checkpoint['state_dict'])

    # give model a chance to load something
    model.on_load_checkpoint(checkpoint)

    return model
def test_loading_meta_tags():
    hparams = get_hparams()

    # save tags
    exp = get_exp(False)
    exp.tag({'some_str': 'a_str', 'an_int': 1, 'a_float': 2.0})
    exp.argparse(hparams)
    exp.save()

    # load tags
    tags_path = exp.get_data_path(exp.name, exp.version) + '/meta_tags.csv'
    tags = trainer_io.load_hparams_from_tags_csv(tags_path)

    assert tags.batch_size == 32 and tags.hidden_dim == 1000

    clear_save_dir()
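# For reference, a minimal sketch of what load_hparams_from_tags_csv might do
# (the real implementation lives in pytorch_lightning; the column names and the
# type coercion here are simplifying assumptions): it reads the key/value rows
# of meta_tags.csv and returns them as an argparse.Namespace, which is why the
# test above can assert on typed values like tags.batch_size == 32.
import csv
from argparse import Namespace


def load_hparams_from_tags_csv_sketch(tags_csv):
    tags = {}
    with open(tags_csv) as f:
        for row in csv.DictReader(f):  # columns assumed to be 'key' and 'value'
            value = row['value']
            # best-effort coercion of numeric strings back to int/float
            for cast in (int, float):
                try:
                    value = cast(value)
                    break
                except ValueError:
                    continue
            tags[row['key']] = value
    return Namespace(**tags)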
def load_from_metrics(cls, hparams, weights_path, tags_csv, on_gpu, map_location=None):
    # merge the saved hyperparameters with the ones passed in;
    # values passed in take precedence over the saved ones
    prev_hparams = load_hparams_from_tags_csv(tags_csv)
    prev_hparams.__dict__.update(hparams.__dict__)
    hparams.__dict__.update({k: v for k, v in prev_hparams.__dict__.items()
                             if k not in hparams.__dict__})
    hparams.__setattr__('on_gpu', on_gpu)

    if on_gpu:
        if map_location is not None:
            checkpoint = torch.load(weights_path, map_location=map_location)
        else:
            checkpoint = torch.load(weights_path)
    else:
        checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)

    with open(hparams.running_config_file, 'r') as f:
        running_config = yaml.safe_load(f)
    with open(hparams.task_config_file, 'r') as f:
        task_config = yaml.safe_load(f)

    # resolve per-task/per-model defaults from the running config
    default_parameter = partial(get_default_hyperparameter, config=running_config,
                                task_name=hparams.task_name, model_type=hparams.model_type,
                                model_weight=hparams.model_weight)

    hparams.max_nb_epochs = default_parameter(field='max_nb_epochs')
    hparams.learning_rate = float(default_parameter(field='lr'))
    hparams.initializer_range = float(default_parameter(field='initializer_range'))
    hparams.dropout = float(default_parameter(field='dropout'))
    hparams.batch_size = default_parameter(field='batch_size')
    hparams.max_seq_len = default_parameter(field='max_seq_len')
    hparams.seed = default_parameter(field='seed')
    hparams.weight_decay = float(default_parameter(field='weight_decay'))
    hparams.warmup_steps = default_parameter(field='warmup_steps')
    hparams.adam_epsilon = float(default_parameter(field='adam_epsilon'))
    hparams.accumulate_grad_batches = default_parameter(field='accumulate_grad_batches')
    hparams.do_lower_case = task_config[hparams.task_name].get('do_lower_case', False)

    # fall back to the model's type/weight when no tokenizer is given explicitly
    hparams.tokenizer_type = hparams.model_type if hparams.tokenizer_type is None \
        else hparams.tokenizer_type
    hparams.tokenizer_weight = hparams.model_weight if hparams.tokenizer_weight is None \
        else hparams.tokenizer_weight

    set_seed(hparams.seed)

    model = cls(hparams)
    model.load_state_dict(checkpoint['state_dict'])
    model.on_load_checkpoint(checkpoint)
    return model
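# A minimal sketch of the get_default_hyperparameter helper used above. Its
# actual implementation is not shown in this section; the config layout assumed
# here (global 'defaults' overridden by progressively more specific task/model
# sections) is purely illustrative.
def get_default_hyperparameter(config, task_name, model_type, model_weight, field):
    value = config['defaults'][field]
    # walk progressively more specific sections, keeping the last override found
    for section in (task_name, model_type, model_weight):
        overrides = config.get(section, {})
        if field in overrides:
            value = overrides[field]
    return value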
def test_loading_meta_tags(tmpdir):
    tutils.reset_seed()

    from argparse import Namespace

    hparams = tutils.get_hparams()

    # save tags
    logger = tutils.get_test_tube_logger(tmpdir, False)
    logger.log_hyperparams(Namespace(some_str='a_str', an_int=1, a_float=2.0))
    logger.log_hyperparams(hparams)
    logger.save()

    # load tags
    tags_path = logger.experiment.get_data_path(
        logger.experiment.name, logger.experiment.version
    ) + '/meta_tags.csv'
    tags = trainer_io.load_hparams_from_tags_csv(tags_path)

    assert tags.batch_size == 32 and tags.hidden_dim == 1000
import torch
import pickle

from immersions.cpc_system import ContrastivePredictiveSystem
from immersions.cpc_system_maestro import ContrastivePredictiveSystemMaestro
from immersions.input_optimization.activation_utilities import ActivationStatistics
from pytorch_lightning.trainer.trainer_io import load_hparams_from_tags_csv

weights_path = '/home/idivinci3005/experiments/checkpoints/immersions_scalogram_resnet_maestro_smaller/0/_ckpt_epoch_3.ckpt'
tags_path = '/home/idivinci3005/experiments/logs/immersions_scalogram_resnet_maestro_smaller/version_0/meta_tags.csv'
ranges_path = '/home/idivinci3005/pycharm_immersions/immersions/misc/immersions_scalogram_resnet_house_smaller_ranges.p'
# weights_path = '/home/idivinci3005/experiments/checkpoints/immersions_scalogram_resnet_house/0/_ckpt_epoch_8.ckpt'
# tags_path = '/home/idivinci3005/experiments/logs/immersions_scalogram_resnet_house/version_0/meta_tags.csv'
# ranges_path = '/home/idivinci3005/pycharm_immersions/immersions/misc/immersions_scalogram_resnet_house_ranges.p'

hparams = load_hparams_from_tags_csv(tags_path)
hparams.visible_steps = 76
hparams.__setattr__('on_gpu', torch.cuda.is_available())

# load on CPU only to avoid OOM issues
# then it's up to the user to put the model back on the GPUs
checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)

# load the state_dict on the model automatically
model = ContrastivePredictiveSystemMaestro(hparams)
model.load_state_dict(checkpoint['state_dict'], strict=False)

# give model a chance to load something
model.on_load_checkpoint(checkpoint)
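# A hedged continuation of the script above: putting the loaded model into
# inference mode follows the eval/freeze pattern shown in the load_from_metrics
# docstring earlier in this section; moving it onto the GPU when on_gpu is set
# is an assumption about how the script is meant to be used.
model.eval()
model.freeze()
if hparams.on_gpu:
    model.cuda()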