def new_fresh_model(args, DEVICE):
    """
    Reload a new, fresh model. Useful to restart training with certainty
    that the model is in the exact same state as at t=0.

    :param args: object. Parsed configuration.
    :param DEVICE: torch device.
    :return: the instantiated model, moved to DEVICE.
    """
    ALLOW_MULTIGPUS = check_if_allow_multgpu_mode()
    NBRGPUS = torch.cuda.device_count()

    # ========================= Instantiate models =========================
    model = instantiate_models(args)

    # Check if we are using a user-specific pre-trained model other than our
    # pre-defined pre-trained models. This can be used to EVALUATE a trained
    # model. You need to set args.max_epochs to -1 so no training is
    # performed. This is a hack to avoid creating another function to deal
    # with LATER-evaluation after this code is done. This script is intended
    # for training; we evaluate at the end. However, if you missed something
    # during the training/evaluation (for example, plotting something over
    # the predicted images), you do not need to re-train the model. You can:
    # 1. specify the path to the pre-trained model; 2. set max_epochs to -1;
    # 3. set strict to True. By doing this, we load the pre-trained model,
    # skip the training loop, and fast-forward to the evaluation.
    if args.model['path_pre_trained'] not in [None, 'None']:
        warnings.warn("You have asked to load a specific pre-trained "
                      "model from {} .... [OK]".format(
                          args.model['path_pre_trained']))
        model = load_pre_pretrained_model(
            model=model,
            path_file=args.model['path_pre_trained'],
            strict=args.model['strict'],
            freeze_classifier=args.freeze_classifier
        )

    # Check if there are multiple GPUs.
    if ALLOW_MULTIGPUS:
        model = MyDataParallel(model)
        if args.batch_size < NBRGPUS:
            warnings.warn("You asked for MULTIGPU mode. However, your batch "
                          "size {} is smaller than the number of available "
                          "GPUs {}. This is fine in practice; however, some "
                          "GPUs will be idle. This is just a warning .... "
                          "[OK]".format(args.batch_size, NBRGPUS))

    model.to(DEVICE)

    # Freeze the classifier if needed.
    if args.freeze_classifier:
        warnings.warn("You asked to freeze the classifier. "
                      "We are going to do it right now.")
        model.freeze_cl()
        assert model.assert_cl_is_frozen(), "Something is wrong"
    # =======================================================================
    return model
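
# A hedged usage sketch (not part of the original pipeline): how one might
# drive `new_fresh_model` in the evaluation-only mode described in the comment
# block inside the function. The helper itself and the checkpoint path are
# hypothetical; they only mirror the config fields the function actually reads
# (args.model['path_pre_trained'], args.model['strict'], args.max_epochs).
def _example_eval_only_setup(args, DEVICE):
    """Illustrative only: configure `args` to skip training, load a
    user-provided checkpoint, and rebuild the model for evaluation."""
    args.model['path_pre_trained'] = '/path/to/checkpoint.pt'  # hypothetical path.
    args.model['strict'] = True  # load the saved state dict strictly.
    args.max_epochs = -1         # -1 => the training loop is skipped.
    model = new_fresh_model(args, DEVICE)
    model.eval()                 # switch to evaluation mode for LATER-evaluation.
    return model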
reproducibility.force_seed(int(os.environ["MYSEED"]))
valid_loader = DataLoader(
    validset,
    batch_size=1,
    shuffle=False,
    num_workers=args.num_workers * FACTOR_MUL_WORKERS,
    pin_memory=True,
    collate_fn=default_collate,
    worker_init_fn=_init_fn
)  # We need more workers since the batch size is 1 and set_for_eval is False
# (samples take more time to prepare).
reproducibility.force_seed(int(os.environ["MYSEED"]))

# ############################ Instantiate models ############################
reproducibility.force_seed(int(os.environ["MYSEED"]))
model = instantiate_models(args)

# Check if we are using a user-specific pre-trained model other than our
# pre-defined pre-trained models. This can be used to EVALUATE a trained
# model. You need to set args.max_epochs to -1 so no training is performed.
# This is a hack to avoid creating another function to deal with
# LATER-evaluation after this code is done. This script is intended for
# training; we evaluate at the end. However, if you missed something during
# the training/evaluation (for example, plotting something over the predicted
# images), you do not need to re-train the model. You can: 1. specify the
# path to the pre-trained model; 2. set max_epochs to -1; 3. set strict to
# True. By doing this, we load the pre-trained model, skip the training loop,
# and fast-forward to the evaluation.
if hasattr(args, "path_pre_trained"):
    warnings.warn(
        "You have asked to load a specific pre-trained model from {} .... "
        "[OK]".format(args.path_pre_trained))
    model = load_pre_pretrained_model(model=model,
                                      path_file=args.path_pre_trained,
                                      strict=args.strict)
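
# A hedged sketch (assumption, not from this excerpt): `_init_fn`, passed
# above as `worker_init_fn` to the DataLoader, is not defined here. A typical
# implementation that keeps DataLoader workers deterministic derives a
# per-worker seed from MYSEED, along these lines; the exact scheme used by
# `reproducibility.force_seed` may differ.
def _example_worker_init_fn(worker_id):
    """Illustrative worker seeding: give each DataLoader worker its own
    reproducible seed derived from MYSEED."""
    import random          # local imports keep the sketch self-contained.
    import numpy as np
    seed = (int(os.environ["MYSEED"]) + worker_id) % (2 ** 32)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)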