def test_load_model_from_predefined_folder(self, mock_torch_load, temp_dir):
    checkpointer = Checkpointer(temp_dir.path)
    epoch_num, model, optimizer = checkpointer.load_model(
        self.model, self.optimizer, temp_dir.path)
    # No checkpoint file exists in the folder, so torch.load is never reached
    mock_torch_load.assert_not_called()
    self.check_loaded_objects(epoch_num, model, optimizer)
def test_set_best_score(self, temp_dir):
    checkpointer = Checkpointer(temp_dir.path)
    scores = [None, 13.0, 17.0, 11.0, 23.0]
    expected = [np.Inf, 13.0, 13.0, 11.0, 11.0]
    for i, score in enumerate(scores):
        with self.subTest(score=score, expected=expected[i]):
            checkpointer.set_best_score(score)
            self.assertEqual(checkpointer.best_score, expected[i])
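# A minimal sketch of the behavior this test pins down, assuming Checkpointer
# defaults to higher_is_better=False and initializes best_score to np.Inf.
# The real implementation lives in ptcap; this is illustrative only:
#
#     def set_best_score(self, score):
#         if score is None:
#             return  # keep the current best (np.Inf right after init)
#         better = (score > self.best_score if self.higher_is_better
#                   else score < self.best_score)
#         if better:
#             self.best_score = score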
def test_load_model_from_save_latest(self, mock_torch_load, temp_dir):
    mock_torch_load.return_value = self.state_dict
    checkpointer = Checkpointer(temp_dir.path)
    checkpointer.save_latest(self.state_dict)
    epoch_num, model, optimizer = checkpointer.load_model(
        self.model, self.optimizer, temp_dir.path, "model.latest")
    self.assertEqual(mock_torch_load.call_count, 1)
    self.check_loaded_objects(epoch_num, model, optimizer)
def test_save_value_csv_no_folder(self, temp_dir):
    checkpointer = Checkpointer(temp_dir.path)
    value = ["1", "2", "3"]
    filename = "checkpointer_test"
    checkpointer.save_value_csv(value, filename=filename)
    read_value = []
    with open(os.path.join(temp_dir.path, filename)) as csvfile:
        read_csv_file = csv.reader(csvfile)
        for row in read_csv_file:
            read_value = row
    self.assertEqual(value, read_value)
def test_load_pretrained_encoder_with_dict_attr(self, temp_dir):
    checkpointer = Checkpointer(temp_dir.path)
    checkpointer.save_latest(self.state_dict, filename=self.model_name)
    encoder = PretrainedEncoder(
        FullyConnectedMapper,
        pretrained_path=os.path.join(temp_dir.path, self.model_name),
        encoder_args=(2, 3), checkpoint_key='model')
    encoded = encoder(self.input)
    expected_encoding = self.model(self.input)
    self.assertEqual((encoded - expected_encoding).sum().data.numpy(), 0)
def test_save_latest_folder_None(self, temp_dir):
    checkpointer = Checkpointer(temp_dir.path)
    scores = [3.0, 7.0, 2.0, 11.0]
    for epoch_num, score in enumerate(scores):
        self.state_dict["epoch"] = epoch_num + 1
        self.state_dict["score"] = score
        with self.subTest(state_dict=self.state_dict, epoch_num=epoch_num,
                          score=score):
            checkpointer.save_latest(self.state_dict)
            self.state_dict = torch.load(
                os.path.join(temp_dir.path, "model.latest"))
            self.assertEqual(self.state_dict["score"], score)
            self.assertEqual(self.state_dict["epoch"], epoch_num + 1)
def test_save_best_after_loading_from_latest(self, temp_dir):
    checkpointer = Checkpointer(temp_dir.path)
    checkpointer.save_latest(self.state_dict)
    checkpointer.load_model(self.model, self.optimizer, temp_dir.path,
                            "model.latest")
    self.state_dict["score"] = 3.0
    self.state_dict["epoch"] = 1
    checkpointer.save_best(self.state_dict)
    self.assertEqual(self.state_dict["score"], checkpointer.best_score)
    self.assertEqual(self.state_dict["epoch"], 1)
def test_save_best_higher_is_not_better(self, temp_dir):
    checkpointer = Checkpointer(temp_dir.path, higher_is_better=False)
    scores = [3.0, 7.0, 3.0, 2.0, 11.0]
    best_epoch = [1, 1, 1, 4, 4]
    for epoch_num, score in enumerate(scores):
        self.state_dict["epoch"] = epoch_num + 1
        self.state_dict["score"] = score
        with self.subTest(state_dict=self.state_dict, epoch_num=epoch_num,
                          score=score, best_score=checkpointer.best_score):
            checkpointer.save_best(self.state_dict, temp_dir.path)
            self.state_dict = torch.load(
                os.path.join(temp_dir.path, "model.best"))
            self.assertEqual(self.state_dict["score"],
                             checkpointer.best_score)
            self.assertEqual(self.state_dict["epoch"],
                             best_epoch[epoch_num])
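# A minimal sketch of save_best consistent with the two tests above
# (illustrative, not the ptcap implementation): the checkpoint is written
# only when the score strictly improves on best_score, so "model.best"
# always holds the best epoch seen so far.
#
#     def save_best(self, state_dict, folder=None):
#         folder = folder or self.checkpoint_folder
#         score = state_dict["score"]
#         better = (score > self.best_score if self.higher_is_better
#                   else score < self.best_score)
#         if better:
#             self.best_score = score
#             torch.save(state_dict, os.path.join(folder, "model.best"))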
def train_model(config_obj, relative_path=""):
    # Find paths to the training, validation and test sets
    training_path = os.path.join(relative_path,
                                 config_obj.get("paths", "train_annot"))
    validation_path = os.path.join(relative_path,
                                   config_obj.get("paths", "validation_annot"))

    # Load attributes of the config file
    caption_type = config_obj.get("targets", "caption_type")
    checkpoint_folder = os.path.join(
        relative_path, config_obj.get("paths", "checkpoint_folder"))
    higher_is_better = config_obj.get("criteria", "higher_is_better")
    clip_grad = config_obj.get("training", "clip_grad")
    frequency_valid = config_obj.get("validation", "frequency")
    gpus = config_obj.get("device", "gpus")
    num_epoch = config_obj.get("training", "num_epochs")
    pretrained_folder = config_obj.get("pretrained", "pretrained_folder")
    pretrained_file = config_obj.get("pretrained", "pretrained_file")
    pretrained_folder = os.path.join(
        relative_path, pretrained_folder) if pretrained_folder else None
    teacher_force_train = config_obj.get("training", "teacher_force")
    teacher_force_valid = config_obj.get("validation", "teacher_force")
    verbose_train = config_obj.get("training", "verbose")
    verbose_valid = config_obj.get("validation", "verbose")

    # Get the model, loss, optimizer, scheduler, and criteria from the config
    model_type = config_obj.get("model", "type")
    loss_type = config_obj.get("loss", "type")
    classif_loss_type = "CrossEntropy"  # TODO: read this from the config file
    optimizer_type = config_obj.get("optimizer", "type")
    scheduler_type = config_obj.get("scheduler", "type")
    criteria = config_obj.get("criteria", "score")
    videos_folder = config_obj.get("paths", "videos_folder")

    # Preprocessing parameters
    crop_size = config_obj.get("preprocess", "crop_size")
    scale = config_obj.get("preprocess", "scale")
    input_resize = config_obj.get("preprocess", "input_resize")

    # Load the JSON annotation files
    training_parser = JsonParser(training_path,
                                 os.path.join(relative_path, videos_folder),
                                 caption_type=caption_type)
    validation_parser = JsonParser(validation_path,
                                   os.path.join(relative_path, videos_folder),
                                   caption_type=caption_type)

    # Build a tokenizer that contains all captions from the annotation files
    tokenizer = Tokenizer(**config_obj.get("tokenizer", "kwargs"))
    if pretrained_folder:
        tokenizer.load_dictionaries(pretrained_folder)
        print("Loaded pretrained vocabulary of size",
              tokenizer.get_vocab_size())
    else:
        tokenizer.build_dictionaries(
            training_parser.get_captions_from_tmp_and_lbl())

    preprocessor = Compose([prep.RandomCrop(crop_size),
                            prep.PadVideo(crop_size),
                            prep.Float32Converter(scale),
                            prep.PytorchTransposer()])

    val_preprocessor = Compose([CenterCropper(crop_size),
                                prep.PadVideo(crop_size),
                                prep.Float32Converter(scale),
                                prep.PytorchTransposer()])

    training_set = GulpVideoDataset(annotation_parser=training_parser,
                                    tokenizer=tokenizer,
                                    preprocess=preprocessor,
                                    gulp_dir=videos_folder,
                                    size=input_resize)

    validation_set = GulpVideoDataset(annotation_parser=validation_parser,
                                      tokenizer=tokenizer,
                                      preprocess=val_preprocessor,
                                      gulp_dir=videos_folder,
                                      size=input_resize)

    dataloader = DataLoader(training_set, shuffle=True, drop_last=False,
                            **config_obj.get("dataloaders", "kwargs"))

    val_dataloader = DataLoader(validation_set, shuffle=True, drop_last=False,
                                **config_obj.get("dataloaders", "kwargs"))

    encoder_type = config_obj.get("model", "encoder")
    decoder_type = config_obj.get("model", "decoder")
    encoder_args = config_obj.get("model", "encoder_args")
    encoder_kwargs = config_obj.get("model", "encoder_kwargs")
    decoder_args = config_obj.get("model", "decoder_args")
    decoder_kwargs = config_obj.get("model", "decoder_kwargs")
    decoder_kwargs["vocab_size"] = tokenizer.get_vocab_size()

    # Create the model, loss, and optimizer objects
    model = getattr(ptcap.model.captioners, model_type)(
        encoder=getattr(all_models, encoder_type),
        decoder=getattr(all_models, decoder_type),
        encoder_args=encoder_args,
        encoder_kwargs=encoder_kwargs,
        decoder_args=decoder_args,
        decoder_kwargs=decoder_kwargs,
        gpus=gpus)

    loss_function = getattr(ptcap.losses, loss_type)()
    classif_loss_function = getattr(ptcap.losses, classif_loss_type)()

    params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = getattr(torch.optim, optimizer_type)(
        params=params, **config_obj.get("optimizer", "kwargs"))

    scheduler_kwargs = copy.deepcopy(config_obj.get("scheduler", "kwargs"))
    scheduler_kwargs["optimizer"] = optimizer
    scheduler_kwargs["mode"] = "max" if higher_is_better else "min"
    scheduler = getattr(torch.optim.lr_scheduler,
                        scheduler_type)(**scheduler_kwargs)

    writer = Seq2seqAdapter(os.path.join(checkpoint_folder, "runs"),
                            config_obj.get("logging",
                                           "tensorboard_frequency"))

    # Prepare the checkpoint directory and save the config
    Checkpointer.save_meta(checkpoint_folder, config_obj, tokenizer)
    checkpointer = Checkpointer(checkpoint_folder, higher_is_better)

    # Set up the logger
    logger = CustomLogger(folder=checkpoint_folder, tokenizer=tokenizer)

    # Build the trainer
    trainer = Trainer(model, loss_function, scheduler, tokenizer, logger,
                      writer, checkpointer, folder=pretrained_folder,
                      filename=pretrained_file, gpus=gpus,
                      clip_grad=clip_grad,
                      classif_loss_function=classif_loss_function)

    # Train the model
    trainer.train(dataloader, val_dataloader, criteria, num_epoch,
                  frequency_valid, teacher_force_train, teacher_force_valid,
                  verbose_train, verbose_valid)
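# A minimal sketch of how train_model might be driven from a script, assuming
# a config helper exposing a two-argument get(section, key) as used above.
# The YamlConfig name, its load() constructor, and the --config flag are
# illustrative assumptions, not the project's actual entry point:
#
#     import argparse
#
#     if __name__ == "__main__":
#         parser = argparse.ArgumentParser()
#         parser.add_argument("--config", default="configs/train.yaml")
#         args = parser.parse_args()
#         config_obj = YamlConfig.load(args.config)  # hypothetical loader
#         train_model(config_obj, relative_path="")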
def test_save_meta_folder_does_not_exist(self, temp_dir):
    random_folder = os.path.join(temp_dir.path, "random_folder")
    checkpointer = Checkpointer(temp_dir.path)
    checkpointer.save_meta(random_folder, self.config_obj, self.tokenizer)
    self.assertEqual(len(os.listdir(random_folder)), 2)
def test_save_meta(self, temp_dir):
    checkpointer = Checkpointer(temp_dir.path)
    checkpointer.save_meta(temp_dir.path, self.config_obj, self.tokenizer)
    self.assertEqual(len(os.listdir(temp_dir.path)), 2)