コード例 #1
0
 def test_encode_decode_from_loaded_tokenizer_with_1_maxlen(self, temp_dir):
     """Round-trip a phrase through a saved/reloaded tokenizer with maxlen=1.

     With maxlen=1 the encoding holds a single token, which decodes to END.
     """
     saving_tokenizer = Tokenizer(self.captions, 1)
     saving_tokenizer.save_dictionaries(temp_dir.path)
     restored = Tokenizer()
     restored.load_dictionaries(temp_dir.path)
     # Encode with the restored tokenizer but decode with the original one:
     # both must share identical dictionaries for this to succeed.
     decoded = saving_tokenizer.decode_caption(
         restored.encode_caption(self.phrase))
     self.assertEqual(decoded, [Tokenizer.END])
コード例 #2
0
 def test_encode_decode(self):
     """Encoding then decoding pads the caption with END up to maxlen."""
     expected_tokens = ["THERE", Tokenizer.UNK, "ONE", "HAND"]
     for user_maxlen in [None, 5, 10]:
         with self.subTest(captions=self.captions, user_maxlen=user_maxlen):
             tokenizer = Tokenizer(self.captions, user_maxlen)
             decoded = tokenizer.decode_caption(
                 tokenizer.encode_caption(self.phrase))
             # The effective length is capped by the longest caption length.
             if user_maxlen is None or user_maxlen >= self.max_phraselen:
                 pad_length = self.max_phraselen
             else:
                 pad_length = user_maxlen
             padding = [Tokenizer.END] * (pad_length - len(expected_tokens))
             self.assertEqual(decoded[:pad_length], expected_tokens + padding)
コード例 #3
0
 def test_get_string(self):
     """get_string joins decoded tokens into a space-separated sentence."""
     tokenizer = Tokenizer(self.captions, 5)
     tokens = ["THERE", tokenizer.UNK, "ONE", "HAND"]
     for remove_end in [False, True]:
         encoded = tokenizer.encode_caption(self.phrase)
         if remove_end:
             # Overwrite the trailing END with a copy of the first token so
             # the rendered string continues past the original caption.
             encoded[-1] = encoded[0]
             expected = " ".join(tokens + tokens[:1])
         else:
             expected = " ".join(tokens)
         with self.subTest(remove_end=remove_end):
             self.assertEqual(expected, tokenizer.get_string(encoded))
 def setUp(self):
     """Build a minimal model/optimizer/tokenizer fixture for each test."""
     self.epoch_num = 0
     self.score = None
     self.model = FullyConnectedMapper(1, 1)
     self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
     self.config_obj = YamlConfig(config_dict={"1": 1})
     # Mirrors the structure the checkpointer persists to disk.
     self.state_dict = dict(
         epoch=self.epoch_num,
         model=self.model.state_dict(),
         optimizer=self.optimizer.state_dict(),
         score=self.score,
     )
     self.tokenizer = Tokenizer()
     self.tokenizer.build_dictionaries(["Just a dummy caption"])
コード例 #5
0
 def test_encode_decode_from_loaded_tokenizer(self, temp_dir):
     """A reloaded tokenizer encodes identically to the one that saved it."""
     expected_tokens = ["THERE", Tokenizer.UNK, "ONE", "HAND"]
     for user_maxlen in [None, 5, 10]:
         with self.subTest(captions=self.captions, user_maxlen=user_maxlen):
             original = Tokenizer(self.captions, user_maxlen)
             original.save_dictionaries(temp_dir.path)
             restored = Tokenizer()
             restored.load_dictionaries(temp_dir.path)
             # Encode with the restored tokenizer, decode with the original:
             # round-tripping only works if their dictionaries match.
             decoded = original.decode_caption(
                 restored.encode_caption(self.phrase))
             if user_maxlen is None or user_maxlen >= self.max_phraselen:
                 pad_length = self.max_phraselen
             else:
                 pad_length = user_maxlen
             padding = [Tokenizer.END] * (pad_length - len(expected_tokens))
             self.assertEqual(decoded[:pad_length],
                              expected_tokens + padding)
コード例 #6
0
 def test_user_maxlen_vs_caption_maxlen(self):
     """maxlen is the user-supplied value, capped by the longest caption."""
     for user_maxlen in [None, 1, 5, 10]:
         with self.subTest(captions=self.captions, user_maxlen=user_maxlen):
             tokenizer = Tokenizer(self.captions, user_maxlen)
             if user_maxlen is None or user_maxlen >= self.max_phraselen:
                 expected = self.max_phraselen
             else:
                 expected = user_maxlen
             self.assertEqual(tokenizer.maxlen, expected)
コード例 #7
0
 def test_save_load(self, temp_dir):
     """Saved dictionaries are restored verbatim by load_dictionaries."""
     for user_maxlen in [None, 1, 5, 10]:
         with self.subTest(captions=self.captions, user_maxlen=user_maxlen):
             original = Tokenizer(self.captions, user_maxlen)
             original.save_dictionaries(temp_dir.path)
             restored = Tokenizer()
             restored.load_dictionaries(temp_dir.path)
             # Both the forward and the inverse mapping must survive the
             # save/load round trip.
             self.assertEqual(original.caption_dict, restored.caption_dict)
             self.assertEqual(original.inv_caption_dict,
                              restored.inv_caption_dict)
コード例 #8
0
 def test_filter_tokens(self):
     """Tokens below the cutoff frequency are dropped from the vocabulary."""
     special_tokens = {"<GO>", "<END>", "<UNK>"}
     expected = {
         0: self.captions_vocab,
         1: special_tokens | {"THE"},
         2: special_tokens,
     }
     for cutoff, vocab in expected.items():
         with self.subTest(captions=self.captions, cutoff=cutoff):
             tokenizer = Tokenizer(self.captions, cutoff=cutoff)
             self.assertEqual(set(tokenizer.caption_dict.keys()), vocab)
class CheckpointerTests(unittest.TestCase):
    """Unit tests for Checkpointer: best/latest model saving and loading."""

    def setUp(self):
        """Build a minimal model/optimizer/tokenizer fixture for each test."""
        self.config_obj = YamlConfig(config_dict={"1": 1})
        self.epoch_num = 0
        self.model = FullyConnectedMapper(1, 1)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
        self.score = None
        # Mirrors the structure Checkpointer persists to disk.
        self.state_dict = {
            "epoch": self.epoch_num,
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "score": self.score,
        }
        self.tokenizer = Tokenizer()
        self.tokenizer.build_dictionaries(["Just a dummy caption"])

    def check_loaded_objects(self, epoch_num, model, optimizer):
        """Assert that load_model returned the fixture's objects unchanged."""
        self.assertEqual(epoch_num, self.epoch_num)
        self.assertEqual(model, self.model)
        self.assertEqual(optimizer, self.optimizer)

    @tempdir()
    def test_set_best_score(self, temp_dir):
        """set_best_score keeps the lowest score seen; None leaves it as-is."""
        checkpointer = Checkpointer(temp_dir.path)
        scores = [None, 13.0, 17.0, 11.0, 23.0]
        # np.inf, not np.Inf: the capitalized alias was removed in NumPy 2.0.
        expected = [np.inf, 13.0, 13.0, 11.0, 11.0]
        for i, score in enumerate(scores):
            with self.subTest(score=score, expected=expected[i]):
                checkpointer.set_best_score(score)
                self.assertEqual(checkpointer.best_score, expected[i])

    @patch.object(torch, "load", autospec=True)
    @tempdir()
    def test_load_model_from_save_latest(self, mock_torch_load, temp_dir):
        """A state saved via save_latest can be loaded back exactly once."""
        mock_torch_load.return_value = self.state_dict
        checkpointer = Checkpointer(temp_dir.path)
        checkpointer.save_latest(self.state_dict)
        epoch_num, model, optimizer = checkpointer.load_model(
            self.model, self.optimizer, temp_dir.path, "model.latest")
        self.assertTrue(mock_torch_load.call_count == 1)
        self.check_loaded_objects(epoch_num, model, optimizer)

    @patch.object(torch, "load", autospec=True)
    @tempdir()
    def test_load_model_from_save_best(self, mock_torch_load, temp_dir):
        """A state saved via save_best can be loaded back exactly once."""
        self.state_dict["score"] = 5.0
        mock_torch_load.return_value = self.state_dict
        checkpointer = Checkpointer(temp_dir.path)
        # Make the new score strictly better so save_best actually writes.
        checkpointer.best_score = self.state_dict["score"] + 1
        checkpointer.save_best(self.state_dict)
        epoch_num, model, optimizer = checkpointer.load_model(
            self.model, self.optimizer, temp_dir.path, "model.best")
        self.assertTrue(mock_torch_load.call_count == 1)
        self.check_loaded_objects(epoch_num, model, optimizer)

    @patch.object(torch, "load")
    @tempdir()
    def test_load_model_from_no_checkpoint(self, mock_torch_load, temp_dir):
        """With no checkpoint file on disk, load_model never calls torch.load."""
        checkpointer = Checkpointer(temp_dir.path)
        epoch_num, model, optimizer = checkpointer.load_model(
            self.model, self.optimizer, temp_dir.path, "model.best")
        mock_torch_load.assert_not_called()
        self.check_loaded_objects(epoch_num, model, optimizer)

    @patch.object(torch, "load")
    @tempdir()
    def test_load_model_from_no_predefined_folder(self, mock_torch_load,
                                                  temp_dir):
        """Without folder and filename, load_model returns the inputs as-is."""
        checkpointer = Checkpointer(temp_dir.path)
        epoch_num, model, optimizer = checkpointer.load_model(
            self.model, self.optimizer)
        mock_torch_load.assert_not_called()
        self.check_loaded_objects(epoch_num, model, optimizer)

    @patch.object(torch, "load")
    @tempdir()
    def test_load_model_from_predefined_folder(self, mock_torch_load,
                                               temp_dir):
        """A folder without a filename is not enough to trigger a load."""
        checkpointer = Checkpointer(temp_dir.path)
        epoch_num, model, optimizer = checkpointer.load_model(
            self.model, self.optimizer, temp_dir.path)
        mock_torch_load.assert_not_called()
        self.check_loaded_objects(epoch_num, model, optimizer)

    @patch.object(torch, "load")
    @tempdir()
    def test_load_model_from_predefined_file(self, mock_torch_load, temp_dir):
        """A filename without a folder is not enough to trigger a load."""
        checkpointer = Checkpointer(temp_dir.path)
        epoch_num, model, optimizer = checkpointer.load_model(
            self.model, self.optimizer, filename="model.latest")
        mock_torch_load.assert_not_called()
        self.check_loaded_objects(epoch_num, model, optimizer)

    @tempdir()
    def test_save_best_higher_is_better(self, temp_dir):
        """With higher_is_better=True, only higher scores replace model.best."""
        checkpointer = Checkpointer(temp_dir.path, higher_is_better=True)
        scores = [3.0, 7.0, 2.0, 7.0, 11.0]
        # Epoch whose checkpoint should be on disk after each step (1-based).
        best_epoch = [1, 2, 2, 2, 5]
        for epoch_num, score in enumerate(scores):
            self.state_dict["epoch"] = epoch_num + 1
            self.state_dict["score"] = score
            with self.subTest(state_dict=self.state_dict,
                              epoch_num=epoch_num,
                              score=score,
                              best_score=checkpointer.best_score):
                checkpointer.save_best(self.state_dict, temp_dir.path)
                self.state_dict = torch.load(
                    os.path.join(temp_dir.path, "model.best"))
                self.assertEqual(self.state_dict["score"],
                                 checkpointer.best_score)
                self.assertEqual(self.state_dict["epoch"],
                                 best_epoch[epoch_num])

    @tempdir()
    def test_save_best_higher_is_not_better(self, temp_dir):
        """With higher_is_better=False, only lower scores replace model.best."""
        checkpointer = Checkpointer(temp_dir.path, higher_is_better=False)
        scores = [3.0, 7.0, 3.0, 2.0, 11.0]
        best_epoch = [1, 1, 1, 4, 4]
        for epoch_num, score in enumerate(scores):
            self.state_dict["epoch"] = epoch_num + 1
            self.state_dict["score"] = score
            with self.subTest(state_dict=self.state_dict,
                              epoch_num=epoch_num,
                              score=score,
                              best_score=checkpointer.best_score):
                checkpointer.save_best(self.state_dict, temp_dir.path)
                self.state_dict = torch.load(
                    os.path.join(temp_dir.path, "model.best"))
                self.assertEqual(self.state_dict["score"],
                                 checkpointer.best_score)
                self.assertEqual(self.state_dict["epoch"],
                                 best_epoch[epoch_num])

    @tempdir()
    def test_save_best_folder_None(self, temp_dir):
        """save_best without a folder falls back to the constructor's path."""
        checkpointer = Checkpointer(temp_dir.path)
        scores = [3.0, 7.0, 2.0, 11.0]
        best_epoch = [1, 1, 3, 3]
        for epoch_num, score in enumerate(scores):
            self.state_dict["epoch"] = epoch_num + 1
            self.state_dict["score"] = score
            with self.subTest(state_dict=self.state_dict,
                              epoch_num=epoch_num,
                              score=score,
                              best_score=checkpointer.best_score):
                checkpointer.save_best(self.state_dict)
                self.state_dict = torch.load(
                    os.path.join(temp_dir.path, "model.best"))
                self.assertEqual(self.state_dict["score"],
                                 checkpointer.best_score)
                self.assertEqual(self.state_dict["epoch"],
                                 best_epoch[epoch_num])

    @tempdir()
    def test_save_latest(self, temp_dir):
        """save_latest always overwrites model.latest, whatever the score."""
        checkpointer = Checkpointer(temp_dir.path)
        scores = [3.0, 7.0, 2.0, 11.0]
        for epoch_num, score in enumerate(scores):
            self.state_dict["epoch"] = epoch_num + 1
            self.state_dict["score"] = score
            with self.subTest(state_dict=self.state_dict,
                              epoch_num=epoch_num,
                              score=score):
                checkpointer.save_latest(self.state_dict, temp_dir.path)
                self.state_dict = torch.load(
                    os.path.join(temp_dir.path, "model.latest"))
                self.assertEqual(self.state_dict["score"], score)
                self.assertEqual(self.state_dict["epoch"], epoch_num + 1)

    @tempdir()
    def test_save_latest_folder_None(self, temp_dir):
        """save_latest without a folder falls back to the constructor's path."""
        checkpointer = Checkpointer(temp_dir.path)
        scores = [3.0, 7.0, 2.0, 11.0]
        for epoch_num, score in enumerate(scores):
            self.state_dict["epoch"] = epoch_num + 1
            self.state_dict["score"] = score
            with self.subTest(state_dict=self.state_dict,
                              epoch_num=epoch_num,
                              score=score):
                checkpointer.save_latest(self.state_dict)
                self.state_dict = torch.load(
                    os.path.join(temp_dir.path, "model.latest"))
                self.assertEqual(self.state_dict["score"], score)
                self.assertEqual(self.state_dict["epoch"], epoch_num + 1)

    @tempdir()
    def test_save_meta(self, temp_dir):
        """save_meta writes exactly two files (config + tokenizer data)."""
        checkpointer = Checkpointer(temp_dir.path)
        checkpointer.save_meta(temp_dir.path, self.config_obj, self.tokenizer)
        self.assertEqual(len(os.listdir(temp_dir.path)), 2)

    @tempdir()
    def test_save_meta_folder_does_not_exist(self, temp_dir):
        """save_meta creates the target folder when it does not exist yet."""
        # NOTE(review): no path separator here, so this is a sibling directory
        # named "<tmp>random_folder" — appears intentional for this test.
        random_folder = temp_dir.path + "random_folder"
        checkpointer = Checkpointer(temp_dir.path)
        checkpointer.save_meta(random_folder, self.config_obj, self.tokenizer)
        self.assertEqual(len(os.listdir(random_folder)), 2)

    @tempdir()
    def test_save_value_csv(self, temp_dir):
        """save_value_csv writes a row that reads back identically."""
        checkpointer = Checkpointer(temp_dir.path)
        value = ["1", "2", "3"]
        filename = "checkpointer_test"
        checkpointer.save_value_csv(value, temp_dir.path, filename)
        read_value = []
        with open(os.path.join(temp_dir.path, filename)) as csvfile:
            read_csv_file = csv.reader(csvfile)
            for row in read_csv_file:
                read_value = row
        self.assertEqual(value, read_value)

    @tempdir()
    def test_save_value_csv_no_folder(self, temp_dir):
        """Without a folder, save_value_csv uses the constructor's path."""
        checkpointer = Checkpointer(temp_dir.path)
        value = ["1", "2", "3"]
        filename = "checkpointer_test"
        checkpointer.save_value_csv(value, filename=filename)
        read_value = []
        with open(os.path.join(temp_dir.path, filename)) as csvfile:
            read_csv_file = csv.reader(csvfile)
            for row in read_csv_file:
                read_value = row
        self.assertEqual(value, read_value)

    @tempdir()
    def test_save_best_after_loading_from_latest(self, temp_dir):
        """After resuming from model.latest, save_best still tracks the score."""
        checkpointer = Checkpointer(temp_dir.path)
        checkpointer.save_latest(self.state_dict)
        checkpointer.load_model(self.model, self.optimizer, temp_dir.path,
                                "model.latest")
        self.state_dict["score"] = 3.0
        self.state_dict["epoch"] = 1
        checkpointer.save_best(self.state_dict)
        self.assertEqual(self.state_dict["score"], checkpointer.best_score)
        self.assertEqual(self.state_dict["epoch"], 1)
コード例 #10
0
 def test_encoding_length_equal_max_len(self):
     """Every encoded caption has exactly maxlen tokens."""
     for user_maxlen in [None, 1, 5, 10]:
         with self.subTest(captions=self.captions, user_maxlen=user_maxlen):
             tokenizer = Tokenizer(self.captions, user_maxlen)
             encoded = tokenizer.encode_caption(self.phrase)
             self.assertEqual(len(encoded), tokenizer.maxlen)
コード例 #11
0
 def test_encode_decode_with_1_maxlen(self):
     """With maxlen=1 the whole round trip collapses to a single END token."""
     tokenizer = Tokenizer(self.captions, 1)
     decoded = tokenizer.decode_caption(
         tokenizer.encode_caption(self.phrase))
     self.assertEqual(decoded, [Tokenizer.END])
コード例 #12
0
def train_model(config_obj, relative_path=""):
    """Build and train a video-captioning model from a configuration object.

    Reads dataset paths, preprocessing settings, and the model / loss /
    optimizer / scheduler types (with their kwargs) from ``config_obj``,
    assembles the training and validation pipelines, then hands control to
    ``Trainer.train``.

    Args:
        config_obj: Config object exposing ``get(section, key)`` lookups
            (e.g. a YamlConfig).
        relative_path: Optional prefix joined onto every path read from the
            config; defaults to the current directory.
    """
    # Find paths to training, validation and test sets
    training_path = os.path.join(relative_path,
                                 config_obj.get("paths", "train_annot"))
    validation_path = os.path.join(relative_path,
                                   config_obj.get("paths", "validation_annot"))

    # Load attributes of config file
    caption_type = config_obj.get("targets", "caption_type")
    checkpoint_folder = os.path.join(
        relative_path, config_obj.get("paths", "checkpoint_folder"))
    higher_is_better = config_obj.get("criteria", "higher_is_better")
    clip_grad = config_obj.get("training", "clip_grad")
    frequency_valid = config_obj.get("validation", "frequency")
    gpus = config_obj.get("device", "gpus")
    num_epoch = config_obj.get("training", "num_epochs")
    pretrained_folder = config_obj.get("pretrained", "pretrained_folder")
    pretrained_file = config_obj.get("pretrained", "pretrained_file")
    # Only prefix the pretrained folder when one is actually configured.
    pretrained_folder = os.path.join(
        relative_path, pretrained_folder) if pretrained_folder else None
    teacher_force_train = config_obj.get("training", "teacher_force")
    teacher_force_valid = config_obj.get("validation", "teacher_force")
    verbose_train = config_obj.get("training", "verbose")
    verbose_valid = config_obj.get("validation", "verbose")

    # Get model, loss, optimizer, scheduler, and criteria from config_file
    model_type = config_obj.get("model", "type")
    loss_type = config_obj.get("loss", "type")
    classif_loss_type = "CrossEntropy"  #TODO: FIX reading from config file
    optimizer_type = config_obj.get("optimizer", "type")
    scheduler_type = config_obj.get("scheduler", "type")
    criteria = config_obj.get("criteria", "score")
    videos_folder = config_obj.get("paths", "videos_folder")

    # Preprocess
    crop_size = config_obj.get("preprocess", "crop_size")
    scale = config_obj.get("preprocess", "scale")
    input_resize = config_obj.get("preprocess", "input_resize")

    # Load Json annotation files
    training_parser = JsonParser(training_path,
                                 os.path.join(relative_path, videos_folder),
                                 caption_type=caption_type)
    validation_parser = JsonParser(validation_path,
                                   os.path.join(relative_path, videos_folder),
                                   caption_type=caption_type)

    # Build a tokenizer that contains all captions from annotation files
    tokenizer = Tokenizer(**config_obj.get("tokenizer", "kwargs"))
    if pretrained_folder:
        # Reuse the vocabulary the pretrained model was trained with so
        # token ids stay consistent with the loaded weights.
        tokenizer.load_dictionaries(pretrained_folder)
        print("Inside pretrained", tokenizer.get_vocab_size())
    else:
        tokenizer.build_dictionaries(
            training_parser.get_captions_from_tmp_and_lbl())

        #tokenizer.build_dictionaries(training_parser.get_captions())
    # Training-time preprocessing uses a random crop for augmentation.
    preprocessor = Compose([
        prep.RandomCrop(crop_size),
        prep.PadVideo(crop_size),
        prep.Float32Converter(scale),
        prep.PytorchTransposer()
    ])

    # Validation uses a deterministic center crop instead of a random one.
    val_preprocessor = Compose([
        CenterCropper(crop_size),
        prep.PadVideo(crop_size),
        prep.Float32Converter(scale),
        prep.PytorchTransposer()
    ])

    training_set = GulpVideoDataset(annotation_parser=training_parser,
                                    tokenizer=tokenizer,
                                    preprocess=preprocessor,
                                    gulp_dir=videos_folder,
                                    size=input_resize)

    validation_set = GulpVideoDataset(annotation_parser=validation_parser,
                                      tokenizer=tokenizer,
                                      preprocess=val_preprocessor,
                                      gulp_dir=videos_folder,
                                      size=input_resize)

    dataloader = DataLoader(training_set,
                            shuffle=True,
                            drop_last=False,
                            **config_obj.get("dataloaders", "kwargs"))

    # NOTE(review): the validation loader also shuffles; confirm this is
    # intended, since validation order usually does not need shuffling.
    val_dataloader = DataLoader(validation_set,
                                shuffle=True,
                                drop_last=False,
                                **config_obj.get("dataloaders", "kwargs"))

    encoder_type = config_obj.get("model", "encoder")
    decoder_type = config_obj.get("model", "decoder")
    encoder_args = config_obj.get("model", "encoder_args")
    encoder_kwargs = config_obj.get("model", "encoder_kwargs")
    decoder_args = config_obj.get("model", "decoder_args")
    decoder_kwargs = config_obj.get("model", "decoder_kwargs")
    # The decoder's output layer must match the tokenizer's vocabulary.
    decoder_kwargs["vocab_size"] = tokenizer.get_vocab_size()
    # decoder_kwargs["go_token"] = tokenizer.encode_token(tokenizer.GO)

    # TODO: Remove GPUs?
    # NOTE(review): gpus was already read above; this second read is redundant.
    gpus = config_obj.get("device", "gpus")

    # decoder_kwargs["gpus"] = gpus

    # Create model, loss, and optimizer objects
    model = getattr(ptcap.model.captioners,
                    model_type)(encoder=getattr(all_models, encoder_type),
                                decoder=getattr(all_models, decoder_type),
                                encoder_args=encoder_args,
                                encoder_kwargs=encoder_kwargs,
                                decoder_args=decoder_args,
                                decoder_kwargs=decoder_kwargs,
                                gpus=gpus)

    loss_function = getattr(ptcap.losses, loss_type)()
    classif_loss_function = getattr(ptcap.losses, classif_loss_type)()

    # Only optimize parameters that are not frozen.
    params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = getattr(torch.optim, optimizer_type)(params=params,
                                                     **config_obj.get(
                                                         "optimizer",
                                                         "kwargs"))

    scheduler_kwargs = copy.deepcopy(config_obj.get("scheduler", "kwargs"))
    scheduler_kwargs["optimizer"] = optimizer
    # Scheduler direction must agree with the model-selection criterion.
    scheduler_kwargs["mode"] = "max" if higher_is_better else "min"

    scheduler = getattr(torch.optim.lr_scheduler,
                        scheduler_type)(**scheduler_kwargs)

    writer = Seq2seqAdapter(os.path.join(checkpoint_folder, "runs"),
                            config_obj.get("logging", "tensorboard_frequency"))
    # Prepare checkpoint directory and save config
    Checkpointer.save_meta(checkpoint_folder, config_obj, tokenizer)

    checkpointer = Checkpointer(checkpoint_folder, higher_is_better)

    # Setup the logger
    logger = CustomLogger(folder=checkpoint_folder, tokenizer=tokenizer)

    # Trainer
    trainer = Trainer(model,
                      loss_function,
                      scheduler,
                      tokenizer,
                      logger,
                      writer,
                      checkpointer,
                      folder=pretrained_folder,
                      filename=pretrained_file,
                      gpus=gpus,
                      clip_grad=clip_grad,
                      classif_loss_function=classif_loss_function)

    # Train the Model
    trainer.train(dataloader, val_dataloader, criteria, num_epoch,
                  frequency_valid, teacher_force_train, teacher_force_valid,
                  verbose_train, verbose_valid)
コード例 #13
0
 def test_captions_len(self):
     """Vocabulary size matches the number of distinct expected tokens."""
     vocab_size = Tokenizer(self.captions).get_vocab_size()
     self.assertEqual(vocab_size, len(self.captions_vocab))
コード例 #14
0
 def test_captions_vocab(self):
     """The tokenizer dictionary covers exactly the expected vocabulary."""
     tokenizer = Tokenizer(self.captions)
     built_vocab = set(tokenizer.caption_dict)
     self.assertEqual(built_vocab, self.captions_vocab)
 def setUp(self, mock_open_annot):
     """Create a dataset fixture over a fake annotation with string captions.

     mock_open_annot is injected by a @patch decorator outside this view;
     presumably it makes JsonParser read FAKE_ANNOT instead of the disk.
     """
     captions = list(self.FAKE_ANNOT["label"])
     self.tokenizer = Tokenizer(captions)
     self.annotation_parser = JsonParser('', '')
     self.dataset = VideoDataset(self.annotation_parser, self.tokenizer)
コード例 #16
0
 def test_user_maxlen(self):
     """Without captions, maxlen is exactly the user-supplied value."""
     for user_maxlen in [None, 1, 5, 10]:
         with self.subTest(user_maxlen=user_maxlen):
             tokenizer = Tokenizer(user_maxlen=user_maxlen)
             # `None if x is None else x` in the original is just x itself.
             self.assertEqual(tokenizer.maxlen, user_maxlen)
コード例 #17
0
 def test_assert_error_for_maxlen(self):
     """set_maxlen rejects a negative length with an AssertionError."""
     with self.assertRaises(AssertionError):
         Tokenizer().set_maxlen(-1)
コード例 #18
0
 def test_max_len(self):
     """With no user maxlen, maxlen defaults to the longest caption length."""
     self.assertEqual(Tokenizer(self.captions).maxlen, self.max_phraselen)