Example #1
import torch

# Assumes LSTMModel is importable from the project under test.


def test_lstm_model_init_hidden_states_should_return_correct_output():
    # Given: a single-layer model whose initial hidden states should be
    # zero tensors of shape (num_layers, batch_size, hidden_dim)
    expected = (torch.zeros(1, 64, 10), torch.zeros(1, 64, 10))
    model = LSTMModel(100, 10, 10, 5, 1)

    # When
    output = model.init_hidden_states(64)

    # Then: both the hidden state and the cell state match
    assert torch.equal(output[0], expected[0])
    assert torch.equal(output[1], expected[1])
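
The test pins down the contract of init_hidden_states: a (hidden, cell) pair of zero tensors shaped (num_layers, batch_size, hidden_dim). Below is a minimal sketch of a model satisfying that contract; the constructor parameter names are assumptions inferred from the call LSTMModel(100, 10, 10, 5, 1), and only the positions the test actually exercises (hidden_dim=10, num_layers=1) are grounded in the source.

from typing import Tuple

import torch
from torch import nn


class LSTMModel(nn.Module):
    # Parameter names are hypothetical; the fourth argument in particular
    # (here guessed as bptt=5) is not confirmed by the test.
    def __init__(self, vocab_size, embedding_dim, hidden_dim, bptt, num_layers):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers)

    def init_hidden_states(self, batch_size: int) -> Tuple[torch.Tensor, torch.Tensor]:
        # An nn.LSTM carries a (hidden, cell) pair, each of shape
        # (num_layers, batch_size, hidden_dim); both start as zeros.
        shape = (self.num_layers, batch_size, self.hidden_dim)
        return torch.zeros(*shape), torch.zeros(*shape)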
Example #2
def _eval_on_epoch(
    self,
    model: LSTMModel,
    eval_dataloader: LanguageModelingDataLoader,
    criterion: CrossEntropyLoss,
) -> None:
    self._clean_gradients(model)
    self._put_model_to_eval_mode(model)
    # Evaluation runs without gradient tracking; the hidden states are
    # initialized once and threaded through every batch of the epoch.
    with torch.no_grad():
        hidden_states = model.init_hidden_states(self.batch_size)
        for batch_index in tqdm(
                range(0, len(eval_dataloader), eval_dataloader.bptt),
                desc=EVAL_DESCRIPTION_MESSAGE,
        ):
            hidden_states = self._eval_on_batch(
                model,
                next(eval_dataloader.get_batches(batch_index)),
                hidden_states,
                criterion,
            )
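
_eval_on_batch itself is not shown in this example. A hedged sketch of what it might do under the conventions above (forward one BPTT window, accumulate the loss, return the hidden states for the next window) follows; the (inputs, targets) batch layout, the model's forward signature, and the self.eval_loss accumulator are all assumptions, not confirmed by the source.

def _eval_on_batch(
    self,
    model: LSTMModel,
    batch,  # assumed to be an (inputs, targets) pair of tensors
    hidden_states,
    criterion: CrossEntropyLoss,
):
    inputs, targets = batch
    # Forward signature model(inputs, hidden_states) is an assumption.
    output, hidden_states = model(inputs, hidden_states)
    # Flatten (seq_len, batch, vocab) logits to what CrossEntropyLoss expects.
    loss = criterion(output.view(-1, output.size(-1)), targets.view(-1))
    self.eval_loss += loss.item()  # hypothetical running-loss accumulator
    return hidden_states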
Example #3
def _train_on_epoch(
    self,
    model: LSTMModel,
    train_dataloader: LanguageModelingDataLoader,
    criterion: CrossEntropyLoss,
    optimizer: Optimizer,
) -> None:
    self._clean_gradients(model)
    self._put_model_to_train_mode(model)
    # The hidden states initialized here are carried from batch to batch, so
    # the model sees each BPTT window as a continuation of the previous one.
    hidden_states = model.init_hidden_states(self.batch_size)
    for batch_index in tqdm(
            range(0, len(train_dataloader), train_dataloader.bptt),
            desc=TRAIN_DESCRIPTION_MESSAGE,
    ):
        hidden_states = self._train_on_batch(
            model,
            next(train_dataloader.get_batches(batch_index)),
            hidden_states,
            criterion,
            optimizer,
        )
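
The training counterpart of the eval helper typically detaches the carried hidden states so gradients stop at the batch boundary (truncated backpropagation through time) instead of unrolling through the whole epoch. A sketch of _train_on_batch under that assumption; the batch layout and forward signature mirror the hypothetical eval sketch above.

def _train_on_batch(
    self,
    model: LSTMModel,
    batch,  # assumed (inputs, targets) pair, as in the eval sketch
    hidden_states,
    criterion: CrossEntropyLoss,
    optimizer: Optimizer,
):
    inputs, targets = batch
    # Detach the carried states so backpropagation stops at the batch
    # boundary (truncated BPTT) rather than spanning every prior batch.
    hidden_states = tuple(state.detach() for state in hidden_states)
    optimizer.zero_grad()
    output, hidden_states = model(inputs, hidden_states)  # assumed signature
    loss = criterion(output.view(-1, output.size(-1)), targets.view(-1))
    loss.backward()
    optimizer.step()
    return hidden_states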