def test_whenEncoderForward_thenPass(self):
    # Verify the wiring inside Encoder.forward: the padded batch is packed,
    # the packed batch is fed to the LSTM, and the LSTM output is unpacked.
    with patch("deepparse.network.nn.LSTM") as lstm_class_mock, patch(
        "deepparse.network.encoder.pack_padded_sequence"
    ) as pack_mock, patch("deepparse.network.encoder.pad_packed_sequence") as pad_mock:
        # The LSTM instance (lstm_class_mock()) returns an (output, hidden) pair.
        lstm_class_mock().return_value = (MagicMock(), MagicMock())

        packed_batch_mock = MagicMock()
        pack_mock.return_value = packed_batch_mock
        pad_mock.return_value = (MagicMock(), MagicMock())

        encoder = Encoder(self.input_size_dim, self.hidden_size, self.num_layers)

        sequences_mock = MagicMock()
        lengths_mock = MagicMock()
        encoder.forward(sequences_mock, lengths_mock)

        # forward must pack with the lengths moved to CPU, batch-first,
        # without requiring pre-sorted sequences.
        pack_mock.assert_has_calls(
            [
                call(
                    sequences_mock,
                    lengths_mock.cpu(),
                    batch_first=True,
                    enforce_sorted=False,
                )
            ]
        )
        # The LSTM instance must have been invoked on the packed batch.
        lstm_class_mock.assert_has_calls([call()(packed_batch_mock)])
class EncoderTest(TestCase):
    """Integration tests running the real Encoder on a CUDA device.

    NOTE(review): the device is hard-coded to "cuda:0", so these tests
    require a CUDA-capable GPU — confirm this matches the CI setup.
    """

    @classmethod
    def setUpClass(cls):
        cls.a_torch_device = torch.device("cuda:0")
        cls.input_size_dim = 300
        cls.hidden_size = 1024
        cls.num_layers = 1

    def setUp(self) -> None:
        self.encoder = Encoder(self.input_size_dim, self.hidden_size, self.num_layers)
        self.encoder.to(self.a_torch_device)  # we mount it into the device

        self.encoder_input_setUp()

    def encoder_input_setUp(self):
        # We use the fasttext case since it is the easier one to load.
        # Fix: use a context manager so the file handle is closed even if
        # pickle.load raises (the original open()/close() pair leaked on error).
        # NOTE(review): pickle.load is only safe because this is a trusted
        # in-repo test fixture.
        with open("./tests/network/integration/to_predict_fasttext.p", "rb") as file:
            self.to_predict_tensor = pickle.load(file)
        self.to_predict_tensor = self.to_predict_tensor.to(self.a_torch_device)

        self.a_lengths_tensor = torch.tensor([6, 6], device=self.a_torch_device)
        self.a_batch_size = 2
        self.max_length = self.a_lengths_tensor[0].item()

    def assert_output_is_valid_dim(self, actual_predictions):
        # Each prediction tensor is expected to be
        # (num_layers, batch_size, hidden_size).
        for actual_prediction in actual_predictions:
            self.assertEqual(self.num_layers, actual_prediction.shape[0])
            self.assertEqual(self.a_batch_size, actual_prediction.shape[1])
            self.assertEqual(self.hidden_size, actual_prediction.shape[2])

    def test_whenForwardStep_thenStepIsOk(self):
        predictions = self.encoder.forward(self.to_predict_tensor, self.a_lengths_tensor)

        self.assert_output_is_valid_dim(predictions)