def test_whenEncoderForward_thenPass(self):
    """Forward on a mocked Encoder packs the padded batch, feeds the packed
    sequence to the LSTM, and unpacks the output, in that order.
    """
    with patch("deepparse.network.nn.LSTM") as lstm_mock, patch(
        "deepparse.network.encoder.pack_padded_sequence"
    ) as pack_mock, patch(
        "deepparse.network.encoder.pad_packed_sequence"
    ) as unpack_mock:
        # The LSTM instance returns the usual (output, hidden) pair.
        lstm_mock().return_value = (MagicMock(), MagicMock())

        packed_batch = MagicMock()
        pack_mock.return_value = packed_batch
        unpack_mock.return_value = (MagicMock(), MagicMock())

        encoder = Encoder(self.input_size_dim, self.hidden_size, self.num_layers)

        sequences_batch = MagicMock()
        sequence_lengths = MagicMock()
        encoder.forward(sequences_batch, sequence_lengths)

        # Lengths are moved to CPU as pack_padded_sequence requires.
        pack_mock.assert_has_calls(
            [
                call(
                    sequences_batch,
                    sequence_lengths.cpu(),
                    batch_first=True,
                    enforce_sorted=False,
                )
            ]
        )
        # The LSTM must have been invoked on the packed batch.
        lstm_mock.assert_has_calls([call()(packed_batch)])
def setUp_encoder(self, device: torch.device) -> None:
    """Build a fresh Encoder on ``device`` and load the pickled fastText
    fixture tensor plus matching sequence lengths onto the same device.
    """
    self.encoder = Encoder(self.input_size_dim, self.hidden_size, self.num_layers)
    # Mount the model onto the target device before any forward pass.
    self.encoder.to(device)

    fixture_path = os.path.join(self.weights_dir, "to_predict_fasttext.p")
    with open(fixture_path, "rb") as fixture_file:
        self.to_predict_tensor = pickle.load(fixture_file)
    self.to_predict_tensor = self.to_predict_tensor.to(device)

    self.a_lengths_tensor = torch.tensor([6, 6], device=device)
    # Both sequences share the same length, so the first entry is the max.
    self.max_length = self.a_lengths_tensor[0].item()
def setUp(self) -> None:
    """Create a CPU-bound Encoder with the default test hyperparameters."""
    self.a_torch_device = torch.device("cpu")

    # Hyperparameters used throughout the Encoder tests.
    self.input_size_dim = 300
    self.hidden_size = 1024
    self.num_layers = 1

    self.encoder = Encoder(self.input_size_dim, self.hidden_size, self.num_layers)
class EncoderCase(TestCase):
    """Shared fixture base for Encoder integration tests.

    Downloads the pickled fastText prediction fixture once per class into a
    temporary weights directory, and provides helpers to build a
    device-mounted Encoder and validate prediction dimensions.
    """

    @classmethod
    def setUpClass(cls):
        cls.a_torch_device = torch.device("cuda:0")
        cls.a_cpu_device = torch.device("cpu")

        # Default Encoder hyperparameters for all subclasses.
        cls.input_size_dim = 300
        cls.hidden_size = 1024
        cls.num_layers = 1
        cls.a_batch_size = 2

        # Fetch the fixture tensor into a per-class temporary directory.
        cls.temp_dir_obj = TemporaryDirectory()
        cls.weights_dir = os.path.join(cls.temp_dir_obj.name, "weights")
        download_from_url(
            file_name="to_predict_fasttext",
            saving_dir=cls.weights_dir,
            file_extension="p",
        )

    @classmethod
    def tearDownClass(cls) -> None:
        cls.temp_dir_obj.cleanup()

    def setUp_encoder(self, device: torch.device) -> None:
        """Build an Encoder on ``device`` and load the fixture inputs there."""
        self.encoder = Encoder(self.input_size_dim, self.hidden_size, self.num_layers)
        # Mount the model onto the target device before any forward pass.
        self.encoder.to(device)

        fixture_path = os.path.join(self.weights_dir, "to_predict_fasttext.p")
        with open(fixture_path, "rb") as fixture_file:
            self.to_predict_tensor = pickle.load(fixture_file)
        self.to_predict_tensor = self.to_predict_tensor.to(device)

        self.a_lengths_tensor = torch.tensor([6, 6], device=device)
        # Both sequences share the same length, so the first entry is the max.
        self.max_length = self.a_lengths_tensor[0].item()

    def assert_output_is_valid_dim(self, actual_predictions):
        """Assert one (max_length, hidden_size) prediction per batch element."""
        self.assertEqual(self.a_batch_size, len(actual_predictions))
        for prediction in actual_predictions:
            self.assertEqual(self.max_length, prediction.shape[0])
            self.assertEqual(self.hidden_size, prediction.shape[1])
class EncoderTest(TestCase):
    """GPU integration test for the Encoder's forward pass.

    Loads a pickled fastText fixture batch, mounts the Encoder on
    ``cuda:0``, and checks the dimensions of the forward output.
    """

    @classmethod
    def setUpClass(cls):
        # NOTE(review): hard-codes cuda:0 — this suite requires a GPU.
        cls.a_torch_device = torch.device("cuda:0")

        # Encoder hyperparameters matching the fixture tensor (300-dim input).
        cls.input_size_dim = 300
        cls.hidden_size = 1024
        cls.num_layers = 1

    def setUp(self) -> None:
        self.encoder = Encoder(self.input_size_dim, self.hidden_size, self.num_layers)
        self.encoder.to(self.a_torch_device)  # we mount it into the device
        self.encoder_input_setUp()

    def encoder_input_setUp(self):
        """Load the fastText fixture batch and matching sequence lengths."""
        # We use the fastText case since it is the easiest fixture to load.
        # Fix: use a context manager so the file handle is closed even if
        # pickle.load raises (the original open()/close() pair leaked it).
        fixture_path = "./tests/network/integration/to_predict_fasttext.p"
        with open(fixture_path, "rb") as file:
            self.to_predict_tensor = pickle.load(file)
        self.to_predict_tensor = self.to_predict_tensor.to(self.a_torch_device)

        self.a_lengths_tensor = torch.tensor([6, 6], device=self.a_torch_device)
        self.a_batch_size = 2
        # Both sequences share the same length, so the first entry is the max.
        self.max_length = self.a_lengths_tensor[0].item()

    def assert_output_is_valid_dim(self, actual_predictions):
        """Assert each prediction is shaped (num_layers, batch, hidden)."""
        for actual_prediction in actual_predictions:
            self.assertEqual(self.num_layers, actual_prediction.shape[0])
            self.assertEqual(self.a_batch_size, actual_prediction.shape[1])
            self.assertEqual(self.hidden_size, actual_prediction.shape[2])

    def test_whenForwardStep_thenStepIsOk(self):
        predictions = self.encoder.forward(self.to_predict_tensor, self.a_lengths_tensor)

        self.assert_output_is_valid_dim(predictions)
def test_whenInstantiateEncoder_thenParametersAreOk(self):
    """A freshly built Encoder forwards its sizes to the underlying LSTM."""
    an_encoder = Encoder(self.input_size_dim, self.hidden_size, self.num_layers)

    actual_lstm = an_encoder.lstm
    self.assertEqual(self.input_size_dim, actual_lstm.input_size)
    self.assertEqual(self.hidden_size, actual_lstm.hidden_size)
    self.assertEqual(self.num_layers, actual_lstm.num_layers)
def setUp(self) -> None:
    """Build the Encoder, mount it on the test device, and prepare inputs."""
    # nn.Module.to() moves in place and returns the module, so chaining is
    # equivalent to a separate .to() call.
    self.encoder = Encoder(
        self.input_size_dim, self.hidden_size, self.num_layers
    ).to(self.a_torch_device)
    self.encoder_input_setUp()