Example #1
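The snippets below are method excerpts from deepparse's Decoder tests and share one set of imports. A minimal import block they appear to assume; the deepparse import paths are inferred from the patch targets (e.g. "deepparse.network.nn.LSTM") and may differ between versions:

import os
import pickle
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, call, patch

import torch

from deepparse.network import Decoder          # inferred from the patch targets
from deepparse.tools import download_from_url  # assumed location of the weights-download helper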
    def test_whenDecoderAttForward_thenReturnAttWeights(self):
        with patch("deepparse.network.nn.LSTM") as lstm_mock:
            output_mock = MagicMock()
            hidden_mock = MagicMock()
            lstm_mock().return_value = output_mock, hidden_mock

            with patch("deepparse.network.nn.Linear") as linear_mock:
                linear_mock().return_value = MagicMock()
                with patch("deepparse.network.torch.tanh") as tanh_mock:
                    tanh_mock().return_value = MagicMock()
                    with patch("deepparse.network.torch.matmul") as matmul_mock:
                        matmul_mock().return_value = MagicMock()
                        with patch("deepparse.network.torch.cat") as cat_mock:
                            cat_mock().return_value = MagicMock()
                            with patch("deepparse.network.nn.LogSoftmax"):
                                decoder = Decoder(
                                    self.input_size_dim,
                                    self.hidden_size,
                                    self.num_layers,
                                    self.output_size,
                                    attention_mechanism=True,
                                )
                                to_predict_mock = MagicMock()
                                hidden_mock = MagicMock()
                                encoder_mock = MagicMock()
                                lengths_mock = torch.ones(1, 2)
                                _, _, attention_weights = decoder.forward(
                                    to_predict_mock,
                                    hidden_mock,
                                    encoder_mock,
                                    lengths_mock,
                                )
                                self.assertIsNotNone(attention_weights)
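Patching deepparse.network.torch.tanh, torch.matmul, and torch.cat only confirms that those ops run on the attention path; it says nothing about the exact formula. For orientation, a generic additive-attention sketch built from the same three ops (an illustration only, not deepparse's implementation; all names and shapes are hypothetical):

import torch

def additive_attention_sketch(decoder_hidden, encoder_outputs):
    # decoder_hidden: (batch, hidden); encoder_outputs: (batch, seq_len, hidden)
    scores = torch.matmul(
        torch.tanh(encoder_outputs + decoder_hidden.unsqueeze(1)),  # (batch, seq_len, hidden)
        decoder_hidden.unsqueeze(2),                                # (batch, hidden, 1)
    ).squeeze(2)                                                    # (batch, seq_len)
    attention_weights = torch.softmax(scores, dim=1)
    context = torch.matmul(attention_weights.unsqueeze(1), encoder_outputs).squeeze(1)
    # The concatenated context typically feeds the LSTM input or the output projection.
    return torch.cat((context, decoder_hidden), dim=1), attention_weights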
Example #2
    def test_whenDecoderForward_thenPass(self):
        with patch("deepparse.network.nn.LSTM") as lstm_mock:
            output_mock = MagicMock()
            hidden_mock = MagicMock()
            lstm_mock().return_value = output_mock, hidden_mock

            with patch("deepparse.network.nn.Linear") as linear_mock:
                linear_output = MagicMock()
                linear_mock().return_value = linear_output
                with patch("deepparse.network.nn.LogSoftmax"
                           ) as log_soft_max_mock:
                    decoder = Decoder(
                        self.input_size_dim,
                        self.hidden_size,
                        self.num_layers,
                        self.output_size,
                        attention_mechanism=False,
                    )
                    to_predict_mock = MagicMock()
                    hidden_mock = MagicMock()
                    encoder_mock = MagicMock()
                    lengths_mock = MagicMock()
                    decoder.forward(to_predict_mock, hidden_mock, encoder_mock,
                                    lengths_mock)

                    lstm_mock.assert_has_calls(
                        [call()(to_predict_mock.float(), hidden_mock)])
                    linear_mock.assert_has_calls(
                        [call()(output_mock.__getitem__())])
                    log_soft_max_mock.assert_has_calls([call()(linear_output)])
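These assert_has_calls checks work because a MagicMock returns the same child mock no matter what arguments it is called with, so lstm_mock() in the test configures the very object that nn.LSTM(...) produces inside Decoder.__init__. A standalone illustration of that mechanic:

from unittest.mock import MagicMock, call

m = MagicMock()
m().return_value = "out"           # configures the one shared child mock any m(...) call returns
assert m("any", "args")("x") == "out"
m.assert_has_calls([call()("x")])  # call()(...) matches the chained call on that child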
Example #3
    def test_whenDecoderNotAttForward_thenReturnAttWeightsToNone(self):
        with patch("deepparse.network.nn.LSTM") as lstm_mock:
            output_mock = MagicMock()
            hidden_mock = MagicMock()
            lstm_mock().return_value = output_mock, hidden_mock

            with patch("deepparse.network.nn.Linear") as linear_mock:
                linear_output = MagicMock()
                linear_mock().return_value = linear_output
                with patch("deepparse.network.nn.LogSoftmax"):
                    decoder = Decoder(
                        self.input_size_dim,
                        self.hidden_size,
                        self.num_layers,
                        self.output_size,
                        attention_mechanism=False,
                    )
                    to_predict_mock = MagicMock()
                    hidden_mock = MagicMock()
                    encoder_mock = MagicMock()
                    lengths_mock = MagicMock()
                    _, _, attention_weights = decoder.forward(
                        to_predict_mock, hidden_mock, encoder_mock,
                        lengths_mock)
                    self.assertIsNone(attention_weights)
Example #4
    def setUp_encoder_decoder(self, output_size: int, device: torch.device, attention_mechanism) -> None:
        self.decoder = Decoder(
            self.input_size_dim,
            self.hidden_size,
            self.num_layers,
            output_size,
            attention_mechanism=attention_mechanism,
        )
        self.decoder.to(device)  # move the decoder onto the device
        self.decoder_input_setUp(device)
Example #5
class DecoderCase(TestCase):
    @classmethod
    def setUpClass(cls):
        cls.temp_dir_obj = TemporaryDirectory()
        cls.weights_dir = os.path.join(cls.temp_dir_obj.name, "weights")

        download_from_url(file_name="decoder_hidden", saving_dir=cls.weights_dir, file_extension="p")

        cls.a_torch_device = torch.device("cuda:0")
        cls.a_cpu_device = torch.device("cpu")

        cls.input_size_dim = 1
        cls.hidden_size = 1024
        cls.num_layers = 1
        cls.a_batch_size = 2
        cls.sequence_len = 1

    @classmethod
    def tearDownClass(cls) -> None:
        cls.temp_dir_obj.cleanup()

    def setUp_encoder_decoder(self, output_size: int, device: torch.device, attention_mechanism) -> None:
        self.decoder = Decoder(
            self.input_size_dim,
            self.hidden_size,
            self.num_layers,
            output_size,
            attention_mechanism=attention_mechanism,
        )
        self.decoder.to(device)  # move the decoder onto the device
        self.decoder_input_setUp(device)

    def decoder_input_setUp(self, device: torch.device):
        self.decoder_input = torch.tensor([[[-1], [-1]]], device=device)
        self.a_lengths_tensor = torch.tensor([self.sequence_len, self.sequence_len], device="cpu")

        with open(os.path.join(self.weights_dir, "decoder_hidden.p"), "rb") as file:
            self.decoder_hidden_tensor = pickle.load(file)
        self.decoder_hidden_tensor = (
            self.decoder_hidden_tensor[0].to(device),
            self.decoder_hidden_tensor[1].to(device),
        )
        self.decoder_output = torch.rand((1, self.sequence_len, self.hidden_size), device=device)

    def assert_predictions_is_valid_dim(self, actual_predictions, output_size: int):
        self.assertEqual(self.a_batch_size, actual_predictions.shape[0])
        self.assertEqual(output_size, actual_predictions.shape[1])

    def assert_hidden_is_valid_dim(self, actual_predictions):
        for actual_prediction in actual_predictions:
            self.assertEqual(self.num_layers, actual_prediction.shape[0])
            self.assertEqual(self.a_batch_size, actual_prediction.shape[1])
            self.assertEqual(self.hidden_size, actual_prediction.shape[2])
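DecoderCase defines only fixtures and assertion helpers; the test methods themselves are not shown. A sketch of how they presumably fit together, assuming the three-value forward signature seen in Examples #1-#3 (the method name is hypothetical):

    def test_whenForwardStep_thenDimensionsAreValid(self):  # hypothetical name
        output_size = 9
        self.setUp_encoder_decoder(output_size, self.a_cpu_device, attention_mechanism=False)
        predictions, hidden, _ = self.decoder.forward(
            self.decoder_input,
            self.decoder_hidden_tensor,
            self.decoder_output,
            self.a_lengths_tensor,
        )
        self.assert_predictions_is_valid_dim(predictions, output_size)
        self.assert_hidden_is_valid_dim(hidden)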
Example #6
    def setUp(self) -> None:
        self.a_torch_device = torch.device("cuda:0")

        self.input_size_dim = 1
        self.hidden_size = 1024
        self.num_layers = 1
        self.output_size = 9

        self.a_batch_size = 2

        self.decoder = Decoder(self.input_size_dim, self.hidden_size,
                               self.num_layers, self.output_size)
        self.decoder.to(self.a_torch_device)  # move the decoder onto the device
        self.decoder_input_setUp()
Example #7
class DecoderTest(TestCase):
    def setUp(self) -> None:
        self.a_torch_device = torch.device("cuda:0")

        self.input_size_dim = 1
        self.hidden_size = 1024
        self.num_layers = 1
        self.output_size = 9

        self.a_batch_size = 2

        self.decoder = Decoder(self.input_size_dim, self.hidden_size,
                               self.num_layers, self.output_size)
        self.decoder.to(self.a_torch_device)  # move the decoder onto the device
        self.decoder_input_setUp()

    def decoder_input_setUp(self):
        self.decoder_input = torch.tensor([[[-1.], [-1.]]],
                                          device=self.a_torch_device)

        with open("./tests/network/integration/decoder_hidden.p", "rb") as file:
            self.decoder_hidden_tensor = pickle.load(file)
        self.decoder_hidden_tensor = (
            self.decoder_hidden_tensor[0].to(self.a_torch_device),
            self.decoder_hidden_tensor[1].to(self.a_torch_device),
        )

    def assert_predictions_is_valid_dim(self, actual_predictions):
        self.assertEqual(self.a_batch_size, actual_predictions.shape[0])
        self.assertEqual(self.output_size, actual_predictions.shape[1])

    def assert_hidden_is_valid_dim(self, actual_predictions):
        for actual_prediction in actual_predictions:
            self.assertEqual(self.num_layers, actual_prediction.shape[0])
            self.assertEqual(self.a_batch_size, actual_prediction.shape[1])
            self.assertEqual(self.hidden_size, actual_prediction.shape[2])

    def test_whenForwardStep_thenStepIsOk(self):
        predictions, hidden = self.decoder.forward(self.decoder_input,
                                                   self.decoder_hidden_tensor)

        self.assert_predictions_is_valid_dim(predictions)
        self.assert_hidden_is_valid_dim(hidden)
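The pickled decoder_hidden.p fixture is just a pretrained LSTM state pair. If the file is unavailable, an equivalently shaped tuple can be built from zeros, since assert_hidden_is_valid_dim pins each element to (num_layers, batch_size, hidden_size); a sketch only, as zeros will not reproduce the pretrained state:

import torch

num_layers, batch_size, hidden_size = 1, 2, 1024
h_0 = torch.zeros(num_layers, batch_size, hidden_size, device="cuda:0")
c_0 = torch.zeros(num_layers, batch_size, hidden_size, device="cuda:0")
decoder_hidden_tensor = (h_0, c_0)  # same shapes as the pickled fixture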
Example #8
    def setUp(self) -> None:
        self.a_torch_device = torch.device("cpu")

        self.input_size_dim = 1
        self.hidden_size = 1024
        self.num_layers = 1
        self.output_size = 9

        self.decoder = Decoder(self.input_size_dim, self.hidden_size,
                               self.num_layers, self.output_size)
Example #9
    def test_whenInstantiateADecoder_thenParametersAreOk(self):
        decoder = Decoder(
            self.input_size_dim,
            self.hidden_size,
            self.num_layers,
            self.output_size,
            attention_mechanism=False,
        )

        self.assertEqual(self.input_size_dim, decoder.lstm.input_size)
        self.assertEqual(self.hidden_size, decoder.lstm.hidden_size)
        self.assertEqual(self.num_layers, decoder.lstm.num_layers)
        self.assertEqual(self.output_size, decoder.linear.out_features)
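Read together, these assertions pin down the Decoder's structure: an nn.LSTM, an nn.Linear projection, and an nn.LogSoftmax, with attention weights returned as None when attention_mechanism=False. A reconstruction sketch consistent with those assertions (not the actual deepparse source; the attention branch is omitted, and Example #7 reflects an older two-value forward):

import torch
import torch.nn as nn

class Decoder(nn.Module):
    # Sketch reconstructed from the tests above, not the real deepparse code.
    def __init__(self, input_size, hidden_size, num_layers, output_size, attention_mechanism=False):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)  # expects (seq_len, batch, input_size)
        self.linear = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)
        self.attention_mechanism = attention_mechanism

    def forward(self, to_predict, hidden, encoder_outputs, lengths):
        attention_weights = None  # Examples #1 and #3 assert this contract
        # (an attention step would go here; Example #1 shows it involves tanh, matmul and cat)
        output, hidden = self.lstm(to_predict.float(), hidden)  # Example #2 asserts the .float() call
        prediction = self.softmax(self.linear(output[0]))  # output[0]: the single time step, per Example #2
        return prediction, hidden, attention_weights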