def test_givenSeq2SeqModelRetrained_whenLoadRetrainedWeights_thenLoadProperly(
            self, torch_nn_mock, torch_mock):
        """Loading retrained weights must call torch.load with the retrain
        path mapped onto the CPU device, and pass the loaded layer params on
        to the mocked torch.nn component.
        """
        all_layers_params_mock = MagicMock()
        # Any loaded layer reports len() == decoder_output_size — presumably
        # so _load_weights sees dimensions matching the model (TODO confirm).
        all_layers_params_mock.__getitem__(
        ).__len__.return_value = self.decoder_output_size
        torch_mock.load.return_value = all_layers_params_mock

        seq2seq_model = Seq2SeqModel(
            self.a_cpu_device,
            input_size=self.encoder_input_size_dim,
            encoder_hidden_size=self.encoder_hidden_size,
            encoder_num_layers=self.encoder_num_layers,
            decoder_hidden_size=self.decoder_hidden_size,
            decoder_num_layers=self.decoder_num_layers,
            output_size=self.decoder_output_size,
            verbose=True,
        )
        seq2seq_model._load_weights(self.a_fake_retrain_path)

        torch_mock.assert_has_calls([
            call.load(self.a_fake_retrain_path, map_location=self.a_cpu_device)
        ])

        torch_nn_mock.assert_called()
        # BUG FIX: original read "asser_has_calls" — a typo that merely
        # creates a child mock attribute and never asserts anything, so this
        # check silently passed no matter what was called.
        torch_nn_mock.assert_has_calls([call(all_layers_params_mock)])
    def test_whenInstantiateASeq2SeqModelGPU_thenParametersAreOk(self):
        """Every architecture hyperparameter given to the constructor must be
        propagated to the encoder/decoder submodules, and their weights must
        live on the requested (GPU) device.
        """
        model = Seq2SeqModel(
            self.a_torch_device,
            input_size=self.encoder_input_size_dim,
            encoder_hidden_size=self.encoder_hidden_size,
            encoder_num_layers=self.encoder_num_layers,
            decoder_hidden_size=self.decoder_hidden_size,
            decoder_num_layers=self.decoder_num_layers,
            output_size=self.decoder_output_size,
        )

        self.assertEqual(self.a_torch_device, model.device)

        # Encoder side: dimensions and device of the LSTM.
        encoder_lstm = model.encoder.lstm
        self.assertEqual(self.encoder_input_size_dim, encoder_lstm.input_size)
        self.assertEqual(self.encoder_hidden_size, encoder_lstm.hidden_size)
        self.assertEqual(self.encoder_num_layers, encoder_lstm.num_layers)
        self.assertEqual(self.a_torch_device,
                         encoder_lstm.all_weights[0][0].device)

        # Decoder side: LSTM dimensions/device plus the final linear layer.
        decoder_lstm = model.decoder.lstm
        self.assertEqual(self.decoder_input_size_dim, decoder_lstm.input_size)
        self.assertEqual(self.decoder_hidden_size, decoder_lstm.hidden_size)
        self.assertEqual(self.decoder_num_layers, decoder_lstm.num_layers)
        self.assertEqual(self.decoder_output_size,
                         model.decoder.linear.out_features)
        self.assertEqual(self.a_torch_device,
                         decoder_lstm.all_weights[0][0].device)
 def setUp(self) -> None:
     """Build the pre-trained model and the fixture inputs shared by the tests."""
     super().setUp()
     # Model under test, placed on the test (GPU) device with default dims.
     self.pre_trained_seq2seq_model = Seq2SeqModel(self.a_torch_device)
     # fasttext is the simplest embedding setup (bpemb adds an embedding layer).
     self.encoder_input_setUp("fasttext")
     self.none_target = None  # no target vector, i.e. no teacher forcing
     self.a_value_greater_than_threshold = 0.1
 def test_whenSameOutput_thenReturnTrue(self):
     """same_output_dim must report True when queried with the very output
     dimension the model was constructed with.
     """
     model = Seq2SeqModel(
         self.a_cpu_device,
         input_size=self.encoder_input_size_dim,
         encoder_hidden_size=self.encoder_hidden_size,
         encoder_num_layers=self.encoder_num_layers,
         decoder_hidden_size=self.decoder_hidden_size,
         decoder_num_layers=self.decoder_num_layers,
         output_size=self.decoder_output_size,
     )

     self.assertTrue(model.same_output_dim(self.decoder_output_size))
 def test_givenSeq2seqModel_whenLoadPreTrainedWeightsVerboseCPU_thenWarningsRaised(
         self, torch_nn_mock, torch_mock, isfile_mock, last_version_mock):
     """With verbose=True on CPU, loading pre-trained weights whose local
     copy is outdated must emit a UserWarning (download is mocked out).
     """
     model = Seq2SeqModel(
         self.a_cpu_device,
         input_size=self.encoder_input_size_dim,
         encoder_hidden_size=self.encoder_hidden_size,
         encoder_num_layers=self.encoder_num_layers,
         decoder_hidden_size=self.decoder_hidden_size,
         decoder_num_layers=self.decoder_num_layers,
         output_size=self.decoder_output_size,
         verbose=True,
     )
     # The weights file exists locally but is not the latest version.
     isfile_mock.return_value = True
     last_version_mock.return_value = False

     with patch("deepparse.network.seq2seq.download_weights"), \
             self.assertWarns(UserWarning):
         model._load_pre_trained_weights("a_model_type")
Example #6
0
    def setUp(self) -> None:
        """Build the CPU model under test plus the fixture inputs and target."""
        super().setUp()

        # Model under test, instantiated on CPU with the shared test dims.
        self.pre_trained_seq2seq_model = Seq2SeqModel(
            self.a_cpu_device,
            input_size=self.input_size,
            encoder_hidden_size=self.encoder_hidden_size,
            encoder_num_layers=self.num_layers,
            decoder_hidden_size=self.decoder_hidden_size,
            decoder_num_layers=self.num_layers,
            output_size=self.output_size,
        )
        # fasttext is the simplest embedding setup (bpemb adds an embedding layer).
        self.encoder_input_setUp("fasttext", self.a_cpu_device)

        self.none_target = None  # no target vector, i.e. no teacher forcing
        self.a_value_greater_than_threshold = 0.1
        self.a_target_vector = torch.tensor(
            [[0, 1, 1, 4, 5, 8], [1, 0, 3, 8, 0, 0]],
            device=self.a_cpu_device,
        )
    def test_givenSeq2SeqModelRetrained_whenLoadRetrainedWeightsNewTagModel_thenLoadProperDict(
            self, torch_nn_mock, torch_mock):
        """When the retrained checkpoint is a dict, _load_weights must read
        its entries through .get().
        """
        # A dict-spec'd mock stands in for the checkpoint returned by torch.load.
        state_dict_mock = MagicMock(spec=dict)
        state_dict_mock.__getitem__(
        ).__len__.return_value = self.decoder_output_size
        torch_mock.load.return_value = state_dict_mock

        model = Seq2SeqModel(
            self.a_cpu_device,
            input_size=self.encoder_input_size_dim,
            encoder_hidden_size=self.encoder_hidden_size,
            encoder_num_layers=self.encoder_num_layers,
            decoder_hidden_size=self.decoder_hidden_size,
            decoder_num_layers=self.decoder_num_layers,
            output_size=self.decoder_output_size,
            verbose=True,
        )
        model._load_weights(self.a_fake_retrain_path)

        state_dict_mock.get.assert_called()
    def test_whenHandleNewOutputDim_thenProperlyHandleNewDim(self):
        """handle_new_output_dim must update both the recorded output size
        and the decoder's final linear layer.
        """
        model = Seq2SeqModel(
            self.a_cpu_device,
            input_size=self.encoder_input_size_dim,
            encoder_hidden_size=self.encoder_hidden_size,
            encoder_num_layers=self.encoder_num_layers,
            decoder_hidden_size=self.decoder_hidden_size,
            decoder_num_layers=self.decoder_num_layers,
            output_size=self.decoder_output_size,
        )

        new_dim = 1
        model.handle_new_output_dim(new_dim)

        # Both the bookkeeping attribute and the actual layer must reflect it.
        self.assertEqual(new_dim, model.output_size)
        self.assertEqual(new_dim, model.decoder.linear.out_features)