Example #1
    def test_tokenizer_with_special_tokens(self, special_tokens, expected):
        tokenizer = TokenEncoder(special_tokens=special_tokens)
        indices = tokenizer.fit_transform(['abcdefg'])[0]
        result = [tokenizer.id2token_[idx] for idx in indices]
        assert result == expected

        joined = tokenizer.inverse_transform([indices])[0]
        assert joined == 'abcdefg'
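The test signature takes special_tokens and expected, so a @pytest.mark.parametrize decorator was presumably stripped when these snippets were extracted. A plausible reconstruction follows; the marker strings '<s>'/'</s>' and the parameter values are assumptions, not taken from the real suite.

    import pytest

    # Hypothetical parametrization; the real decorator and values are not shown.
    @pytest.mark.parametrize('special_tokens, expected', [
        # Assuming 'abc' is matched as one unit and the decoded token stream
        # is framed by start/end markers (marker strings are assumptions):
        (['abc'], ['<s>', 'abc', 'd', 'e', 'f', 'g', '</s>']),
    ])
    def test_tokenizer_with_special_tokens(special_tokens, expected):
        ...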
Example #2
    def test_tokenizer_encoded_and_decodes_simple_text(self):
        X = 'abcdef'
        tokenizer = TokenEncoder().fit(X)

        encoded = tokenizer.transform(['fedcba'])[0]
        assert len(encoded) == 6
        assert all(isinstance(idx, int) for idx in encoded)

        decoded = tokenizer.inverse_transform([encoded])[0]
        assert decoded == 'fedcba'
Example #3
    def test_tokenizer_creates_indices(self):
        X = 'abcdef'
        tokenizer = TokenEncoder().fit(X)

        # num tokens + start token + end token
        assert len(tokenizer.token2id_) == 8

        expected_keys = set(
            list('abcdef') + [tokenizer.start_token, tokenizer.end_token])
        assert set(tokenizer.token2id_.keys()) == expected_keys
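This test pins down the fitted vocabulary: one entry per distinct character plus the two framing markers. A minimal sketch of a fit consistent with that contract (the marker strings are assumptions; the real TokenEncoder implementation is not shown):

    class MinimalTokenEncoder:
        """Toy sketch matching the vocabulary test above; not the real class."""
        start_token = '<s>'   # assumed marker string
        end_token = '</s>'    # assumed marker string

        def fit(self, X):
            # One id per distinct character in the corpus, plus the two
            # markers: for X = 'abcdef' this yields the expected 8 entries.
            tokens = sorted(set(''.join(X))) + [self.start_token, self.end_token]
            self.token2id_ = {tok: i for i, tok in enumerate(tokens)}
            self.id2token_ = {i: tok for tok, i in self.token2id_.items()}
            return self

With this sketch, MinimalTokenEncoder().fit('abcdef').token2id_ has exactly the eight keys the assertions check.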
Example #4
    def test_tokenizer_wo_args(self, line, expected):
        tokenizer = TokenEncoder().fit(line)

        result = tokenizer.transform([line])[0]
        result = [tokenizer.id2token_[idx] for idx in result]
        assert result == expected
Example #5
    def test_detokenize_with_space(self, line):
        tokenizer = TokenEncoder(separator=" ").fit([line])
        encoded = tokenizer.transform([line])
        decoded = tokenizer.inverse_transform(encoded)[0]

        assert decoded == line
Example #6
    def test_tokenizer_split_on_space(self, line, expected):
        tokenizer = TokenEncoder(separator=" ").fit([line])

        encoded = tokenizer.transform([line])[0]
        expected = [tokenizer.token2id_[token] for token in expected]
        assert encoded == expected
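With separator=" ", the encoder evidently splits on whitespace instead of per character, so expected here is a list of words. A hypothetical concrete run (the input sentence is an assumption, and the snippet does not show whether start/end markers appear in the transform output):

    line = 'the quick fox'                    # hypothetical input
    tokenizer = TokenEncoder(separator=' ').fit([line])
    encoded = tokenizer.transform([line])[0]
    # Word-level ids; decoding recovers the words, possibly framed by
    # the encoder's start/end markers:
    print([tokenizer.id2token_[i] for i in encoded])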
Example #7
    def test_tokenizer_with_special_tokens_more_matches(self, line, expected):
        tokenizer = TokenEncoder(special_tokens=['abc']).fit(ALPHABET)

        encoded = tokenizer.transform([line])[0]
        expected = [tokenizer.token2id_[token] for token in expected]
        assert encoded == expected
Example #8
    def encoder(self):
        encoder = TokenEncoder().fit(self.X)
        return encoder
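This reads like a pytest fixture defined on the test class, so a @pytest.fixture decorator was presumably lost in extraction. A plausible full form (the decorator and the value of the X attribute are assumptions):

    import pytest

    class TestTokenEncoder:
        X = 'abcdef'  # assumed sample corpus; the real attribute is not shown

        @pytest.fixture  # assumed: the method builds and returns a fitted encoder
        def encoder(self):
            encoder = TokenEncoder().fit(self.X)
            return encoder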
Example #9
    def test_tokenizer_with_special_special_token(self, line):
        tokenizer = TokenEncoder(special_tokens=['$']).fit(['hi'])

        encoded = tokenizer.transform([line])
        decoded = tokenizer.inverse_transform(encoded)[0]
        assert decoded == line
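The point here is presumably that '$' enters the vocabulary via special_tokens even though it never occurs in the fitted corpus 'hi', so lines containing it still round-trip. A hypothetical concrete instance (the input line is an assumption):

    tokenizer = TokenEncoder(special_tokens=['$']).fit(['hi'])
    encoded = tokenizer.transform(['hi$hi'])  # 'hi$hi' is a hypothetical line
    assert tokenizer.inverse_transform(encoded)[0] == 'hi$hi'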
Example #10
    def test_tokenizer_with_unsupported_separator(self):
        with pytest.raises(NotImplementedError):
            TokenEncoder(separator='and')
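Per this test, a multi-character separator such as 'and' is rejected at construction time, while the earlier examples show that None and a single space are accepted. A minimal guard consistent with that behaviour (a sketch, not the library's actual code):

    class SeparatorGuardSketch:
        def __init__(self, separator=None, special_tokens=None):
            # Only character-level tokenization (separator=None) or a single
            # one-character separator is exercised by these tests.
            if separator is not None and len(separator) != 1:
                raise NotImplementedError(
                    f'separator {separator!r} is not supported')
            self.separator = separator
            self.special_tokens = special_tokens or []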
Example #11
def encoder(X):
    encoder = TokenEncoder().fit(X)
    return encoder