Example #1
    def test_tokenizer_with_special_tokens(self, special_tokens, expected):
        tokenizer = TokenEncoder(special_tokens=special_tokens)
        indices = tokenizer.fit_transform(['abcdefg'])[0]
        result = [tokenizer.id2token_[idx] for idx in indices]
        assert result == expected

        joined = tokenizer.inverse_transform([indices])[0]
        assert joined == 'abcdefg'
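The test above receives special_tokens and expected as arguments, so it is presumably driven by a @pytest.mark.parametrize decorator that the extract does not show. A minimal sketch of what that decorator could look like; the enclosing class name and the concrete parameter values are illustrative assumptions (chosen to be consistent with the roundtrip assert above), not taken from the original suite:

import pytest

class TestTokenEncoder:  # hypothetical enclosing class name

    # Hypothetical parametrization; the values are assumptions.
    @pytest.mark.parametrize('special_tokens, expected', [
        # 'abc' registered as a special token would come out as a
        # single token instead of three separate characters
        (['abc'], ['abc', 'd', 'e', 'f', 'g']),
        # with no special tokens, every character is its own token
        ([], list('abcdefg')),
    ])
    def test_tokenizer_with_special_tokens(self, special_tokens, expected):
        ...  # body as in Example #1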
Example #2
    def test_tokenizer_encoded_and_decodes_simple_text(self):
        X = 'abcdef'
        tokenizer = TokenEncoder().fit(X)

        encoded = tokenizer.transform(['fedcba'])[0]
        assert len(encoded) == 6
        assert all(isinstance(idx, int) for idx in encoded)

        decoded = tokenizer.inverse_transform([encoded])[0]
        assert decoded == 'fedcba'
Example #3
    def test_tokenizer_creates_indices(self):
        X = 'abcdef'
        tokenizer = TokenEncoder().fit(X)

        # num tokens + start token + end token
        assert len(tokenizer.token2id_) == 8

        expected_keys = set(
            list('abcdef') + [tokenizer.start_token, tokenizer.end_token])
        assert set(tokenizer.token2id_.keys()) == expected_keys
Example #4
    def test_tokenizer_wo_args(self, line, expected):
        tokenizer = TokenEncoder().fit(line)

        result = tokenizer.transform([line])[0]
        result = [tokenizer.id2token_[idx] for idx in result]
        assert result == expected
Example #5
    def test_detokenize_with_space(self, line):
        tokenizer = TokenEncoder(separator=" ").fit([line])
        encoded = tokenizer.transform([line])
        decoded = tokenizer.inverse_transform(encoded)[0]

        assert decoded == line
Example #6
    def test_tokenizer_split_on_space(self, line, expected):
        tokenizer = TokenEncoder(separator=" ").fit([line])

        encoded = tokenizer.transform([line])[0]
        expected = [tokenizer.token2id_[token] for token in expected]
        assert encoded == expected
Example #7
    def test_tokenizer_with_special_tokens_more_matches(self, line, expected):
        tokenizer = TokenEncoder(special_tokens=['abc']).fit(ALPHABET)

        encoded = tokenizer.transform([line])[0]
        expected = [tokenizer.token2id_[token] for token in expected]
        assert encoded == expected
Example #8
    def encoder(self):
        encoder = TokenEncoder().fit(self.X)
        return encoder
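Judging by the self parameter, this helper sits inside a test class and reads like a pytest fixture that hands each test a freshly fitted encoder. A sketch of how it was presumably declared; the @pytest.fixture decorator, the class name, the sample data, and the import path are all assumptions:

import pytest

from tokenizer import TokenEncoder  # import path is an assumption

class TestTokenEncoder:  # hypothetical enclosing class name
    X = 'abcdef'  # hypothetical sample data

    @pytest.fixture
    def encoder(self):
        encoder = TokenEncoder().fit(self.X)
        return encoder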
Example #9
    def test_tokenizer_with_special_special_token(self, line):
        tokenizer = TokenEncoder(special_tokens=['$']).fit(['hi'])

        encoded = tokenizer.transform([line])
        decoded = tokenizer.inverse_transform(encoded)[0]
        assert decoded == line
Example #10
    def test_tokenizer_with_unsupported_separator(self):
        with pytest.raises(NotImplementedError):
            TokenEncoder(separator='and')
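Across the other examples, TokenEncoder is only ever constructed with separator='' (the default, character-level splitting) or separator=' ' (word-level splitting), and the multi-character 'and' triggers NotImplementedError at construction time. A guard along these lines in the constructor would produce that behavior; this is a sketch under that assumption, not the actual implementation:

class TokenEncoder:
    def __init__(self, separator='', special_tokens=None):
        # Sketch: assume only character-level ('') and space-separated
        # (' ') modes are supported; anything else is rejected early.
        if separator not in ('', ' '):
            raise NotImplementedError(
                f'separator {separator!r} is not supported')
        self.separator = separator
        self.special_tokens = special_tokens or []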
Example #11
def encoder(X):
    encoder = TokenEncoder().fit(X)
    return encoder