Example #1
def tokenize_message(message, tokenizer_type, spacer_annotate,
                     preserve_placeholders, spacer_new):
    tokenizer = Tokenizer(tokenizer_type,
                          spacer_annotate=spacer_annotate,
                          preserve_placeholders=preserve_placeholders,
                          spacer_new=spacer_new)
    return tokenizer.tokenize(message)[0]
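For illustration, a minimal call to the helper above, assuming "from pyonmttok import Tokenizer" is already in scope (as in Example #13); the sample message and option values are hypothetical:

# Hypothetical call; the message string and option values are illustrative only.
tokens = tokenize_message("Disk quota exceeded on /data/user01",
                          tokenizer_type="conservative",
                          spacer_annotate=True,
                          preserve_placeholders=True,
                          spacer_new=True)
print(tokens)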
Example #2
 def __init__(self, config_path):
     self.config = json.loads(jsonnet_evaluate_file(config_path))
     self.lang_detect_model_path = self.config["lang_detect_model_path"]
     self.cat_detect_model_path = self.config["cat_detect_model_path"]
     self.max_tokens = self.config.get("max_tokens")
     self.is_lower = self.config["is_lower"]
     self.languages = self.config.get("languages", ["ru", "en"])
     self.is_news_only = self.config.get("is_news_only", False)
     assert os.path.exists(
         self.lang_detect_model_path), "No language detection model found"
     assert os.path.exists(
         self.cat_detect_model_path), "No category detection model found"
     self.lang_detect_model = ft_load_model(self.lang_detect_model_path)
     self.cat_detect_model = ft_load_model(self.cat_detect_model_path)
     self.tokenizer = Tokenizer("conservative", joiner_annotate=False)
Example #3
    def __init__(self, messages, tokenizer_type):

        self.messages = messages
        Tokens.TOKENIZER = Tokenizer(tokenizer_type,
                                     spacer_annotate=True,
                                     preserve_placeholders=True,
                                     spacer_new=True)
Example #4
class DocumentsCleaner:
    def __init__(self, config_path):
        self.config = json.loads(jsonnet_evaluate_file(config_path))
        self.lang_detect_model_path = self.config["lang_detect_model_path"]
        self.cat_detect_model_path = self.config["cat_detect_model_path"]
        self.max_tokens = self.config.get("max_tokens")
        self.is_lower = self.config["is_lower"]
        self.languages = self.config.get("languages", ["ru", "en"])
        self.is_news_only = self.config.get("is_news_only", False)
        assert os.path.exists(
            self.lang_detect_model_path), "No language detection model found"
        assert os.path.exists(
            self.cat_detect_model_path), "No category detection model found"
        self.lang_detect_model = ft_load_model(self.lang_detect_model_path)
        self.cat_detect_model = ft_load_model(self.cat_detect_model_path)
        self.tokenizer = Tokenizer("conservative", joiner_annotate=False)

    def preprocess(self, text):
        text = str(text).strip().replace("\n", " ").replace("\xa0", " ")
        if self.is_lower:
            text = text.lower()
        tokens, _ = self.tokenizer.tokenize(text)
        if self.max_tokens:
            tokens = tokens[:self.max_tokens]
        return " ".join(tokens)

    def __call__(self, document) -> Optional[Document]:
        title = document.get("title")
        description = document.get("description", "")
        text = document.get("text")

        lang_text = text[:100]
        lang_text_sample = " ".join((title, description, lang_text))
        lang_text_sample = lang_text_sample.replace("  ",
                                                    " ").replace("\n", " ")
        (lang_label, ), (lang_prob, ) = self.lang_detect_model.predict(
            lang_text_sample, k=1)
        lang_label = lang_label[FASTTEXT_LABEL_OFFSET:]
        document["language"] = lang_label
        if lang_label not in self.languages and (lang_label != "ru"
                                                 or lang_prob < 0.6):
            return None

        document["patched_title"] = self.preprocess(title)
        document["patched_text"] = self.preprocess(text)

        cat_text_sample = document["patched_title"] + " " + document[
            "patched_text"]
        cat_text_sample = self.preprocess(cat_text_sample)
        (cat_label, ), (cat_prob, ) = self.cat_detect_model.predict(
            cat_text_sample, k=1)
        cat_label = cat_label[FASTTEXT_LABEL_OFFSET:]
        document["category"] = cat_label
        if self.is_news_only and cat_label == "not_news":
            return None
        return document
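A hedged usage sketch for the class above: the config path and document fields are hypothetical, and the jsonnet config must point to existing fastText models for language and category detection.

# Hypothetical usage; "cleaner_config.jsonnet" and the document values are invented.
cleaner = DocumentsCleaner("cleaner_config.jsonnet")
document = {
    "title": "Some headline",
    "description": "",
    "text": "Body of the article..."
}
cleaned = cleaner(document)
if cleaned is not None:
    print(cleaned["language"], cleaned["category"])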
Example #5
 def __init__(
     self,
     model_paths={
         "nn": "./models/classifier_3k_v3_4000.pth",
         "svm": "./models/svm.joblib",
         "knn": "./models/knn.joblib",
         "dt": "./models/dt.joblib"
     }):
     self.tokenizer = Tokenizer('conservative')
     self.smoothing = SmoothingFunction()
     self.classifiers = {
         key: self.load_sklearn_classifier(val)
         for key, val in model_paths.items() if key != "nn"
     }
     self.classifiers["nn"] = self.load_nn_classifier(model_paths["nn"])
     self.len_norm = self.load_sklearn_classifier(
         "./models/len_norm.joblib")
     self.src_norm = self.load_sklearn_classifier(
         "./models/src_norm.joblib")
Example #6
 def process(self):
     """
     The best tokenizer for error messages is TreebankWordTokenizer (nltk).
     It's good at tokenizing file paths.
     Alternative tokenizer. It performs much faster, but worse in tokenizing of paths.
     It splits all paths by "/".
     TODO: This method should be optimized to the same tokenization quality as TreebankWordTokenizer
     :return:
     """
     tokenized = []
     if self.type == 'nltk':
         for line in self.messages:
             tokenized.append(TreebankWordTokenizer().tokenize(line))
     elif self.type == 'pyonmttok':
         tokenizer = Tokenizer("space", joiner_annotate=False, segment_numbers=False)
         for line in self.messages:
             tokens, features = tokenizer.tokenize(line)
             tokenized.append(tokens)
     self.tokenized = self.clean_tokens(tokenized)
     return self.tokenized
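To see the trade-off described in the docstring, a small side-by-side sketch of the two tokenizers; the input line is invented:

# Illustrative comparison only; the log line is made up.
from nltk.tokenize import TreebankWordTokenizer
from pyonmttok import Tokenizer

line = "cannot open /var/log/app/error.log: permission denied"
nltk_tokens = TreebankWordTokenizer().tokenize(line)
onmt_tokens, _ = Tokenizer("space", joiner_annotate=False,
                           segment_numbers=False).tokenize(line)
print(nltk_tokens)
print(onmt_tokens)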
Example #7
def extend_file(file, size):
    tokenizer = Tokenizer('conservative')
    sentences = file.read().split('\n')
    new_sentence = ""
    new_set = []
    while sentences:
        tok_sent, _ = tokenizer.tokenize(new_sentence)
        if len(tok_sent) < size:
            sent = sentences[0]
            sentences.remove(sent)
            if new_sentence != "":
                new_sentence += " "
            new_sentence += sent
        else:
            new_set.append(new_sentence)
            new_sentence = ""

    if new_sentence != "":
        new_set.append(new_sentence)

    return '\n'.join(new_set)
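A minimal usage sketch for extend_file; the file name is hypothetical, and any object with a read() method returning newline-separated sentences works:

# Hypothetical usage; "corpus.txt" is an invented file name.
with open("corpus.txt", encoding="utf-8") as f:
    merged = extend_file(f, size=50)
print(merged.split("\n")[:3])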
Example #8
def tokenize_list(list):
    tokenizer = Tokenizer('conservative')
    return [tokenizer.tokenize(line)[0] for line in list]
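A quick illustrative call, with invented input strings:

# Hypothetical usage of tokenize_list.
print(tokenize_list(["Hello, world!", "Error: file not found."]))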
Example #9
 def __init__(self):
     self.patterns = None
     self.tokenizer = Tokenizer("conservative", spacer_annotate=True)
Example #10
class Detector:
    def __init__(
        self,
        model_paths={
            "nn": "./models/classifier_3k_v3_4000.pth",
            "svm": "./models/svm.joblib",
            "knn": "./models/knn.joblib",
            "dt": "./models/dt.joblib"
        }):
        self.tokenizer = Tokenizer('conservative')
        self.smoothing = SmoothingFunction()
        self.classifiers = {
            key: self.load_sklearn_classifier(val)
            for key, val in model_paths.items() if key != "nn"
        }
        self.classifiers["nn"] = self.load_nn_classifier(model_paths["nn"])
        self.len_norm = self.load_sklearn_classifier(
            "./models/len_norm.joblib")
        self.src_norm = self.load_sklearn_classifier(
            "./models/src_norm.joblib")

    def load_sklearn_classifier(self, model_path):
        try:
            return load(model_path)
        except FileNotFoundError:
            return None

    def load_nn_classifier(self, model_path):
        classifier = Classifier()
        classifier.load_state_dict(torch.load(model_path))
        classifier.float()
        return classifier

    def predict(self, text, src, algorithm):
        other_langs = [l for l in LANG_POOL if l != src]
        results = {lang: [text] for lang in other_langs}
        data = []

        last_back = text
        for lang in other_langs:
            for _ in range(0, 2):
                translation = translate(last_back, src, lang)
                last_back = translate(translation, lang, src)
                results[lang].append(last_back)

        for lang in LANG_POOL:
            if lang != src:
                for i in range(0, 2):
                    ref_sent, _ = self.tokenizer.tokenize(results[lang][i])
                    hypothesis, _ = self.tokenizer.tokenize(results[lang][i +
                                                                          1])
                    bleu = sentence_bleu(
                        [ref_sent],
                        hypothesis,
                        smoothing_function=self.smoothing.method4,
                        weights=(0.25, 0.25, 0.25, 0.25))
                    data.append(bleu)
            else:
                data += [0.0, 0.0]

        data.append(
            self.src_norm.transform(
                np.array([LANG_POOL.index(src)]).reshape(-1, 1))[0][0])
        data.append(
            self.len_norm.transform(
                np.array([len(self.tokenizer.tokenize(text)[0])
                          ]).reshape(-1, 1))[0][0])

        data = np.array(data).reshape(1, -1)

        prediction = None
        if algorithm != "nn":
            prediction = self.classifiers[algorithm].predict_proba(data)
        else:
            tensor = torch.from_numpy(data)
            prediction = self.classifiers["nn"](tensor.float()).tolist()

        return list(prediction[0])
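A hedged usage sketch for Detector.predict; it assumes the sklearn/torch models referenced in __init__ exist on disk and that translate() and LANG_POOL are defined in the surrounding module. The input text and language code are invented:

# Hypothetical usage; "en" must be a member of LANG_POOL for this to make sense.
detector = Detector()
probabilities = detector.predict("This sentence may be machine translated.",
                                 src="en", algorithm="svm")
print(probabilities)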
Example #11
 def __init__(self):
     self.word_map = load_dakshina_map()
     self.tokenizer = Tokenizer('aggressive')
Example #12
class Transliterator:
    def __init__(self):
        self.word_map = load_dakshina_map()
        self.tokenizer = Tokenizer('aggressive')

    def process_line(self, line, rule_based_only=False):
        processed_words = []

        line = line.strip()
        for word in line.split():
            processed_tokens = []

            for token in self.tokenizer.tokenize(word)[0]:
                if BANGLA_CHARS.search(token):
                    current_transliteration = transliterate.process(
                        'Bengali',
                        'Custom',
                        token,
                        pre_options=[
                            'AnuChandraEqDeva', 'RemoveSchwaHindi',
                            'SchwaFinalBengali'
                        ],
                        post_options=['RemoveDiacritics'])

                    if not rule_based_only:
                        transliteration_map = self.word_map.get(token, {})
                        if transliteration_map:
                            # approach 1: use only edit distance
                            # selected_transliteration = min(transliteration_map, key=lambda k: distance(current_transliteration, k))

                            # approach 2: use attestation scores with edit distance as tiebreaker
                            highest_scored_transliteration = max(
                                transliteration_map,
                                key=lambda k: int(transliteration_map[k]))
                            highest_score = transliteration_map[
                                highest_scored_transliteration]
                            candidate_transliterations = [
                                k for k, v in transliteration_map.items()
                                if v == highest_score
                            ]

                            if len(candidate_transliterations) > 1:
                                selected_transliteration = min(
                                    candidate_transliterations,
                                    key=lambda k: distance(
                                        current_transliteration, k))
                            else:
                                selected_transliteration = highest_scored_transliteration

                            processed_tokens.append(selected_transliteration)

                        else:
                            processed_tokens.append(current_transliteration)
                    else:
                        processed_tokens.append(current_transliteration)

                else:
                    processed_tokens.append(token)

            processed_words.append(''.join(processed_tokens))

        return ' '.join(processed_words)
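A hedged usage sketch for the Transliterator class; it assumes load_dakshina_map, BANGLA_CHARS, the transliterate module, and distance() from the surrounding code are available, and the Bengali sample line is invented:

# Hypothetical usage; rule_based_only=True skips the attestation-score lookup.
transliterator = Transliterator()
print(transliterator.process_line("আমার নাম কারিম", rule_based_only=True))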
Example #13
import string

from pyonmttok import Tokenizer
from pymorphy2 import MorphAnalyzer

tokenizer = Tokenizer("conservative", joiner_annotate=False)
morph = MorphAnalyzer()


def tokenize(text, lower=True):
    text = str(text).strip().replace("\n", " ").replace("\xa0", " ")
    if lower:
        text = text.lower()
    tokens, _ = tokenizer.tokenize(text)
    return tokens


def tokenize_to_lemmas(text):
    tokens = tokenize(text)
    tokens = filter(lambda x: x not in string.punctuation, tokens)
    tokens = filter(lambda x: not x.isnumeric(), tokens)
    tokens = filter(lambda x: len(x) >= 2, tokens)
    tokens = [morph.parse(token)[0].normal_form for token in tokens]
    return tokens
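An illustrative call to the lemmatization helper above; the Russian sample sentence is made up:

# Example input; pymorphy2 returns normal forms for the Russian tokens.
print(tokenize_to_lemmas("Мама мыла раму 10 раз."))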