# Japanese: lemmatize and drop stopwords.
def __call__(self, text): return process_text(text, load_nlp_pipeline('jp'), return_lemma=True, filter_stopwords=True)
# Chinese: plain tokenization.
def __call__(self, text): return process_text(text, load_nlp_pipeline('zh'))
# Romanian: lemmatize.
def __call__(self, text): return process_text(text, load_nlp_pipeline('ro'), return_lemma=True)
# Japanese: drop numbers, punctuation, and short tokens.
def __call__(self, text): return process_text(text, load_nlp_pipeline('jp'), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True)
# English: lemmatize.
def __call__(self, text): return process_text(text, load_nlp_pipeline('en'), return_lemma=True)
# Romanian: drop stopwords.
def __call__(self, text): return process_text(text, load_nlp_pipeline('ro'), filter_stopwords=True)
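# All of the tokenizers above funnel through process_text(text, pipeline, **flags).
# Based on the keyword arguments used in this module, the flags are assumed to mean:
#   return_lemma=True        -> emit each token's lemma instead of its surface form
#   filter_stopwords=True    -> drop stopword tokens
#   filter_numbers=True      -> drop numeric tokens
#   filter_punctuation=True  -> drop punctuation tokens
#   filter_short_tokens=True -> drop very short tokens

# Module-level English helpers; load_nlp_pipeline() with no argument is assumed
# to default to the English pipeline, as the function names imply.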
def english_lemmatize_remove_stopwords(text): return process_text(text, load_nlp_pipeline(), return_lemma=True, filter_stopwords=True)
def english_lemmatize(text): return process_text(text, load_nlp_pipeline(), return_lemma=True)
def english_tokenize_remove_stopwords(text): return process_text(text, load_nlp_pipeline(), filter_stopwords=True)
def english_tokenize_filter(text): return process_text(text, load_nlp_pipeline(), filter_numbers=True, filter_punctuation=True, filter_short_tokens=True)
def english_tokenize(text): return process_text(text, load_nlp_pipeline())
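# A minimal usage sketch (hypothetical sample text; assumes each helper returns
# a list of token strings, as the process_text calls above imply):
if __name__ == '__main__':
    sample = 'The striped bats were hanging on their feet.'
    print(english_tokenize(sample))                    # surface tokens
    print(english_lemmatize(sample))                   # lemmas
    print(english_lemmatize_remove_stopwords(sample))  # lemmas minus stopwords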