def __init__(self, bert_vocab_file_path, word_processor=word_processor.BaseWordProcessor()):
    """Build a BERT-backed tokenizer around the given vocab file.

    Args:
        bert_vocab_file_path: path (or model name) handed to
            ``BertTokenizer.from_pretrained``.
        word_processor: post-processing helper forwarded to the base class.
    """
    # NOTE(review): the default word_processor is constructed once, when this
    # method is defined, and shared by every instance — confirm
    # BaseWordProcessor carries no per-instance state.
    super().__init__(word_processor)
    self.bert_vocab_file_path = bert_vocab_file_path
    # Case matters for this pipeline, hence do_lower_case=False.
    tokenizer = BertTokenizer.from_pretrained(bert_vocab_file_path,
                                              do_lower_case=False)
    self.bert_tokenizer = tokenizer
def __init__(self, word_processor=word_processor.BaseWordProcessor(), lang='en',
             corenlp: my_corenlp.StanfordCoreNLP = None):
    """Wrap a Stanford CoreNLP client, creating a server if none is supplied.

    Args:
        word_processor: post-processing helper forwarded to the base class.
        lang: language code used when a new CoreNLP server must be started.
        corenlp: optional pre-built CoreNLP client; when None, a server is
            created via ``corenlp_factory.create_corenlp_server``.
    """
    # NOTE(review): the default word_processor is constructed once, at
    # definition time, and shared by every instance — confirm
    # BaseWordProcessor carries no per-instance state.
    super().__init__(word_processor)
    # Fix: test explicitly for None instead of truthiness (`if not ...`),
    # so a caller-supplied client that happens to be falsy is not silently
    # discarded and replaced by a freshly spawned server.
    if corenlp is None:
        corenlp = corenlp_factory.create_corenlp_server(lang=lang)
    self.stanford_nlp = corenlp
def __init__(self, word_processor=word_processor.BaseWordProcessor()):
    """Initialise the tokenizer with spaCy's small English pipeline.

    Args:
        word_processor: post-processing helper forwarded to the base class.
    """
    super().__init__(word_processor)
    # Loading the model here means the (slow) spaCy load happens once per
    # instance, at construction time.
    pipeline = spacy.load("en_core_web_sm")
    self.spacy_nlp = pipeline
def __init__(self, word_processor=word_processor.BaseWordProcessor(), lang='en'):
    """Initialise the tokenizer with a freshly created CoreNLP server.

    Args:
        word_processor: post-processing helper forwarded to the base class.
        lang: language code passed to the CoreNLP server factory.
    """
    super().__init__(word_processor)
    # Unlike the injectable variant, this class always starts its own server.
    server = corenlp_factory.create_corenlp_server(lang=lang)
    self.stanford_nlp = server
def __init__(self, word_processor=word_processor.BaseWordProcessor()):
    """Delegate all initialisation to the base tokenizer class.

    Args:
        word_processor: post-processing helper forwarded to the base class.
    """
    super().__init__(word_processor)
def __init__(self, word_processor=word_processor.BaseWordProcessor()):
    """Store the word processor used by this (base) tokenizer.

    Args:
        word_processor: helper applied to tokens after tokenization.
    """
    # NOTE(review): the default BaseWordProcessor() is created once, when this
    # method is defined, and shared across all instances constructed without an
    # explicit argument — confirm it holds no mutable per-instance state.
    self.word_processor = word_processor