def construct_component_from_pipe_identifier(language, sparknlp_reference):
    '''
    Creates a list of components from a Spark NLP Pipeline reference:
    1. Download the pipeline
    2. Unpack the pipeline into its annotators and create a list of NLU components
    3. Return the list of NLU components
    :param language: language of the pipeline
    :param sparknlp_reference: reference to a Spark NLP pretrained pipeline
    :return: each element of the Spark NLP pipeline wrapped as an NLU component inside a list
    '''
    logger.info("Starting Spark NLP to NLU pipeline conversion process")
    from sparknlp.pretrained import PretrainedPipeline
    if 'language' in sparknlp_reference: language = 'xx'  # special edge case for lang detectors
    pipe = PretrainedPipeline(sparknlp_reference, lang=language)
    constructed_components = []
    for component in pipe.light_model.pipeline_model.stages:
        logger.info("Extracting model from Spark NLP pipeline: %s and creating Component", component)
        parsed = str(component).split('_')[0].lower()
        logger.info("Parsed Component for : %s", parsed)
        if 'NerConverter' in component.name:
            constructed_components.append(Util(component_name='ner_converter', model=component))
        elif parsed == 'match':
            constructed_components.append(nlu.Matcher(model=component))
        elif parsed == 'document':
            constructed_components.append(nlu.Util(model=component))
        elif parsed == 'sentence':
            # todo differentiate normal and deep detector
            constructed_components.append(nlu.Util(component_name='sentence_detector', model=component))
        elif parsed == 'regex':
            constructed_components.append(nlu.Matcher(component_name='regex', model=component))
        elif parsed == 'text':
            constructed_components.append(nlu.Matcher(model=component))
        elif parsed == 'spell':
            constructed_components.append(nlu.SpellChecker(model=component))
        elif parsed == 'lemmatizer':
            constructed_components.append(nlu.lemmatizer.Lemmatizer(model=component))
        elif parsed == 'normalizer':
            constructed_components.append(nlu.normalizer.Normalizer(model=component))
        elif parsed == 'stemmer':
            constructed_components.append(nlu.stemmer.Stemmer(model=component))
        elif parsed == 'pos' or parsed == 'language':
            constructed_components.append(nlu.Classifier(model=component))
        elif parsed == 'word':
            constructed_components.append(nlu.Embeddings(model=component))
        elif parsed == 'ner' or parsed == 'nerdlmodel':
            constructed_components.append(nlu.Classifier(component_name='ner', model=component))
        elif parsed == 'dependency':
            constructed_components.append(nlu.Util(model=component))
        elif parsed == 'typed':
            constructed_components.append(nlu.Util(model=component))  # todo util abuse
        elif parsed == 'multi':
            constructed_components.append(nlu.Util(model=component))  # todo util abuse
        elif parsed == 'sentimentdlmodel':
            constructed_components.append(nlu.Classifier(model=component))
        elif parsed in ['universal', 'bert', 'albert', 'elmo', 'xlnet', 'glove', 'electra', 'covidbert', 'small_bert', '']:
            constructed_components.append(nlu.Embeddings(model=component))
        elif parsed == 'vivekn':
            constructed_components.append(nlu.Classifier(component_name='vivekn', model=component))
        elif parsed == 'chunker':
            constructed_components.append(nlu.chunker.Chunker(model=component))
        elif parsed == 'ngram':
            constructed_components.append(nlu.chunker.Chunker(model=component))
        elif 'e2e' in parsed:
            constructed_components.append(nlu.Embeddings(model=component))
        elif parsed == 'embeddings_chunk':
            constructed_components.append(embeddings_chunker.EmbeddingsChunker(model=component))
        elif parsed == 'stopwords':
            constructed_components.append(nlu.StopWordsCleaner(model=component))
        logger.info("Extracted into NLU Component type : %s", parsed)
    if None in constructed_components:
        logger.exception("EXCEPTION: Could not infer component type for lang=%s and sparknlp_reference=%s during pipeline conversion", language, sparknlp_reference)
        return None
    return constructed_components
def construct_component_from_identifier(language, component_type='', dataset='', component_embeddings='', nlu_ref='', nlp_ref=''):
    '''
    Creates an NLU component from a pretrained Spark NLP model reference or class reference.
    Class references will return default pretrained models.
    :param language: language of the Spark NLP model reference
    :param component_type: class which will be used to instantiate the model
    :param dataset: dataset that the model was trained on
    :param component_embeddings: embeddings that the model was trained on (if any)
    :param nlu_ref: full user request
    :param nlp_ref: full Spark NLP reference
    :return: an NLU component which wraps the Spark NLP pretrained model and the class for that model
    '''
    logger.info('Creating singular NLU component for type=%s sparknlp_ref=%s , dataset=%s, language=%s , nlu_ref=%s ',
                component_type, nlp_ref, dataset, language, nlu_ref)
    try:
        if any(x in NameSpace.seq2seq for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return Seq2Seq(annotator_class=component_type, language=language, get_default=False, nlp_ref=nlp_ref, configs=dataset)
        elif any(x in NameSpace.word_embeddings and x not in NameSpace.classifiers
                 for x in [nlp_ref, nlu_ref, dataset, component_type] + dataset.split('_')):
            return Embeddings(get_default=False, nlp_ref=nlp_ref, nlu_ref=nlu_ref, language=language)
        elif any(x in NameSpace.sentence_embeddings and x not in NameSpace.classifiers
                 for x in [nlp_ref, nlu_ref, dataset, component_type] + dataset.split('_')):
            return Embeddings(get_default=False, nlp_ref=nlp_ref, nlu_ref=nlu_ref, language=language)
        elif any(x in NameSpace.classifiers for x in [nlp_ref, nlu_ref, dataset, component_type] + dataset.split('_')):
            return Classifier(get_default=False, nlp_ref=nlp_ref, nlu_ref=nlu_ref, language=language)
        elif any('spell' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return SpellChecker(annotator_class=component_type, language=language, get_default=True, nlp_ref=nlp_ref, dataset=dataset)
        elif any('dep' in x and 'untyped' not in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return LabledDepParser()
        elif any('dep.untyped' in x or 'untyped' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return UnlabledDepParser()
        elif any('lemma' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return nlu.lemmatizer.Lemmatizer(language=language, nlp_ref=nlp_ref)
        elif any('norm' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return nlu.normalizer.Normalizer(nlp_ref=nlp_ref, nlu_ref=nlu_ref)
        elif any('clean' in x or 'stopword' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return nlu.StopWordsCleaner(language=language, get_default=False, nlp_ref=nlp_ref)
        elif any('sentence_detector' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return NLUSentenceDetector(nlu_ref=nlu_ref, nlp_ref=nlp_ref, language=language)
        elif any('match' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return Matcher(nlu_ref=nlu_ref, nlp_ref=nlp_ref)
        # THIS NEEDS TO CAPTURE THE WORD SEGMENTER!!!
        elif any('tokenize' in x or 'segment_words' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return nlu.tokenizer.Tokenizer(nlp_ref=nlp_ref, nlu_ref=nlu_ref, language=language, get_default=False)
        elif any('stem' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return Stemmer()
        # supported in a future version with auto embed generation:
        # elif any('embed_chunk' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
        #     return embeddings_chunker.EmbeddingsChunker()
        elif any('chunk' in x for x in [nlp_ref, nlu_ref, dataset, component_type]):
            return nlu.chunker.Chunker()
        elif component_type == 'ngram':
            return nlu.chunker.Chunker('ngram')
        logger.exception('EXCEPTION: Could not resolve singular Component for type=%s and nlp_ref=%s and nlu_ref=%s', component_type, nlp_ref, nlu_ref)
        return None
    except Exception:
        # a reference that is neither in the namespace nor a component would otherwise cause an unrecoverable crash
        logger.exception('EXCEPTION: Could not resolve singular Component for type=%s and nlp_ref=%s and nlu_ref=%s', component_type, nlp_ref, nlu_ref)
        return None
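
# Hedged call sketch for the resolver above. The references mirror a user request
# such as nlu.load('en.lemma'); 'lemma_antbnc' is an example Spark NLP model name,
# and this helper is hypothetical.
def _example_resolve_single_component():
    component = construct_component_from_identifier(
        language='en', component_type='lemma',
        nlu_ref='en.lemma', nlp_ref='lemma_antbnc')
    # None signals that no branch matched or that resolution raised internally
    return component
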
def construct_component_from_pipe_identifier(language, nlp_ref, nlu_ref, path=None):
    '''
    Creates a list of components from a Spark NLP Pipeline reference:
    1. Download the pipeline
    2. Unpack the pipeline into its annotators and create a list of NLU components
    3. Return the list of NLU components
    :param nlu_ref: full user request
    :param language: language of the pipeline
    :param nlp_ref: reference to a Spark NLP pretrained pipeline
    :param path: load the pipeline from disk instead of downloading it
    :return: each element of the Spark NLP pipeline wrapped as an NLU component inside a list
    '''
    logger.info("Starting Spark NLP to NLU pipeline conversion process")
    from sparknlp.pretrained import PretrainedPipeline, LightPipeline
    if 'language' in nlp_ref: language = 'xx'  # special edge case for lang detectors
    if path is None:
        pipe = PretrainedPipeline(nlp_ref, lang=language)
        iterable_stages = pipe.light_model.pipeline_model.stages
    else:
        pipe = LightPipeline(PipelineModel.load(path=path))
        iterable_stages = pipe.pipeline_model.stages
    constructed_components = []
    for component in iterable_stages:
        logger.info("Extracting model from Spark NLP pipeline: %s and creating Component", component)
        parsed = str(component).split('_')[0].lower()
        logger.info("Parsed Component for : %s", parsed)
        if isinstance(component, NerConverter):
            constructed_components.append(Util(annotator_class='ner_converter', model=component))
        elif parsed in NameSpace.word_embeddings + NameSpace.sentence_embeddings:
            constructed_components.append(nlu.Embeddings(model=component))
        elif parsed in NameSpace.classifiers:
            constructed_components.append(nlu.Classifier(model=component))
        elif isinstance(component, MultiClassifierDLModel):
            constructed_components.append(nlu.Classifier(model=component, nlp_ref='multiclassifierdl'))
        elif isinstance(component, PerceptronModel):
            constructed_components.append(nlu.Classifier(nlp_ref='classifierdl', model=component))
        elif isinstance(component, (ClassifierDl, ClassifierDLModel)):
            constructed_components.append(nlu.Classifier(nlp_ref='classifierdl', model=component))
        elif isinstance(component, UniversalSentenceEncoder):
            constructed_components.append(nlu.Embeddings(model=component, nlp_ref='use'))
        elif isinstance(component, BertEmbeddings):
            constructed_components.append(nlu.Embeddings(model=component, nlp_ref='bert'))
        elif isinstance(component, AlbertEmbeddings):
            constructed_components.append(nlu.Embeddings(model=component, nlp_ref='albert'))
        elif isinstance(component, XlnetEmbeddings):
            constructed_components.append(nlu.Embeddings(model=component, nlp_ref='xlnet'))
        elif isinstance(component, WordEmbeddingsModel):
            constructed_components.append(nlu.Embeddings(model=component, nlp_ref='glove'))
        elif isinstance(component, ElmoEmbeddings):
            constructed_components.append(nlu.Embeddings(model=component, nlp_ref='elmo'))
        elif isinstance(component, BertSentenceEmbeddings):
            constructed_components.append(nlu.Embeddings(model=component, nlp_ref='bert_sentence'))
        elif isinstance(component, TokenizerModel) and parsed != 'regex':
            constructed_components.append(nlu.Tokenizer(model=component))
        elif isinstance(component, TokenizerModel) and parsed == 'regex':
            constructed_components.append(nlu.Tokenizer(model=component, annotator_class='regex_tokenizer'))
        elif isinstance(component, DocumentAssembler):
            constructed_components.append(nlu.Util(model=component))
        elif isinstance(component, SentenceDetectorDLModel):
            constructed_components.append(NLUSentenceDetector(annotator_class='deep_sentence_detector', model=component))
        elif isinstance(component, SentenceDetector):
            constructed_components.append(NLUSentenceDetector(annotator_class='pragmatic_sentence_detector', model=component))
        elif isinstance(component, RegexMatcherModel) or parsed == 'match':
            constructed_components.append(nlu.Matcher(model=component, annotator_class='regex'))
        elif isinstance(component, TextMatcherModel):
            constructed_components.append(nlu.Matcher(model=component, annotator_class='text'))
        elif isinstance(component, DateMatcher):
            constructed_components.append(nlu.Matcher(model=component, annotator_class='date'))
        elif isinstance(component, ContextSpellCheckerModel):
            constructed_components.append(nlu.SpellChecker(model=component, annotator_class='context'))
        elif isinstance(component, SymmetricDeleteModel):
            constructed_components.append(nlu.SpellChecker(model=component, annotator_class='symmetric'))
        elif isinstance(component, NorvigSweetingModel):
            constructed_components.append(nlu.SpellChecker(model=component, annotator_class='norvig'))
        elif isinstance(component, LemmatizerModel):
            constructed_components.append(nlu.lemmatizer.Lemmatizer(model=component))
        elif isinstance(component, NormalizerModel):
            constructed_components.append(nlu.normalizer.Normalizer(model=component))
        elif isinstance(component, Stemmer):
            constructed_components.append(nlu.stemmer.Stemmer(model=component))
        elif isinstance(component, (NerDLModel, NerCrfModel)):
            # Pipes don't always extract confidences, so enable confidence extraction manually for all pipes
            component.setIncludeConfidence(True)
            constructed_components.append(nlu.Classifier(model=component, annotator_class='ner'))
        elif isinstance(component, LanguageDetectorDL):
            constructed_components.append(nlu.Classifier(model=component, annotator_class='language_detector'))
        elif isinstance(component, DependencyParserModel):
            constructed_components.append(UnlabledDepParser(model=component))
        elif isinstance(component, TypedDependencyParserModel):
            constructed_components.append(LabledDepParser(model=component))
        elif isinstance(component, (SentimentDetectorModel, SentimentDLModel)):
            constructed_components.append(nlu.Classifier(model=component, nlp_ref='sentimentdl'))
        elif isinstance(component, ViveknSentimentModel):
            constructed_components.append(nlu.Classifier(model=component, nlp_ref='vivekn'))
        elif isinstance(component, Chunker):
            constructed_components.append(nlu.chunker.Chunker(model=component))
        elif isinstance(component, NGram):
            constructed_components.append(nlu.chunker.Chunker(model=component))
        elif isinstance(component, ChunkEmbeddings):
            constructed_components.append(embeddings_chunker.EmbeddingsChunker(model=component))
        elif isinstance(component, StopWordsCleaner):
            constructed_components.append(nlu.StopWordsCleaner(model=component))
        elif isinstance(component, MultiDateMatcher):
            # the other matcher model types are already handled by the branches above
            constructed_components.append(nlu.Matcher(model=component))
        elif isinstance(component, T5Transformer):
            constructed_components.append(nlu.Seq2Seq(annotator_class='t5', model=component))
        elif isinstance(component, MarianTransformer):
            constructed_components.append(nlu.Seq2Seq(annotator_class='marian', model=component))
        else:
            logger.exception(f"EXCEPTION: Could not infer component type for lang={language} and nlp_ref={nlp_ref} and model {component} during pipeline conversion")
            logger.info("USING DEFAULT ANNOTATOR TYPE Normalizer to fix issue")
            constructed_components.append(nlu.normalizer.Normalizer(model=component))
        logger.info(f"Extracted into NLU Component type : {parsed}")
    if None in constructed_components:
        logger.exception(f"EXCEPTION: Could not infer component type for lang={language} and nlp_ref={nlp_ref} during pipeline conversion")
        return None
    return constructed_components
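
# Minimal sketch of the disk-loading branch above, assuming 'saved_path' points to a
# PipelineModel previously persisted with PipelineModel.save(). The empty nlp_ref and
# nlu_ref are placeholders, since no download happens on this code path.
def _example_convert_pipe_from_disk(saved_path):
    components = construct_component_from_pipe_identifier(
        'en', nlp_ref='', nlu_ref='', path=saved_path)
    return components
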
def construct_component_from_pipe_identifier(language, nlp_ref, nlu_ref):
    '''
    Creates a list of components from a Spark NLP Pipeline reference:
    1. Download the pipeline
    2. Unpack the pipeline into its annotators and create a list of NLU components
    3. Return the list of NLU components
    :param language: language of the pipeline
    :param nlp_ref: reference to a Spark NLP pretrained pipeline
    :return: each element of the Spark NLP pipeline wrapped as an NLU component inside a list
    '''
    logger.info("Starting Spark NLP to NLU pipeline conversion process")
    from sparknlp.pretrained import PretrainedPipeline
    if 'language' in nlp_ref: language = 'xx'  # special edge case for lang detectors
    pipe = PretrainedPipeline(nlp_ref, lang=language)
    constructed_components = []
    for component in pipe.light_model.pipeline_model.stages:
        logger.info("Extracting model from Spark NLP pipeline: %s and creating Component", component)
        parsed = str(component).split('_')[0].lower()
        logger.info("Parsed Component for : %s", parsed)
        c_name = component.__class__.__name__
        if c_name == 'NerConverter':
            constructed_components.append(Util(annotator_class='ner_converter', model=component))
        elif parsed in NameSpace.word_embeddings + NameSpace.sentence_embeddings:
            constructed_components.append(nlu.Embeddings(model=component))
        elif parsed in NameSpace.classifiers:
            constructed_components.append(nlu.Classifier(model=component))
        elif c_name == 'TokenizerModel' and parsed != 'regex':
            constructed_components.append(nlu.Tokenizer(model=component))
        elif c_name == 'TokenizerModel':
            constructed_components.append(nlu.Tokenizer(model=component, annotator_class='regex_tokenizer'))
        elif parsed == 'match':
            constructed_components.append(nlu.Matcher(model=component))
        elif parsed == 'document':
            constructed_components.append(nlu.Util(model=component))
        elif parsed == 'sentence':
            constructed_components.append(nlu.Util(annotator_class='sentence_detector', model=component))
        elif parsed == 'regex':
            constructed_components.append(nlu.Matcher(model=component, nlu_ref=parsed))
        elif parsed == 'date':
            constructed_components.append(nlu.Matcher(model=component, nlu_ref=parsed))
        elif parsed == 'text':
            constructed_components.append(nlu.Matcher(model=component, nlu_ref=parsed))
        elif parsed == 'spell':
            constructed_components.append(nlu.SpellChecker(model=component))
        elif parsed == 'lemmatizer':
            constructed_components.append(nlu.lemmatizer.Lemmatizer(model=component))
        elif parsed == 'normalizer':
            constructed_components.append(nlu.normalizer.Normalizer(model=component))
        elif parsed == 'stemmer':
            constructed_components.append(nlu.stemmer.Stemmer(model=component))
        elif c_name == 'PerceptronModel':
            constructed_components.append(nlu.Classifier(annotator_class='classifierdl', model=component))
        elif c_name == 'ClassifierDLModel':
            constructed_components.append(nlu.Classifier(annotator_class='language_detector', model=component))
        elif parsed == 'word':
            constructed_components.append(nlu.Embeddings(model=component))
        elif parsed == 'ner' or parsed == 'nerdlmodel':
            constructed_components.append(nlu.Classifier(model=component))
        elif parsed == 'dependency':
            constructed_components.append(nlu.Util(model=component))
        elif parsed == 'typed':
            # typed dependency parsers produce labeled dependencies
            constructed_components.append(nlu.LabledDepParser(model=component))
        elif parsed == 'multi':
            constructed_components.append(nlu.Classifier(model=component))
        elif parsed == 'sentimentdlmodel':
            constructed_components.append(nlu.Classifier(model=component))
        elif parsed == 'chunker':
            constructed_components.append(nlu.chunker.Chunker(model=component))
        elif parsed == 'ngram':
            constructed_components.append(nlu.chunker.Chunker(model=component))
        elif parsed == 'embeddings_chunk':
            constructed_components.append(embeddings_chunker.EmbeddingsChunker(model=component))
        elif parsed == 'stopwords':
            constructed_components.append(nlu.StopWordsCleaner(model=component))
        else:
            logger.exception("EXCEPTION: Could not infer component type for lang=%s and nlp_ref=%s during pipeline conversion", language, nlp_ref)
            logger.info("USING DEFAULT ANNOTATOR TYPE Normalizer to fix issue")
            constructed_components.append(nlu.normalizer.Normalizer(model=component))
        logger.info("Extracted into NLU Component type : %s", parsed)
    if None in constructed_components:
        logger.exception("EXCEPTION: Could not infer component type for lang=%s and nlp_ref=%s during pipeline conversion", language, nlp_ref)
        return None
    return constructed_components
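
# The dispatch above keys on the first underscore-separated token of each stage's
# string form. A minimal sketch of that heuristic on plain strings; the stage reprs
# shown (e.g. 'NerDLModel_5e2a') are illustrative, not real Spark NLP uids.
def _parsed_prefix(stage_repr):
    return stage_repr.split('_')[0].lower()

assert _parsed_prefix('SENTENCE_detector') == 'sentence'
assert _parsed_prefix('NerDLModel_5e2a') == 'nerdlmodel'
assert _parsed_prefix('stopwords_en_4e1f') == 'stopwords'
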
def construct_component_from_identifier(language, component_type, dataset, component_embeddings, nlu_reference, sparknlp_reference):
    '''
    Creates an NLU component from a pretrained Spark NLP model reference or class reference.
    Class references will return default pretrained models.
    :param language: language of the Spark NLP model reference
    :param component_type: class which will be used to instantiate the model
    :param dataset: dataset that the model was trained on
    :param component_embeddings: embeddings that the model was trained on (if any)
    :param nlu_reference: full user request
    :param sparknlp_reference: full Spark NLP reference
    :return: an NLU component which wraps the Spark NLP pretrained model and the class for that model
    '''
    logger.info('Creating singular NLU component for type=%s sparknlp reference=%s , dataset=%s, language=%s ',
                component_type, sparknlp_reference, dataset, language)
    try:
        if sparknlp_reference == 'yake':
            return Classifier('yake')
        elif 'bert' in dataset or component_type == 'embed' or 'albert' in component_type or 'bert' in component_type \
                or 'xlnet' in component_type or 'use' in component_type or 'glove' in component_type \
                or 'elmo' in component_type or 'tfhub_use' in sparknlp_reference or 'bert' in sparknlp_reference \
                or 'labse' in sparknlp_reference or component_type == 'embed_sentence' or 'electra' in nlu_reference:
            if component_type == 'embed' and dataset != '':
                return Embeddings(component_name=dataset, language=language, get_default=False, sparknlp_reference=sparknlp_reference)
            elif component_type == 'embed':
                return Embeddings(component_name=sparknlp_reference)  # default
            else:
                return Embeddings(component_name=component_type, language=language, get_default=False, sparknlp_reference=sparknlp_reference)
        elif component_type == 'classify' or 'e2e' in sparknlp_reference:
            if component_type == 'classify' and dataset != '':
                return Classifier(component_name=dataset, language=language, get_default=False, sparknlp_reference=sparknlp_reference)
            else:
                return Classifier(component_name=component_type, language=language, get_default=False, sparknlp_reference=sparknlp_reference)
        elif component_type == 'tokenize':
            return nlu.tokenizer.Tokenizer(component_name=component_type, language=language, get_default=False, sparknlp_reference=sparknlp_reference)
        elif component_type == 'pos':
            return Classifier(component_name=component_type, language=language, get_default=False, sparknlp_reference=sparknlp_reference)
        elif component_type == 'ner' or 'ner_dl' in sparknlp_reference:
            return Classifier(component_name='ner', language=language, get_default=False, sparknlp_reference=sparknlp_reference)
        elif component_type == 'sentiment':
            return Classifier(component_name=component_type, language=language, get_default=False, sparknlp_reference=sparknlp_reference)
        elif component_type == 'emotion':
            return Classifier(component_name=component_type, language=language, get_default=False, sparknlp_reference=sparknlp_reference)
        elif component_type == 'spell':
            return SpellChecker(component_name=component_type, language=language, get_default=False, sparknlp_reference=sparknlp_reference, dataset=dataset)
        elif component_type == 'dep' and dataset != 'untyped':
            # There are no trainable dep parsers, this gets only the default dep parser
            return LabledDepParser(component_name='labeled_dependency_parser', language=language, get_default=True, sparknlp_reference=sparknlp_reference)
        elif component_type == 'dep.untyped' or dataset == 'untyped':
            # There are no trainable dep parsers, this gets only the default dep parser
            return UnlabledDepParser(component_name='unlabeled_dependency_parser', language=language, get_default=True, sparknlp_reference=sparknlp_reference)
        elif component_type == 'lemma':
            return nlu.lemmatizer.Lemmatizer(component_name=component_type, language=language, get_default=False, sparknlp_reference=sparknlp_reference)
        elif component_type == 'norm':
            return nlu.normalizer.Normalizer(component_name='normalizer', language=language, get_default=True, sparknlp_reference=sparknlp_reference)
        elif component_type == 'clean' or component_type == 'stopwords':
            return nlu.StopWordsCleaner(language=language, get_default=False, sparknlp_reference=sparknlp_reference)
        elif component_type == 'sentence_detector':
            return NLUSentenceDetector(component_name=component_type, language=language, get_default=True, sparknlp_reference=sparknlp_reference)
        elif component_type == 'match':
            return Matcher(component_name=dataset, language=language, get_default=True, sparknlp_reference=sparknlp_reference)
        elif component_type in ('stem', 'stemm') or sparknlp_reference == 'stemmer':
            return Stemmer()
        elif component_type == 'chunk':
            return nlu.chunker.Chunker()
        elif component_type == 'ngram':
            return nlu.chunker.Chunker('ngram')
        elif component_type == 'embed_chunk':
            return embeddings_chunker.EmbeddingsChunker()
        elif component_type == 'regex' or sparknlp_reference == 'regex_matcher':
            return nlu.Matcher(component_name='regex')
        elif component_type == 'text' or sparknlp_reference == 'text_matcher':
            return nlu.Matcher(component_name='text')
        logger.exception('EXCEPTION: Could not resolve singular Component for type=%s and sparknlp reference=%s and nlu reference=%s', component_type, sparknlp_reference, nlu_reference)
        return None
    except Exception:
        # a reference that is neither in the namespace nor a component would otherwise cause an unrecoverable crash
        logger.exception('EXCEPTION: Could not resolve singular Component for type=%s and sparknlp reference=%s and nlu reference=%s', component_type, sparknlp_reference, nlu_reference)
        return None
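
# Hedged usage sketch for the reference-based resolver above. 'glove_100d' is an
# example Spark NLP model name; the helper itself is hypothetical and mirrors a
# request like nlu.load('en.embed.glove').
def _example_resolve_glove_embeddings():
    return construct_component_from_identifier(
        language='en', component_type='embed', dataset='glove',
        component_embeddings='', nlu_reference='en.embed.glove',
        sparknlp_reference='glove_100d')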