def load(meta, nlp, featurizer=None):
        """Reconstruct a SpacySklearnInterpreter from persisted model metadata.

        Each sub-component (entity extractor, intent classifier, synonyms) is
        only loaded if its path is present in the metadata; otherwise it is
        passed to the interpreter as None.

        :type meta: rasa_nlu.model.Metadata
        :type nlp: spacy.language.Language
        :type featurizer: None or rasa_nlu.featurizers.spacy_featurizer.SpacyFeaturizer
        :rtype: SpacySklearnInterpreter
        """
        if meta.entity_extractor_path:
            extractor = SpacyEntityExtractor(nlp, meta.entity_extractor_path,
                                             meta.metadata.get("should_fine_tune_spacy_ner"))
        else:
            extractor = None
        if meta.intent_classifier_path:
            # Classifier was persisted with cloudpickle, so it must be
            # deserialized the same way (binary mode).
            with open(meta.intent_classifier_path, 'rb') as f:
                classifier = cloudpickle.load(f)
        else:
            classifier = None
        if meta.entity_synonyms_path:
            entity_synonyms = Interpreter.load_synonyms(meta.entity_synonyms_path)
        else:
            entity_synonyms = None

        # Fall back to a fresh featurizer built from the provided pipeline.
        if featurizer is None:
            featurizer = SpacyFeaturizer(nlp)
        return SpacySklearnInterpreter(
            classifier,
            extractor,
            entity_synonyms,
            featurizer,
            nlp)
Ejemplo n.º 2
0
def test_spacy_featurizer(spacy_nlp_en, sentence, expected):
    """The featurizer output must equal the spaCy document vector."""
    from rasa_nlu.featurizers.spacy_featurizer import SpacyFeaturizer

    featurizer = SpacyFeaturizer(spacy_nlp_en)
    document = spacy_nlp_en(sentence)
    features = featurizer.features_for_doc(document)
    # Sanity-check the first few vector components against the fixture value,
    # then confirm the featurizer reproduces the full document vector.
    assert np.allclose(document.vector[:5], expected, atol=1e-5)
    assert np.allclose(features, document.vector, atol=1e-5)
Ejemplo n.º 3
0
    def __init__(self, entity_extractor=None, intent_classifier=None, language_name='en', **kwargs):
        """Load a spaCy pipeline and optional persisted sub-components.

        :param entity_extractor: path to a persisted SpacyEntityExtractor
            model directory, or None to skip entity extraction
        :param intent_classifier: path to a cloudpickled intent classifier,
            or None to skip intent classification
        :param language_name: spaCy language model name to load
        """
        self.nlp = spacy.load(language_name, parser=False, entity=False, matcher=False)
        self.featurizer = SpacyFeaturizer(self.nlp)

        # Both paths default to None; guard before use so the documented
        # defaults work instead of raising TypeError in open()/the extractor.
        self.classifier = None
        if intent_classifier:
            with open(intent_classifier, 'rb') as f:
                self.classifier = cloudpickle.load(f)

        self.extractor = None
        if entity_extractor:
            self.extractor = SpacyEntityExtractor(self.nlp, entity_extractor)
Ejemplo n.º 4
0
 def __init__(self, config, language_name):
     """Initialise the spacy_sklearn trainer state for *language_name*.

     Verifies language support, then loads the spaCy pipeline (parser and
     built-in NER disabled) and builds the featurizer around it. Training
     artifacts start out unset.
     """
     self.ensure_language_support(language_name)
     self.name = "spacy_sklearn"
     self.language_name = language_name
     self.training_data = None
     self.intent_classifier = None
     self.entity_extractor = None
     self.nlp = spacy.load(self.language_name, parser=False, entity=False)
     self.featurizer = SpacyFeaturizer(self.nlp)
Ejemplo n.º 5
0
def test_spacy_featurizer(sentence, language, expected):
    """Featurizer output for a parsed sentence matches doc.vector."""
    import spacy
    from rasa_nlu.featurizers.spacy_featurizer import SpacyFeaturizer

    nlp = spacy.load(language, tagger=False, parser=False)
    featurizer = SpacyFeaturizer(nlp)
    document = nlp(sentence)
    computed = featurizer.features_for_doc(document)
    # First compare a vector prefix with the parametrised expectation,
    # then the full featurizer output with spaCy's own document vector.
    assert np.allclose(document.vector[:5], expected, atol=1e-5)
    assert np.allclose(computed, document.vector, atol=1e-5)
Ejemplo n.º 6
0
 def test_sentence(sentence, language, _ref):
     """Bag-of-words vector for one sentence equals the document vector."""
     import spacy
     from rasa_nlu.featurizers.spacy_featurizer import SpacyFeaturizer

     nlp = spacy.load(language, tagger=False, parser=False)
     featurizer = SpacyFeaturizer(nlp)
     parsed = nlp(sentence)
     bow = featurizer.create_bow_vecs([sentence])
     # Prefix check against the reference fixture, then full-vector
     # agreement between the BOW featurizer and spaCy's doc vector.
     assert np.allclose(parsed.vector[:5], _ref, atol=1e-5)
     assert np.allclose(bow[0], parsed.vector, atol=1e-5)
Ejemplo n.º 7
0
 def __init__(self,
              language_name,
              max_num_threads=1,
              should_fine_tune_spacy_ner=False):
     """Set up the trainer: base init, spaCy pipeline, and featurizer.

     :param language_name: spaCy language model name to load
     :param max_num_threads: forwarded to the base trainer
     :param should_fine_tune_spacy_ner: when True, passed on to
         _load_nlp_model to request fine-tuning of spaCy's NER
     """
     # NOTE(review): super(self.__class__, ...) recurses infinitely if this
     # class is ever subclassed — prefer super(ExplicitClassName, self);
     # the class name is not visible in this chunk, so left unchanged.
     super(self.__class__, self).__init__(language_name, max_num_threads)
     self.should_fine_tune_spacy_ner = should_fine_tune_spacy_ner
     self.nlp = self._load_nlp_model(language_name,
                                     should_fine_tune_spacy_ner)
     self.featurizer = SpacyFeaturizer(self.nlp)
     # Fail early if the loaded model lacks required components/vectors.
     ensure_proper_language_model(self.nlp)
Ejemplo n.º 8
0
    def __init__(self,
                 intent_classifier=None,
                 entity_extractor=None,
                 entity_synonyms=None,
                 nlp=None):
        """Wire already-constructed components into the interpreter.

        All components are stored as given; only the featurizer is built
        here, wrapping the supplied spaCy pipeline.
        """
        self.nlp = nlp
        self.classifier = intent_classifier
        self.extractor = entity_extractor
        self.ent_synonyms = entity_synonyms
        self.featurizer = SpacyFeaturizer(nlp)

        # Validate the supplied pipeline before it is used anywhere.
        ensure_proper_language_model(nlp)
Ejemplo n.º 9
0
    def __init__(self,
                 entity_extractor=None,
                 entity_synonyms=None,
                 intent_classifier=None,
                 language_name='en',
                 **kwargs):
        """Load the spaCy pipeline and any persisted sub-components.

        :param entity_extractor: path to a persisted entity extractor, or None
        :param entity_synonyms: path to a synonyms file, or None
        :param intent_classifier: path to a cloudpickled classifier, or None
        :param language_name: spaCy language model name to load
        """
        self.extractor = None
        self.classifier = None
        self.ent_synonyms = None
        self.nlp = spacy.load(language_name,
                              parser=False,
                              entity=False,
                              matcher=False)
        self.featurizer = SpacyFeaturizer(self.nlp)

        # Validate the model before deserializing anything that depends on it.
        ensure_proper_language_model(self.nlp)

        if intent_classifier:
            # Binary read: the classifier was persisted with cloudpickle.
            with open(intent_classifier, 'rb') as f:
                self.classifier = cloudpickle.load(f)
        if entity_extractor:
            self.extractor = SpacyEntityExtractor(self.nlp, entity_extractor)
        self.ent_synonyms = Interpreter.load_synonyms(entity_synonyms)
Ejemplo n.º 10
0
 def __init__(self, language_name, max_num_threads=1):
     """Initialise the spacy_sklearn trainer with a loaded pipeline.

     :param language_name: spaCy language model name to load
     :param max_num_threads: forwarded to the base trainer
     """
     # NOTE(review): super(self.__class__, ...) recurses infinitely under
     # subclassing — prefer super(ExplicitClassName, self); the class name
     # is not visible in this chunk, so left unchanged.
     super(self.__class__, self).__init__("spacy_sklearn", language_name,
                                          max_num_threads)
     # Parser and built-in NER are disabled; only tagger/vectors are needed.
     self.nlp = spacy.load(self.language_name, parser=False, entity=False)
     self.featurizer = SpacyFeaturizer(self.nlp)
     ensure_proper_language_model(self.nlp)
Ejemplo n.º 11
0
from rasa_nlu.train import load_data
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.utils.spacy_utils import SpacyNLP
from rasa_nlu.tokenizers.spacy_tokenizer import SpacyTokenizer
from rasa_nlu.featurizers.spacy_featurizer import SpacyFeaturizer
import numpy as np, spacy

# Run the spaCy-based pipeline components over the demo training data so
# that each example is annotated (tokens, "text_features") for use below.
training_data = load_data("data/examples/rasa/demo-rasa.json")
config = RasaNLUModelConfig()
SpacyNLP(nlp=spacy.load("en")).train(training_data, config)
SpacyTokenizer().train(training_data, config)
SpacyFeaturizer().train(training_data, config)

from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

# Encode intent labels as integers and stack the per-example feature
# vectors into a design matrix for the SVM grid search that follows.
labels = [e.get("intent") for e in training_data.intent_examples]
le = LabelEncoder()

y = le.fit_transform(labels)
X = np.stack([
    example.get("text_features") for example in training_data.intent_examples
])

defaults = {
    # C parameter of the svm - cross validation will select the best value
    "C": [1, 2, 5, 10, 20, 100],

    # the kernels to use for the svm training - cross validation will
    # decide which one of them performs best