Example #1
def run_evaluation(data_path, model_path,
                   component_builder=None):  # pragma: no cover
    """Evaluate intent classification and entity extraction."""

    # get the metadata config from the package data
    interpreter = Interpreter.load(model_path, component_builder)
    test_data = training_data.load_data(data_path,
                                        interpreter.model_metadata.language)
    extractors = get_entity_extractors(interpreter)
    entity_predictions, tokens = get_entity_predictions(interpreter,
                                                        test_data)
    if duckling_extractors.intersection(extractors):
        entity_predictions = remove_duckling_entities(entity_predictions)
        extractors = remove_duckling_extractors(extractors)

    if is_intent_classifier_present(interpreter):
        intent_targets = get_intent_targets(test_data)
        intent_predictions = get_intent_predictions(interpreter, test_data)
        logger.info("Intent evaluation results:")
        evaluate_intents(intent_targets, intent_predictions)

    if extractors:
        entity_targets = get_entity_targets(test_data)

        logger.info("Entity evaluation results:")
        evaluate_entities(entity_targets, entity_predictions, tokens,
                          extractors)
Example #2
def run_intent_evaluation(config, model_path, component_builder=None):
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import classification_report
    from sklearn.metrics import confusion_matrix
    from sklearn.metrics import f1_score
    from sklearn.metrics import precision_score
    from sklearn.utils.multiclass import unique_labels

    # get the metadata config from the package data
    test_data = load_data(config['data'])
    metadata = Metadata.load(model_path)
    interpreter = Interpreter.load(metadata, config, component_builder)

    test_y = [e.get("intent") for e in test_data.training_examples]

    preds = []
    for e in test_data.training_examples:
        res = interpreter.parse(e.text)
        if res.get('intent'):
            preds.append(res['intent'].get('name'))
        else:
            preds.append(None)

    logger.info("Intent Evaluation Results")
    logger.info("F1-Score:  {}".format(f1_score(test_y, preds, average='weighted')))
    logger.info("Precision: {}".format(precision_score(test_y, preds, average='weighted')))
    logger.info("Accuracy:  {}".format(accuracy_score(test_y, preds)))
    logger.info("Classification report: \n{}".format(classification_report(test_y, preds)))

    cnf_matrix = confusion_matrix(test_y, preds)
    plot_intent_confusion_matrix(cnf_matrix, classes=unique_labels(test_y, preds),
                                 title='Intent Confusion matrix')

    plt.show()
    return
Example #3
def run_intent_evaluation(config, model_path, component_builder=None):
    from sklearn.metrics import confusion_matrix
    from sklearn.utils.multiclass import unique_labels

    # get the metadata config from the package data
    test_data = load_data(config['data'])
    interpreter = Interpreter.load(model_path, config, component_builder)

    test_y = [e.get("intent") for e in test_data.training_examples]

    preds = []
    for e in test_data.training_examples:
        res = interpreter.parse(e.text)
        if res.get('intent'):
            preds.append(res['intent'].get('name'))
        else:
            preds.append(None)

    log_evaluation_table(test_y, preds)

    cnf_matrix = confusion_matrix(test_y, preds)
    plot_confusion_matrix(cnf_matrix, classes=unique_labels(test_y, preds),
                          title='Intent Confusion matrix')

    plt.show()
    return
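Example #3 delegates metric printing to log_evaluation_table, which is not shown. A minimal sketch of such a helper, assembled from the same sklearn calls as Example #2 (an assumption, not the project's actual implementation):

def log_evaluation_table(targets, predictions):
    from sklearn.metrics import (accuracy_score, classification_report,
                                 f1_score, precision_score)
    # Weighted averages account for class imbalance across intents.
    logger.info("F1-Score:  {}".format(
        f1_score(targets, predictions, average='weighted')))
    logger.info("Precision: {}".format(
        precision_score(targets, predictions, average='weighted')))
    logger.info("Accuracy:  {}".format(
        accuracy_score(targets, predictions)))
    logger.info("Classification report: \n{}".format(
        classification_report(targets, predictions)))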
Example #4
def run_evaluation(data_path, model,
                   report_folder=None,
                   successes_filename=None,
                   errors_filename='errors.json',
                   confmat_filename=None,
                   intent_hist_filename=None,
                   component_builder=None):  # pragma: no cover
    """Evaluate intent classification and entity extraction."""

    # get the metadata config from the package data
    if isinstance(model, Interpreter):
        interpreter = model
    else:
        interpreter = Interpreter.load(model, component_builder)
    test_data = training_data.load_data(data_path,
                                        interpreter.model_metadata.language)
    extractors = get_entity_extractors(interpreter)
    entity_predictions, tokens = get_entity_predictions(interpreter,
                                                        test_data)

    if duckling_extractors.intersection(extractors):
        entity_predictions = remove_duckling_entities(entity_predictions)
        extractors = remove_duckling_extractors(extractors)

    result = {
        "intent_evaluation": None,
        "entity_evaluation": None
    }

    if report_folder:
        utils.create_dir(report_folder)

    if is_intent_classifier_present(interpreter):
        intent_targets = get_intent_targets(test_data)
        intent_results = get_intent_predictions(
            intent_targets, interpreter, test_data)

        logger.info("Intent evaluation results:")
        result['intent_evaluation'] = evaluate_intents(intent_results,
                                                       report_folder,
                                                       successes_filename,
                                                       errors_filename,
                                                       confmat_filename,
                                                       intent_hist_filename)

    if extractors:
        entity_targets = get_entity_targets(test_data)

        logger.info("Entity evaluation results:")
        result['entity_evaluation'] = evaluate_entities(entity_targets,
                                                        entity_predictions,
                                                        tokens,
                                                        extractors,
                                                        report_folder)

    return result
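A sketch of calling this variant so that per-intent reports and plots are written to disk (paths and filenames are hypothetical):

# Hypothetical paths; reports, errors.json and the plots land in 'reports/'.
result = run_evaluation("data/test/demo-rasa.json",
                        "models/nlu/default/current",
                        report_folder="reports",
                        confmat_filename="confmat.png",
                        intent_hist_filename="hist.png")
print(result["intent_evaluation"])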
Example #5
def run_cmdline(model_path, component_builder=None):
    interpreter = Interpreter.load(model_path, component_builder)

    logger.info("NLU model loaded. Type a message and "
                "press enter to parse it.")
    while True:
        text = input().strip()
        r = interpreter.parse(text)
        print(json.dumps(r, indent=2))
        logger.info("Next message:")
Example #6
def test_train_with_empty_data(language, pipeline, component_builder, tmpdir):
    _config = RasaNLUModelConfig({"pipeline": pipeline, "language": language})
    trainer = Trainer(_config, component_builder)
    trainer.train(TrainingData())
    persistor = create_persistor(_config)
    persisted_path = trainer.persist(tmpdir.strpath, persistor,
                                     project_name="my_project")
    loaded = Interpreter.load(persisted_path, component_builder)
    assert loaded.pipeline
    assert loaded.parse("hello") is not None
    assert loaded.parse("Hello today is Monday, again!") is not None
Example #7
def test_train_model_noents(language, pipeline, component_builder, tmpdir):
    _config = RasaNLUModelConfig({"pipeline": pipeline, "language": language})
    (trained, _, persisted_path) = train.do_train(
            _config,
            path=tmpdir.strpath,
            data="./data/test/demo-rasa-noents.json",
            component_builder=component_builder)
    assert trained.pipeline
    loaded = Interpreter.load(persisted_path, component_builder)
    assert loaded.pipeline
    assert loaded.parse("hello") is not None
    assert loaded.parse("Hello today is Monday, again!") is not None
Example #8
def test_train_model(pipeline_template, component_builder, tmpdir):
    _config = utilities.base_test_conf(pipeline_template)
    (trained, _, persisted_path) = train.do_train(
            _config,
            path=tmpdir.strpath,
            data=DEFAULT_DATA_PATH,
            component_builder=component_builder)
    assert trained.pipeline
    loaded = Interpreter.load(persisted_path, component_builder)
    assert loaded.pipeline
    assert loaded.parse("hello") is not None
    assert loaded.parse("Hello today is Monday, again!") is not None
Example #9
    def __init__(self, languages):
        utils.check_languages(languages)

        self.languages = languages
        self.config = RasaNLUConfig(cmdline_args=utils.load_config())
        self.query_logger = DataRouter._create_query_logger(self.config['response_log'])
        self._metadata = {}
        self._interpreter = {}
        for lang in languages:
            self._metadata[lang] = Metadata.load(utils.get_model_dir(lang))
            self._interpreter[lang] = Interpreter.load(self._metadata[lang], self.config)
        self.registry = IntentRegistry(self.config['active_apps'])
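With the per-language stores built, parsing can dispatch on the language key. A sketch of such a method on the same class (hypothetical, not shown in the original):

    def parse(self, text, lang):
        # Look up the interpreter loaded for this language in __init__.
        return self._interpreter[lang].parse(text)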
Example #10
def run_cv_evaluation(data, n_folds, nlu_config):
    # type: (List[rasa_nlu.training_data.Message], int, RasaNLUConfig) -> Dict[Text, List[float]]
    """Stratified cross-validation on data.

    :param data: list of rasa_nlu.training_data.Message objects
    :param n_folds: integer, number of cv folds
    :param nlu_config: nlu config file
    :return: dictionary with key, list structure, where each entry in the
             list corresponds to the result for one fold
    """
    from sklearn import metrics
    from sklearn.model_selection import StratifiedKFold
    from collections import defaultdict

    trainer = Trainer(nlu_config)
    results = defaultdict(list)

    y_true = [e.get("intent") for e in data]

    skf = StratifiedKFold(n_splits=n_folds, random_state=11, shuffle=True)
    counter = 1
    logger.info("Evaluation started")
    for train_index, test_index in skf.split(data, y_true):

        train = [data[i] for i in train_index]
        test = [data[i] for i in test_index]

        logger.debug("Fold: {}".format(counter))
        logger.debug("Training ...")
        trainer.train(TrainingData(training_examples=train))
        model_directory = trainer.persist("projects/")  # Returns the directory the model is stored in

        logger.debug("Evaluation ...")
        interpreter = Interpreter.load(model_directory, nlu_config)
        test_y = [e.get("intent") for e in test]

        preds = []
        for e in test:
            res = interpreter.parse(e.text)
            if res.get('intent'):
                preds.append(res['intent'].get('name'))
            else:
                preds.append(None)

        # compute fold metrics
        results["Accuracy"].append(metrics.accuracy_score(test_y, preds))
        results["F1-score"].append(metrics.f1_score(test_y, preds, average='weighted'))
        results["Precision"] = metrics.precision_score(test_y, preds, average='weighted')

        # increase fold counter
        counter += 1

    return dict(results)
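An invocation sketch, assuming training examples loaded with the old rasa_nlu helpers (paths and config file are hypothetical):

# Hypothetical paths/config; runs 10-fold stratified CV over the examples.
nlu_config = RasaNLUConfig("config_spacy.json")
data = training_data.load_data("data/demo-rasa.json").training_examples
results = run_cv_evaluation(data, 10, nlu_config)
print(results)  # e.g. {"Accuracy": [...], "F1-score": [...], "Precision": [...]}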
Example #11
def test_train_model_on_test_pipelines(language, pipeline,
                                       component_builder, tmpdir):
    _config = RasaNLUModelConfig({"pipeline": pipeline, "language": language})
    (trained, _, persisted_path) = train.do_train(
            _config,
            path=tmpdir.strpath,
            data=DEFAULT_DATA_PATH,
            component_builder=component_builder)
    assert trained.pipeline
    loaded = Interpreter.load(persisted_path, component_builder)
    assert loaded.pipeline
    assert loaded.parse("hello") is not None
    assert loaded.parse("Hello today is Monday, again!") is not None
Example #12
def run_cmdline(model_path, component_builder=None):
    interpreter = Interpreter.load(model_path, component_builder)

    logger.info("NLU model loaded. Type a message and "
                "press enter to parse it.")
    while True:
        text = input().strip()
        if six.PY2:
            # in python 2 input doesn't return unicode values
            text = text.decode("utf-8")
        r = interpreter.parse(text)
        print(json.dumps(r, indent=2))
        logger.info("Next message:")
Example #13
def test_random_seed(component_builder, tmpdir):
    """test if train result is the same for two runs of tf embedding"""

    _config = utilities.base_test_conf("supervised_embeddings")
    # set fixed random seed to 1
    _config.set_component_attr(5, random_seed=1)
    # first run
    (trained_a, _, persisted_path_a) = train(
        _config,
        path=tmpdir.strpath + "_a",
        data=DEFAULT_DATA_PATH,
        component_builder=component_builder)
    # second run
    (trained_b, _, persisted_path_b) = train(
        _config,
        path=tmpdir.strpath + "_b",
        data=DEFAULT_DATA_PATH,
        component_builder=component_builder)
    loaded_a = Interpreter.load(persisted_path_a, component_builder)
    loaded_b = Interpreter.load(persisted_path_b, component_builder)
    result_a = loaded_a.parse("hello")["intent"]["confidence"]
    result_b = loaded_b.parse("hello")["intent"]["confidence"]
    assert result_a == result_b
Example #14
def run_evaluation(config, model_path, component_builder=None):  # pragma: no cover
    """Evaluate intent classification and entity extraction."""
    # get the metadata config from the package data
    test_data = training_data.load_data(config['data'], config['language'])
    interpreter = Interpreter.load(model_path, config, component_builder)
    intent_targets, entity_targets = get_targets(test_data)
    intent_predictions, entity_predictions, tokens = get_predictions(interpreter, test_data)
    extractors = get_entity_extractors(interpreter)

    if extractors.intersection(duckling_extractors):
        entity_predictions = patch_duckling_entities(entity_predictions)
        extractors = patch_duckling_extractors(interpreter, extractors)

    evaluate_intents(intent_targets, intent_predictions)
    evaluate_entities(entity_targets, entity_predictions, tokens, extractors)
Example #15
    def __create_model_store(self):
        # Fallback for users that specified the model path as a string and hence only want a single default model.
        if type(self.config.server_model_dirs) is Text:
            model_dict = {self.DEFAULT_MODEL_NAME: self.config.server_model_dirs}
        elif self.config.server_model_dirs is None:
            model_dict = self.__search_for_models()
        else:
            model_dict = self.config.server_model_dirs

        model_store = {}

        for alias, model_path in list(model_dict.items()):
            try:
                logger.info("Loading model '{}'...".format(model_path))
                model_store[alias] = self.__interpreter_for_model(model_path)
            except Exception as e:
                logger.exception("Failed to load model '{}'. Error: {}".format(model_path, e))
        if not model_store:
            meta = Metadata({"pipeline": ["intent_classifier_keyword"]}, "")
            interpreter = Interpreter.load(meta, self.config, self.component_builder)
            model_store[self.DEFAULT_MODEL_NAME] = interpreter
        return model_store
Example #16
def run_nlu():
    interpreter = Interpreter.load('./models/nlu/default/current')
    print(interpreter.parse(u"I am sad, please send me a picture of a dog"))
Example #17
def interpreter_for(component_builder, data, path, config):
    (trained, _, path) = train(config, data, path,
                               component_builder=component_builder)
    interpreter = Interpreter.load(path, component_builder)
    return interpreter
Example #18
    def _load_interpreter(self):
        from rasa_nlu.model import Interpreter

        self.interpreter = Interpreter.load(self.model_directory)
Example #19
def load_interpreter_for_model(config, persisted_path, component_builder):
    metadata = DataRouter.read_model_metadata(persisted_path, config)
    return Interpreter.load(metadata, config, component_builder)
Example #20
    def __interpreter_for_model(self, model_path):
        metadata = DataRouter.read_model_metadata(model_path, self.config)
        return Interpreter.load(metadata, self.config, self.component_builder)
Example #21
import sys
from rasa_nlu.components import ComponentBuilder
from rasa_nlu.model import Metadata, Interpreter

builder = ComponentBuilder(use_cache=True)

# Load the NLU interpreter and wait for input
if __name__ == '__main__':
    print(sys.argv)
    interpreter = Interpreter.load(sys.argv[1], builder)
    print("Model loaded")
    while True:
        sentence = input()
        print(interpreter.parse(sentence))
Example #22
from rasa_nlu.model import Interpreter

import os

rasa_model_path = os.path.abspath("rasa/models/20200321-174355.tar.gz")
print(rasa_model_path)

interpreter = Interpreter.load(rasa_model_path)


def rasa_output(text):
    message = str(text).strip()
    result = interpreter.parse(message)
    return result


rasa_output("hello")
Example #23
def run_nlu():
	interpreter = Interpreter.load('./models/nlu/default/weathernlu', RasaNLUConfig('config_spacy.json'))
	print(interpreter.parse("I am planning my holiday to Lithuania. I wonder what is the weather out there."))
Example #24
# Added import: the snippet below uses Flask, render_template, request and Response.
from flask import Flask, render_template, request, Response
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.model import Trainer
from rasa_nlu.model import Metadata, Interpreter
import json

app = Flask(__name__)

import warnings
warnings.filterwarnings('ignore')


@app.route('/')
def index():
    return render_template('index.html')


interpreter = Interpreter.load('./models/nlu/default/restaurantnlu')


@app.route('/nlu_parsing', methods=['POST'])
def transform():
    if request.headers['Content-Type'] == 'application/json':
        query = request.json.get("utterance")
        results = interpreter.parse(query)
        js = json.dumps(results)
        resp = Response(js, status=200, mimetype='application/json')
        return resp


if __name__ == '__main__':
    app.run(debug=True)
Example #25
#!/usr/bin/env python3
import argparse
import pathlib
import re
from typing import List, Dict, Union

from flask import Flask, jsonify, request, wrappers
from flask_cors import CORS
from rasa_nlu.model import Interpreter

MODEL_DIR = (pathlib.Path(__file__).parent / 'projects' / 'default' /
             'model_20180613-130746').absolute()
INTERPRETER = Interpreter.load(str(MODEL_DIR))

# Initialize the server
app = Flask(__name__)
CORS(app)


def get_whatsapp_messages(content: str) -> List[Dict[str, str]]:
    """Extract per-sender messages from an exported WhatsApp chat string.

    Args:
        content: All messages in a string

    Returns: [{'name': 'matched_string', ...}]
    """

    pattern = r'(?P<time>[0-9\/, :AMP]+) \- (?P<name>[a-zA-Z0-9+ ]+): (?P<text>.+)'
    regex_ = re.compile(pattern, re.DOTALL)
Example #26
if __name__ == '__main__':
    args = docopt(__doc__)
    source_file_path = args['<source>']
    target_language = args['--target-language']
    output_file_path = args['--output']

    # import code dictionary for target language
    if target_language == 'python':
        from languages.python import code_dict
    elif target_language == 'c++':
        from languages.c_plus_plus import code_dict
    elif target_language == 'java':
        from languages.java import code_dict

    # load NLU model
    interpreter = Interpreter.load(MODEL_DIR + listdir(MODEL_DIR)[0])

    # parse each line from the input file and store its transpiled code in the
    # `code` list
    code = [code_dict['default_code']]
    current_indent_level = 0
    prev_indent_level = 0
    with open(source_file_path, 'r') as source:
        source_lines = source.readlines()
        for line in source_lines:
            print(line, end='')
            current_indent_level = counts_tabs(line)
            if current_indent_level < prev_indent_level:
                code.append('\t' * current_indent_level +
                            code_dict['end_block'])
            if not line.isspace():  # line is not "blank"
Example #27
def run_nlu():
    interpreter = Interpreter.load('./models/default/current/')
    print(interpreter.parse("Can I book an appointment please?"))
Example #28
def run_nlu():
    interpreter = Interpreter.load('./models/nlu/default/robosoc')
    print(interpreter.parse("what is the reputation of 10.54.1.3"))
Example #29
def run_nlu():
    interpreter = Interpreter.load('./models/nlu/default/chatter')
    pprint.pprint(interpreter.parse("CCU"))
Example #30
def run_nlu(sentence, nlu_model_path='./models/nlu/default/current'):
    interpreter = Interpreter.load(nlu_model_path)
    pprint(interpreter.parse(sentence))
Example #31
from rasa_nlu.model import Interpreter

# where model_directory points to the model folder
interpreter = Interpreter.load("../projects/default/model_20180812-135546")
output = interpreter.parse(u"looking for chinese food")
print(output)
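The parse result is a plain dict; schematically it looks like the following (keys per the old rasa_nlu output format, values purely illustrative):

# {
#     "text": "looking for chinese food",
#     "intent": {"name": "restaurant_search", "confidence": 0.87},
#     "entities": [{"entity": "cuisine", "value": "chinese", "start": 12, "end": 19}],
#     ...
# }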
Example #32
def run_nlu():
    interpreter = Interpreter.load('./models/nlu/default/restaurantnlu',
                                   builder)
    print(
        interpreter.parse(
            "south indian restaurants in agra costs less than 500"))
Example #33
from rasa_nlu.model import Interpreter

CONFIDENCE_THRESHOLD = 0.5
FALLBACK_INTENT = 'N/A'

interpreter = Interpreter.load('./models/nlu/default/test')


def extract_structured_data(query):
    result = interpreter.parse(query)
    data = {'input': query, 'intent': FALLBACK_INTENT, 'entities': []}
    if result['intent']['confidence'] > CONFIDENCE_THRESHOLD:
        data['intent'] = result['intent']['name']
    for entity in result['entities']:
        if entity['confidence'] > CONFIDENCE_THRESHOLD:
            data['entities'].append({
                'name': entity['entity'],
                'value': entity['value']
            })
    return data


if '__main__' == __name__:
    queries = [
        {
            'input': 'roll a die',
            'intent': 'dice',
            'entities': []
        },
        {
            'input': 'songs by linkin park',
Example #34
from flask import Flask, render_template, request
from config import DevConfig
import pickle
import jieba
import numpy
import json
import os
import shutil
import pymysql
import datetime
import xmljson
import xml.etree.ElementTree as ET
import requests
import qa_inference2 as qa_inference
# Added imports so the names used below resolve (assumed sources for this
# snippet: rasa_nlu, TF1-era Keras, and gensim):
from rasa_nlu.model import Interpreter
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.models import load_model
from gensim.models import Word2Vec
from gensim.corpora import Dictionary

interpreter_proj = Interpreter.load("./intent_models/proj2")
interpreter_bill = Interpreter.load("./intent_models/bill2")
interpreter_mb1 = Interpreter.load("./intent_models/mb1")
interpreter_mb2 = Interpreter.load("./intent_models/mb2")
interpreter_mba = Interpreter.load("./intent_models/mba3v1")

sess = tf.Session()
set_session(sess)
graph = tf.get_default_graph()
w2vmodel = Word2Vec.load('./LSTM_model4/aptg_20180309wiki_model.bin')
lstmmodel = load_model('./LSTM_model4/sentiment_test_aptg.h5')
# zzz = np.ones((1, 200))
# lstmmodel.predict(zzz)
# label_dic = {'合約查詢': 0, '帳務查詢': 1, '魔速方塊1.0': 2, '魔速方塊2.0': 3}
label_dic = {'合約查詢': 0, '帳務查詢': 1, '魔速方塊': 2}
gensim_dict = Dictionary()
Example #35
    def find_dataset(self, threshold=0.70):
        """
        runs the model for entity extraction
        :return: citation and mention lists
        """

        model_directory = "project/models/model_20190116-214229"

        all_interpreter = Interpreter.load(model_directory)

        dataset_citations = []
        dataset_mentions = []
        count_jel_methods = 0

        for _, row in self.pub_df.iterrows():
            if len(row['jel_method']) > 0:
                count_jel_methods += 1

            try:
                print(row['pdf_file_name'])
                distinct_datasets = {
                }  # { id1 : [('data1', score1), ('data2', score2)] , id2 : ..}
                entities, jel_field = self.find_entities_jel_field(
                    all_interpreter, threshold, row['text_file_name'])

                with open('project/additional_files/pub_field.json',
                          'r+') as file:
                    pub_field_dict = json.load(file)

                for ent in entities:

                    belongsToField = True

                    if len(entities) == 1 and ent['data_set'].lower(
                    ) == 'sage':
                        continue

                    for _, rowd in self.dataset_vocab.iterrows():

                        if ent['data_set'] in (
                                mention for mention in rowd['mention_list']
                        ) or ent['data_set'] == rowd['name']:

                            if not self.dataset_belongsTo_field(
                                    pub_field_dict[
                                        row['pdf_file_name'].replace(
                                            '.pdf', '').strip()], jel_field,
                                    rowd["subjects"], rowd["description"]):
                                belongsToField = False
                                break

                            if rowd['data_set_id'] not in distinct_datasets:
                                distinct_datasets[rowd['data_set_id']] = [
                                    (ent['data_set'], ent['confidence'])
                                ]
                            else:
                                distinct_datasets[rowd['data_set_id']].append(
                                    (ent['data_set'], ent['confidence']))
                            break

                    if not belongsToField:
                        print(f"not field: {ent['data_set']}")
                        continue

                    result_dict = {}
                    result_dict["publication_id"] = row["publication_id"]
                    result_dict["score"] = ent['confidence']
                    result_dict["mention"] = ent['data_set']
                    dataset_mentions.append(result_dict)

                for id in distinct_datasets:
                    result_dict = {}
                    result_dict["publication_id"] = row["publication_id"]
                    result_dict["data_set_id"] = id
                    result_dict["score"] = max(distinct_datasets[id],
                                               key=itemgetter(1))[1]
                    result_dict["mention_list"] = [
                        i[0] for i in distinct_datasets[id]
                    ]
                    dataset_citations.append(result_dict)

            except Exception as e:
                logging.exception(e)
                continue

        # write results from rasa to intermediate files
        with open("project/additional_files/data_set_citations_rasa.json",
                  "w+") as write_file:
            json.dump(dataset_citations,
                      write_file,
                      indent=4,
                      ensure_ascii=False)

        with open("project/additional_files/data_set_mentions_rasa.json",
                  "w+") as write_file:
            json.dump(dataset_mentions,
                      write_file,
                      indent=4,
                      ensure_ascii=False)

        return dataset_citations, dataset_mentions
Example #36
from rasa_nlu.model import Interpreter
import json

interpreter = Interpreter.load("./models/current/nlu")
messages = [
    "the password could be bananas", "username foo, password: Zzzsa23",
    "the weather is just fine"
]

for message in messages:
    print("Evaluating: %s" % message)
    result = interpreter.parse(message)
    print("Most likely intent: %s" % result['intent'])
    print("all data: %s" % json.dumps(result, indent=2))
Example #37
def run_nlu(model_directory):
    # interpreter = Interpreter.load('./models/nlu/default/customernlu', RasaNLUModelConfig('config_spacy.yml'))
    interpreter = Interpreter.load(model_directory)
    print(interpreter.parse(u"I am planning to order an 829 router. How much does it cost?"))
Example #38
#!/usr/bin/env python

from rasa_nlu.model import Interpreter
import json


interpreter = Interpreter.load("./models/current/nlu")


def print_intent(msg):
    result = interpreter.parse(msg)
    print(json.dumps(result, indent=2))


print_intent("Can you book me a dental checkup appointment for 10'o clock")
print_intent("I need an appointment for eye checkup at 10'o clock")
Example #39
#!/usr/bin/env python3
import os

import rospy
from std_msgs.msg import String
from roboga_nlu.srv import Nlu, NluResponse
from rasa_nlu.model import Interpreter

if __name__ == "__main__":
    directory = os.path.dirname(os.path.realpath(__file__))
    rospy.init_node('rasa_nlu', anonymous=True)
    interpreter = Interpreter.load(directory + "/rasa/models/nlu")

    def handler(req):
        result = interpreter.parse(req.data)
        message = str(result['text'])
        intent = str(result["intent"].get("name"))
        entities = [
            "{{\"entity\":\"{0}\", \"value\":\"{1}\"}}".format(
                x.get("entity"), x.get("value"))
            for x in result.get("entities")
        ]
        return NluResponse(message=message, intent=intent, entities=entities)

    pub = rospy.Service("zordon/nlu", Nlu, handler)
    rospy.spin()
Example #40
    def __init__(self, model_dir):
        from rasa_nlu.model import Metadata, Interpreter
        self.interpreter = Interpreter.load(model_dir, RasaNLUConfig("mobile_nlu_model_config.json"))
        self._items = {}
Example #41
def run_nlu():
    interpreter = Interpreter.load('./models/nlu/default/weathernlu')
    print(interpreter.parse("I am planning my holiday to Lithuania. I wonder what is the weather out there."))
Example #42
def run_nlu():
	query = request.args['q']
	print(query)
	interpreter = Interpreter.load('./models/nlu/default/stagebot', RasaNLUConfig('config_sapcy.json'))
	response = interpreter.parse(query)
	return jsonify({"Engine Status" : response}), 200
Example #43
def run_nlu():
    interpreter = Interpreter.load('./models/nlu/default/Restaurant_NPSR',
                                   builder)
    print(interpreter.parse("show some chinese restaurant in Delhi"))