Example #1
def plot_curve(output: Text, no_stories: List[int]) -> None:
    """Plot the results from run_comparison_evaluation.

    Args:
        output: Output directory to save resulting plots to
        no_stories: Number of stories per run
    """
    import matplotlib.pyplot as plt

    ax = plt.gca()

    # load results from file
    data = utils.read_json_file(os.path.join(output, 'results.json'))
    x = no_stories

    # compute mean of all the runs for keras/embed policies
    for label in data.keys():
        if len(data[label]) == 0:
            continue
        mean = np.mean(data[label], axis=0)
        std = np.std(data[label], axis=0)
        ax.plot(x, mean, label=label, marker='.')
        ax.fill_between(x, [m - s for m, s in zip(mean, std)],
                        [m + s for m, s in zip(mean, std)],
                        color='#6b2def',
                        alpha=0.2)
    ax.legend(loc=4)
    ax.set_xlabel("Number of stories present during training")
    ax.set_ylabel("Number of correct test stories")
    plt.savefig(os.path.join(output, 'model_comparison_graph.pdf'),
                format='pdf')
    plt.show()
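The layout of results.json is implied by the np.mean(data[label], axis=0) call above: each policy label maps to a list of runs, and each run holds one score per entry in no_stories. A hypothetical minimal file and call, for illustration only (labels and numbers are made up):

# Hypothetical contents of <output>/results.json:
#
#   {
#       "KerasPolicy": [[3, 7, 12], [4, 6, 11]],
#       "EmbeddingPolicy": [[2, 8, 13], [3, 9, 12]]
#   }
#
# With such a file in place, this call plots one mean curve per policy,
# with a shaded band of one standard deviation around it:
plot_curve("comparison_results", no_stories=[0, 5, 25])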
Example #2
def main():
    from rasa_core.agent import Agent
    from rasa_core.interpreter import NaturalLanguageInterpreter
    from rasa_core.utils import (AvailableEndpoints, set_default_subparser)
    import rasa_nlu.utils as nlu_utils
    import rasa_core.cli
    from rasa_core import utils

    loop = asyncio.get_event_loop()

    # Running as standalone python application
    arg_parser = create_argument_parser()
    set_default_subparser(arg_parser, 'default')
    cmdline_arguments = arg_parser.parse_args()

    logging.basicConfig(level=cmdline_arguments.loglevel)
    _endpoints = AvailableEndpoints.read_endpoints(cmdline_arguments.endpoints)

    if cmdline_arguments.output:
        nlu_utils.create_dir(cmdline_arguments.output)

    if not cmdline_arguments.core:
        raise ValueError("you must provide a core model directory to evaluate "
                         "using -d / --core")
    if cmdline_arguments.mode == 'default':

        _interpreter = NaturalLanguageInterpreter.create(
            cmdline_arguments.nlu, _endpoints.nlu)

        _agent = Agent.load(cmdline_arguments.core, interpreter=_interpreter)

        stories = loop.run_until_complete(
            rasa_core.cli.train.stories_from_cli_args(cmdline_arguments))

        loop.run_until_complete(
            test(stories, _agent, cmdline_arguments.max_stories,
                 cmdline_arguments.output,
                 cmdline_arguments.fail_on_prediction_errors,
                 cmdline_arguments.e2e))

    elif cmdline_arguments.mode == 'compare':
        compare(cmdline_arguments.core, cmdline_arguments.stories,
                cmdline_arguments.output)

        story_n_path = os.path.join(cmdline_arguments.core, 'num_stories.json')

        number_of_stories = utils.read_json_file(story_n_path)
        plot_curve(cmdline_arguments.output, number_of_stories)

    logger.info("Finished evaluation")
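The 'compare' branch assumes a num_stories.json file next to the trained core models. Given plot_curve's List[int] annotation, it should be a plain JSON array of training-set sizes. A minimal sketch of producing such a file (the values are made up):

import json

# Hypothetical producer of num_stories.json: a JSON array of the
# training-set sizes that plot_curve later uses as its x-axis.
with open('num_stories.json', 'w') as f:
    json.dump([0, 5, 25, 50, 70, 90, 100], f)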
Example #3
def test_core(model: Text,
              stories: Text,
              endpoints: Text = None,
              output: Text = DEFAULT_RESULTS_PATH,
              model_path: Text = None,
              **kwargs: Dict):
    import rasa_core.test
    import rasa_core.utils as core_utils
    from rasa_nlu import utils as nlu_utils
    from rasa.model import get_model, get_model_subdirectories
    from rasa_core.interpreter import NaturalLanguageInterpreter
    from rasa_core.agent import Agent

    _endpoints = core_utils.AvailableEndpoints.read_endpoints(endpoints)

    if output:
        nlu_utils.create_dir(output)

    if os.path.isfile(model):
        model_path = get_model(model)

    if model_path:
        # Single model: Normal evaluation
        loop = asyncio.get_event_loop()
        core_path, nlu_path = get_model_subdirectories(model_path)

        _interpreter = NaturalLanguageInterpreter.create(
            nlu_path, _endpoints.nlu)

        _agent = Agent.load(core_path, interpreter=_interpreter)

        kwargs = minimal_kwargs(kwargs, rasa_core.test.test)
        loop.run_until_complete(
            rasa_core.test.test(stories, _agent, out_directory=output,
                                **kwargs))

    else:
        from rasa_core.test import compare, plot_curve

        compare(model, stories, output)

        story_n_path = os.path.join(model, 'num_stories.json')

        number_of_stories = core_utils.read_json_file(story_n_path)
        plot_curve(output, number_of_stories)
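minimal_kwargs is called here but not shown. Judging by the call site, it prunes **kwargs down to the arguments the target function accepts. A minimal sketch of such a helper, assuming that behavior rather than rasa's exact implementation:

import inspect
from typing import Any, Callable, Dict, Text


def minimal_kwargs(kwargs: Dict[Text, Any], func: Callable) -> Dict[Text, Any]:
    # Keep only the keyword arguments that `func` declares, so calling
    # `func(..., **kwargs)` cannot fail on an unexpected argument.
    accepted = inspect.signature(func).parameters.keys()
    return {k: v for k, v in kwargs.items() if k in accepted}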
Example #4
    def email(sender_id):
        request_params = request_parameters()

        if 'query' in request_params:
            message = request_params.get('query')
        elif 'q' in request_params:
            message = request_params.get('q')
        else:
            # jsonify already builds an application/json response; just
            # attach the error status to it.
            error_response = jsonify(error="Invalid respond parameter "
                                           "specified.")
            error_response.status_code = 400
            return error_response

        global teams_channel
        inter_channel_mapper = read_json_file(
            env_json.get('inter_channel_mapper'))
        teams_channel.output_channel.id_map.update(inter_channel_mapper)

        # temporary code follows
        first_mapped_key = list(inter_channel_mapper.keys())[0]
        teams_channel.output_channel.id_map.update(
            {sender_id: inter_channel_mapper[first_mapped_key]})
        teams_channel.output_channel.reverse_id_map.update(
            {first_mapped_key: sender_id})
        # temporary code ends

        email_id = request_params.get('email_id')
        preprocessor = partial(idare.email_preprocessor, email_id=email_id)
        try:
            # Set the output channel
            out = CollectingOutputChannel()
            # Fetches the appropriate bot response in a json format
            agent().handle_email(message,
                                 email_preprocessor=preprocessor,
                                 output_channel=out,
                                 alternate_channel=teams_channel,
                                 sender_id=sender_id)
            response = out.latest_output()

            return jsonify(response)

        except Exception as e:
            logger.exception("Caught an exception while responding.")
            error_response = jsonify(
                error="Server failure. Error: {}".format(e))
            error_response.status_code = 500
            return error_response
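request_parameters() is another helper this snippet assumes. In Flask-based channels it typically merges URL query arguments with an optional JSON body; a plausible sketch under that assumption (not necessarily the original helper):

from flask import request


def request_parameters():
    # Hypothetical helper: expose GET query arguments and a JSON POST body
    # through one dict, so the handler can look up 'query' or 'q' uniformly.
    if request.method == 'GET':
        return request.args.to_dict(flat=True)
    return request.get_json(silent=True) or {}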
Example #5
def test_interactive_domain_persistence(mock_endpoint, tmpdir):
    # Test method interactive._write_domain_to_file

    tracker_dump = "data/test_trackers/tracker_moodbot.json"
    tracker_json = utils.read_json_file(tracker_dump)

    events = tracker_json.get("events", [])

    domain_path = tmpdir.join("interactive_domain_save.yml").strpath

    url = '{}/domain'.format(mock_endpoint.url)
    httpretty.register_uri(httpretty.GET, url, body='{}')

    httpretty.enable()
    interactive._write_domain_to_file(domain_path, events, mock_endpoint)
    httpretty.disable()

    saved_domain = utils.read_yaml_file(domain_path)

    for default_action in default_actions():
        assert default_action.name() not in saved_domain["actions"]
Example #6
async def test_interactive_domain_persistence(mock_endpoint, tmpdir):
    # Test method interactive._write_domain_to_file

    tracker_dump = "data/test_trackers/tracker_moodbot.json"
    tracker_json = utils.read_json_file(tracker_dump)

    events = tracker_json.get("events", [])

    domain_path = tmpdir.join("interactive_domain_save.yml").strpath

    url = '{}/domain'.format(mock_endpoint.url)
    with aioresponses() as mocked:
        mocked.get(url, payload={})

        await interactive._write_domain_to_file(domain_path, events,
                                                mock_endpoint)

    saved_domain = utils.read_yaml_file(domain_path)

    for default_action in default_actions():
        assert default_action.name() not in saved_domain["actions"]
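Example #6 is the asyncio port of Example #5: httpretty patches the blocking HTTP stack, while aioresponses intercepts aiohttp requests, which is why the second variant awaits _write_domain_to_file and needs an async-aware test runner such as pytest-asyncio. Both receive a mock_endpoint fixture that is not shown; given the mock_endpoint.url access, a plausible sketch (the URL is made up) would be:

import pytest
from rasa_core.utils import EndpointConfig


@pytest.fixture
def mock_endpoint():
    # Hypothetical fixture: an endpoint config whose URL the HTTP mocks
    # above intercept, so no real server is ever contacted.
    return EndpointConfig("https://example.com/core")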
Example #7
    if cmdline_arguments.output:
        nlu_utils.create_dir(cmdline_arguments.output)

    if not cmdline_arguments.core:
        raise ValueError("you must provide a core model directory to evaluate "
                         "using -d / --core")
    if cmdline_arguments.mode == 'default':

        _interpreter = NaturalLanguageInterpreter.create(
            cmdline_arguments.nlu, _endpoints.nlu)

        _agent = Agent.load(cmdline_arguments.core, interpreter=_interpreter)

        stories = cli.stories_from_cli_args(cmdline_arguments)

        run_story_evaluation(stories, _agent, cmdline_arguments.max_stories,
                             cmdline_arguments.output,
                             cmdline_arguments.fail_on_prediction_errors,
                             cmdline_arguments.e2e)

    elif cmdline_arguments.mode == 'compare':
        run_comparison_evaluation(cmdline_arguments.core,
                                  cmdline_arguments.stories,
                                  cmdline_arguments.output)

        story_n_path = os.path.join(cmdline_arguments.core, 'num_stories.json')

        number_of_stories = utils.read_json_file(story_n_path)
        plot_curve(cmdline_arguments.output, number_of_stories)

    logger.info("Finished evaluation")
Example #8
from base64 import b64encode
from simplejson.errors import JSONDecodeError
from urllib3.exceptions import MaxRetryError
from requests.exceptions import ConnectionError, Timeout
import pymysql
import urllib.parse as urlparser
import hashlib

from rasa_core.exceptions import APIError
from rasa_core.utils import read_json_file

logger = logging.getLogger(__name__)

try:
    path = './'
    configdata = read_json_file(os.path.join(path, "envconfig.json"))

except FileNotFoundError:
    # This exception is handled because of one of the test cases.
    path = '../'
    configdata = read_json_file(os.path.join(path, "envconfig.json"))


def get_sentiment(usermessage, conversationid):
    apitimeout = configdata['sentiment_api_timeout']
    if apitimeout is None:
        apitimeout = 30.0
    else:
        apitimeout = float(apitimeout)
    try:
        if configdata['sentiment_api'] != "":