def main():
    """Standalone CLI entry point for training.

    Parses the command line, configures colored logging, loads the training
    stories on the event loop, and then dispatches to the training routine
    selected by ``--mode`` (``default``, ``interactive`` or ``compare``).
    """
    from rasa.utils.io import configure_colored_logging
    import rasa.core.cli.train
    from rasa.core.utils import set_default_subparser

    # Running as standalone python application
    parser = create_argument_parser()
    set_default_subparser(parser, 'default')
    args = parser.parse_args()
    extra_kwargs = _additional_arguments(args)

    configure_colored_logging(args.loglevel)

    # Story loading is a coroutine, so drive it to completion on the loop.
    event_loop = asyncio.get_event_loop()
    stories = event_loop.run_until_complete(
        rasa.core.cli.train.stories_from_cli_args(args))

    # Dispatch on the selected training mode; interactive learning runs
    # synchronously, the other two modes are coroutines.
    if args.mode == 'default':
        event_loop.run_until_complete(
            do_default_training(args, stories, extra_kwargs))
    elif args.mode == 'interactive':
        do_interactive_learning(args, stories, extra_kwargs)
    elif args.mode == 'compare':
        event_loop.run_until_complete(
            do_compare_training(args, stories, extra_kwargs))
def main():
    """Standalone CLI entry point for core model evaluation.

    Parses the command line, loads endpoint configuration, and either runs a
    single-model story evaluation (``--mode default``) or compares several
    trained models and plots the resulting curve (``--mode compare``).

    Raises:
        ValueError: if no core model directory was supplied via ``-d/--core``.
    """
    from rasa.core.agent import Agent
    from rasa.core.interpreter import NaturalLanguageInterpreter
    from rasa.core.utils import AvailableEndpoints, set_default_subparser
    import rasa.nlu.utils as nlu_utils
    # Fix: import the `train` submodule explicitly. A bare
    # `import rasa.core.cli` does not load `rasa.core.cli.train`, so the
    # `stories_from_cli_args` call below would raise AttributeError unless
    # another module had already imported it as a side effect.
    import rasa.core.cli.train
    from rasa.core import utils

    loop = asyncio.get_event_loop()

    # Running as standalone python application
    arg_parser = create_argument_parser()
    set_default_subparser(arg_parser, "default")
    cmdline_arguments = arg_parser.parse_args()

    logging.basicConfig(level=cmdline_arguments.loglevel)
    _endpoints = AvailableEndpoints.read_endpoints(cmdline_arguments.endpoints)

    if cmdline_arguments.output:
        nlu_utils.create_dir(cmdline_arguments.output)

    # Both modes need a trained core model to evaluate.
    if not cmdline_arguments.core:
        raise ValueError(
            "you must provide a core model directory to evaluate using -d / --core"
        )

    if cmdline_arguments.mode == "default":
        _interpreter = NaturalLanguageInterpreter.create(
            cmdline_arguments.nlu, _endpoints.nlu
        )
        _agent = Agent.load(cmdline_arguments.core, interpreter=_interpreter)

        # Story loading and evaluation are coroutines; run them on the loop.
        stories = loop.run_until_complete(
            rasa.core.cli.train.stories_from_cli_args(cmdline_arguments)
        )
        loop.run_until_complete(
            test(
                stories,
                _agent,
                cmdline_arguments.max_stories,
                cmdline_arguments.output,
                cmdline_arguments.fail_on_prediction_errors,
                cmdline_arguments.e2e,
            )
        )
    elif cmdline_arguments.mode == "compare":
        compare(
            cmdline_arguments.core, cmdline_arguments.stories, cmdline_arguments.output
        )

        # The training step records how many stories each model saw; use
        # that to label the x-axis of the comparison plot.
        story_n_path = os.path.join(cmdline_arguments.core, "num_stories.json")
        number_of_stories = utils.read_json_file(story_n_path)
        plot_curve(cmdline_arguments.output, number_of_stories)

    logger.info("Finished evaluation")