def main():
    import asyncio
    import rasa.utils
    import rasa_core.cli.train
    from rasa_core.utils import set_default_subparser

    # Running as standalone python application
    arg_parser = create_argument_parser()
    set_default_subparser(arg_parser, 'default')
    cmdline_arguments = arg_parser.parse_args()
    additional_args = _additional_arguments(cmdline_arguments)

    rasa.utils.configure_colored_logging(cmdline_arguments.loglevel)

    # Load the training stories once, then dispatch on the selected mode.
    loop = asyncio.get_event_loop()
    training_stories = loop.run_until_complete(
        rasa_core.cli.train.stories_from_cli_args(cmdline_arguments))

    if cmdline_arguments.mode == 'default':
        loop.run_until_complete(
            do_default_training(cmdline_arguments,
                                training_stories,
                                additional_args))

    elif cmdline_arguments.mode == 'interactive':
        do_interactive_learning(cmdline_arguments,
                                training_stories,
                                additional_args)

    elif cmdline_arguments.mode == 'compare':
        loop.run_until_complete(
            do_compare_training(cmdline_arguments,
                                training_stories,
                                additional_args))
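
# --- Illustrative sketch (not from the original file) ---
# main() above drives coroutines such as stories_from_cli_args and
# do_default_training to completion on a single asyncio event loop. The
# self-contained example below shows that same pattern; `load_stories`
# is a hypothetical stand-in for the real coroutines.
import asyncio

async def load_stories(path):
    # Pretend to read training stories asynchronously.
    return [path]

def _demo_event_loop():
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(load_stories("data/stories.md"))
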
def main():
    import asyncio
    import logging
    import os
    from rasa_core.agent import Agent
    from rasa_core.interpreter import NaturalLanguageInterpreter
    from rasa_core.utils import (AvailableEndpoints,
                                 set_default_subparser)
    import rasa_nlu.utils as nlu_utils
    import rasa_core.cli.train
    from rasa_core import utils

    loop = asyncio.get_event_loop()

    # Running as standalone python application
    arg_parser = create_argument_parser()
    set_default_subparser(arg_parser, 'default')
    cmdline_arguments = arg_parser.parse_args()

    logging.basicConfig(level=cmdline_arguments.loglevel)
    _endpoints = AvailableEndpoints.read_endpoints(cmdline_arguments.endpoints)

    if cmdline_arguments.output:
        nlu_utils.create_dir(cmdline_arguments.output)

    if not cmdline_arguments.core:
        raise ValueError("You must provide a core model directory to "
                         "evaluate using -d / --core.")

    if cmdline_arguments.mode == 'default':
        _interpreter = NaturalLanguageInterpreter.create(
            cmdline_arguments.nlu, _endpoints.nlu)
        _agent = Agent.load(cmdline_arguments.core,
                            interpreter=_interpreter)

        stories = loop.run_until_complete(
            rasa_core.cli.train.stories_from_cli_args(cmdline_arguments))

        loop.run_until_complete(
            test(stories, _agent,
                 cmdline_arguments.max_stories,
                 cmdline_arguments.output,
                 cmdline_arguments.fail_on_prediction_errors,
                 cmdline_arguments.e2e))

    elif cmdline_arguments.mode == 'compare':
        compare(cmdline_arguments.core,
                cmdline_arguments.stories,
                cmdline_arguments.output)

        # The compare-mode training run is expected to have written the
        # story counts next to the trained models.
        story_n_path = os.path.join(cmdline_arguments.core,
                                    'num_stories.json')
        number_of_stories = utils.read_json_file(story_n_path)
        plot_curve(cmdline_arguments.output, number_of_stories)

    logger.info("Finished evaluation")
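
# --- Usage sketch (not from the original file; flag names are inferred
# from the cmdline_arguments attributes above and may differ between
# rasa_core versions) ---
# Evaluate a trained Core model on test stories:
#   python -m rasa_core.evaluate default --core models/dialogue \
#       --stories data/test_stories.md --output results
# Compare models produced by the training script's compare mode:
#   python -m rasa_core.evaluate compare --core comparison_models \
#       --stories data/test_stories.md --output comparison_results
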
        cmdline_args.out,
        _interpreter,
        _endpoints,
        cmdline_args.dump_stories,
        cmdline_args.config[0],
        None,
        additional_arguments)

    interactive.run_interactive_learning(
        _agent, stories,
        finetune=cmdline_args.finetune,
        skip_visualization=cmdline_args.skip_visualization)


if __name__ == '__main__':
    # Running as standalone python application
    arg_parser = create_argument_parser()
    set_default_subparser(arg_parser, 'default')
    cmdline_args = arg_parser.parse_args()

    if not cmdline_args.mode:
        raise ValueError("You must specify the mode you want training to run "
                         "in. The options are: (default|compare|interactive)")

    additional_arguments = _additional_arguments(cmdline_args)

    utils.configure_colored_logging(cmdline_args.loglevel)

    # Stories can come from a remote URL or from a local path.
    if cmdline_args.url:
        stories = utils.download_file_from_url(cmdline_args.url)
    else:
        stories = cmdline_args.stories

    if cmdline_args.mode == 'default':
        do_default_training(cmdline_args, stories, additional_arguments)
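
# --- Usage sketch (not from the original file; flag names are inferred
# from the cmdline_args attributes above and may differ between
# rasa_core versions) ---
# Train with local stories:
#   python -m rasa_core.train default --stories data/stories.md \
#       --out models/dialogue
# Train with stories fetched from a URL:
#   python -m rasa_core.train default --url <stories-url> --out models/dialogue
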