async def test_events_schema(monkeypatch: MonkeyPatch):
    # this allows us to patch the printing part used in debug mode to collect the
    # reported events
    monkeypatch.setenv("RASA_TELEMETRY_DEBUG", "true")
    monkeypatch.setenv("RASA_TELEMETRY_ENABLED", "true")

    mock = Mock()
    monkeypatch.setattr(telemetry, "print_telemetry_event", mock)

    with open(TELEMETRY_EVENTS_JSON) as f:
        schemas = json.load(f)["events"]

    initial = asyncio.Task.all_tasks()

    # Generate all known backend telemetry events, and then use events.json to
    # validate their schema.
    training_data = TrainingDataImporter.load_from_config(DEFAULT_CONFIG_PATH)
    async with telemetry.track_model_training(training_data, "rasa"):
        await asyncio.sleep(1)

    await telemetry.track_telemetry_disabled()

    pending = asyncio.Task.all_tasks() - initial
    await asyncio.gather(*pending)

    assert mock.call_count == 3

    for call in mock.call_args_list:
        event = call.args[0]
        # `metrics_id` automatically gets added to all events but is
        # not part of the schema, so we need to remove it before validation
        del event["properties"]["metrics_id"]

        jsonschema.validate(
            instance=event["properties"], schema=schemas[event["event"]]
        )

async def test_events_schema(
    monkeypatch: MonkeyPatch, default_agent: Agent, config_path: Text
):
    # this allows us to patch the printing part used in debug mode to collect the
    # reported events
    monkeypatch.setenv("RASA_TELEMETRY_DEBUG", "true")
    monkeypatch.setenv("RASA_TELEMETRY_ENABLED", "true")

    mock = Mock()
    monkeypatch.setattr(telemetry, "print_telemetry_event", mock)

    with open(TELEMETRY_EVENTS_JSON) as f:
        schemas = json.load(f)["events"]

    initial = asyncio.all_tasks()

    # Generate all known backend telemetry events, and then use events.json to
    # validate their schema.
    training_data = TrainingDataImporter.load_from_config(config_path)

    with telemetry.track_model_training(training_data, "rasa"):
        await asyncio.sleep(1)

    telemetry.track_telemetry_disabled()
    telemetry.track_data_split(0.5, "nlu")
    telemetry.track_validate_files(True)
    telemetry.track_data_convert("yaml", "nlu")
    telemetry.track_tracker_export(5, TrackerStore(domain=None), EventBroker())
    telemetry.track_interactive_learning_start(True, False)
    telemetry.track_server_start([CmdlineInput()], None, None, 42, True)
    telemetry.track_project_init("tests/")
    telemetry.track_shell_started("nlu")
    telemetry.track_rasa_x_local()
    telemetry.track_visualization()
    telemetry.track_core_model_test(5, True, default_agent)
    telemetry.track_nlu_model_test(TrainingData())

    pending = asyncio.all_tasks() - initial
    await asyncio.gather(*pending)

    assert mock.call_count == 15

    for args, _ in mock.call_args_list:
        event = args[0]
        # `metrics_id` automatically gets added to all events but is
        # not part of the schema, so we need to remove it before validation
        del event["properties"]["metrics_id"]

        jsonschema.validate(
            instance=event["properties"], schema=schemas[event["event"]]
        )

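# Both versions of the schema test rely on the same seam: with
# RASA_TELEMETRY_DEBUG set, the telemetry module prints each event instead of
# shipping it, so patching the print function captures every payload. Below is
# a minimal sketch of that dispatch seam -- `_send_event`, `_track`, and the
# exact payload shape are illustrative assumptions, not Rasa's actual
# internals.
import json
import os
from typing import Any, Dict


def print_telemetry_event(payload: Dict[str, Any]) -> None:
    """Debug sink: pretty-print the event instead of sending it."""
    print(json.dumps(payload, indent=2))


def _send_event(payload: Dict[str, Any]) -> None:
    """Illustrative stand-in for the real network dispatch."""


def _track(event_name: str, properties: Dict[str, Any]) -> None:
    # In debug mode events are routed to the printable sink, which is exactly
    # what the tests above replace with a `Mock` to collect payloads.
    payload = {"event": event_name, "properties": properties}
    if os.environ.get("RASA_TELEMETRY_DEBUG", "false").lower() == "true":
        print_telemetry_event(payload)
    else:
        _send_event(payload)
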
async def _train_nlu_with_validated_data(
    file_importer: TrainingDataImporter,
    output: Text,
    train_path: Optional[Text] = None,
    fixed_model_name: Optional[Text] = None,
    persist_nlu_training_data: bool = False,
    additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
    """Train NLU with validated training and config data."""
    import rasa.nlu.train

    if additional_arguments is None:
        additional_arguments = {}

    with ExitStack() as stack:
        if train_path:
            # If the train path was provided, do nothing on exit.
            _train_path = train_path
        else:
            # Otherwise, create a temp train path and clean it up on exit.
            _train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))

        config = await file_importer.get_config()

        print_color(
            "Training NLU model...", color=rasa.shared.utils.io.bcolors.OKBLUE
        )

        async with telemetry.track_model_training(file_importer, model_type="nlu"):
            await rasa.nlu.train(
                config,
                file_importer,
                _train_path,
                fixed_model_name="nlu",
                persist_nlu_training_data=persist_nlu_training_data,
                **additional_arguments,
            )

        print_color(
            "NLU model training completed.",
            color=rasa.shared.utils.io.bcolors.OKBLUE,
        )

        if train_path is None:
            # Only NLU was trained
            new_fingerprint = await model.model_fingerprint(file_importer)
            return model.package_model(
                fingerprint=new_fingerprint,
                output_directory=output,
                train_path=_train_path,
                fixed_model_name=fixed_model_name,
                model_prefix="nlu-",
            )

        return _train_path

async def _train_core_with_validated_data(
    file_importer: TrainingDataImporter,
    output: Text,
    train_path: Optional[Text] = None,
    fixed_model_name: Optional[Text] = None,
    additional_arguments: Optional[Dict] = None,
    interpreter: Optional[Interpreter] = None,
) -> Optional[Text]:
    """Train Core with validated training and config data."""
    import rasa.core.train

    with ExitStack() as stack:
        if train_path:
            # If the train path was provided, do nothing on exit.
            _train_path = train_path
        else:
            # Otherwise, create a temp train path and clean it up on exit.
            _train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))

        # normal (not compare) training
        print_color(
            "Training Core model...", color=rasa.shared.utils.io.bcolors.OKBLUE
        )

        domain, config = await asyncio.gather(
            file_importer.get_domain(), file_importer.get_config()
        )

        async with telemetry.track_model_training(file_importer, model_type="core"):
            await rasa.core.train(
                domain_file=domain,
                training_resource=file_importer,
                output_path=os.path.join(_train_path, DEFAULT_CORE_SUBDIRECTORY_NAME),
                policy_config=config,
                additional_arguments=additional_arguments,
                interpreter=interpreter,
            )

        print_color(
            "Core model training completed.",
            color=rasa.shared.utils.io.bcolors.OKBLUE,
        )

        if train_path is None:
            # Only Core was trained.
            new_fingerprint = await model.model_fingerprint(file_importer)
            return model.package_model(
                fingerprint=new_fingerprint,
                output_directory=output,
                train_path=_train_path,
                fixed_model_name=fixed_model_name,
                model_prefix="core-",
            )

        return _train_path

async def _train_nlu_with_validated_data(
    file_importer: TrainingDataImporter,
    output: Text,
    train_path: Optional[Text] = None,
    fixed_model_name: Optional[Text] = None,
    persist_nlu_training_data: bool = False,
    additional_arguments: Optional[Dict] = None,
    model_to_finetune: Optional["Text"] = None,
    finetuning_epoch_fraction: float = 1.0,
) -> Optional[Text]:
    """Train NLU with validated training and config data."""
    import rasa.nlu.train

    if additional_arguments is None:
        additional_arguments = {}

    with ExitStack() as stack:
        if train_path:
            # If the train path was provided, do nothing on exit.
            _train_path = train_path
        else:
            # Otherwise, create a temp train path and clean it up on exit.
            _train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))

        config = await file_importer.get_config()

        rasa.shared.utils.cli.print_color(
            "Training NLU model...", color=rasa.shared.utils.io.bcolors.OKBLUE
        )

        if model_to_finetune:
            rasa.shared.utils.common.mark_as_experimental_feature(
                "Incremental Training feature"
            )
            model_to_finetune = await _nlu_model_for_finetuning(
                model_to_finetune,
                file_importer,
                finetuning_epoch_fraction,
                called_from_combined_training=train_path is not None,
            )

            if not model_to_finetune:
                rasa.shared.utils.cli.print_error_and_exit(
                    f"No NLU model for finetuning found. Please make sure to either "
                    f"specify a path to a previous model or to have a finetunable "
                    f"model within the directory '{output}'."
                )

        async with telemetry.track_model_training(
            file_importer,
            model_type="nlu",
            is_finetuning=model_to_finetune is not None,
        ):
            await rasa.nlu.train.train(
                config,
                file_importer,
                _train_path,
                fixed_model_name="nlu",
                persist_nlu_training_data=persist_nlu_training_data,
                model_to_finetune=model_to_finetune,
                **additional_arguments,
            )

        rasa.shared.utils.cli.print_color(
            "NLU model training completed.",
            color=rasa.shared.utils.io.bcolors.OKBLUE,
        )

        if train_path is None:
            # Only NLU was trained
            new_fingerprint = await model.model_fingerprint(file_importer)
            return model.package_model(
                fingerprint=new_fingerprint,
                output_directory=output,
                train_path=_train_path,
                fixed_model_name=fixed_model_name,
                model_prefix="nlu-",
            )

        return _train_path

async def _train_core_with_validated_data(
    file_importer: TrainingDataImporter,
    output: Text,
    train_path: Optional[Text] = None,
    fixed_model_name: Optional[Text] = None,
    additional_arguments: Optional[Dict] = None,
    interpreter: Optional[Interpreter] = None,
    model_to_finetune: Optional["Text"] = None,
    finetuning_epoch_fraction: float = 1.0,
) -> Optional[Text]:
    """Train Core with validated training and config data."""
    import rasa.core.train

    with ExitStack() as stack:
        if train_path:
            # If the train path was provided, do nothing on exit.
            _train_path = train_path
        else:
            # Otherwise, create a temp train path and clean it up on exit.
            _train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))

        # normal (not compare) training
        rasa.shared.utils.cli.print_color(
            "Training Core model...", color=rasa.shared.utils.io.bcolors.OKBLUE
        )

        domain, config = await asyncio.gather(
            file_importer.get_domain(), file_importer.get_config()
        )

        if model_to_finetune:
            rasa.shared.utils.common.mark_as_experimental_feature(
                "Incremental Training feature"
            )
            model_to_finetune = await _core_model_for_finetuning(
                model_to_finetune,
                file_importer=file_importer,
                finetuning_epoch_fraction=finetuning_epoch_fraction,
            )

            if not model_to_finetune:
                rasa.shared.utils.cli.print_error_and_exit(
                    f"No Core model for finetuning found. Please make sure to either "
                    f"specify a path to a previous model or to have a finetunable "
                    f"model within the directory '{output}'."
                )

        async with telemetry.track_model_training(
            file_importer,
            model_type="core",
            is_finetuning=model_to_finetune is not None,
        ):
            await rasa.core.train.train(
                domain_file=domain,
                training_resource=file_importer,
                output_path=os.path.join(_train_path, DEFAULT_CORE_SUBDIRECTORY_NAME),
                policy_config=config,
                additional_arguments=additional_arguments,
                interpreter=interpreter,
                model_to_finetune=model_to_finetune,
            )

        rasa.shared.utils.cli.print_color(
            "Core model training completed.",
            color=rasa.shared.utils.io.bcolors.OKBLUE,
        )

        if train_path is None:
            # Only Core was trained.
            new_fingerprint = await model.model_fingerprint(file_importer)
            return model.package_model(
                fingerprint=new_fingerprint,
                output_directory=output,
                train_path=_train_path,
                fixed_model_name=fixed_model_name,
                model_prefix="core-",
            )

        return _train_path

async def _train_async_internal(
    file_importer: TrainingDataImporter,
    train_path: Text,
    output_path: Text,
    dry_run: bool,
    force_training: bool,
    fixed_model_name: Optional[Text],
    persist_nlu_training_data: bool,
    core_additional_arguments: Optional[Dict] = None,
    nlu_additional_arguments: Optional[Dict] = None,
    model_to_finetune: Optional[Text] = None,
    finetuning_epoch_fraction: float = 1.0,
) -> TrainingResult:
    """Trains a Rasa model (Core and NLU). Use only from `train_async`.

    Args:
        file_importer: `TrainingDataImporter` which supplies the training data.
        train_path: Directory in which to train the model.
        output_path: Output path.
        dry_run: If `True` then no training will be done, and the information about
            whether the training needs to be done will be printed.
        force_training: If `True` retrain model even if data has not changed.
        fixed_model_name: Name of model to be stored.
        persist_nlu_training_data: `True` if the NLU training data should be persisted
            with the model.
        core_additional_arguments: Additional training parameters for core training.
        nlu_additional_arguments: Additional training parameters forwarded to training
            method of each NLU component.
        model_to_finetune: Optional path to a model which should be finetuned or
            a directory in case the latest trained model should be used.
        finetuning_epoch_fraction: The fraction of currently specified training epochs
            in the model configuration which should be used for finetuning.

    Returns:
        An instance of `TrainingResult`.
    """
    stories, nlu_data = await asyncio.gather(
        file_importer.get_stories(), file_importer.get_nlu_data()
    )

    new_fingerprint = await model.model_fingerprint(file_importer)
    old_model = model.get_latest_model(output_path)

    fingerprint_comparison = model.should_retrain(
        new_fingerprint, old_model, train_path, force_training=force_training
    )

    if dry_run:
        code, texts = dry_run_result(fingerprint_comparison)
        for text in texts:
            print_warning(text) if code > 0 else print_success(text)
        return TrainingResult(code=code)

    if nlu_data.has_e2e_examples():
        rasa.shared.utils.common.mark_as_experimental_feature("end-to-end training")

    if stories.is_empty() and nlu_data.contains_no_pure_nlu_data():
        rasa.shared.utils.cli.print_error(
            "No training data given. Please provide stories and NLU data in "
            "order to train a Rasa model using the '--data' argument."
        )
        return TrainingResult()

    if stories.is_empty():
        rasa.shared.utils.cli.print_warning(
            "No stories present. Just a Rasa NLU model will be trained."
        )
        trained_model = await _train_nlu_with_validated_data(
            file_importer,
            output=output_path,
            fixed_model_name=fixed_model_name,
            persist_nlu_training_data=persist_nlu_training_data,
            additional_arguments=nlu_additional_arguments,
            model_to_finetune=model_to_finetune,
            finetuning_epoch_fraction=finetuning_epoch_fraction,
        )
        return TrainingResult(model=trained_model)

    # We will train NLU if there are any NLU examples, including from e2e stories.
    if nlu_data.contains_no_pure_nlu_data() and not nlu_data.has_e2e_examples():
        rasa.shared.utils.cli.print_warning(
            "No NLU data present. Just a Rasa Core model will be trained."
        )
        trained_model = await _train_core_with_validated_data(
            file_importer,
            output=output_path,
            fixed_model_name=fixed_model_name,
            additional_arguments=core_additional_arguments,
            model_to_finetune=model_to_finetune,
            finetuning_epoch_fraction=finetuning_epoch_fraction,
        )
        return TrainingResult(model=trained_model)

    new_fingerprint = await model.model_fingerprint(file_importer)
    old_model = model.get_latest_model(output_path)

    if not force_training:
        fingerprint_comparison = model.should_retrain(
            new_fingerprint,
            old_model,
            train_path,
            has_e2e_examples=nlu_data.has_e2e_examples(),
        )
    else:
        fingerprint_comparison = FingerprintComparisonResult(force_training=True)

    if fingerprint_comparison.is_training_required():
        async with telemetry.track_model_training(file_importer, model_type="rasa"):
            await _do_training(
                file_importer,
                output_path=output_path,
                train_path=train_path,
                fingerprint_comparison_result=fingerprint_comparison,
                fixed_model_name=fixed_model_name,
                persist_nlu_training_data=persist_nlu_training_data,
                core_additional_arguments=core_additional_arguments,
                nlu_additional_arguments=nlu_additional_arguments,
                old_model_zip_path=old_model,
                model_to_finetune=model_to_finetune,
                finetuning_epoch_fraction=finetuning_epoch_fraction,
            )
        trained_model = model.package_model(
            fingerprint=new_fingerprint,
            output_directory=output_path,
            train_path=train_path,
            fixed_model_name=fixed_model_name,
        )
        return TrainingResult(model=trained_model)

    rasa.shared.utils.cli.print_success(
        "Nothing changed. You can use the old model stored at '{}'."
        "".format(os.path.abspath(old_model))
    )
    return TrainingResult(model=old_model)

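# Every call site above wraps the actual training call in
# `telemetry.track_model_training`, used as an async context manager (the
# newer graph-based trainer further below uses it synchronously). A minimal
# sketch of that shape follows, assuming hypothetical event names and property
# keys -- the real implementation also derives properties from the importer's
# config.
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Dict, Text


def _track(event_name: Text, properties: Dict[str, Any]) -> None:
    """Stand-in for the dispatch helper sketched earlier."""
    print({"event": event_name, "properties": properties})


@asynccontextmanager
async def track_model_training(
    training_data: Any, model_type: Text, is_finetuning: bool = False
) -> AsyncGenerator[None, None]:
    # Fire a "started" event, yield to the training body, and report
    # completion even if training raises.
    _track(
        "training started",
        {"type": model_type, "is_finetuning": is_finetuning},
    )
    try:
        yield
    finally:
        _track("training completed", {"type": model_type})
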
async def _train_async_internal(
    file_importer: TrainingDataImporter,
    train_path: Text,
    output_path: Text,
    force_training: bool,
    fixed_model_name: Optional[Text],
    persist_nlu_training_data: bool,
    core_additional_arguments: Optional[Dict] = None,
    nlu_additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
    """Trains a Rasa model (Core and NLU). Use only from `train_async`.

    Args:
        file_importer: `TrainingDataImporter` which supplies the training data.
        train_path: Directory in which to train the model.
        output_path: Output path.
        force_training: If `True` retrain model even if data has not changed.
        fixed_model_name: Name of model to be stored.
        persist_nlu_training_data: `True` if the NLU training data should be persisted
            with the model.
        core_additional_arguments: Additional training parameters for core training.
        nlu_additional_arguments: Additional training parameters forwarded to training
            method of each NLU component.

    Returns:
        Path of the trained model archive.
    """
    stories, nlu_data = await asyncio.gather(
        file_importer.get_stories(), file_importer.get_nlu_data()
    )

    if stories.is_empty() and nlu_data.is_empty():
        print_error(
            "No training data given. Please provide stories and NLU data in "
            "order to train a Rasa model using the '--data' argument."
        )
        return

    if stories.is_empty():
        print_warning("No stories present. Just a Rasa NLU model will be trained.")
        return await _train_nlu_with_validated_data(
            file_importer,
            output=output_path,
            fixed_model_name=fixed_model_name,
            persist_nlu_training_data=persist_nlu_training_data,
            additional_arguments=nlu_additional_arguments,
        )

    if nlu_data.is_empty():
        print_warning("No NLU data present. Just a Rasa Core model will be trained.")
        return await _train_core_with_validated_data(
            file_importer,
            output=output_path,
            fixed_model_name=fixed_model_name,
            additional_arguments=core_additional_arguments,
        )

    new_fingerprint = await model.model_fingerprint(file_importer)
    old_model = model.get_latest_model(output_path)

    if not force_training:
        fingerprint_comparison = model.should_retrain(
            new_fingerprint, old_model, train_path
        )
    else:
        fingerprint_comparison = FingerprintComparisonResult(force_training=True)

    if fingerprint_comparison.is_training_required():
        async with telemetry.track_model_training(file_importer, model_type="rasa"):
            await _do_training(
                file_importer,
                output_path=output_path,
                train_path=train_path,
                fingerprint_comparison_result=fingerprint_comparison,
                fixed_model_name=fixed_model_name,
                persist_nlu_training_data=persist_nlu_training_data,
                core_additional_arguments=core_additional_arguments,
                nlu_additional_arguments=nlu_additional_arguments,
                old_model_zip_path=old_model,
            )
        return model.package_model(
            fingerprint=new_fingerprint,
            output_directory=output_path,
            train_path=train_path,
            fixed_model_name=fixed_model_name,
        )

    print_success(
        "Nothing changed. You can use the old model stored at '{}'."
        "".format(os.path.abspath(old_model))
    )
    return old_model

def train(
    domain: Text,
    config: Text,
    training_files: Optional[Union[Text, List[Text]]],
    output: Text = rasa.shared.constants.DEFAULT_MODELS_PATH,
    dry_run: bool = False,
    force_training: bool = False,
    fixed_model_name: Optional[Text] = None,
    persist_nlu_training_data: bool = False,
    core_additional_arguments: Optional[Dict] = None,
    nlu_additional_arguments: Optional[Dict] = None,
    model_to_finetune: Optional[Text] = None,
    finetuning_epoch_fraction: float = 1.0,
) -> TrainingResult:
    """Trains a Rasa model (Core and NLU).

    Args:
        domain: Path to the domain file.
        config: Path to the config file.
        training_files: List of paths to training data files.
        output: Output directory for the trained model.
        dry_run: If `True` then no training will be done, and the information about
            whether the training needs to be done will be printed.
        force_training: If `True` retrain model even if data has not changed.
        fixed_model_name: Name of model to be stored.
        persist_nlu_training_data: `True` if the NLU training data should be persisted
            with the model.
        core_additional_arguments: Additional training parameters for core training.
        nlu_additional_arguments: Additional training parameters forwarded to training
            method of each NLU component.
        model_to_finetune: Optional path to a model which should be finetuned or
            a directory in case the latest trained model should be used.
        finetuning_epoch_fraction: The fraction of currently specified training epochs
            in the model configuration which should be used for finetuning.

    Returns:
        An instance of `TrainingResult`.
    """
    file_importer = TrainingDataImporter.load_from_config(
        config, domain, training_files
    )

    stories = file_importer.get_stories()
    nlu_data = file_importer.get_nlu_data()

    training_type = TrainingType.BOTH

    if nlu_data.has_e2e_examples():
        rasa.shared.utils.common.mark_as_experimental_feature("end-to-end training")
        training_type = TrainingType.END_TO_END

    if stories.is_empty() and nlu_data.contains_no_pure_nlu_data():
        rasa.shared.utils.cli.print_error(
            "No training data given. Please provide stories and NLU data in "
            "order to train a Rasa model using the '--data' argument."
        )
        return TrainingResult(code=1)

    domain = file_importer.get_domain()
    if domain.is_empty():
        rasa.shared.utils.cli.print_warning(
            "Core training was skipped because no valid domain file was found. "
            "Only an NLU-model was created. Please specify a valid domain using "
            "the '--domain' argument or check if the provided domain file exists."
        )
        training_type = TrainingType.NLU

    elif stories.is_empty():
        rasa.shared.utils.cli.print_warning(
            "No stories present. Just a Rasa NLU model will be trained."
        )
        training_type = TrainingType.NLU

    # We will train NLU if there are any NLU examples, including from e2e stories.
    elif nlu_data.contains_no_pure_nlu_data() and not nlu_data.has_e2e_examples():
        rasa.shared.utils.cli.print_warning(
            "No NLU data present. Just a Rasa Core model will be trained."
        )
        training_type = TrainingType.CORE

    with telemetry.track_model_training(file_importer, model_type="rasa"):
        return _train_graph(
            file_importer,
            training_type=training_type,
            output_path=output,
            fixed_model_name=fixed_model_name,
            model_to_finetune=model_to_finetune,
            force_full_training=force_training,
            persist_nlu_training_data=persist_nlu_training_data,
            finetuning_epoch_fraction=finetuning_epoch_fraction,
            dry_run=dry_run,
            **(core_additional_arguments or {}),
            **(nlu_additional_arguments or {}),
        )

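# For reference, a typical invocation of the graph-based `train` entry point
# above; the import path and file names are illustrative and depend on the
# Rasa version in use.
from rasa.model_training import train  # assumed import path

result = train(
    domain="domain.yml",
    config="config.yml",
    training_files=["data/"],
    output="models/",
    dry_run=False,
    force_training=False,
)
print(result.model, result.code)  # `TrainingResult` carries both fields
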
def _train_graph(
    file_importer: TrainingDataImporter,
    training_type: TrainingType,
    output_path: Text,
    fixed_model_name: Text,
    model_to_finetune: Optional[Text] = None,
    force_full_training: bool = False,
    dry_run: bool = False,
    **kwargs: Any,
) -> TrainingResult:
    if model_to_finetune:
        model_to_finetune = rasa.model.get_model_for_finetuning(model_to_finetune)
        if not model_to_finetune:
            rasa.shared.utils.cli.print_error_and_exit(
                f"No model for finetuning found. Please make sure to either "
                f"specify a path to a previous model or to have a finetunable "
                f"model within the directory '{output_path}'."
            )

        rasa.shared.utils.common.mark_as_experimental_feature(
            "Incremental Training feature"
        )

    is_finetuning = model_to_finetune is not None

    config = file_importer.get_config()
    recipe = Recipe.recipe_for_name(config.get("recipe"))
    config, _missing_keys, _configured_keys = recipe.auto_configure(
        file_importer.get_config_file_for_auto_config(), config, training_type
    )
    model_configuration = recipe.graph_config_for_recipe(
        config,
        kwargs,
        training_type=training_type,
        is_finetuning=is_finetuning,
    )
    rasa.engine.validation.validate(model_configuration)

    with tempfile.TemporaryDirectory() as temp_model_dir:
        model_storage = _create_model_storage(
            is_finetuning, model_to_finetune, Path(temp_model_dir)
        )
        cache = LocalTrainingCache()
        trainer = GraphTrainer(model_storage, cache, DaskGraphRunner)

        if dry_run:
            fingerprint_status = trainer.fingerprint(
                model_configuration.train_schema, file_importer
            )
            return _dry_run_result(fingerprint_status, force_full_training)

        model_name = _determine_model_name(fixed_model_name, training_type)
        full_model_path = Path(output_path, model_name)

        with telemetry.track_model_training(
            file_importer, model_type=training_type.model_type
        ):
            trainer.train(
                model_configuration,
                file_importer,
                full_model_path,
                force_retraining=force_full_training,
                is_finetuning=is_finetuning,
            )
            rasa.shared.utils.cli.print_success(
                f"Your Rasa model is trained and saved at '{full_model_path}'."
            )

        return TrainingResult(str(full_model_path), 0)

async def _train_nlu_with_validated_data(
    file_importer: TrainingDataImporter,
    output: Text,
    train_path: Optional[Text] = None,
    fixed_model_name: Optional[Text] = None,
    persist_nlu_training_data: bool = False,
    retrain_nlu: Union[bool, List[Text]] = True,
    additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
    """Train NLU with validated training and config data."""
    import rasa.nlu.train

    if additional_arguments is None:
        additional_arguments = {}

    with ExitStack() as stack:
        if train_path:
            # If the train path was provided, do nothing on exit.
            _train_path = train_path
        else:
            # Otherwise, create a temp train path and clean it up on exit.
            _train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))

        # bf mod
        config = await file_importer.get_nlu_config(retrain_nlu)
        async with telemetry.track_model_training(file_importer, model_type="nlu"):
            for lang in config:
                if config[lang]:
                    print_color(
                        "Start training <{}> NLU model ...".format(lang),
                        color=rasa.shared.utils.io.bcolors.OKBLUE,
                    )
                    await rasa.nlu.train(
                        config[lang],
                        file_importer,
                        _train_path,
                        fixed_model_name="nlu-{}".format(lang),
                        persist_nlu_training_data=persist_nlu_training_data,
                        **additional_arguments,
                    )
                else:
                    print_color(
                        "NLU data for language <{}> didn't change, "
                        "skipping training...".format(lang),
                        color=rasa.shared.utils.io.bcolors.OKBLUE,
                    )
        # /bf mod

        print_color(
            "NLU model training completed.", color=rasa.shared.utils.io.bcolors.OKBLUE
        )

        if train_path is None:
            # Only NLU was trained
            new_fingerprint = await model.model_fingerprint(file_importer)
            return model.package_model(
                fingerprint=new_fingerprint,
                output_directory=output,
                train_path=_train_path,
                fixed_model_name=fixed_model_name,
                model_prefix="nlu-",
            )

        return _train_path

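# The Botfront-modified variant above assumes `get_nlu_config(retrain_nlu)`
# returns a per-language mapping in which a falsy value marks a language whose
# data has not changed. A hypothetical shape of that mapping:
config = {
    # languages to retrain map to their pipeline config ...
    "en": {"language": "en", "pipeline": [{"name": "WhitespaceTokenizer"}]},
    # ... unchanged languages map to a falsy value and are skipped with the
    # "didn't change" message.
    "fr": None,
}
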
async def _train_async_internal(
    file_importer: TrainingDataImporter,
    train_path: Text,
    output_path: Text,
    force_training: bool,
    fixed_model_name: Optional[Text],
    persist_nlu_training_data: bool,
    core_additional_arguments: Optional[Dict] = None,
    nlu_additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
    """Trains a Rasa model (Core and NLU). Use only from `train_async`.

    Args:
        file_importer: `TrainingDataImporter` which supplies the training data.
        train_path: Directory in which to train the model.
        output_path: Output path.
        force_training: If `True` retrain model even if data has not changed.
        fixed_model_name: Name of model to be stored.
        persist_nlu_training_data: `True` if the NLU training data should be persisted
            with the model.
        core_additional_arguments: Additional training parameters for core training.
        nlu_additional_arguments: Additional training parameters forwarded to training
            method of each NLU component.

    Returns:
        Path of the trained model archive.
    """
    stories, nlu_data = await asyncio.gather(
        file_importer.get_stories(), file_importer.get_nlu_data()
    )

    # if stories.is_empty() and nlu_data.can_train_nlu_model():
    #     print_error(
    #         "No training data given. Please provide stories and NLU data in "
    #         "order to train a Rasa model using the '--data' argument."
    #     )
    #     return

    # if stories.is_empty():
    #     print_warning("No stories present. Just a Rasa NLU model will be trained.")
    #     return await _train_nlu_with_validated_data(
    #         file_importer,
    #         output=output_path,
    #         fixed_model_name=fixed_model_name,
    #         persist_nlu_training_data=persist_nlu_training_data,
    #         additional_arguments=nlu_additional_arguments,
    #     )

    # if nlu_data.can_train_nlu_model():
    #     print_warning("No NLU data present. Just a Rasa Core model will be trained.")
    #     return await _train_core_with_validated_data(
    #         file_importer,
    #         output=output_path,
    #         fixed_model_name=fixed_model_name,
    #         additional_arguments=core_additional_arguments,
    #     )

    new_fingerprint = await model.model_fingerprint(file_importer)
    old_model = model.get_latest_model(output_path)

    if not force_training:
        fingerprint_comparison = model.should_retrain(
            new_fingerprint, old_model, train_path
        )
    else:
        fingerprint_comparison = FingerprintComparisonResult(force_training=True)

    # bf mod >
    if fingerprint_comparison.nlu is True:  # replace True with list of all langs
        fingerprint_comparison.nlu = list(new_fingerprint.get("nlu-config", {}).keys())
    domain = await file_importer.get_domain()
    core_untrainable = domain.is_empty() or stories.is_empty()
    nlu_untrainable = [l for l, d in nlu_data.items() if d.is_empty()]
    fingerprint_comparison.core = fingerprint_comparison.core and not core_untrainable
    fingerprint_comparison.nlu = [
        l for l in fingerprint_comparison.nlu if l not in nlu_untrainable
    ]

    if core_untrainable:
        print_color(
            "Skipping Core training since domain or stories are empty.",
            color=rasa.shared.utils.io.bcolors.OKBLUE,
        )

    for lang in nlu_untrainable:
        print_color(
            "No NLU data found for language <{}>, skipping training...".format(lang),
            color=rasa.shared.utils.io.bcolors.OKBLUE,
        )
    # </ bf mod

    if fingerprint_comparison.is_training_required():
        async with telemetry.track_model_training(file_importer, model_type="rasa"):
            await _do_training(
                file_importer,
                output_path=output_path,
                train_path=train_path,
                fingerprint_comparison_result=fingerprint_comparison,
                fixed_model_name=fixed_model_name,
                persist_nlu_training_data=persist_nlu_training_data,
                core_additional_arguments=core_additional_arguments,
                nlu_additional_arguments=nlu_additional_arguments,
                old_model_zip_path=old_model,
            )
        return model.package_model(
            fingerprint=new_fingerprint,
            output_directory=output_path,
            train_path=train_path,
            fixed_model_name=fixed_model_name,
        )

    print_success(
        "Nothing changed. You can use the old model stored at '{}'."
        "".format(os.path.abspath(old_model))
    )
    return old_model