Code example #1
File: exporter.py  Project: sanaayakurup/rasa-1
    def publish_events(self) -> int:
        """Publish events in a tracker store using an event broker.

        Exits if the publishing of events is interrupted due to an error. In that case,
        the CLI command to continue the export where it was interrupted is printed.

        Returns:
            The number of successfully published events.

        """
        events = self._fetch_events_within_time_range()

        cli_utils.print_info(
            f"Selected {len(events)} events for publishing. Ready to go 🚀")

        published_events = 0
        current_timestamp = None

        headers = self._get_message_headers()

        for event in tqdm(events, "events"):
            # noinspection PyBroadException
            try:
                self._publish_with_message_headers(event, headers)
                published_events += 1
                current_timestamp = event["timestamp"]
            except Exception as e:
                logger.exception(e)
                raise PublishingError(current_timestamp)

        self.event_broker.close()

        return published_events
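
The `PublishingError` raised above carries the timestamp of the last successfully published event. A minimal sketch of how a caller might use it to print a resume command, assuming the error stringifies to that timestamp and that the `rasa export` CLI accepts a `--minimum-timestamp` flag (both assumptions, not confirmed by the snippet):

import sys

def run_export(exporter) -> None:
    # Hypothetical driver; `exporter` is an instance of the class above.
    try:
        published = exporter.publish_events()
        cli_utils.print_info(f"Done. Published {published} events.")
    except PublishingError as e:
        # Assumption: str(e) yields the timestamp passed to the constructor.
        cli_utils.print_error(
            "Export interrupted. Continue where it stopped with:\n"
            f"rasa export --minimum-timestamp={e}"
        )
        sys.exit(1)
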
Code example #2
File: data.py  Project: sysang/rasa
def _convert_to_yaml(args: argparse.Namespace, is_nlu: bool) -> None:

    output = Path(args.out)
    if not os.path.exists(output):
        print_error_and_exit(
            f"The output path '{output}' doesn't exist. Please make sure to specify "
            f"an existing directory and try again."
        )

    training_data = Path(args.data)
    if not os.path.exists(training_data):
        print_error_and_exit(
            f"The training data path {training_data} doesn't exist "
            f"and will be skipped."
        )

    num_of_files_converted = 0
    for file in os.listdir(training_data):
        source_path = training_data / file
        output_path = output / f"{source_path.stem}{CONVERTED_FILE_SUFFIX}"

        if MarkdownReader.is_markdown_nlu_file(source_path):
            if not is_nlu:
                continue
            _write_nlu_yaml(source_path, output_path, source_path)
            num_of_files_converted += 1
        elif not is_nlu and MarkdownStoryReader.is_markdown_story_file(source_path):
            _write_core_yaml(source_path, output_path, source_path)
            num_of_files_converted += 1
        else:
            print_warning(f"Skipped file: '{source_path}'.")

    print_info(f"Converted {num_of_files_converted} file(s), saved in '{output}'.")
Code example #3
def _read_telemetry_consent(no_prompt: bool) -> bool:
    """Check if the user wants to enable telemetry or not.

    Args:
        no_prompt: If `True`, do not prompt the user for input (but inform
            about any decision taken).

    Returns:
        Boolean indicating if the user wants to enable telemetry.
    """
    import questionary

    allow_telemetry = (questionary.confirm(
        "Rasa will track a minimal amount of anonymized usage information "
        "(like how often the 'train' button is used) to help us improve Rasa X. "
        "None of your training data or conversations will ever be sent to Rasa. "
        "Are you OK with Rasa collecting anonymized usage data?").skip_if(
            no_prompt, default=True).ask())

    if not no_prompt:
        rasa_cli_utils.print_success(
            f"Your decision has been stored into '{GLOBAL_USER_CONFIG_PATH}'.")
    else:
        rasa_cli_utils.print_info(
            "By adding the '--no_prompt' parameter you agreed to allow Rasa to track "
            "and send anonymized usage information.")

    return allow_telemetry
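
The `skip_if` chaining above comes from the questionary library: when the condition is true, `.ask()` returns the supplied default without ever showing the prompt. A small illustration, assuming questionary is installed:

import questionary

# With a true condition, no prompt is shown and the default is returned.
answer = (
    questionary.confirm("Enable telemetry?").skip_if(True, default=True).ask()
)
assert answer is True
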
Code example #4
def _convert_to_yaml(args: argparse.Namespace,
                     converter: TrainingDataConverter) -> None:

    output = Path(args.out)
    if not os.path.exists(output):
        print_error_and_exit(
            f"The output path '{output}' doesn't exist. Please make sure to specify "
            f"an existing directory and try again.")

    training_data = Path(args.data)
    if not os.path.exists(training_data):
        print_error_and_exit(
            f"The training data path {training_data} doesn't exist "
            f"and will be skipped.")

    num_of_files_converted = 0

    if os.path.isfile(training_data):
        if _convert_file_to_yaml(training_data, output, converter):
            num_of_files_converted += 1
    elif os.path.isdir(training_data):
        for root, _, files in os.walk(training_data, followlinks=True):
            for f in sorted(files):
                source_path = Path(os.path.join(root, f))
                if _convert_file_to_yaml(source_path, output, converter):
                    num_of_files_converted += 1

    if num_of_files_converted:
        print_info(
            f"Converted {num_of_files_converted} file(s), saved in '{output}'."
        )
    else:
        print_warning(
            f"Didn't convert any files under '{training_data}' path. "
            "Did you specify the correct file/directory?")
Code example #5
def locate_interface() -> Text:
    """Check if there is a packaged interface - if not build it from source.

    Returns the path to the interface directory."""

    if utils.is_enterprise_installed():
        from rasax.enterprise.interface import (  # pytype: disable=import-error
            PACKAGE_ENTERPRISE_INTERFACE_DIRECTORY,
        )

        pkg_base = PACKAGE_ENTERPRISE_INTERFACE_DIRECTORY
    else:
        pkg_base = PACKAGE_INTERFACE_DIRECTORY

    pkg_index = os.path.join(pkg_base, "index.html")
    if os.path.exists(pkg_index):
        return pkg_base
    else:
        if not os.environ.get("SKIP_FRONTEND_BUILD", "false").lower() == "true":
            return build_interface()
        else:
            external_frontend = os.getenv(
                "EXTERNAL_FRONTEND",
                os.path.join(root_dir, "src", "rasa-frontend", "build"),
            )
            print_info(
                f"Using external frontend build.\nMake sure there is a frontend build "
                f"available in '{os.path.abspath(external_frontend)}'."
            )
            return external_frontend
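
Both environment variables are read directly in the function, so the packaged-interface lookup can be bypassed like this (the paths are illustrative):

import os

os.environ["SKIP_FRONTEND_BUILD"] = "true"
os.environ["EXTERNAL_FRONTEND"] = "/opt/rasa-frontend/build"
interface_dir = locate_interface()  # returns the external build directory
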
Code example #6
    def run_build(cwd, output):
        if config.development_mode:
            build_cmd = "build-frontend-enterprise"
        else:
            build_cmd = "build-frontend"

        print_info("Building frontend (development mode)...")
        # this will always use the frontend enterprise build, as in this case we
        # have the source anyways (won't happen in packaged build)
        if subprocess.call(["make", "install-frontend"], cwd=cwd):
            print_error(
                "Failed to install frontend dependencies. Check logs for details."
            )
            _write_index_html(
                frontend_directory,
                "Frontend install failed! Check the logs for details.",
            )
        elif subprocess.call(["make", build_cmd], cwd=cwd):
            print_error("Failed to build frontend code. Check logs for details.")
            _write_index_html(
                frontend_directory, "Frontend build failed! Check the logs for details."
            )
        else:
            print_success(
                "Finished building frontend, serving from {}."
                "".format(os.path.abspath(output))
            )
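
The `if subprocess.call(...)` branching above works because `subprocess.call` returns the child process's exit code, so any non-zero (truthy) value signals failure. A standalone illustration (the make target is hypothetical):

import subprocess

# Exit code 0 (success) is falsy; any failure code is truthy.
if subprocess.call(["make", "some-target"]):
    print("make failed")
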
Code example #7
def _dump_config(
    config: Dict[Text, Any],
    config_file_path: Text,
    missing_keys: Set[Text],
    auto_configured_keys: Set[Text],
    training_type: Optional[TrainingType] = TrainingType.BOTH,
) -> None:
    """Dump the automatically configured keys into the config file.

    The configuration provided in the file is kept as it is (preserving the order of
    keys and comments).
    For keys that were automatically configured, an explanatory comment is added and the
    automatically chosen configuration is added commented-out.
    If there are already blocks with comments from a previous auto configuration run,
    they are replaced with the new auto configuration.

    Args:
        config: The configuration including the automatically configured keys.
        config_file_path: The file into which the configuration should be dumped.
        missing_keys: Keys that need to be added to the config file.
        auto_configured_keys: Keys for which a commented out auto configuration section
                              needs to be added to the config file.
        training_type: NLU, CORE or BOTH depending on which is trained.
    """

    config_as_expected = _is_config_file_as_expected(config_file_path,
                                                     missing_keys,
                                                     auto_configured_keys,
                                                     training_type)
    if not config_as_expected:
        cli_utils.print_error(
            f"The configuration file at '{config_file_path}' has been removed or "
            f"modified while the automatic configuration was running. The current "
            f"configuration will therefore not be dumped to the file. If you want to "
            f"your model to use the configuration provided in '{config_file_path}', "
            f"you need to re-run training.")
        return

    _add_missing_config_keys_to_file(config_file_path, missing_keys)

    autoconfig_lines = _get_commented_out_autoconfig_lines(
        config, auto_configured_keys)

    with open(config_file_path,
              "r+",
              encoding=rasa.shared.utils.io.DEFAULT_ENCODING) as f:
        lines = f.readlines()
        updated_lines = _get_lines_including_autoconfig(
            lines, autoconfig_lines)
        f.seek(0)
        for line in updated_lines:
            f.write(line)

    auto_configured_keys = common_utils.transform_collection_to_sentence(
        auto_configured_keys)
    cli_utils.print_info(
        f"The configuration for {auto_configured_keys} was chosen automatically. It "
        f"was written into the config file at '{config_file_path}'.")
Code example #8
File: exporter.py  Project: souvikg10/rasa_nlu
    def _fetch_events_within_time_range(self) -> List[Dict[Text, Any]]:
        """Fetch all events for `conversation_ids` within the supplied time range.

        Returns:
            Serialized events with added `sender_id` field.

        """
        conversation_ids_to_process = self._get_conversation_ids_to_process()

        cli_utils.print_info(
            f"Fetching events for {len(conversation_ids_to_process)} "
            f"conversation IDs:"
        )

        events = []

        for conversation_id in tqdm(conversation_ids_to_process, "conversation IDs"):
            tracker = self.tracker_store.retrieve(conversation_id)
            if not tracker:
                logger.info(
                    f"Could not retrieve tracker for conversation ID "
                    f"'{conversation_id}'. Skipping."
                )
                continue

            _events = tracker.current_state(EventVerbosity.ALL)["events"]

            if not _events:
                logger.info(
                    f"No events to migrate for conversation ID '{conversation_id}'."
                )
                continue

            # the conversation IDs are needed in the event publishing
            events.extend(
                self._get_events_for_conversation_id(_events, conversation_id)
            )

        return self._sort_and_select_events_by_timestamp(events)
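
`_get_events_for_conversation_id` is not shown. Based on the docstring ("Serialized events with added `sender_id` field"), a plausible sketch:

    @staticmethod
    def _get_events_for_conversation_id(
        events: List[Dict[Text, Any]], conversation_id: Text
    ) -> List[Dict[Text, Any]]:
        # Tag each serialized event with the conversation it belongs to,
        # so the publisher knows which sender produced it.
        for event in events:
            event["sender_id"] = conversation_id
        return events
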
Code example #9
def _print_domain_change_info(_type: Text, origin: Text,
                              items: List[Text]) -> None:
    if origin and items:
        print_info(
            "The following {} were found in your {} and will be added to the "
            "domain: {}".format(_type, origin, ", ".join(items)))