Code example #1
from jsgf import (RootGrammar, PublicRule, HiddenRule, AlternativeSet,
                  Sequence, OptionalGrouping, RuleRef)


def main():
    # Create a grammar and add some rules to it
    grammar = RootGrammar()
    name = HiddenRule("name", AlternativeSet("john", "bob", "anna"))

    # greeting is either: 'hey', 'hey there' or 'hello'
    greeting = HiddenRule("greeting", AlternativeSet(
        Sequence("hey", OptionalGrouping("there")), "hello"))

    # parting_phrase is either: 'good bye' or 'see you'
    parting_phrase = HiddenRule("parting_phrase", AlternativeSet(
        "good bye", "see you"))

    # greet is a greeting followed by a name
    greet = PublicRule("greet", Sequence(RuleRef(greeting), RuleRef(name)))

    # goodbye is a parting phrase followed by a name
    goodbye = PublicRule("goodbye", Sequence(
        RuleRef(parting_phrase), RuleRef(name)))

    grammar.add_rules(name, greeting, parting_phrase, greet, goodbye)

    print("Root grammar compiles to the following:")
    print(grammar.compile())

    # Try matching some speech strings
    print_matching(grammar, "hey john")
    print_matching(grammar, "hey there john")
    print_matching(grammar, "see you john")

    # Try matching some hidden rules
    print_matching(grammar, "bob")
    print_matching(grammar, "hey there")
    print_matching(grammar, "good bye")
Code example #2
File: engine.py Project: dictation-toolbox/dragonfly
    def _get_best_hypothesis(self, hypotheses):
        """
        Take a list of speech hypotheses and return the most likely one.

        :type hypotheses: iterable
        :return: str | None
        """
        # Get all distinct, non-null hypotheses.
        distinct = tuple([h for h in set(hypotheses) if bool(h)])
        if not distinct:
            return None
        elif len(distinct) == 1:
            return distinct[0]  # only one choice

        # Decide between non-null hypotheses using a Pocket Sphinx search with
        # each hypothesis as a grammar rule.
        grammar = RootGrammar()
        grammar.language_name = self.language
        for i, hypothesis in enumerate(distinct):
            grammar.add_rule(PublicRule("rule%d" % i, Literal(hypothesis)))

        compiled = grammar.compile_grammar()
        name = "_temp"

        # Store the current search name.
        original = self._decoder.active_search

        # Note that there is no need to validate words in this case because
        # each literal in the _temp grammar came from a Pocket Sphinx
        # hypothesis.
        self._decoder.end_utterance()
        self._decoder.set_jsgf_string(name, compiled)
        self._decoder.active_search = name

        # Do the processing.
        hyp = self._decoder.batch_process(
            self._audio_buffers,
            use_callbacks=False
        )
        result = hyp.hypstr if hyp else None

        # Switch back to the previous search.
        self._decoder.end_utterance()  # just in case
        self._decoder.active_search = original
        return result
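
The _decoder above is dragonfly's wrapper around the Pocket Sphinx decoder. For reference, the same temporary-search trick against the plain pocketsphinx Python API would look roughly like this (a sketch, assuming the classic 5prealpha bindings; rescore_with_jsgf and the audio argument are illustrative names, not dragonfly API):

def rescore_with_jsgf(decoder, jsgf_string, audio):
    # Remember the active search so it can be restored afterwards.
    original = decoder.get_search()
    decoder.set_jsgf_string("_temp", jsgf_string)
    decoder.set_search("_temp")
    # Re-decode the buffered audio against the temporary grammar.
    decoder.start_utt()
    decoder.process_raw(audio, False, True)  # no_search=False, full_utt=True
    decoder.end_utt()
    hyp = decoder.hyp()
    decoder.set_search(original)
    return hyp.hypstr if hyp else None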
Code example #3
from jsgf import RootGrammar, PublicRule


def create_grammar(word_list, name, gram_file):
    """
    read a list in a text file (```word_list````) and create
    a grammar (```name```) file (```gram_file```) for that list,
    such that the speech can one of any of the elements of the list
    """
    upp_list = list()
    grammar = RootGrammar(name=name, case_sensitive=True)
    i = 0
    for line in word_list:
        rule_name = "rule" + str(i)
        upp = line.upper().strip()
        # Skip blank lines, stray braces/dots and underscore-prefixed entries.
        if upp not in ("", "{", "}", ".") and not upp.startswith("_"):
            r = PublicRule(rule_name, upp, case_sensitive=True)
            grammar.add_rule(r)
            upp_list.append(upp)
            i += 1

    with open(gram_file, 'wt') as g:
        print(grammar.compile(), file=g)
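
A hypothetical call, assuming words.txt holds one entry per line:

with open("words.txt", "rt") as f:
    create_grammar(f.readlines(), "words", "words.gram")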
Code example #4
    def __init__(self, module_name: str, module: Module,
                 commands: List[ExpressionCommandInfo], keywords: KeywordList,
                 on_no_keywords: Callable, on_not_recognized: Callable):
        super().__init__(module_name, module, keywords, on_no_keywords)

        self._state: Optional[Assistant] = None

        def make_expression(cmd: ExpressionCommandInfo):
            # Wrap the command's callback so that, when the parser matches,
            # it is invoked with the module, the current assistant state and
            # the parse tokens.
            def func(ts):
                return cmd.func(module, self._state, ts)

            return cmd.expression.to_parser().addParseAction(func)

        self.jsgf_grammar = RootGrammar(
            PublicRule(f'cmd-{i}', cmd.expression.to_jsgf())
            for i, cmd in enumerate(commands))
        self.parsing_grammar = pyparsing.Or(map(make_expression, commands))
        self.on_not_recognized = on_not_recognized
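
Note the two grammars built from the same commands list: jsgf_grammar is compiled and handed to the speech decoder, while parsing_grammar re-parses the decoder's output text to dispatch the matching command's callback. Deriving both from one source keeps the recognizer and the dispatcher in sync.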
Code example #5
    def _get_best_hypothesis(self, hypotheses):
        """
        Take a list of speech hypotheses and return the most likely one.

        :type hypotheses: iterable
        :return: str | None
        """
        # Get all distinct, non-null hypotheses.
        distinct = tuple([h for h in set(hypotheses) if bool(h)])
        if not distinct:
            return None
        elif len(distinct) == 1:
            return distinct[0]  # only one choice

        # Decide between non-null hypotheses using a Pocket Sphinx search with
        # each hypothesis as a grammar rule.
        grammar = RootGrammar()
        grammar.language_name = self.language
        for i, hypothesis in enumerate(distinct):
            grammar.add_rule(PublicRule("rule%d" % i, Literal(hypothesis)))

        compiled = grammar.compile_grammar()
        name = "_temp"

        # Store the current search name.
        original = self._decoder.active_search

        # Note that there is no need to validate words in this case because
        # each literal in the _temp grammar came from a Pocket Sphinx
        # hypothesis.
        self._decoder.end_utterance()
        self._decoder.set_jsgf_string(name, _map_to_str(compiled))
        self._decoder.active_search = name

        # Do the processing.
        hyp = self._decoder.batch_process(
            self._audio_buffers,
            use_callbacks=False
        )
        result = hyp.hypstr if hyp else None

        # Switch back to the previous search.
        self._decoder.end_utterance()  # just in case
        self._decoder.active_search = original
        self._decoder.unset_search("_temp")
        return result
Code example #6
from jsgf import parser, PublicRule, RootGrammar


def add_to_grammar(grammar_path, file_path, gram_name):
    """
    loads a ``Grammar`` at grammar_path and tries to add rules to it 
    from the file in file_path then returns the new ``Grammar``
    """
    old_gram = parser.parse_grammar_file(grammar_path)
    with open(file_path, 'rt') as f:
        word_list = f.readlines()
    #remove root rule from old grammar
    old_gram.remove_rule(old_gram.get_rule_from_name("root"))
    # get list of rules from old grammar
    old_rules = old_gram.rules
    new_gram = RootGrammar(name=gram_name, case_sensitive=True)
    # Normalise the new words once so membership tests compare like with like
    # (the raw lines still carry their original case and trailing newlines).
    new_words = [l.upper().strip() for l in word_list]
    # add existing rules to new grammar
    i = 0
    old_rules_text = list()
    for rule in old_rules:
        exp = rule.expansion.text.upper()
        old_rules_text.append(exp)
        if exp not in new_words:
            rule_name = "rule" + str(i)
            r = PublicRule(rule_name, exp, case_sensitive=True)
            new_gram.add_rule(r)
            i += 1
    # add new rules to new grammar
    for exp in new_words:
        rule_name = "rule" + str(i)
        if exp not in old_rules_text and exp not in ("", "{", "}", "."):
            r = PublicRule(rule_name, exp, case_sensitive=True)
            new_gram.add_rule(r)
            i += 1

    # compile new grammar back to file
    new_gram.compile_to_file(grammar_path, compile_as_root_grammar=True)
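
A hypothetical call, merging the entries of new_words.txt into an existing grammar file in place:

add_to_grammar("words.gram", "new_words.txt", "words")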
Code example #7
    def register(self, decoder: Decoder):
        super().register(decoder)
        grammar = RootGrammar(
            PublicRule(f'cmd-{i}', Literal(cmd.phrase))
            for i, cmd in enumerate(self.commands))
        decoder.register_grammar(self.module_name, grammar)