Ejemplo n.º 1
0
 def test_same_local_and_imported_rule_names(self):
     """ A grammar's own rule shadows an imported rule with the same name. """
     g = Grammar("test")
     local_rule = Rule("Z", True, "z")
     g.add_rule(local_rule)
     g.add_import(Import("grammars.test1.Z"))
     self.assertEqual(g.get_rule("Z"), local_rule)
Ejemplo n.º 2
0
    def test_copying(self):
        """Output expansions must not reuse nodes from the input tree."""
        # JSGF-only expansions are exempt from this test:
        # expand_dictation_expansion(e) returns exactly e for those.

        def check_no_shared_nodes(root, results):
            """
            Walk each result tree and assert that none of its nodes is the
            very same object as a node of the input tree.
            Only the immediate tree is checked (shallow traversals).
            :type root: Expansion
            :type results: list
            """
            source_nodes = flat_map_expansion(root, shallow=True)

            def verify(candidate):
                for node in source_nodes:
                    self.assertIsNot(candidate, node)

            for result in results:
                map_expansion(result, verify, shallow=True)

        # A relatively simple expansion.
        e = AS("a", "b", Seq("c", Dict()))
        result = expand_dictation_expansion(e)
        self.assertListEqual(result, [
            AS("a", "b"),
            Seq("c", Dict())
        ])
        check_no_shared_nodes(e, result)

        # An expansion that uses RuleRefs.
        n = Rule("n", False, AS("one", "two", "three"))
        e = AS(Seq("backward", RuleRef(n)), "forward", Seq(Dict(), RuleRef(n)))
        result = expand_dictation_expansion(e)
        self.assertListEqual(result, [
            AS(Seq("backward", RuleRef(n)), "forward"),
            Seq(Dict(), RuleRef(n))
        ])
        check_no_shared_nodes(e, result)

        # An expansion that uses optionals.
        e = AS("a", "b", Seq("c", Opt(Dict())))
        result = expand_dictation_expansion(e)
        self.assertListEqual(result, [
            AS("a", "b", Seq("c")),
            AS("a", "b"),
            Seq("c", Dict())
        ])
        check_no_shared_nodes(e, result)

        # The same case again, using KleeneStar instead.
        e = AS("a", "b", Seq("c", KleeneStar(Dict())))
        result = expand_dictation_expansion(e)
        self.assertListEqual(result, [
            AS("a", "b", Seq("c")),
            AS("a", "b"),
            Seq("c", Repeat(Dict()))
        ])
        check_no_shared_nodes(e, result)
Ejemplo n.º 3
0
    def test_fully_qualified_rule_reference(self):
        """ Fully-qualified rule references work without import statements. """
        grammar = Grammar("test")
        qualified_name = "grammars.test1.W"
        ref = NamedRuleRef(qualified_name)
        rule = Rule("rule", True, ref)
        grammar.add_rule(rule)
        expected = self.grammars.test1.get_rule("W")

        # First pass: no import statement present. Second pass: with it.
        for _ in range(2):
            self.assertEqual(ref.referenced_rule, expected)
            self.assertEqual(grammar.get_rule(qualified_name), expected)
            self.assertEqual(grammar.find_matching_rules("w"), [rule])

            # Adding the matching import statement must also be allowed.
            grammar.add_import(Import(qualified_name))
Ejemplo n.º 4
0
    def test_matcher_context(self):
        """matcher_context tracks the rule last used for matching."""
        dictation = Dict()
        inner = Rule("d", True, dictation)
        outer = Rule("test", True, RuleRef(inner))

        # Before any match, matcher_context must be unset.
        self.assertIsNone(dictation.matcher_context)

        # A successful match initialises it along with matcher_element.
        self.assertTrue(inner.matches("lower lorem ipsum"))
        self.assertEqual(dictation.matcher_context, inner)

        # invalidate_matcher() clears it again.
        dictation.invalidate_matcher()
        self.assertIsNone(dictation.matcher_context)

        # Matching via the outer rule within a JointTreeContext sets
        # matcher_context to that rule instead.
        with JointTreeContext(outer.expansion):
            self.assertTrue(outer.matches("lower lorem ipsum"))
            self.assertEqual(dictation.matcher_context, outer)
Ejemplo n.º 5
0
    def add_rule(self, rule):
        """
        Add a rule to this grammar.

        Rules whose expansions mix dictation with JSGF-only content are split
        (via expand_dictation_expansion) into one or more new rules; purely
        JSGF rules are passed through to the internal JSGF-only grammar
        unchanged. The mapping from each stored rule back to the rule it was
        derived from is recorded in ``self._original_rule_map``.

        :param rule: the Rule object to add.
        :raises TypeError: if ``rule`` is not a Rule object.
        :raises GrammarError: if a non-identical rule with the same name is
            already in the grammar.
        """
        if not isinstance(rule, Rule):
            raise TypeError("object '%s' was not a JSGF Rule object" % rule)

        # Check if the same rule is already in the grammar.
        if rule.name in self.rule_names:
            if rule in self.rules:
                # Silently return if the rule is comparable to another in the
                # grammar.
                return
            else:
                # This is not strictly true for DictationGrammar, but still holds
                # for match_rules and output from the compile methods.
                raise GrammarError(
                    "JSGF grammars cannot have multiple rules with "
                    "the same name")

        # If the rule is not a dictation rule, add it to the JSGF only grammar and
        # the original rule map, and stop here.
        if not dictation_in_expansion(rule.expansion):
            self._jsgf_only_grammar.add_rule(rule)
            self._original_rule_map[rule] = rule
            return

        # Check if the rule is a SequenceRule already and do a few things with it.
        if isinstance(rule, SequenceRule):
            if not rule.current_is_dictation_only:
                # The sequence starts with a JSGF only rule and can be
                # spoken like a normal rule
                self._jsgf_only_grammar.add_rule(rule)
            else:
                self._dictation_rules.append(rule)
            self._original_rule_map[rule] = rule
            return

        # Expand the rule's expansion into a list of 1 or more expansions.
        expanded = expand_dictation_expansion(rule.expansion)

        # Otherwise create new rules from the resulting expansions and add each to
        # either dictation_rules or _jsgf_only_grammar
        for i, x in enumerate(expanded):
            if len(expanded) == 1:
                # No need to use different names in this case
                new_name = rule.name
            else:
                # Suffix the index so expanded rule names stay unique.
                new_name = "%s_%d" % (rule.name, i)
            if not dictation_in_expansion(x):
                r = Rule(new_name, rule.visible, x)

                # Add this rule to the JSGF only grammar
                self._jsgf_only_grammar.add_rule(r)

                # Keep track of the relationship between the original rule and its
                # expanded rules
                self._original_rule_map[r] = rule
            else:
                seq_rule = SequenceRule(new_name, rule.visible, x)
                self._original_rule_map[seq_rule] = rule

                if not seq_rule.current_is_dictation_only:
                    # The sequence starts with a JSGF only rule and can be
                    # spoken like a normal rule
                    self._jsgf_only_grammar.add_rule(seq_rule)
                else:
                    self._dictation_rules.append(seq_rule)
Ejemplo n.º 6
0
def main():
    """
    Run the pyjsgf matching benchmark.

    Parses command-line arguments, builds (or parses from the -r argument)
    the rule to test, generates N speech strings from it, and reports how
    long matching them took — optionally under a profiler (-p).
    """
    parser = argparse.ArgumentParser(prog="matching benchmark.py",
                                     description="pyjsgf matching benchmark")
    parser.add_argument("-r",
                        "--rule-string",
                        type=str,
                        default="default",
                        help=("Rule to use for benchmarking. "
                              "Must be a valid JSGF rule ending with ';'."))
    parser.add_argument("-n",
                        "--n-speech-strings",
                        type=int,
                        default=100,
                        dest="n",
                        help="Number of speech strings to generate.")
    parser.add_argument(
        "-q",
        "--quiet",
        default=False,
        action="store_true",
        help="Suppress output of generated strings.",
    )
    parser.add_argument(
        "-p",
        "--profile",
        default=False,
        action="store_true",
        help=(
            "Whether to run the benchmark through 'cProfile'. If the module is "
            "not available, then 'profile' will be used instead."),
    )

    # Parse the arguments.
    args = parser.parse_args()

    # Set up rules for testing.
    if not args.rule_string or args.rule_string == 'default':
        word = Rule("word", False, AlternativeSet(*WORDS))
        number = Rule("number", False, AlternativeSet(*NUMBERS))
        rule = Rule(
            "series", True,
            Repeat(Sequence(RuleRef(word), OptionalGrouping(RuleRef(number)))))
    else:
        rule = parse_rule_string(args.rule_string)

    # Generate N speech strings to test how well the matching performs.
    strings = [rule.generate() for _ in range(args.n)]

    if args.profile:
        try:
            # Prefer the C-accelerated profiler.
            import cProfile as profile_mod
        except ImportError:
            # Fall back on the pure-Python 'profile' module (slower).
            import profile as profile_mod

        # Run the benchmark through the profiler, passing the names it needs
        # as the globals/locals of the profiled statement.
        now = time.perf_counter()
        profile_mod.runctx("do_benchmark(rule, strings, args)", {}, {
            "do_benchmark": do_benchmark,
            "rule": rule,
            "strings": strings,
            "args": args
        })
    else:
        # Run the benchmark without profiling.
        now = time.perf_counter()
        do_benchmark(rule, strings, args)

    # Print the time it took to match N generated strings.
    # perf_counter() is monotonic, so the measurement cannot be skewed by
    # system clock adjustments the way time.time() can.
    after = time.perf_counter()
    print("Matched %d generated strings in %.3f seconds." %
          (args.n, after - now))