def recognize(spec, choices_values, timeout):
    """Build a one-off dragonfly grammar from *spec* and wait for a result.

    A throwaway rule class is built from ``spec`` with one ``Choice`` extra
    per entry of ``choices_values``; the grammar is loaded, COM messages are
    pumped until the recognition callback stores something in the module
    global ``RESULT`` or ``timeout`` seconds pass, then the grammar is
    unloaded.

    :param spec: dragonfly spec string for the rule.
    :param choices_values: mapping of extra name -> iterable of choice words.
    :param timeout: seconds to wait for a recognition.
    :returns: whatever the callback stored in RESULT, or None on timeout.
    """
    global RESULT
    RESULT = None

    grammar = Grammar("grammar")

    # One Choice extra per named choice list; each word maps to itself.
    extras = []
    for name, choices in choices_values.items():  # .iteritems() was Py2-only
        extras.append(Choice(name, dict((c, c) for c in choices)))

    # Build a throwaway rule class carrying the requested spec/extras.
    Rule = type("Rule", (GrammarRule,), {"spec": spec, "extras": extras})
    grammar.add_rule(Rule())
    grammar.load()

    # Poll until the recognition callback sets RESULT or we hit the deadline.
    future = time.time() + timeout
    while time.time() < future:
        if RESULT is not None:
            break

        pythoncom.PumpWaitingMessages()

        time.sleep(.1)

    grammar.unload()

    # Print statement converted to the print() function (Py3).
    print("RESULT:", RESULT)

    return RESULT
Пример #2
0
    def test_list_grammars(self):
        """ Verify that the 'list_grammars' RPC method works correctly. """
        # Build a grammar containing one rule of each kind, load it, and
        # check that the RPC reports the expected data for all three.
        grammar = Grammar("list_grammars_test")
        grammar.add_rule(CompoundRule(name="compound", spec="testing",
                                      exported=True))
        mapping = {
            "command a": ActionBase(),
            "command b": ActionBase(),
        }
        grammar.add_rule(MappingRule(name="mapping", mapping=mapping))
        grammar.add_rule(Rule(name="base", element=Literal("hello world"),
                              exported=False))
        grammar.load()

        response = self.send_request("list_grammars", [])
        expected_rules = [
            {"name": "compound", "specs": ["testing"],
             "exported": True, "active": True},
            {"name": "mapping", "specs": ["command a", "command b"],
             "exported": True, "active": True},
            {"name": "base", "specs": ["hello world"],
             "exported": False, "active": True},
        ]
        expected_grammar_data = {"name": grammar.name, "enabled": True,
                                 "active": True, "rules": expected_rules}
        # The loaded grammar need not be the only entry in the result:
        # user grammars may be loaded while dragonfly's tests run, and
        # that is acceptable.
        try:
            self.assertIn("result", response)
            self.assertIn(expected_grammar_data, response["result"])
        finally:
            grammar.unload()
Пример #3
0
class SwitchMode:
    """Mode that listens for spoken commands to switch between other modes.

    When speech recognition is available, one dragonfly grammar is built
    with one rule per target mode; each rule is handed the mode switcher
    so it can trigger the actual switch when its command is recognized.
    """

    def __init__(self, modeSwitcher):
        self.mode = "regular"

        if SPEECHREC_ENABLED:
            # Create a grammar to contain the command rules.
            self.grammar = Grammar("Switch grammar")

            # Every rule is wired identically (construct, attach the mode
            # switcher, add to the grammar), so build them in a loop
            # instead of repeating the pattern per rule.
            rule_classes = (
                TwitchModeRule,
                YoutubeModeRule,
                BrowseModeRule,
                HeroesModeRule,
                TestingModeRule,
                ExcelLogModeRule,
                ExcelModeRule,
            )
            for rule_class in rule_classes:
                rule = rule_class()
                rule.setModeSwitch(modeSwitcher)
                self.grammar.add_rule(rule)

    def start(self):
        # NOTE(review): self.grammar only exists when SPEECHREC_ENABLED is
        # true; start() raises AttributeError otherwise (pre-existing
        # behaviour, unchanged here).
        self.grammar.load()
        mute_sound()
        toggle_speechrec()

    def handle_input(self, dataDicts):
        if SPEECHREC_ENABLED:
            # Pump COM messages so the speech engine can deliver results.
            pythoncom.PumpWaitingMessages()
            sleep(.1)
        else:
            print(
                "NO SPEECH RECOGNITION ENABLED - CANNOT SWITCH BETWEEN MODES")
            sleep(.5)

    def exit(self):
        self.grammar.unload()
        toggle_speechrec()
        turn_on_sound()
Пример #4
0
    def test_list_grammars(self):
        """ Verify that the 'list_grammars' RPC method works correctly. """
        # Load a grammar with a compound rule, a mapping rule and a basic
        # rule, then check the RPC returns the correct data for each.
        g = Grammar("list_grammars_test")
        compound = CompoundRule(name="compound", spec="testing",
                                exported=True)
        mapping = MappingRule(name="mapping", mapping={
            "command a": ActionBase(),
            "command b": ActionBase(),
        })
        base = Rule(name="base", element=Literal("hello world"),
                    exported=False)
        for rule in (compound, mapping, base):
            g.add_rule(rule)
        g.load()

        response = self.send_request("list_grammars", [])
        expected_grammar_data = {
            "name": g.name,
            "enabled": True,
            "active": True,
            "rules": [
                {"name": "compound", "specs": ["testing"],
                 "exported": True, "active": True},
                {"name": "mapping", "specs": ["command a", "command b"],
                 "exported": True, "active": True},
                {"name": "base", "specs": ["hello world"],
                 "exported": False, "active": True},
            ],
        }
        # The loaded grammar might not be the only one in the result, which
        # is acceptable: dragonfly's tests can run while user grammars are
        # loaded.
        try:
            self.assertIn("result", response)
            self.assertIn(expected_grammar_data, response["result"])
        finally:
            g.unload()
Пример #5
0
    def test_mimic(self):
        """ Verify that the 'mimic' RPC method works correctly. """
        g = Grammar("mimic_test")
        rule = CompoundRule(name="compound", spec="testing mimicry",
                            exported=True)
        g.add_rule(rule)
        g.load()

        # Make the grammar exclusive. The sapi5shared engine apparently
        # requires this for mimic() to work (which makes the method kind
        # of useless there); sapi5inproc does not need it.
        g.set_exclusiveness(True)

        response = self.send_request("mimic", ["testing mimicry"])
        try:
            self.assertIn("result", response)
            self.assertEqual(True, response["result"])
        finally:
            g.set_exclusiveness(False)
            g.unload()
Пример #6
0
    def test_mimic(self):
        """ Verify that the 'mimic' RPC method works correctly. """
        grammar = Grammar("mimic_test")
        grammar.add_rule(CompoundRule(name="compound",
                                      spec="testing mimicry",
                                      exported=True))
        grammar.load()

        # The sapi5shared engine apparently requires exclusivity for
        # mimic() to work, making the method kind of useless there. This
        # does not apply to sapi5inproc.
        grammar.set_exclusiveness(True)

        response = self.send_request("mimic", ["testing mimicry"])
        try:
            self.assertIn("result", response)
            self.assertEqual(response["result"], True)
        finally:
            grammar.set_exclusiveness(False)
            grammar.unload()
Пример #7
0
    def test_recognition_history_methods(self):
        """ Verify that the recognition history RPC methods work correctly.
        """
        # A grammar is needed so there is something to recognize and save.
        grammar = Grammar("history_test")
        rule = CompoundRule(name="compound", spec="test history",
                            exported=True)
        grammar.add_rule(rule)
        grammar.load()
        grammar.set_exclusiveness(True)
        try:
            # 'register_history()' should succeed the first time...
            response = self.send_request("register_history", [])
            self.assertIn("result", response)

            # ...and raise an error when the observer is already
            # registered.
            self.assertRaises(RuntimeError, self.send_request,
                              "register_history", [])

            # A mimicked recognition should show up in the list returned
            # by 'get_recognition_history()'.
            self.send_request("mimic", ["test history"])
            response = self.send_request("get_recognition_history", [])
            self.assertIn("result", response)
            self.assertListEqual(response["result"], [["test", "history"]])

            # 'unregister_history()' should succeed while registered...
            response = self.send_request("unregister_history", [])
            self.assertIn("result", response)

            # ...and raise an error once the observer is gone.
            self.assertRaises(RuntimeError, self.send_request,
                              "unregister_history", [])
        finally:
            grammar.set_exclusiveness(False)
            grammar.unload()
Пример #8
0
class GrammarController(object):
    """Wraps grammars so they can be turned on and off by command."""

    def __init__(self, name, grammars):
        self._controlled_grammars = grammars
        self.enabled = True
        # Spoken commands "<name> off/close" and "<name> on/open" drive
        # disable() and enable() respectively.
        mapping = {
            name + " (off|close)": Function(lambda: self.disable()),
            name + " (on|open)": Function(lambda: self.enable()),
        }
        rule = create_rule(name + "_mode", mapping, exported=True)
        self._command_grammar = Grammar(name + "_mode")
        self._command_grammar.add_rule(rule)

    def enable(self):
        # Only touch the wrapped grammars on an actual state change.
        if not self.enabled:
            for grammar in self._controlled_grammars:
                grammar.enable()
        self.enabled = True

    def disable(self):
        # Only touch the wrapped grammars on an actual state change.
        if self.enabled:
            for grammar in self._controlled_grammars:
                grammar.disable()
        self.enabled = False

    def load(self):
        # Load the wrapped grammars first, then the command grammar.
        for grammar in self._controlled_grammars:
            grammar.load()
        self._command_grammar.load()

    def unload(self):
        for grammar in self._controlled_grammars:
            grammar.unload()
        self._command_grammar.unload()
Пример #9
0
class BaseMode:
    """Base class for the program's input modes.

    Ties together sound-pattern detection (PatternDetector), key/mouse
    dispatch (InputManager) and -- on Windows with speech recognition
    enabled -- a simple dragonfly grammar that can be toggled on and off.
    Subclasses override handle_speech()/handle_sounds() and the class-level
    configuration attributes below.
    """

    # Mouse-quadrant numbers, recomputed on every handle_input() tick.
    quadrant3x3 = 0
    quadrant4x3 = 0
    # Configuration intended to be overridden by subclasses.
    # NOTE(review): these dicts are shared class attributes; subclasses
    # that do not override them share one dict across all instances.
    speech_commands = {}
    patterns = {}
    toggles = {}
    use_direct_keys = False
    input_release_lag = 0

    def __init__(self, modeSwitcher, is_testing=False, repeat_delay=REPEAT_DELAY, repeat_rate=REPEAT_RATE):
        # NOTE(review): repeat_delay and repeat_rate are accepted but not
        # used anywhere in this constructor -- confirm whether subclasses
        # rely on them.
        self.inputManager = InputManager(is_testing=is_testing, use_direct_keys=self.use_direct_keys, input_release_lag_ms=self.input_release_lag * 1000)
        self.mode = "regular"
        self.modeSwitcher = modeSwitcher
        self.detector = PatternDetector(self.patterns)
        self.pressed_keys = {}
        self.should_drag = False
        self.ctrlKey = False
        self.shiftKey = False
        self.altKey = False

        if( SPEECHREC_ENABLED == True and IS_WINDOWS == True ):
            # Speech support is Windows-only; import lazily so other
            # platforms never need dragonfly/pythoncom.
            from dragonfly import Grammar
            from lib.grammar.simple_grammar import SimpleSpeechCommand
            import pythoncom
            self.grammar = Grammar("Simple")
            self.simpleCommandRule = SimpleSpeechCommand(self.speech_commands, callback=self.toggle_speech)
            self.grammar.add_rule( self.simpleCommandRule )

    def start( self ):
        """Enter this mode: reset the overlay and toggle the eyetracker."""
        update_overlay_image( "default" )
        toggle_eyetracker()

    def exit( self ):
        """Leave this mode, switching speech mode off first if active."""
        if( self.mode == "speech" ):
            self.toggle_speech()

        update_overlay_image( "default" )
        toggle_eyetracker()

    def handle_input( self, dataDicts ):
        """Process one tick of input data and dispatch to the current mode.

        Returns the detector's tickActions for this tick.
        """
        self.detector.tick( dataDicts )
        self.quadrant3x3 = self.detector.detect_mouse_quadrant( 3, 3 )
        self.quadrant4x3 = self.detector.detect_mouse_quadrant( 4, 3 )

        # On silence, stop any drag and release held non-toggle keys.
        if( self.detect_silence() ):
            self.stop_drag_mouse()
            self.inputManager.release_non_toggle_keys()

        # Recognize speech commands in speech mode
        if( self.mode == "speech" ):
            if (IS_WINDOWS == True):
                pythoncom.PumpWaitingMessages()
                self.handle_speech( dataDicts )
            else:
                print( "Speech recognition is only implemented for Windows")
                self.mode = "regular"

        # Regular quick command mode
        elif( self.mode == "regular" ):
            self.handle_sounds( dataDicts )

        return self.detector.tickActions

    def handle_speech( self, dataDicts ):
        """Hook for subclasses; this base implementation does nothing."""
        if (IS_WINDOWS == False):
            print( "No speech handler" )
        return

    def handle_sounds( self, dataDicts ):
        """Hook for subclasses; this base implementation does nothing."""
        print( "No sound handler" )
        return

    # Toggle between variables
    # If the value is a list, turn them on in sequence after another
    def toggle( self, value ):
        if (isinstance(value, list)):
            # Find which entry is currently on (if any) and switch it off,
            # initialising unseen keys to off along the way.
            turned_on_index = -1
            for index, key in enumerate(value):
                if (key not in self.toggles):
                    self.toggles[key] = False
                elif (self.toggles[key] == True):
                    turned_on_index = index
                    self.toggles[key] = False

            # Turn on the next entry, wrapping around at the end.
            next_index = turned_on_index + 1
            if (next_index >= len(value)):
                next_index = 0

            self.toggles[value[next_index]] = True
        else:
            # Single value: flip it, defaulting to off when first seen.
            if (value not in self.toggles ):
                self.toggles[value] = False

            self.toggles[value] = not self.toggles[value]

    def enable( self, value ):
        """Turn on a toggle, or every toggle in a list of them."""
        if (isinstance(value, list)):
            for index, key in enumerate(value):
                self.toggles[key] = True
        else:
            self.toggles[value] = True

    def disable( self, value ):
        """Turn off a toggle, or every toggle in a list of them."""
        if (isinstance(value, list)):
            for index, key in enumerate(value):
                self.toggles[key] = False
        else:
            self.toggles[value] = False

    def detect( self, key ):
        """Look up a toggle by name, falling back to the pattern detector."""
        if (key in self.toggles):
            return self.toggles[key]

        return self.detector.detect( key )

    def detect_silence( self ):
        return self.detector.detect_silence()

    def drag_mouse( self ):
        self.toggle_drag_mouse( True )

    def stop_drag_mouse( self ):
        self.toggle_drag_mouse( False )

    def leftclick( self ):
        self.inputManager.click(button='left')

    def rightclick( self ):
        self.inputManager.click(button='right')

    def press( self, key ):
        self.inputManager.press( key )

    def hold( self, key, repeat_rate_ms=0 ):
        self.inputManager.hold( key, repeat_rate_ms )

    def release( self, key ):
        self.inputManager.release( key )

    def release_special_keys( self ):
        self.inputManager.release_special_keys()

    def toggle_speech( self ):
        """Switch between speech mode (grammar loaded) and regular mode."""
        if( self.mode != "speech" ):
            self.mode = "speech"
            self.grammar.load()
            print( "--- TOGGLING SPEECH RECOGNITION ON" )
        else:
            self.mode = "regular"
            self.grammar.unload()
            print( "--- TOGGLING SPEECH RECOGNITION OFF" )
        toggle_speechrec()

    # Drag mouse for selection purposes
    def toggle_drag_mouse( self, should_drag ):
        # Only press/release the mouse button on an actual state change.
        if( self.should_drag != should_drag ):
            if( should_drag == True ):
                self.inputManager.mouseDown()
            else:
                self.inputManager.mouseUp()

        self.should_drag = should_drag

    # Detect when the cursor is inside an area
    def detect_inside_area( self, x, y, width, height ):
        return self.detector.detect_inside_minimap( x, y, width, height )

    def update_overlay( self ):
        """Show the overlay image matching the currently held modifiers."""
        if( not( self.ctrlKey or self.shiftKey or self.altKey ) ):
            update_overlay_image( "default" )
        else:
            modes = []
            if( self.ctrlKey ):
                modes.append( "ctrl" )
            if( self.shiftKey ):
                modes.append( "shift" )
            if( self.altKey ):
                modes.append( "alt" )

            update_overlay_image( "mode-%s" % ( "-".join( modes ) ) )
Пример #10
0
def dragonfly_recognise(spec, choices_values, timeout):
    """
    Build a grammar based on spec and choices and send it to dragonfly.

    :param spec: dragonfly spec string for the rule.
    :param choices_values: mapping of extra name -> iterable of choice words.
    :param timeout: seconds to wait for a recognition.
    :returns: dict with 'result' and the public 'choices' on success,
        {} when cancelled via CANCEL_FLAG, False on timeout.
    :raises Exception: if more than one result was queued.
    """

    # build the dragonfly request: one Choice extra per named choice list,
    # with every word mapping to itself
    extras = []
    for name, choices in choices_values.items():  # .iteritems() was Py2-only
        extras.append(Choice(name, dict((c, c) for c in choices)))

    results = Queue()

    class GrammarRule(CompoundRule):
        def _process_recognition(self, node, extras):
            logger.info('_process_recognition callback: %s', str(node))
            results.put_nowait(Result(node=node, extras=extras))

        def _process_begin(self):
            logger.debug('Rule:__process_begin')

    rule = GrammarRule(spec=spec, extras=extras)

    grammar = Grammar("grammar")
    grammar.add_rule(rule)

    # attach failure callback
    def process_recognition_failure():
        logger.info('Grammar:process_recognition_failure')
    grammar.process_recognition_failure = process_recognition_failure

    grammar.load()

    # NB: was "os.name is 'nt'" -- identity comparison against a string
    # literal is implementation-dependent (SyntaxWarning on Python 3.8+);
    # use equality.
    if os.name == 'nt':
        winsound.PlaySound(data_path + "/grammar_loaded.wav", winsound.SND_ASYNC)

    logger.info("Grammar loaded: %s", spec)

    global CANCEL_FLAG
    CANCEL_FLAG = False

    try:
        future = time.time() + timeout
        while time.time() < future and results.empty():
            if os.name == 'nt':
                pythoncom.PumpWaitingMessages()

            if CANCEL_FLAG:
                return {}

            time.sleep(.1)
    finally:
        # Always unload, even on cancellation: the original early return
        # on CANCEL_FLAG leaked the loaded grammar.
        grammar.unload()

    try:
        result = results.get_nowait()
    except Empty:
        logger.info('No result, probably a timeout')
        return False

    if not results.empty():
        raise Exception('Multiple results received')

    # filter all extras with _ because they are private
    return {
        "result": result.node.value(),
        "choices": {k: v for (k, v) in result.extras.items()
                    if not k.startswith('_')}
    }
Пример #11
0
class TestRules(RuleTestCase):
    """Engine-level tests for rules, contexts and grammar exclusivity."""

    def setUp(self):
        RuleTestCase.setUp(self)
        engine = get_engine()
        if engine.name == 'natlink':
            # Stop Dragon from dictating text for the duration of these
            # tests. This is required when testing for mimic failures.
            self.temp_grammar = Grammar("temp")
            self.temp_grammar.add_rule(CompoundRule(spec="exclusive rule"))
            self.temp_grammar.load()
            self.temp_grammar.set_exclusiveness(True)

    def tearDown(self):
        RuleTestCase.tearDown(self)
        engine = get_engine()
        if engine.name == 'natlink':
            # Undo the exclusive grammar created in setUp().
            self.temp_grammar.set_exclusiveness(False)
            self.temp_grammar.unload()

    def process_grammars_context(self):
        # NOTE(review): presumably forces sapi5 engines to re-evaluate
        # grammar contexts after a context change -- confirm against the
        # engine implementation.
        engine = get_engine()
        if engine.name.startswith("sapi5"):
            engine.process_grammars_context()

    def test_multiple_rules(self):
        """ Verify that the engine successfully mimics each rule in a
            grammar with multiple rules. """
        self.add_rule(CompoundRule(name="r1", spec="hello"))
        self.add_rule(CompoundRule(name="r2", spec="see you"))
        assert self.recognize_node("hello").words() == ["hello"]
        assert self.recognize_node("see you").words() == ["see", "you"]

    def test_rule_context(self):
        """ Verify that the engine works correctly with rule contexts. """
        context = TestContext(True)
        self.add_rule(
            CompoundRule(name="r1", spec="test context", context=context))
        self.grammar.load()

        # Test that the rule matches when in-context.
        results = self.recognize_node("test context").words()
        assert results == ["test", "context"]

        # Go out of context and test again.
        # Use the engine's mimic method because recognize_node won't return
        # RecognitionFailure like ElementTester.recognize does.
        context.active = False
        self.process_grammars_context()
        try:
            self.grammar.set_exclusiveness(True)
            self.assertRaises(MimicFailure, self.engine.mimic, "test context")
        finally:
            self.grammar.set_exclusiveness(False)

        # Test again after going back into context.
        context.active = True
        self.process_grammars_context()
        results = self.recognize_node("test context").words()
        assert results == ["test", "context"]

    def test_grammar_context(self):
        """ Verify that the engine works correctly with grammar
            contexts."""
        # Recreate the RuleTestGrammar using a context and add a rule.
        context = TestContext(True)
        self.grammar = RuleTestGrammar(context=context)
        self.add_rule(CompoundRule(name="r1", spec="test context"))
        self.grammar.load()

        # Test that the rule matches when in-context.
        results = self.recognize_node("test context").words()
        assert results == ["test", "context"]

        # Go out of context and test again.
        context.active = False
        self.process_grammars_context()
        try:
            self.grammar.set_exclusiveness(True)
            self.assertRaises(MimicFailure, self.engine.mimic, "test context")
        finally:
            self.grammar.set_exclusiveness(False)

        # Test again after going back into context.
        context.active = True
        self.process_grammars_context()
        results = self.recognize_node("test context").words()
        assert results == ["test", "context"]

    def test_exclusive_grammars(self):
        """ Verify that the engine supports exclusive grammars. """

        # This is here as grammar exclusivity is context related.
        # Set up two grammars to test with.
        class TestRule(CompoundRule):
            def __init__(self, spec):
                CompoundRule.__init__(self, spec=spec)
                self.words = None

            def _process_recognition(self, node, extras):
                # Record the spec so the test can see which rule fired.
                self.words = self.spec

        grammar1 = Grammar(name="Grammar1")
        grammar1.add_rule(TestRule(spec="grammar one"))
        grammar2 = Grammar(name="Grammar2")
        grammar2.add_rule(TestRule(spec="grammar two"))
        grammar3 = Grammar(name="Grammar3")
        grammar3.add_rule(TestRule(spec="grammar three"))
        grammar1.load()
        grammar2.load()
        grammar3.load()

        # Set grammar1 as exclusive and make some assertions.
        grammar1.set_exclusiveness(True)
        self.engine.mimic("grammar one")
        assert grammar1.rules[0].words == "grammar one"
        self.assertRaises(MimicFailure, self.engine.mimic, "grammar two")

        # Set grammar2 as exclusive and make some assertions.
        # Both exclusive grammars should be active.
        grammar2.set_exclusiveness(True)
        self.engine.mimic("grammar one")
        assert grammar1.rules[0].words == "grammar one"
        self.engine.mimic("grammar two")
        assert grammar2.rules[0].words == "grammar two"

        # Non-exclusive grammar 'grammar3' should not be active.
        self.assertRaises(MimicFailure, self.engine.mimic, "grammar three")

        # Set both grammars as no longer exclusive and make some assertions.
        grammar1.set_exclusiveness(False)
        grammar2.set_exclusiveness(False)
        if get_engine().name == 'natlink':
            self.temp_grammar.set_exclusiveness(False)
        self.engine.mimic("grammar one")
        assert grammar1.rules[0].words == "grammar one"
        self.engine.mimic("grammar two")
        assert grammar2.rules[0].words == "grammar two"
        self.engine.mimic("grammar three")
        assert grammar3.rules[0].words == "grammar three"
Пример #12
0
 def unload(self):
     """Unload this grammar after deactivating each of its rules."""
     for rule in self._rules:
         # unregister to prevent multiply registered rules during restart
         rule.deactivate()
     # Delegate to the base class to perform the actual unload.
     # NOTE(review): presumably the enclosing class subclasses Grammar --
     # verify against the (not visible) class header.
     Grammar.unload(self)
Пример #13
0
class ExcelMode:
    """Mode for working in Excel with a mix of sound and speech input.

    Sound events click, scroll and switch modes; while in "speech" mode a
    dragonfly grammar handles the spoken Excel commands.
    """

    def __init__(self, modeSwitcher, file):
        self.mode = "regular"
        self.modeSwitcher = modeSwitcher
        self.file = file

        # Every rule is constructed with no arguments and added directly,
        # so build the grammar in a loop instead of repeating add_rule().
        # Order is preserved from the original wiring.
        self.grammar = Grammar("Excel")
        rule_classes = (
            ColumnNumberPrintRule,
            ColumnModePrintRule,
            CorrectionRule,
            CopyRowRule,
            MoveRule,
            PasteRule,
            ShiftRule,
        )
        for rule_class in rule_classes:
            self.grammar.add_rule(rule_class())

    def start(self):
        """Enter the mode: load the grammar and start out in speech mode."""
        self.grammar.load()
        self.mode = "speech"
        toggle_speechrec()
        self.centerXPos, self.centerYPos = pyautogui.position()
        toggle_eyetracker()
        mute_sound()
        self.open_file()

    def handle_input(self, dataDicts):
        """Dispatch one tick of sound input based on the current mode."""
        # A loud bell always opens the mode switcher.
        if (loud_detection(dataDicts, "bell")):
            self.modeSwitcher.turnOnModeSwitch()

        if (self.mode == "regular"):
            # A sustained whistle switches back into speech mode.
            if (percentage_detection(dataDicts, "whistle", 75)):
                self.mode = "speech"
                toggle_speechrec()
            else:
                if (single_tap_detection(dataDicts, "cluck", 35, 1000)):
                    click()
                elif (single_tap_detection(dataDicts, "fingersnap", 50, 1000)):
                    click(button='right')
                elif (loud_detection(dataDicts, "sound_f")):
                    scroll(150)
                elif (loud_detection(dataDicts, "sound_s")):
                    scroll(-150)

                # A sustained "thr" sound in the lower quadrants switches
                # over to browse mode.
                if (percentage_detection(dataDicts, "sound_thr", 75)):
                    quadrant = detect_mouse_quadrant(3, 3)
                    if (quadrant > 3):
                        self.modeSwitcher.switchMode('browse')

        elif (self.mode == "speech"):
            self.speech_mode()
            # A sustained "thr" sound leaves speech mode.
            if (percentage_detection(dataDicts, "sound_thr", 75)):
                self.mode = "regular"
                toggle_speechrec()

    def speech_mode(self):
        """Pump COM messages so the speech engine can deliver results."""
        pythoncom.PumpWaitingMessages()
        time.sleep(.1)

    def open_file(self):
        """Open the configured file with the OS default handler, if set."""
        if (self.file != ''):
            call(["start", self.file], shell=True)

    def exit(self):
        """Leave the mode, restoring speech state and unloading the grammar."""
        if (self.mode == "speech"):
            toggle_speechrec()
        self.mode = "regular"
        toggle_eyetracker()
        self.grammar.unload()
Пример #14
0
class MasterGrammar(object):
    """A MasterGrammar is built up from a specific set of active rules. They
    synthesize the different rule types into one dragonfly grammar. There is
    only ever one master grammar active at a time."""
    def __init__(self, baseRuleSet, client, ruleCache):
        self.client = client
        self.ruleCache = ruleCache

        # Hashes that are directly part of this grammar
        self.baseRuleSet = set(baseRuleSet)
        # Hashes of rules that we discover are dependencies
        # of the base rule set
        self.dependencyRuleSet = set()

        # hash -> dragonfly rule
        self.concreteRules = {}
        # one hash per merge group, hash is of hashes of rules that were merged
        self.seriesRules = set()
        # one hash, hash is of hashes of rules that were merged
        self.terminatorRule = ""
        # one hash per rule, hash is the rule's actual hash
        self.independentRules = set()

        # Rule references are stored as hashes, so rules that
        # contain rule refs already effectively include those
        # rules in their hash, so just hashing the base set is
        # all we need.
        x = hashlib.sha256()
        x.update("".join(sorted([r for r in self.baseRuleSet])))
        self.hash = x.hexdigest()[:32]

        # Hashes of rules we depend on but haven't arrived yet.
        # These will be discovered during the dfly grammar building
        # process.
        self.missing = set()
        self.checkDeps(self.fullRullSet)  # build self.missing
        self.finalDflyRule = None
        self.dflyGrammar = None

        # word lists are *not* hashed. they are global state the
        # client can update at any time, and the change has to be
        # propogated into the currently active grammar. the client
        # can choose to make them rule specific by making the name
        # be the hash of the rule the word list applies to, but this
        # is only convention and not enforced
        self.concreteWordLists = {}

    @property
    def fullRullSet(self):
        # Base rules plus every discovered dependency. (NOTE(review): "Rull"
        # looks like a typo for "Rule" but is used consistently throughout.)
        return self.baseRuleSet | self.dependencyRuleSet

    def satisfyDependency(self, r):
        """Marks dependency on hash r as satisfied, and tries to build if no more known
        deps are missing. During the build process new indirect dependencies may still
        be discovered however."""
        assert r in self.missing
        self.missing.remove(r)
        if not self.missing:
            self.build()

    def checkDep(self, r):
        "Checks if dep r is present. Not recursive."
        if r not in self.ruleCache:
            self.ruleCache[r] = NeedDependency()
        if isinstance(self.ruleCache[r], NeedDependency):
            # Record that this grammar is waiting on hash r.
            self.ruleCache[r].add(self.hash)
            self.missing.add(r)
            return False
        return True

    def checkMissing(self):
        """Raise MissingDependency if any known dependency is still absent."""
        if self.missing:
            raise MissingDependency(copy(self.missing))

    def checkDeps(self, ruleSet):
        "Recursively check if all deps in ruleSet are satisfied."
        if not ruleSet:
            return True

        newDeps = set()
        for r in ruleSet:
            if self.checkDep(r):
                rule = self.ruleCache[r]  # HashedRule

                rule = rule.rule
                log.info("rule [%s]" % (rule, ))
                for e in rule.extras:
                    if hasattr(e, "rule_ref"):
                        newDeps.add(e.rule_ref)

        self.dependencyRuleSet.update(newDeps)
        self.checkDeps(newDeps)

    def ready(self):
        """Return True when no known dependencies are missing."""
        return len(self.missing) == 0

    def build(self):
        """Merge rules and construct the concrete dragonfly grammar.

        Raises MissingDependency (via checkMissing) if any dependency has
        not yet arrived. Idempotent once the grammar has been built.
        """
        if self.dflyGrammar:
            # already built
            return

        buildStartTime = time.time()

        self.checkMissing()
        self.checkDeps(self.fullRullSet)
        self.checkMissing()

        # from here on we assume all deps are present all the way down
        seriesGroups = {}
        terminal = {}

        allRules = []

        mergeStartTime = time.time()

        # Merge series and terminal rules, set independent rules aside
        self.fullName = []
        for r in self.fullRullSet:
            rule = self.ruleCache[r].rule
            hash = self.ruleCache[r].hash
            if rule.ruleType == RuleType.SERIES:
                if rule.seriesMergeGroup not in seriesGroups:
                    seriesGroups[rule.seriesMergeGroup] = {}
                x = seriesGroups[rule.seriesMergeGroup]
            elif rule.ruleType == RuleType.TERMINAL:
                x = terminal
            elif rule.ruleType == RuleType.INDEPENDENT:
                x = {}

            # Lazily initialize the merge-target dict; series/terminal rules
            # share these dicts across iterations, so updates accumulate.
            if "mapping" not in x:
                x["mapping"] = {}
            if "extras" not in x:
                x["extras"] = {}
            if "defaults" not in x:
                x["defaults"] = {}
            if "name" not in x:
                x["name"] = ""
            if "hash" not in x:
                x["hash"] = set()

            x["ruleType"] = rule.ruleType
            x["seriesMergeGroup"] = rule.seriesMergeGroup
            x["name"] = x["name"] + ("," if x["name"] else "") + rule.name
            x["mapping"].update(rule.mapping.items())
            for e in rule.extras:
                x["extras"][e.name] = e
            x["defaults"].update(rule.defaults.items())
            log.info("Adding hash [%s] to name [%s]" % (hash, x["name"]))
            x["hash"].add(hash)
            x["built"] = False
            x["exported"] = (rule.ruleType == RuleType.INDEPENDENT)

            # allRules will contain all the rules we have left
            # *after* merging. So only one series rule per merge
            # group and only one terminal rule.
            allRules.append(x)

        mergeEndTime = time.time()
        log.info("Grammar merge time: %ss" % (mergeEndTime - mergeStartTime))

        # We really should be doing a topological sort, but this
        # isn't a frequent operation so this inefficiency should
        # be OK. Keep trying to link deps until they're all good.
        uniqueRules = []
        for r in allRules:
            # Dedup by dict equality: merged groups were appended once per
            # member rule above.
            if r not in uniqueRules:
                uniqueRules.append(r)
                self.fullName.append(r["name"])
        self.fullName = ",".join(self.fullName)
        allRules = uniqueRules

        # collapse the hashes
        for r in allRules:
            assert type(r["hash"]) == set
            assert len(r["hash"]) >= 1
            if r["ruleType"] in (RuleType.SERIES, RuleType.TERMINAL):
                # We generate a composite hash for our new composite rules
                log.info("Multi-hash: [%s]" % r["hash"])
                hashes = sorted(list(r["hash"]))
                x = hashlib.sha256()
                x.update("".join(sorted([h for h in hashes])))
                hash = x.hexdigest()[:32]
                log.info("Composite: [%s]" % hash)
            else:
                # We just use the exising hash for a rule if it's not composite
                [hash] = r["hash"]
                log.info("Single hash: [%s]" % r["hash"])
            r["hash"] = hash

        allPrototypes = {i["hash"]: i for i in allRules}

        self.concreteTime = 0
        cleanupTime = 0
        for k, v in allPrototypes.items():
            if not v["built"]:
                cleanupStart = time.time()
                self.cleanupProtoRule(v, allPrototypes)
                cleanupEnd = time.time()
                cleanupTime += (cleanupEnd - cleanupStart)

        log.info("Total Cleanup time: %ss" % cleanupTime)
        log.info("Total Concrete time: %ss" % (self.concreteTime))

        #log.info("made it out of loop")
        self.buildFinalMergedRule()

        buildEndTime = time.time()
        log.info("Grammar build time: %ss" % (buildEndTime - buildStartTime))

        self.setupFinalDflyGrammar()

    def buildFinalMergedRule(self):
        """Combine series and terminator rules into one exported MappingRule."""
        #log.info("Building final merged rule.")
        if not self.seriesRules and not self.terminatorRule:
            return

        extras = []
        seriesRefNames = []
        for i, r in enumerate(self.seriesRules):
            name = "s" + str(i)
            seriesRefNames.append(name)
            ref = dfly.RuleRef(self.concreteRules[r], name)
            extras.append(ref)
        seriesPart = "[" + " | ".join([("<" + r + ">")
                                       for r in seriesRefNames]) + "]"

        terminatorPart = ""
        if self.terminatorRule:
            extras.append(
                dfly.RuleRef(self.concreteRules[self.terminatorRule],
                             "terminator"))
            terminatorPart = " [<terminator>]"

        masterPhrase = seriesPart + terminatorPart
        mapping = {
            masterPhrase: ReportingAction(masterPhrase, self.client, self.hash)
        }

        log.info(
            "Building master grammar rule with name [%s] mapping [%s] extras [%s] defaults [%s]"
            % (self.fullName, mapping, extras, {}))
        masterTimeStart = time.time()
        self.finalDflyRule = MappingRule(name=self.hash,
                                         mapping=mapping,
                                         extras=extras,
                                         defaults={},
                                         exported=True)
        masterTimeEnd = time.time()
        log.info("Master rule construction time: %ss" %
                 (masterTimeEnd - masterTimeStart))

    def setupFinalDflyGrammar(self):
        """Load the dragonfly grammar and disable parts that must not fire alone."""
        log.info("Setting up final grammar.")

        assert not self.dflyGrammar
        self.dflyGrammar = Grammar(self.fullName + "Grammar")
        if self.finalDflyRule:
            self.dflyGrammar.add_rule(self.finalDflyRule)
        for r in self.independentRules:
            self.dflyGrammar.add_rule(self.concreteRules[r])
        loadStart = time.time()
        self.dflyGrammar.load()
        loadEnd = time.time()
        log.info("Grammar load time: %ss" % (loadEnd - loadStart))
        get_engine().set_exclusiveness(self.dflyGrammar, 1)

        # These should never be recognized on their own, only as part of the
        # master rule, quirk of dragonfly that you have to do this even though
        # they're only pulled in by ruleref.
        for r in self.seriesRules:
            self.concreteRules[r].disable()
        if self.terminatorRule:
            self.concreteRules[self.terminatorRule].disable()

        # independent rules only enabled via being a dependency need to have disable
        # called on their dragonfly version so that they don't get recognized by
        # themselves, same quirk.
        notEnabledRules = self.dependencyRuleSet - self.baseRuleSet
        for r in notEnabledRules:
            self.concreteRules[r].disable()

        # they're enabled by default, don't activate until explicitly made to
        self.dflyGrammar.disable()

    def active(self):
        """Return truthy when the grammar is built, loaded and enabled."""
        #log.info("active check [%s %s %s]" % (self.dflyGrammar is None, self.dflyGrammar and self.dflyGrammar.loaded, self.dflyGrammar and self.dflyGrammar.enabled))
        return self.dflyGrammar and self.dflyGrammar.loaded and self.dflyGrammar.enabled

    def activate(self):
        """Build if necessary, then enable the grammar."""
        self.build()
        self.dflyGrammar.enable()
        log.info("Grammar activated: [%s]" % self.hash)

    def deactivate(self):
        """Disable the grammar if it was ever built."""
        # it's possible we never built successfully
        if self.dflyGrammar:
            self.dflyGrammar.disable()
            log.info("Grammar deactivated: [%s]" % self.hash)

    def unload(self):
        """Deactivate and unload the underlying dragonfly grammar."""
        self.deactivate()
        if self.dflyGrammar:
            self.dflyGrammar.unload()

    def buildConcreteRule(self, r):
        """Build the dragonfly rule for prototype r and register its hash."""
        # for independent rules we could use the plain
        # name, but it turns out Dragon crashes if your
        # names get too long, so for combined rules we
        # just use the hash as the name... hopefully
        # that's under the limit
        name = r["hash"]
        if r["ruleType"] == RuleType.SERIES:
            t = SeriesMappingRule
        elif r["ruleType"] == RuleType.TERMINAL:
            t = MappingRule
        else:
            t = MappingRule

        constructionStartTime = time.time()

        log.info(
            "Building rule [%s] with size [%s] num extras [%s] num defaults [%s]"
            % (r["name"], len(r["mapping"]), len(
                r["extras"]), len(r["defaults"])))

        rule = t(name=name,
                 mapping=r["mapping"],
                 extras=r["extras"],
                 defaults=r["defaults"],
                 exported=r["exported"])
        constructionEndTime = time.time()

        log.info("Rule construction time: %ss" %
                 (constructionEndTime - constructionStartTime))

        self.concreteRules[r["hash"]] = rule

        if r["ruleType"] == RuleType.SERIES:
            self.seriesRules.add(r["hash"])
        elif r["ruleType"] == RuleType.TERMINAL:
            self.terminatorRule = r["hash"]
        elif r["ruleType"] == RuleType.INDEPENDENT:
            self.independentRules.add(r["hash"])
        else:
            assert False

        log.info("done building")

    def cleanupProtoRule(self, r, allPrototypes):
        """Convert protocol extras to dragonfly extras, then build rule r.

        Recurses into referenced prototypes (Repetition/RuleRef) so that
        dependencies are built before the rules that reference them.
        """
        # have to uniquify in this round about way because lists
        # aren't hashable and we need them for ListRef.
        if type(r["extras"]) == dict:
            r["extras"] = r["extras"].values()

        newExtras = []
        for e in r["extras"]:
            if isinstance(e, protocol.Integer):
                newExtras.append(dfly.Integer(e.name, e.min, e.max))
            elif isinstance(e, protocol.Dictation):
                newExtras.append(dfly.Dictation(e.name))
            elif isinstance(e, protocol.Repetition):
                if e.rule_ref not in self.concreteRules:
                    self.cleanupProtoRule(allPrototypes[e.rule_ref],
                                          allPrototypes)

                # Dragonfly wants RuleRef to take a RuleRef rather than an actual
                # Rule, so we just make one rather than forcing the server to
                # handle this, see protocol.py comments.
                concrete = self.concreteRules[e.rule_ref]
                log.info("concrete type: [%s]" % type(concrete))
                newExtras.append(
                    dfly.Repetition(dfly.RuleRef(rule=concrete), e.min, e.max,
                                    e.name))
            elif isinstance(e, protocol.RuleRef):
                if e.rule_ref not in self.concreteRules:
                    self.cleanupProtoRule(allPrototypes[e.rule_ref],
                                          allPrototypes)

                newExtras.append(
                    dfly.RuleRef(self.concreteRules[e.rule_ref], e.name))
            elif isinstance(e, protocol.ListRef):
                self.concreteWordLists[e.name] = List(e.name + "ConcreteList")
                # self.concreteWordLists[e.name].set(e.words)
                newExtras.append(
                    dfly.ListRef(e.ref_name, self.concreteWordLists[e.name]))
            else:
                raise Exception("Unknown extra type: [%s]" % e)

        r["extras"] = newExtras

        self.concreteStartTime = time.time()
        self.buildConcreteRule(r)
        self.concreteEndTime = time.time()
        self.concreteTime += (self.concreteEndTime - self.concreteStartTime)

        r["built"] = True
        return True

    def updateWordList(self, name, words):
        """Replace the contents of word list `name` if they actually changed."""
        if name not in self.concreteWordLists:
            # log.info("Word list [%s] not in grammar [%s], ignoring" % (name, self.hash))
            return

        # We want to check if the value has actually changed because List's
        # set method will blindly tell Dragon to delete its old list and replace
        # it with this one and we don't want to disturb Dragon unless we have to
        # because Dragon is slow.
        if sorted(words) != sorted(self.concreteWordLists[name]):
            log.info(
                "Updating word list [%s] on grammar [%s] with contents [%s]" %
                (name, self.hash, len(words)))
            log.info("old list: %s" % self.concreteWordLists[name])
            # TODO: need to check existing load state, then send a loading message here, then restore
            # old state. This way we can see when word lists are taking a long time to load...
            updateStart = time.time()
            self.concreteWordLists[name].set(words)
            updateEnd = time.time()
            log.info("Word list update time: %ss" % (updateEnd - updateStart))
Пример #15
0
class MasterGrammar(object):
    """A MasterGrammar is built up from a specific set of active rules. They
    synthesize the different rule types into one dragonfly grammar. There is
    only ever one master grammar active at a time."""

    def __init__(self, baseRuleSet, client, ruleCache):
        self.client = client
        self.ruleCache = ruleCache

        # Hashes that are directly part of this grammar
        self.baseRuleSet = set(baseRuleSet)
        # Hashes of rules that we discover are dependencies
        # of the base rule set
        self.dependencyRuleSet = set()

        # hash -> dragonfly rule
        self.concreteRules = {}
        # one hash per merge group, hash is of hashes of rules that were merged
        self.seriesRules = set()
        # one hash, hash is of hashes of rules that were merged
        self.terminatorRule = ""
        # one hash per rule, hash is the rule's actual hash
        self.independentRules = set()

        # Rule references are stored as hashes, so rules that
        # contain rule refs already effectively include those
        # rules in their hash, so just hashing the base set is
        # all we need.
        x = hashlib.sha256()
        x.update("".join(sorted([r for r in self.baseRuleSet])))
        self.hash = x.hexdigest()[:32]

        # Hashes of rules we depend on but haven't arrived yet.
        # These will be discovered during the dfly grammar building
        # process.
        self.missing = set()
        self.checkDeps(self.fullRullSet) # build self.missing
        self.finalDflyRule = None
        self.dflyGrammar = None

        # word lists are *not* hashed. they are global state the
        # client can update at any time, and the change has to be
        # propogated into the currently active grammar. the client
        # can choose to make them rule specific by making the name
        # be the hash of the rule the word list applies to, but this
        # is only convention and not enforced
        self.concreteWordLists = {}

    @property
    def fullRullSet(self):
        # Base rules plus every discovered dependency. (NOTE(review): "Rull"
        # looks like a typo for "Rule" but is used consistently throughout.)
        return self.baseRuleSet | self.dependencyRuleSet

    def satisfyDependency(self, r):
        """Marks dependency on hash r as satisfied, and tries to build if no more known
        deps are missing. During the build process new indirect dependencies may still
        be discovered however."""
        assert r in self.missing
        self.missing.remove(r)
        if not self.missing:
            self.build()

    def checkDep(self, r):
        "Checks if dep r is present. Not recursive."
        if r not in self.ruleCache:
            self.ruleCache[r] = NeedDependency()
        if isinstance(self.ruleCache[r], NeedDependency):
            # Record that this grammar is waiting on hash r.
            self.ruleCache[r].add(self.hash)
            self.missing.add(r)
            return False
        return True

    def checkMissing(self):
        """Raise MissingDependency if any known dependency is still absent."""
        if self.missing:
            raise MissingDependency(copy(self.missing))

    def checkDeps(self, ruleSet):
        "Recursively check if all deps in ruleSet are satisfied."
        if not ruleSet:
            return True

        newDeps = set()
        for r in ruleSet:
            if self.checkDep(r):
                rule = self.ruleCache[r] # HashedRule

                rule = rule.rule
                log.info("rule [%s]" % (rule,))
                for e in rule.extras:
                    if hasattr(e, "rule_ref"):
                        newDeps.add(e.rule_ref)

        self.dependencyRuleSet.update(newDeps)
        self.checkDeps(newDeps)

    def ready(self):
        """Return True when no known dependencies are missing."""
        return len(self.missing) == 0

    def build(self):
        """Merge rules and construct the concrete dragonfly grammar.

        Raises MissingDependency (via checkMissing) if any dependency has
        not yet arrived. Idempotent once the grammar has been built.
        """
        if self.dflyGrammar:
            # already built
            return

        buildStartTime = time.time()

        self.checkMissing()
        self.checkDeps(self.fullRullSet)
        self.checkMissing()

        # from here on we assume all deps are present all the way down
        seriesGroups = {}
        terminal = {}

        allRules = []

        mergeStartTime = time.time()

        # Merge series and terminal rules, set independent rules aside
        self.fullName = []
        for r in self.fullRullSet:
            rule = self.ruleCache[r].rule
            hash = self.ruleCache[r].hash
            if rule.ruleType == RuleType.SERIES:
                if rule.seriesMergeGroup not in seriesGroups:
                    seriesGroups[rule.seriesMergeGroup] = {}
                x = seriesGroups[rule.seriesMergeGroup]
            elif rule.ruleType == RuleType.TERMINAL:
                x = terminal
            elif rule.ruleType == RuleType.INDEPENDENT:
                x = {}

            # Lazily initialize the merge-target dict; series/terminal rules
            # share these dicts across iterations, so updates accumulate.
            if "mapping" not in x:
                x["mapping"] = {}
            if "extras" not in x:
                x["extras"] = {}
            if "defaults" not in x:
                x["defaults"] = {}
            if "name" not in x:
                x["name"] = ""
            if "hash" not in x:
                x["hash"] = set()

            x["ruleType"] = rule.ruleType
            x["seriesMergeGroup"] = rule.seriesMergeGroup
            x["name"] = x["name"] + ("," if x["name"] else "") + rule.name
            x["mapping"].update(rule.mapping.items())
            for e in rule.extras:
                x["extras"][e.name] = e
            x["defaults"].update(rule.defaults.items())
            log.info("Adding hash [%s] to name [%s]" % (hash, x["name"]))
            x["hash"].add(hash)
            x["built"] = False
            x["exported"] = (rule.ruleType == RuleType.INDEPENDENT)

            # allRules will contain all the rules we have left
            # *after* merging. So only one series rule per merge
            # group and only one terminal rule.
            allRules.append(x)

        mergeEndTime = time.time()
        log.info("Grammar merge time: %ss" % (mergeEndTime - mergeStartTime))

        # We really should be doing a topological sort, but this
        # isn't a frequent operation so this inefficiency should
        # be OK. Keep trying to link deps until they're all good.
        uniqueRules = []
        for r in allRules:
            # Dedup by dict equality: merged groups were appended once per
            # member rule above.
            if r not in uniqueRules:
                uniqueRules.append(r)
                self.fullName.append(r["name"])
        self.fullName = ",".join(self.fullName)
        allRules = uniqueRules

        # collapse the hashes
        for r in allRules:
            assert type(r["hash"]) == set
            assert len(r["hash"]) >= 1
            if r["ruleType"] in (RuleType.SERIES, RuleType.TERMINAL):
                # We generate a composite hash for our new composite rules
                log.info("Multi-hash: [%s]" % r["hash"])
                hashes = sorted(list(r["hash"]))
                x = hashlib.sha256()
                x.update("".join(sorted([h for h in hashes])))
                hash = x.hexdigest()[:32]
                log.info("Composite: [%s]" % hash)
            else:
                # We just use the exising hash for a rule if it's not composite
                [hash] = r["hash"]
                log.info("Single hash: [%s]" % r["hash"])
            r["hash"] = hash

        allPrototypes = { i["hash"] : i for i in allRules }

        self.concreteTime = 0
        cleanupTime = 0
        for k, v in allPrototypes.items():
            if not v["built"]:
                cleanupStart = time.time()
                self.cleanupProtoRule(v, allPrototypes)
                cleanupEnd = time.time()
                cleanupTime += (cleanupEnd - cleanupStart)

        log.info("Total Cleanup time: %ss" % cleanupTime)
        log.info("Total Concrete time: %ss" % (self.concreteTime))

        #log.info("made it out of loop")
        self.buildFinalMergedRule()

        buildEndTime = time.time()
        log.info("Grammar build time: %ss" % (buildEndTime - buildStartTime))

        self.setupFinalDflyGrammar()

    def buildFinalMergedRule(self):
        """Combine series and terminator rules into one exported MappingRule."""
        #log.info("Building final merged rule.")
        if not self.seriesRules and not self.terminatorRule:
            return

        extras = []
        seriesRefNames = []
        for i, r in enumerate(self.seriesRules):
            name = "s" + str(i)
            seriesRefNames.append(name)
            ref = dfly.RuleRef(self.concreteRules[r], name)
            extras.append(ref)
        seriesPart = "[" + " | ".join([("<" + r + ">") for r in seriesRefNames]) + "]"

        terminatorPart = ""
        if self.terminatorRule:
            extras.append(dfly.RuleRef(self.concreteRules[self.terminatorRule], "terminator"))
            terminatorPart = " [<terminator>]"

        masterPhrase = seriesPart + terminatorPart
        mapping = {
            masterPhrase : ReportingAction(masterPhrase, self.client, self.hash)
        }

        log.info("Building master grammar rule with name [%s] mapping [%s] extras [%s] defaults [%s]"
                 % (self.fullName, mapping, extras, {}))
        masterTimeStart = time.time()
        self.finalDflyRule = MappingRule(name=self.hash, mapping=mapping, extras=extras,
                                         defaults={}, exported=True)
        masterTimeEnd = time.time()
        log.info("Master rule construction time: %ss" % (masterTimeEnd - masterTimeStart))

    def setupFinalDflyGrammar(self):
        """Load the dragonfly grammar and disable parts that must not fire alone."""
        log.info("Setting up final grammar.")

        assert not self.dflyGrammar
        self.dflyGrammar = Grammar(self.fullName + "Grammar")
        if self.finalDflyRule:
            self.dflyGrammar.add_rule(self.finalDflyRule)
        for r in self.independentRules:
            self.dflyGrammar.add_rule(self.concreteRules[r])
        loadStart = time.time()
        self.dflyGrammar.load()
        loadEnd = time.time()
        log.info("Grammar load time: %ss" % (loadEnd - loadStart))
        get_engine().set_exclusiveness(self.dflyGrammar, 1)

        # These should never be recognized on their own, only as part of the
        # master rule, quirk of dragonfly that you have to do this even though
        # they're only pulled in by ruleref.
        for r in self.seriesRules:
            self.concreteRules[r].disable()
        if self.terminatorRule:
            self.concreteRules[self.terminatorRule].disable()

        # independent rules only enabled via being a dependency need to have disable
        # called on their dragonfly version so that they don't get recognized by
        # themselves, same quirk.
        notEnabledRules = self.dependencyRuleSet - self.baseRuleSet
        for r in notEnabledRules:
            self.concreteRules[r].disable()

        # they're enabled by default, don't activate until explicitly made to
        self.dflyGrammar.disable()

    def active(self):
        """Return truthy when the grammar is built, loaded and enabled."""
        #log.info("active check [%s %s %s]" % (self.dflyGrammar is None, self.dflyGrammar and self.dflyGrammar.loaded, self.dflyGrammar and self.dflyGrammar.enabled))
        return self.dflyGrammar and self.dflyGrammar.loaded and self.dflyGrammar.enabled

    def activate(self):
        """Build if necessary, then enable the grammar."""
        self.build()
        self.dflyGrammar.enable()
        log.info("Grammar activated: [%s]" % self.hash)

    def deactivate(self):
        """Disable the grammar if it was ever built."""
        # it's possible we never built successfully
        if self.dflyGrammar:
            self.dflyGrammar.disable()
            log.info("Grammar deactivated: [%s]" % self.hash)

    def unload(self):
        """Deactivate and unload the underlying dragonfly grammar."""
        self.deactivate()
        if self.dflyGrammar:
            self.dflyGrammar.unload()

    def buildConcreteRule(self, r):
        """Build the dragonfly rule for prototype r and register its hash."""
        # for independent rules we could use the plain
        # name, but it turns out Dragon crashes if your
        # names get too long, so for combined rules we
        # just use the hash as the name... hopefully
        # that's under the limit
        name = r["hash"]
        if r["ruleType"] == RuleType.SERIES:
            t = SeriesMappingRule
        elif r["ruleType"] == RuleType.TERMINAL:
            t = MappingRule
        else:
            t = MappingRule

        constructionStartTime = time.time()

        log.info("Building rule [%s] with size [%s] num extras [%s] num defaults [%s]" % (r["name"], len(r["mapping"]), len(r["extras"]), len(r["defaults"])))

        rule = t(name=name, mapping=r["mapping"], extras=r["extras"],
                 defaults=r["defaults"], exported=r["exported"])
        constructionEndTime = time.time()

        log.info("Rule construction time: %ss" % (constructionEndTime - constructionStartTime))

        self.concreteRules[r["hash"]] = rule

        if r["ruleType"] == RuleType.SERIES:
            self.seriesRules.add(r["hash"])
        elif r["ruleType"] == RuleType.TERMINAL:
            self.terminatorRule = r["hash"]
        elif r["ruleType"] == RuleType.INDEPENDENT:
            self.independentRules.add(r["hash"])
        else:
            assert False

        log.info("done building")

    def cleanupProtoRule(self, r, allPrototypes):
        """Convert protocol extras to dragonfly extras, then build rule r.

        Recurses into referenced prototypes (Repetition/RuleRef) so that
        dependencies are built before the rules that reference them.
        """
        # have to uniquify in this round about way because lists
        # aren't hashable and we need them for ListRef.
        if type(r["extras"]) == dict:
            r["extras"] = r["extras"].values()

        newExtras = []
        for e in r["extras"]:
            if isinstance(e, protocol.Integer):
                newExtras.append(dfly.Integer(e.name, e.min, e.max))
            elif isinstance(e, protocol.Dictation):
                newExtras.append(dfly.Dictation(e.name))
            elif isinstance(e, protocol.Repetition):
                if e.rule_ref not in self.concreteRules:
                    self.cleanupProtoRule(allPrototypes[e.rule_ref], allPrototypes)

                # Dragonfly wants RuleRef to take a RuleRef rather than an actual
                # Rule, so we just make one rather than forcing the server to
                # handle this, see protocol.py comments.
                concrete = self.concreteRules[e.rule_ref]
                log.info("concrete type: [%s]" % type(concrete))
                newExtras.append(dfly.Repetition(dfly.RuleRef(rule=concrete),
                                                 e.min, e.max, e.name))
            elif isinstance(e, protocol.RuleRef):
                if e.rule_ref not in self.concreteRules:
                    self.cleanupProtoRule(allPrototypes[e.rule_ref], allPrototypes)

                newExtras.append(dfly.RuleRef(self.concreteRules[e.rule_ref], e.name))
            elif isinstance(e, protocol.ListRef):
                self.concreteWordLists[e.name] = List(e.name + "ConcreteList")
                # self.concreteWordLists[e.name].set(e.words)
                newExtras.append(dfly.ListRef(e.ref_name, self.concreteWordLists[e.name]))
            else:
                raise Exception("Unknown extra type: [%s]" % e)

        r["extras"] = newExtras

        self.concreteStartTime = time.time()
        self.buildConcreteRule(r)
        self.concreteEndTime = time.time()
        self.concreteTime += (self.concreteEndTime - self.concreteStartTime)

        r["built"] = True
        return True

    def updateWordList(self, name, words):
        """Replace the contents of word list `name` if they actually changed."""
        if name not in self.concreteWordLists:
            # log.info("Word list [%s] not in grammar [%s], ignoring" % (name, self.hash))
            return

        # We want to check if the value has actually changed because List's
        # set method will blindly tell Dragon to delete its old list and replace
        # it with this one and we don't want to disturb Dragon unless we have to
        # because Dragon is slow.
        if sorted(words) != sorted(self.concreteWordLists[name]):
            log.info("Updating word list [%s] on grammar [%s] with contents [%s]" % (name, self.hash, len(words)))
            log.info("old list: %s" % self.concreteWordLists[name])
            # TODO: need to check existing load state, then send a loading message here, then restore
            # old state. This way we can see when word lists are taking a long time to load...
            updateStart = time.time()
            self.concreteWordLists[name].set(words)
            updateEnd = time.time()
            log.info("Word list update time: %ss" % (updateEnd - updateStart))
Пример #16
0
    from dragonfly.engines.backend_sapi5.engine import Sapi5InProcEngine
    from dragonfly import (Grammar, CompoundRule, Dictation, Choice)
except:
    error("Failed to import dragonfly, path: %s" % dragonfly_path)


# Connect the in-process SAPI5 engine and speak an audible confirmation.
engine = Sapi5InProcEngine()
engine.connect()
engine.speak('Speak recognition active!')


# Voice command rule combining spoken form and recognition processing.
class ExampleRule(CompoundRule):
    """Example compound rule that fires on "do something computer"."""

    spec = "do something computer"  # Spoken form of command.

    def _process_recognition(self, node, extras):  # Callback when command is spoken.
        # Parenthesized print works under both Python 2 and Python 3;
        # the old statement form was Python-2-only.
        print("Voice command spoken.")

# Create a grammar which contains and loads the command rule.
grammar = Grammar("example grammar")  # Create a grammar to contain the command rule.
grammar.add_rule(ExampleRule())  # Add the command rule to the grammar.
logger.info("Loading Grammar")
grammar.load()  # Load the grammar.
logger.info("Grammar loaded")

# Pump Windows COM messages so recognition callbacks fire. The loop runs
# until interrupted; previously grammar.unload() sat after a bare
# `while True:` and was unreachable, so the grammar was never unloaded.
try:
    while True:
        pythoncom.PumpWaitingMessages()
        sleep(.1)
except KeyboardInterrupt:
    pass

grammar.unload()
Пример #17
0
 def unload(self):
     """Deactivate every rule before delegating to the base Grammar unload."""
     for rule in self._rules:
         # unregister to prevent multiply registered rules during restart
         rule.deactivate()
     Grammar.unload(self)