def _processBrailleEvent(command):
    """Called whenever a key is pressed on the Braille display.

    Arguments:
    - command: the BrlAPI command for the key that was pressed.

    Returns True if the event was consumed; otherwise False
    """

    # [[[TODO: WDW - probably should add braille bindings to this module.]]]

    consumed = False

    # Braille key presses always interrupt speech.
    #
    speech.stop()

    event = BrailleEvent(command)
    orca_state.lastInputEvent = event

    try:
        consumed = _PRESENTATION_MANAGERS[_currentPresentationManager].\
                   processBrailleEvent(event)
    except:
        debug.printException(debug.LEVEL_SEVERE)

    if (not consumed) and settings.learnModeEnabled:
        consumed = True

    return consumed
def sayAndPrint(text,
                stop=False,
                getInput=False,
                speechServer=None,
                acss=None):
    """Prints the given text.  In addition, if the text field is not
    None, speaks the given text, optionally interrupting anything
    currently being spoken.

    Arguments:
    - text: the text to print and speak
    - stop: if True, interrupt any speech currently being spoken
    - getInput: if True, elicits raw input from the user and returns it
    - speechServer: the speech server to use
    - acss: the ACSS to use for speaking

    Returns raw input from the user if getInput is True.
    """
    if stop:
        speech.stop()
        if speechServer:
            speechServer.stop()

    if speechServer:
        speechServer.speak(text, acss)
    else:
        speech.speak(text, acss)

    if getInput:
        return raw_input(text)
    else:
        print text
def sayAndPrint(text,
                stop=False,
                getInput=False,
                speechServer=None,
                voice=None):
    """Prints the given text.  In addition, if the text field is not
    None, speaks the given text, optionally interrupting anything
    currently being spoken.

    Arguments:
    - text: the text to print and speak
    - stop: if True, interrupt any speech currently being spoken
    - getInput: if True, elicits raw input from the user and returns it
    - speechServer: the speech server to use
    - voice: the ACSS to use for speaking

    Returns raw input from the user if getInput is True.
    """
    if stop:
        speech.stop()
        if speechServer:
            speechServer.stop()

    if speechServer:
        speechServer.speak(text, voice)
    else:
        speech.speak(text, voice)

    if getInput:
        return raw_input(text)
    else:
        print text
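# Hypothetical usage sketch (not from the original source): how the
# sayAndPrint() helper above might be driven from a simple debug prompt.
# The speech module and any speech server are assumed to be initialized
# elsewhere; debug_prompt() is an illustrative name, not part of the project.
def debug_prompt():
    # Interrupt anything currently being spoken, then speak and print a banner.
    sayAndPrint("Entering debug mode.", stop=True)
    # Speak the prompt and return whatever the user types.
    return sayAndPrint("debug> ", getInput=True)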
def play_cb(self, widget, images):
    widget.set_icon_widget(images[int(widget.get_active())])
    if widget.get_active():
        if speech.is_stopped():
            speech.play(self.activity.add_word_marks())
    else:
        speech.stop()
def _play_cb(self, widget, images):
    widget.set_icon_widget(images[int(widget.get_active())])
    if widget.get_active():
        if speech.is_stopped():
            speech.play(self.activity.add_word_marks())
    else:
        speech.stop()
def play_cb(self, widget):
    if widget.get_active():
        self.play_btn.set_named_icon('media-playback-pause')
        if speech.is_stopped():
            speech.play(self._activity._view.get_marked_words())
    else:
        self.play_btn.set_named_icon('media-playback-start')
        speech.stop()
def get_more_text(self):
    if self.current_word < len(self.word_tuples):
        speech.stop()
        more_text = self.get_marked_words()
        speech.play(more_text)
    else:
        if speech.reset_buttons_cb is not None:
            speech.reset_buttons_cb()
def __speak(self, text=None, acss=None, interrupt=True):
    """Speaks all queued text immediately.  If text is not None,
    it is added to the queue before speaking.

    Arguments:
    - text: optional text to add to the queue before speaking
    - acss: acss.ACSS instance; if None, the default voice settings
            will be used.  Otherwise, the acss settings will be
            used to augment/override the default voice settings.
    - interrupt: if True, stops any speech in progress before
                 speaking the text

    Returns an id of the thing being spoken or -1 if nothing is to
    be spoken.
    """

    # If the user has speech turned off, just return.
    #
    if not settings.enableSpeech:
        return -1

    speaker = self.__getSpeaker(acss)
    if acss and not acss.has_key(ACSS.RATE):
        voices = settings.voices
        defaultACSS = voices[settings.DEFAULT_VOICE]
        if defaultACSS.has_key(ACSS.RATE):
            self.__setRate(speaker, defaultACSS[ACSS.RATE])

    if not text:
        if interrupt:
            speech.stop()
        return -1

    text = self.__addVerbalizedPunctuation(text)
    if orca_state.activeScript and orca_state.usePronunciationDictionary:
        text = orca_state.activeScript.adjustForPronunciation(text)

    try:
        # [[[TODO: WDW - back this stop out for now.  The problem is
        # that we end up clipping too much speech, especially in the
        # case where we want to speak the contents of a popup before
        # speaking the object with focus.]]]
        #
        #if interrupt:
        #    speaker.stop()
        self.__lastText = [text, acss]
        self.__isSpeaking = True
        return speaker.say(text)
    except:
        # On failure, remember what we said, reset our connection to the
        # speech synthesis driver, and try to say it again.
        #
        debug.printException(debug.LEVEL_SEVERE)
        debug.println(debug.LEVEL_SEVERE, "Restarting speech...")
        self.reset()
        return -1
def handle_button_action(self, name, sender):
    BUTTON_PREFIX = 'button_'
    INFO_PREFIX = 'info_'
    if name == 'button_start_speech':
        speech.say(sample_text.get_sample_text(), 'de',
                   1.0 * self.conf.rechtschreibung.speech_speed / 100.0)
    elif name == 'button_stop_speech':
        speech.stop()
    elif name == 'button_load_mode':
        self.load_mode_start(LOAD_MODE_RULESET)
    elif name == 'button_load_reference_mode':
        self.load_mode_start(LOAD_MODE_REFERENCE)
    elif name == 'button_save_mode':
        self.save_mode_start()
    elif name == 'button_open_app_control_view':
        self.open_app_control_view()
    elif name == 'button_open_top_navigation_view':
        self.open_top_navigation_view()
    elif name == 'button_open_statistics_view':
        self.open_statistics_view()
    elif name == 'button_close_top_navigation_view':
        self.close_top_navigation_view()
    elif name == 'button_icon_rechtschreibung':
        self.button_icon_rechtschreibung()
    elif name.startswith(BUTTON_PREFIX):
        view_name = name[len(BUTTON_PREFIX):]
        child_view = self.find_subview_by_name(view_name)
        if child_view != None:
            view = self.find_subview_by_name(NAME_NAVIGATION_VIEW)
            view.push_view(child_view)
            return 1
        else:
            logger.warning("cannot find subview '%s'" % view_name)
    elif name.startswith(INFO_PREFIX):
        info_name = name[len(INFO_PREFIX):]
        rule_info = self.rule_doc_manager.get_rule_info_by_attr_name(info_name)
        if rule_info:
            self.info_popup.present(rule_info,
                                    close_label=words.schlieszen(c=rulesets.C_BOS))
        else:
            logger.error("cannot find info text for %s" % info_name)
    else:
        super(MainViewController, self).handle_button_action(name, sender)
def run(self):
    """Activates the interface

    raises
    ------
    InterfaceIsRunningError
        If the interface is currently running
    """
    if self._running:
        raise InterfaceIsRunningError(
            'The interface is currently running!')
    self._running = True
    # Tells the method when it's time to play or speak the confirm prompt
    # (0: waiting, 1: play prompt, 2: has played the prompt)
    playOK = 0
    if self._sounds['pop'] != None:
        sound_pool.play_stationary(self._sounds['pop'])
    # Play the message
    if self._isFile:
        source = sound_pool.play_stationary(self._content)
    else:
        speech.speak(self._content + ' ' + self._button, True)
    while self._running:
        time.sleep(.005)  # Be kind to processors
        if self._isFile:  # This is a sound file
            if not sound_pool.sound_is_playing(source):
                # The message has finished playing
                if playOK != 2:
                    playOK = 1  # Setting playOK to 1 means the message is finished playing
                if playOK == 1:
                    if self._exitAutomatically:  # Exit the dialogue when done
                        self._running = False
                    else:  # Play or speak the done prompt
                        if self._sounds['confirm'] != None:
                            sound_pool.play_stationary(self._sounds['confirm'])
                        else:
                            speech.speak(self._button)
                    playOK = 2  # Don't play the confirm prompt again
        for e in pygame.event.get():  # Poll the key events
            if e.type == pygame.KEYDOWN:
                if e.key != pygame.K_UP:
                    # Any key besides up exits the dialogue
                    if self._isFile:
                        sound_pool.pause_sound(source)
                    else:
                        speech.stop()
                    self._running = False
                else:  # Up replays the contents
                    if self._isFile:
                        if not sound_pool.sound_is_playing(source):
                            playOK = 0
                            source = sound_pool.play_stationary(self._content)
                    else:
                        speech.speak(self._content + ' ' + self._button, True)
def touch_began(self, touch):
    global warn
    global doblock
    global pres
    global lan2
    global whole
    global hideit
    x, y = touch.location
    sound.play_effect("Beep", 1.0)
    # sound.play_effect("Bleep",1.0)
    # sound.play_effect("Shot",1.0)
    # sound.play_effect("Woosh_2",1.0)
    if hideit and abs(x - self.size.x) < 50 and (y < 50):
        speech.say("exit")
        time.sleep(0.5)
        os.abort()
        # x,y=[10,10]
    if doblock:
        dis = 1e6
        k = -1
        ksave = -1
        for c in self.children:
            [px, py] = c.position
            dx = (px - x)**2
            dy = (py - y)**2
            k = k + 1
            tdis = (dx + dy)**0.5
            if tdis < dis:
                dis = tdis
                ksave = k
        # print(ksave,dis)
        doblock = False
        pres = ksave - 1
        # print("final",dis,ksave)
        while (len(self.children) > 0):
            self.children[0].remove_from_parent()
        startit()
        return
    move_action = Action.move_to(x, y, 0.7, TIMING_SINODIAL)
    self.s2.run_action(move_action)
    speech.stop()
    pick = int(random.random() * len(warn))
    while (pick == self.pick):
        pick = int(random.random() * len(warn))
    self.pick = pick
    # speech.say(warn[pick],lan2)
    # speech.say(whole)
    # if (x < 50) and (y < 50):
    if (x < self.size.x) and (y < self.size.y):
        doblock = True
        while (len(self.children) > 0):
            self.children[0].remove_from_parent()
def do_POST(self):
    contentLength = self.headers.getheader('content-length')
    if contentLength:
        contentLength = int(contentLength)
        inputBody = self.rfile.read(contentLength)
        debug.println(debug.LEVEL_FINEST,
                      "httpserver._HTTPRequestHandler received %s" \
                      % inputBody)
        if inputBody.startswith("speak:"):
            speech.speak(inputBody[6:])
            self.send_response(200, 'OK')
        elif inputBody == "stop":
            speech.stop()
            self.send_response(200, 'OK')
        elif inputBody == "isSpeaking":
            self.send_response(200, 'OK')
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write("%s" % speech.isSpeaking())
        elif inputBody.startswith("log:"):
            import logging
            logFile = inputBody[4:]
            for logger in ['braille', 'speech']:
                log = logging.getLogger(logger)
                formatter = logging.Formatter('%(name)s.%(message)s')
                try:
                    loggingFileHandlers[logger].close()
                    log.removeHandler(loggingFileHandlers[logger])
                except:
                    pass
                if logFile and len(logFile):
                    loggingFileHandlers[logger] = logging.FileHandler(
                        '%s.%s' % (logFile, logger), 'w')
                    loggingFileHandlers[logger].setFormatter(formatter)
                    log.addHandler(loggingFileHandlers[logger])
                    log.setLevel(logging.INFO)
            self.send_response(200, 'OK')
        elif inputBody.startswith("debug:"):
            split = inputBody.split(':')
            debug.debugLevel = int(split[1])
            if debug.debugFile:
                debug.debugFile.close()
                debug.debugFile = None
            if (len(split) == 3) and (len(split[2])):
                debug.debugFile = open('%s.debug' % split[2], 'w', 0)
            self.send_response(200, 'OK')
    else:
        debug.println(debug.LEVEL_FINEST,
                      "httpserver._HTTPRequestHandler received no data")
def _onMouseButton(e):
    """Tracks mouse button events, stopping any speech in progress.

    Arguments:
    - e: at-spi event from the at-api registry
    """

    event = atspi.Event(e)
    orca_state.lastInputEvent = MouseButtonEvent(event)

    # A mouse button event looks like: mouse:button:1p, where the
    # number is the button number and the 'p' is either 'p' or 'r',
    # meaning pressed or released.  We only want to stop speech on
    # button presses.
    #
    if event.type.endswith("p"):
        speech.stop()
def listNotificationMessages(event):
    """When list notification messages mode is enabled, this function
    provides a means by which users can navigate through the list of
    notification messages.  Users can use the navigation keys or press
    the number of a message.  Pressing the Escape key disables the mode.
    """
    global indexNotificationMessages
    global invalidKeys

    consumed = True
    speak = True

    if event.type != pyatspi.KEY_PRESSED_EVENT:
        return False

    speech.stop()

    if event.event_string == "Escape":
        exitListNotificationMessagesMode()
        speak = False
    elif event.event_string == "Home":
        indexNotificationMessages = 1
    elif event.event_string == "End":
        indexNotificationMessages = size()
    elif event.event_string == "Up":
        indexNotificationMessages -= 1
    elif event.event_string == "Down":
        indexNotificationMessages += 1
    elif event.event_string in \
         ['1', '2', '3', '4', '5', '6', '7', '8', '9']:
        indexNotificationMessages = int(event.event_string)
    elif event.event_string in ['h', 'H']:
        _help(True)
        speak = False
    elif event.event_string == "space":
        pass
    else:
        speak = False
        invalidKeys += 1
        if invalidKeys > 2:
            _help()
            invalidKeys = 0

    if speak:
        _showNotificationMessage(indexNotificationMessages)
        invalidKeys = 0

    return consumed
def _toggleSilenceSpeech(script=None, inputEvent=None):
    """Toggle the silencing of speech.

    Returns True to indicate the input event has been consumed.
    """
    speech.stop()
    if settings.silenceSpeech:
        settings.silenceSpeech = False
        # Translators: this is a spoken prompt letting the user know
        # that speech synthesis has been turned back on.
        #
        speech.speak(_("Speech enabled."))
    else:
        # Translators: this is a spoken prompt letting the user know
        # that speech synthesis has been temporarily turned off.
        #
        speech.speak(_("Speech disabled."))
        settings.silenceSpeech = True
    return True
def main():
    speech.stop()
    if not appex.is_running_extension():
        console.hud_alert('Reading clipboard')
        text = clipboard.get()
        url = None
    else:
        text = appex.get_text()
        url = appex.get_url()
    if url == None:
        try:
            url = [mgroups[0]
                   for mgroups in GRUBER_URLINTEXT_PAT.findall(text)][0]
        except:
            pass
    if url != None:
        console.hud_alert('Reading: ' + url)
        h = html2text.HTML2Text()
        try:
            r = requests.get(url=url,
                             headers={
                                 "User-agent":
                                 "Mozilla/5.0{0:06}".format(
                                     random.randrange(999999))
                             })
        except requests.ConnectionError as e:
            console.alert('Unable to connect to url.')
            return True
        html_content = r.text.decode('utf-8')
        text = html2text.html2text(html_content)
    else:
        console.hud_alert('Reading text: ' + str(text))
    if text:
        speech.say(text)
        stop = console.alert('Done?', hide_cancel_button=True, button1='OK')
        speech.stop()
    else:
        console.hud_alert('No text found.')
def do_POST(self):
    contentLength = self.headers.getheader('content-length')
    if contentLength:
        contentLength = int(contentLength)
        inputBody = self.rfile.read(contentLength)
        debug.println(debug.LEVEL_FINEST,
                      "httpserver._HTTPRequestHandler received %s" \
                      % inputBody)
        if inputBody.startswith("speak:"):
            speech.speak(inputBody[6:])
            self.send_response(200, 'OK')
        elif inputBody == "stop":
            speech.stop()
            self.send_response(200, 'OK')
        elif inputBody == "isSpeaking":
            self.send_response(200, 'OK')
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write("%s" % speech.isSpeaking())
    else:
        debug.println(debug.LEVEL_FINEST,
                      "httpserver._HTTPRequestHandler received no data")
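# Hypothetical client sketch (not part of the handler above): the do_POST
# handler reads the raw request body and treats "speak:<text>", "stop" and
# "isSpeaking" as commands, so a client only needs to POST plain text.  The
# host and port values here are illustrative assumptions, and requests is
# used only because it already appears in other examples on this page.
import requests

def stop_remote_speech(host="localhost", port=20433):
    # Ask the running HTTP server to stop any speech in progress.
    return requests.post("http://%s:%s/" % (host, port), data="stop")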
def touch_began(self, touch):
    global warn
    global doblock
    global pres
    global lan2
    x, y = touch.location
    if doblock:
        dis = 1e6
        k = -1
        ksave = -1
        for c in self.children:
            [px, py] = c.position
            dx = (px - x)**2
            dy = (py - y)**2
            k = k + 1
            tdis = (dx + dy)**0.5
            if tdis < dis:
                dis = tdis
                ksave = k
        # print(ksave,dis)
        doblock = False
        pres = ksave - 1
        print("final", dis, ksave)
        while (len(self.children) > 0):
            self.children[0].remove_from_parent()
        startit()
        return
    move_action = Action.move_to(x, y, 0.7, TIMING_SINODIAL)
    self.s2.run_action(move_action)
    speech.stop()
    pick = int(random.random() * len(warn))
    while (pick == self.pick):
        pick = int(random.random() * len(warn))
    self.pick = pick
    speech.say(warn[pick], lan2)
    if (x < 50) and (y < 50):
        doblock = True
        while (len(self.children) > 0):
            self.children[0].remove_from_parent()
def _outputElements(self, output_obj):
    """Output the given elements.
    TODO: Now we are mainly using WhereAmI, we might need to find out
    a better, less verbose output method.

    Arguments:
    - output_obj: A list of objects to output, could be accessibles
      and text.
    """
    if output_obj:
        speech.stop()
        for obj in output_obj:
            if obj is None:
                continue
            if isinstance(obj, str):
                speech.speak(obj)
                # TODO: There is probably something more useful that we
                # could display.
                braille.displayMessage(obj)
            else:
                speech.speak(
                    self._currentMouseOver.script.speechGenerator.generateSpeech(obj))
                self._currentMouseOver.script.updateBraille(obj)
def main():
    speech.stop()
    if not appex.is_running_extension():
        console.hud_alert('Reading clipboard')
        text = clipboard.get()
        url = None
    else:
        text = appex.get_text()
        url = appex.get_url()
    if url == None:
        try:
            url = [mgroups[0]
                   for mgroups in GRUBER_URLINTEXT_PAT.findall(text)][0]
        except:
            pass
    if url != None:
        console.hud_alert('Reading: ' + url)
        h = html2text.HTML2Text()
        try:
            r = requests.get(
                url=url,
                headers={"User-agent":
                         "Mozilla/5.0{0:06}".format(random.randrange(999999))})
        except Exception as e:
            console.alert(e.message)
            return True
        html_content = r.text.decode('utf-8')
        text = html2text.html2text(html_content)
    else:
        console.hud_alert('Reading text: ' + str(text))
    if text:
        speech.say(text)
        stop = console.alert('Done?', hide_cancel_button=True, button1='OK')
        speech.stop()
    else:
        console.hud_alert('No text found.')
def touch_began(self, touch):
    TouchX, TouchY = touch.location.x, touch.location.y
    self.Dragging = False
    if self.Stage == 'Insult':
        if TouchY < 70:
            self.ButtonPressed = True
            if TouchX < self.sw/4.0:
                self.SpeechEnabled = not self.SpeechEnabled
                speech.stop()
                speech.stop()
                speech.stop()
            elif TouchX > self.sw-(self.sw/4.0):
                if self.Full != '':
                    self.Stage = 'Share'
            else:
                self.Dragging = True
                self.AimY = TouchY
                self.tX = 20
                self.Barrier = self.sh/4.0
        else:
            self.ButtonPressed = False
        if self.Dunn and not self.ButtonPressed:
            self.Display = ''
            self.Randum = 25
            self.Dunn = False
    elif self.Stage == 'Edit':
        if TouchY > self.sh-70:
            self.Dragging = True
            self.AimY = TouchY
            self.tX = self.sh-90
            self.Barrier = self.sh/4.0*3
        else:
            pass  # Edit values
    else:
        if TouchX < 80 and TouchY > self.sh-100:
            self.Stage = 'Insult'
        if TouchY > self.sh-280 and TouchY < self.sh-180:
            if TouchX < self.sw/2.0+50 and TouchX > self.sw/2.0-50:
                if string.find(self.FirstDisplay, ' ') > -1:
                    self.FirstDisplay2 = string.join(string.split(self.FirstDisplay, ' '), '%20')
                else:
                    self.FirstDisplay2 = self.FirstDisplay
                if not self.Full[0] in ('A', 'E', 'I', 'O', 'U'):
                    openUrl = 'mailto:?subject=The%20Ultimate%20Insult%20Creator%20for%20iOS&body=You%20know%20what?%20You\'re%20nothing%20but%20a%20'+self.FirstDisplay2+'%20'+self.SecondDisplay+'%20'+self.ThirdDisplay+'!'
                else:
                    openUrl = 'mailto:?subject=The%20Ultimate%20Insult%20Creator%20for%20iOS&body=You%20know%20what?%20You\'re%20nothing%20but%20an%20'+self.FirstDisplay2+'%20'+self.SecondDisplay+'%20'+self.ThirdDisplay+'!'
                webbrowser.open(openUrl)
def _getMessage(self, event):
    """Gets the message associated with a given live event."""
    attrs = self._getAttrDictionary(event.source)
    content = []
    labels = []

    # A message is divided into two parts: labels and content.  We
    # will first try to get the content.  If there is None,
    # assume it is an invalid message and return None
    if event.type.startswith('object:children-changed:add'):
        # Get the text based on the atomic property
        try:
            if attrs['container-atomic'] == 'true':
                # expand the source if atomic is true
                newcontent = \
                    self._script.utilities.expandEOCs(event.source)
            else:
                # expand the target if atomic is false
                newcontent = \
                    self._script.utilities.expandEOCs(event.any_data)
        except (KeyError, TypeError):
            # expand the target if there is no ARIA markup
            newcontent = \
                self._script.utilities.expandEOCs(event.any_data)

        # add our content to the returned message or return None if no
        # content
        if newcontent:
            content.append(newcontent)
        else:
            return None
    else:  # object:text-changed:insert
        # Get a handle to the Text interface for the source.
        # Serious problems if this fails.
        #
        try:
            sourceitext = event.source.queryText()
        except NotImplementedError:
            return None

        # We found an embed character.  We can expect a children-changed
        # event, which we will act on, so just return.
        txt = sourceitext.getText(0, -1)
        if txt.count(self._script.EMBEDDED_OBJECT_CHARACTER) > 0:
            return None

        # Get the text based on the atomic property
        try:
            if attrs['container-atomic'] == 'true':
                newcontent = txt
            else:
                newcontent = txt[event.detail1:event.detail1+event.detail2]
        except KeyError:
            newcontent = txt[event.detail1:event.detail1+event.detail2]

        # add our content to the returned message or return None if no
        # content
        if len(newcontent) > 0:
            content.append(newcontent)
        else:
            return None

    # Get the labeling information now that we have good content.
    labels = self._getLabelsAsUtterances(event.source)

    # instantly send out notify messages
    if 'channel' in attrs and attrs['channel'] == 'notify':
        utts = labels + content
        speech.stop()
        # Note: we would like to use a different ACSS for alerts.  This
        # work should be done as part of bug #412656.  For now, however,
        # we will also present the message in braille.
        self._script.presentMessage(utts)
        return None
    else:
        return {'content': content, 'labels': labels}
def stop_cb(self, widget):
    self.stop_btn.set_sensitive(False)
    self.play_btn.set_icon_name("media-playback-start")
    self.play_btn.set_active(False)
    self.is_paused = False
    speech.stop()
def draw(self):
    background(0.75, 0.75, 0.75)
    no_tint()
    image(self.Valuez, 10, 10, self.sw-20, self.sh-90)
    if self.Dragging:
        self.tX -= (self.tX-self.AimY)*0.25
    if not self.Dragging and self.Stage != 'Share':
        if self.Stage == 'Insult':
            self.tX -= (self.tX * 0.1)
        else:
            self.tX += ((self.sh-70)-self.tX) * 0.1
    push_matrix()
    translate(0, self.tX)
    stroke_weight(1)
    for ij in range(10):
        stroke(0.2, 0.2, 0.2, (1-ij/10.0)/2.0)
        line(0, -ij, self.sw, -ij)
    fill(0.8, 0.8, 0.8)
    no_stroke()
    rect(0, 0, self.sw, self.sh)
    if self.Randum > 0:
        self.Randum -= 1
        self.Randomize()
    elif self.Randum > -10:
        self.Randum -= 1
        if self.Randum % 2 == 0:
            self.Randomize()
    elif self.Randum > -20:
        self.Randum -= 1
        if self.Randum % 3 == 0:
            self.Randomize()
    elif self.Randum > -30:
        self.Randum -= 1
        if self.Randum % 4 == 0:
            self.Randomize()
    elif self.Randum > -40:
        self.Randum -= 1
        if self.Randum % 5 == 0:
            self.Randomize()
    elif not self.Dunn:
        self.Dunn = True
        self.Full = self.FirstDisplay + ' ' + self.SecondDisplay + ' ' + self.ThirdDisplay
        if self.SpeechEnabled:
            speech.stop()
            if not self.Full[0] in ('A', 'E', 'I', 'O', 'U'):
                speech.say('You are a '+self.Full, 'en-US', 0.15)
            else:
                speech.say('You are an '+self.Full, 'en-US', 0.15)
    tint(1, 1, 1)
    text(self.Display, 'Noteworthy-Bold', 18, self.sw/2.0+2, self.sh/2.0-2, 5)
    tint(0, 0, 0)
    text(self.Display, 'Noteworthy-Bold', 18, self.sw/2.0, self.sh/2.0, 5)
    tint(0, 0, 0, 0.5)
    if self.Display == '':
        try:
            if not self.Full[0] in ('A', 'E', 'I', 'O', 'U'):
                text('You are a...', 'SourceSansPro-Bold', 42, self.sw/2.0-1, self.sh/4.0*3+1, 5)
            else:
                text('You are an...', 'SourceSansPro-Bold', 42, self.sw/2.0-1, self.sh/4.0*3+1, 5)
        except:
            text('You are a...', 'SourceSansPro-Bold', 42, self.sw/2.0-1, self.sh/4.0*3+1, 5)
    text(self.FirstDisplay, 'SourceSansPro-Bold', 26, self.sw/2.0-0.5, self.sh/8.0*5+0.5, 5)
    text(self.SecondDisplay, 'SourceSansPro-Bold', 26, self.sw/2.0-0.5, self.sh/2.0+0.5, 5)
    text(self.ThirdDisplay, 'SourceSansPro-Bold', 26, self.sw/2.0-0.5, self.sh/8.0*3+0.5, 5)
    tint(0, 0, 0, 0.5-(self.tX/(self.sh-70))/2.0)
    #text(self.Full, 'Noteworthy-Bold', 18, self.sw/2.0-1, 51, 8)
    tint(1, 1, 1)
    if self.Display == '':
        try:
            if not self.Full[0] in ('A', 'E', 'I', 'O', 'U'):
                text('You are a...', 'SourceSansPro-Bold', 42, self.sw/2.0+1, self.sh/4.0*3-1, 5)
            else:
                text('You are an...', 'SourceSansPro-Bold', 42, self.sw/2.0+1, self.sh/4.0*3-1, 5)
        except:
            text('You are a...', 'SourceSansPro-Bold', 42, self.sw/2.0+1, self.sh/4.0*3-1, 5)
    text(self.FirstDisplay, 'SourceSansPro-Bold', 26, self.sw/2.0+1, self.sh/8.0*5-1, 5)
    text(self.SecondDisplay, 'SourceSansPro-Bold', 26, self.sw/2.0+1, self.sh/2.0-1, 5)
    text(self.ThirdDisplay, 'SourceSansPro-Bold', 26, self.sw/2.0+1, self.sh/8.0*3-1, 5)
    tint(1, 1, 1, 1-(self.tX/(self.sh-70)))
    text(self.Full, 'Noteworthy-Bold', 18, self.sw/2.0+2, 48, 8)
    tint(0.2, 0.4, 0.9, 0.75)
    text(self.FirstDisplay, 'SourceSansPro-Bold', 26, self.sw/2.0, self.sh/8.0*5, 5)
    text(self.SecondDisplay, 'SourceSansPro-Bold', 26, self.sw/2.0, self.sh/2.0, 5)
    text(self.ThirdDisplay, 'SourceSansPro-Bold', 26, self.sw/2.0, self.sh/8.0*3, 5)
    tint(0, 0.3, 0.6, 1-(self.tX/(self.sh-70)))
    text(self.Full, 'Noteworthy-Bold', 18, self.sw/2.0, 50, 8)
    tint(0.3, 0.3, 0.3)
    if self.Display == '':
        try:
            if not self.Full[0] in ('A', 'E', 'I', 'O', 'U'):
                text('You are a...', 'SourceSansPro-Bold', 42, self.sw/2.0, self.sh/4.0*3, 5)
            else:
                text('You are an...', 'SourceSansPro-Bold', 42, self.sw/2.0, self.sh/4.0*3, 5)
        except:
            text('You are a...', 'SourceSansPro-Bold', 42, self.sw/2.0, self.sh/4.0*3, 5)
    tint(0.2, 0.2, 0.2, 1-self.tX/(self.sh-50))
    image(self.Unmuted if self.SpeechEnabled else self.Muted, 5, 5, 40, 40)
    if self.Full == '':
        tint(0.2, 0.2, 0.2, 0.25-self.tX/(self.sh-50)/4.0)
    image(self.ShareImage, self.sw-45, 5, 40, 40)
    tint(0.2, 0.2, 0.2)
    image(self.EditImage, self.sw/2.0-20, 5, 40, 40)
    # Help
    if self.Display != '':
        tint(1, 1, 1, 1-(self.tX/(self.sh-70)))
        text('Text-to-speech', 'Noteworthy-Bold', 14, 7, 48, 9)
        text('Share', 'Noteworthy-Bold', 14, self.sw-5, 48, 7)
        text('List', 'Noteworthy-Bold', 14, self.sw/2.0+2, 48, 8)
        tint(0, 0, 0, 1-(self.tX/(self.sh-70)))
        text('Text-to-speech', 'Noteworthy-Bold', 14, 5, 50, 9)
        text('Share', 'Noteworthy-Bold', 14, self.sw-7, 50, 7)
        text('List', 'Noteworthy-Bold', 14, self.sw/2.0, 50, 8)
    pop_matrix()
    if self.Stage == 'Share':
        fill(0, 0, 0, 0.8)
        no_stroke()
        rect(0, 0, self.sw, self.sh)
        fill(0.8, 0.8, 0.8)
        rect(25, 35, self.sw-50, self.sh-90)
        rect(35, 25, self.sw-70, self.sh-70)
        ellipse(25, self.sh-65, 20, 20)
        ellipse(self.sw-45, self.sh-65, 20, 20)
        ellipse(25, 25, 20, 20)
        ellipse(self.sw-45, 25, 20, 20)
        tint(1, 1, 1)
        if not self.Full[0] in ('A', 'E', 'I', 'O', 'U'):
            text('You know what? You\'re a', 'Noteworthy-Bold', 18, self.sw/2.0+2, self.sh-98, 2)
        else:
            text('You know what? You\'re an', 'Noteworthy-Bold', 18, self.sw/2.0+2, self.sh-98, 2)
        text(self.Full, 'Noteworthy-Bold', 18, self.sw/2.0+2, self.sh-122, 2)
        tint(0.2, 0.2, 0.2)
        if not self.Full[0] in ('A', 'E', 'I', 'O', 'U'):
            text('You know what? You\'re a', 'Noteworthy-Bold', 18, self.sw/2.0, self.sh-98, 2)
        else:
            text('You know what? You\'re an', 'Noteworthy-Bold', 18, self.sw/2.0, self.sh-98, 2)
        text('Share by Email:', 'SourceSansPro-Bold', 18, self.sw/2.0, self.sh-180, 2)
        tint(0.2, 0.2, 1)
        text(self.Full, 'Noteworthy-Bold', 18, self.sw/2.0, self.sh-120, 2)
        no_tint()
        image(self.CloseButton, 30, self.sh-90, 40, 40)
        image(self.EM, self.sw/2.0-25, self.sh-255-25, 50, 50)
def speak_scripture(sender):
    if speech.is_speaking():
        speech.stop()
    else:
        speech.say(contents.text)
def close(sender):
    sender.superview.superview.close()
    speech.stop()
def destroy_cb(self, widget, data=None):
    speech.stop()
    Gtk.main_quit()
def stop_cb(self, widget):
    self.stop_btn.set_sensitive(False)
    self.play_btn.set_named_icon('media-playback-start')
    self.play_btn.set_active(False)
    speech.stop()
def _processKeyboardEvent(event):
    """The primary key event handler for Orca.  Keeps track of various
    attributes, such as the lastInputEvent.  Also calls keyEcho as well
    as any local keybindings before passing the event on to the active
    presentation manager.  This method is called synchronously from the
    AT-SPI registry and should be performant.  In addition, it must
    return True if it has consumed the event (and False if not).

    Arguments:
    - event: an AT-SPI DeviceEvent

    Returns True if the event should be consumed.
    """
    global _orcaModifierPressed

    orca_state.lastInputEventTimestamp = event.timestamp

    # Log the keyboard event for future playback, if desired.
    # Note here that the key event_string being output is
    # exactly what we received.  The KeyboardEvent object,
    # however, will translate the event_string for control
    # characters to their upper case ASCII equivalent.
    #
    string = atspi.KeystrokeListener.keyEventToString(event)
    if _recordingKeystrokes and _keystrokesFile \
       and (event.event_string != "Pause") \
       and (event.event_string != "F21"):
        _keystrokesFile.write(string + "\n")
    debug.printInputEvent(debug.LEVEL_FINE, string)

    keyboardEvent = KeyboardEvent(event)

    # See if this is one of our special Orca modifier keys.
    #
    # [[[TODO: WDW - Note that just looking at the keycode should
    # suffice, but there is a "feature" in the Java Access Bridge
    # where it chooses to emit Java platform-independent keycodes
    # instead of the keycodes for the base platform:
    #
    # http://bugzilla.gnome.org/show_bug.cgi?id=106004
    # http://bugzilla.gnome.org/show_bug.cgi?id=318615
    #
    # So...we need to workaround this problem.
    #
    # If you make the following expression True we will get a positive
    # match for all keysyms associated with a given keysym specified
    # as an Orca modifier key.
    #
    # For example, assume the Orca modifier is set to \ for some
    # reason.  The key that has \ on it produces \ without the Shift
    # key and | with the Shift key.  If the following expression is
    # True, both the \ and | will be viewed as the Orca modifier.  If
    # the following expression is False, only the \ will be viewed as
    # the Orca modifier (i.e., Shift+\ will still function as the |
    # character).  In general, I think we want to avoid sucking in all
    # possible keysyms because it can have unexpected results.]]]
    #
    if False:
        allPossibleKeysyms = []
        for keysym in settings.orcaModifierKeys:
            allPossibleKeysyms.extend(keybindings.getAllKeysyms(keysym))
    else:
        allPossibleKeysyms = settings.orcaModifierKeys

    isOrcaModifier = allPossibleKeysyms.count(keyboardEvent.event_string) > 0

    if event.type == atspi.Accessibility.KEY_PRESSED_EVENT:
        # Key presses always interrupt speech.
        #
        speech.stop()

        # If learn mode is enabled, it will echo the keys.
        #
        if not settings.learnModeEnabled:
            _keyEcho(keyboardEvent)

        # We treat the Insert key as a modifier - so just swallow it and
        # set our internal state.
        #
        if isOrcaModifier:
            _orcaModifierPressed = True

    elif isOrcaModifier \
        and (keyboardEvent.type == atspi.Accessibility.KEY_RELEASED_EVENT):
        _orcaModifierPressed = False

    if _orcaModifierPressed:
        keyboardEvent.modifiers = keyboardEvent.modifiers \
                                  | (1 << settings.MODIFIER_ORCA)

    # Orca gets first stab at the event.  Then, the presenter gets
    # a shot. [[[TODO: WDW - might want to let the presenter try first?
    # The main reason this is staying as is is that we may not want
    # scripts to override fundamental Orca key bindings.]]]
    #
    consumed = False
    try:
        if orca_state.capturingKeys:
            _processKeyCaptured(keyboardEvent)
        else:
            consumed = _keyBindings.consumeKeyboardEvent(None, keyboardEvent)
            if (not consumed) and (_currentPresentationManager >= 0):
                consumed = _PRESENTATION_MANAGERS[_currentPresentationManager].\
                           processKeyboardEvent(keyboardEvent)
            if (not consumed) and settings.learnModeEnabled:
                if keyboardEvent.type \
                    == atspi.Accessibility.KEY_PRESSED_EVENT:
                    clickCount = orca_state.activeScript.getClickCount(\
                        orca_state.lastInputEvent,
                        keyboardEvent)
                    if clickCount == 2:
                        orca_state.activeScript.phoneticSpellCurrentItem(\
                            keyboardEvent.event_string)
                    else:
                        # Check to see if there are localized words to be
                        # spoken for this key event.
                        #
                        braille.displayMessage(keyboardEvent.event_string)
                        event_string = keyboardEvent.event_string
                        event_string = keynames.getKeyName(event_string)
                        speech.speak(event_string)
                consumed = True
    except:
        debug.printException(debug.LEVEL_SEVERE)

    orca_state.lastInputEvent = keyboardEvent

    # If this is a key event for a non-modifier key, save a handle to it.
    # This is needed to help determine user actions when a multi-key chord
    # has been pressed, and we might get the key events in different orders.
    # See comment #15 of bug #435201 for more details.
    #
    if not _isModifierKey(keyboardEvent.event_string):
        orca_state.lastNonModifierKeyEvent = keyboardEvent

    return consumed or isOrcaModifier
def get_more_text(self):
    if self.current_word < len(self.word_tuples):
        speech.stop()
        more_text = self.get_marked_words()
        speech.play(more_text)
def stop_cb(self, widget):
    self.stop_btn.set_sensitive(False)
    self.play_btn.set_icon_name('media-playback-start')
    self.play_btn.set_active(False)
    self.is_paused = False
    speech.stop()
# get page
page = wikipedia.page(search[0])

# test
# page = wikipedia.page("illinois")

sections = page.sections
print("---> Page id: " + page.pageid)

# get page data
pageData = wikipedia.WikipediaPage(search[0])
print("Categories: ")
if not pageData.categories:
    print("No Categories")
else:
    print(pageData.categories[0])
print("----------")

# get page summary
# show other options for disambig error
try:
    localSummary = wikipedia.summary(search[0])
    print(localSummary)
except wikipedia.exceptions.DisambiguationError as e:
    print(e.options)

speech.say(localSummary)
speech.stop()
def stop():
    speech.stop()
def say(text):
    global Language
    speech.stop()
    speech.say(text, Language)
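# Hypothetical usage sketch for the say()/stop() wrappers above.  The global
# Language value is assumed to be set elsewhere in the original module;
# 'en-US' here is only an illustrative default for Pythonista's speech.say().
Language = 'en-US'
say('Hello')   # interrupts any current speech, then speaks in Language
stop()         # silence immediately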
def resume(self):
    for ij in range(10):
        speech.stop()
def delete_cb(self, widget, event, data=None):
    speech.stop()
    return False
def pause(self):
    for ij in range(10):
        speech.stop()
def main():
    global logger
    console.clear()
    logger = log.open_logging('rechtschreibung', reload=True)
    logger.info("Start application")
    default_mode = spelling_mode.spelling_mode()
    rulesets.set_default_mode(default_mode.combination)
    config_handler = config.ConfigHandler(app_config.AppConfig())
    conf = config_handler.read_config_file(CONFIG_FILE, SAMPLE_CONFIG_FILE)
    image_rechtschreibung = ui.Image.named(IMAGE_URL_RECHTSCHREIBUNG).with_rendering_mode(ui.RENDERING_MODE_ORIGINAL)

    my_main_view_controller = MainViewController(conf)

    top_navigation_vc = ui_util.ViewController(my_main_view_controller)
    navigation_vc = ui_util.ViewController(top_navigation_vc)
    navigation_vc.load('top_navigation')
    top_navigation_view = ui.NavigationView(navigation_vc.view,
                                            title_bar_color=defaults.COLOR_GREY)
    top_navigation_view.title_bar_color = defaults.COLOR_LIGHT_GREY
    top_navigation_vc.view = top_navigation_view
    my_main_view_controller.add_child_controller(NAME_NAVIGATION_VIEW, top_navigation_vc)
    top_navigation_view.name = NAME_NAVIGATION_VIEW

    if ui_util.is_iphone():
        my_main_view_controller.load('rechtschreibung_iphone')
        app_control_vc = ui_util.ViewController(my_main_view_controller)
        app_control_vc.load('rechtschreibung_app_control_iphone')
        my_main_view_controller.add_left_button_item(
            NAME_NAVIGATION_VIEW_TOP_LEVEL, 'button_close_top_navigation_view',
            ui.ButtonItem(image=ui.Image.named('iob:close_round_32')))
        button_item_list = [
            ui.ButtonItem(image=ui.Image.named('lib/ios7_toggle_32.png'),
                          action=my_main_view_controller.handle_action,
                          title='button_open_top_navigation_view'),
            ui.ButtonItem(image=ui.Image.named('ionicons-gear-a-32'),
                          action=my_main_view_controller.handle_action,
                          title='button_open_app_control_view'),
            ui.ButtonItem(image=image_rechtschreibung,
                          action=my_main_view_controller.handle_action,
                          title='button_icon_rechtschreibung'),
        ]
        my_main_view_controller.set_right_button_item_list('Rechtschreibung', button_item_list)
    else:
        my_main_view_controller.load('rechtschreibung')
        my_main_view_controller.add_right_button_item(
            'Rechtschreibung', 'button_icon_rechtschreibung',
            ui.ButtonItem(image=image_rechtschreibung))

    my_main_view_controller.add_subview('view_container_navigation', top_navigation_vc.view)
    view = my_main_view_controller.find_subview_by_name('segmented_control_highlighting_mode')
    view.action = my_main_view_controller.handle_action
    view_controller_capitalization = ui_util.ViewController(my_main_view_controller)
    view_controller_capitalization.load('view_capitalization')
    view_controller_harmonization = ui_util.ViewController(my_main_view_controller)
    view_controller_harmonization.load('view_harmonization')
    view = my_main_view_controller.find_subview_by_name('segmented_control_harmonization_elongation')
    view.action = my_main_view_controller.handle_action
    view_controller_combinations_simplification = ui_util.ViewController(my_main_view_controller)
    view_controller_combinations_simplification.load('view_combinations_simplification')
    view_controller_combinations_simplification_vowels = ui_util.ViewController(my_main_view_controller)
    view_controller_combinations_simplification_vowels.load('view_combinations_simplification_vowels')
    view_controller_punctuation = ui_util.ViewController(my_main_view_controller)
    view_controller_punctuation.load('view_punctuation')
    view_controller_legacy = ui_util.ViewController(my_main_view_controller)
    view_controller_legacy.load('view_legacy')
    view_controller_layout = ui_util.ViewController(my_main_view_controller)
    view_controller_layout.load('view_layout')
    view_controller_misc_rules = ui_util.ViewController(my_main_view_controller)
    view_controller_misc_rules.load('view_misc_rules')

    my_main_view_controller.set_model(default_mode.combination)

    # Set the empty html page for displaying the sample text. The actual content will be set in
    # method "update_sample_text". We use an absolute path to load the page so that the relative
    # path reference to the style sheet can be derived by the browser.
    text_view = my_main_view_controller.find_subview_by_name('webview_text_view')
    absolute_page_path = 'file:' + os.path.abspath('etc/text_page.html')
    logger.info('Loading HTML page at %s' % absolute_page_path)
    text_view.load_url(absolute_page_path)

    # Wait for a fraction of a second so that load_url() above (which seems to be asynchronous)
    # has a chance to load the page before update_sample_text() below sets the initial content.
    time.sleep(1.0 * conf.rechtschreibung.initial_update_sample_text_delay / 1000.0)
    my_main_view_controller.update_sample_text()

    my_main_view_controller.present('fullscreen', title_bar_color=defaults.COLOR_GREY)
    speech.stop()
    logger.info("Terminate application")