def solve_recaptcha(self, driver):
    """Attempt to solve a Google reCAPTCHA by falling back to the audio challenge.

    Clicks the "I'm not a robot" checkbox; if that alone does not validate,
    switches into the challenge iframe, requests the audio challenge,
    transcribes it with Speech and submits the answer, retrying while the
    challenge error message is still visible.

    :param driver: Selenium WebDriver currently on a page with a reCAPTCHA.
    :return: the same driver, switched back to the default content.
    """
    # NOTE(review): this uses the Selenium 3 `find_element_by_*` /
    # `switch_to_frame` API, which was removed in Selenium 4 — confirm the
    # pinned selenium version.
    driver.switch_to_frame(driver.find_element_by_tag_name("iframe"))
    # Click on the "I'm not a robot" checkbox.
    checkbox = driver.find_element_by_class_name("recaptcha-checkbox-checkmark")
    checkbox.click()
    print("Clicked on the checkbox. Verifying captcha.")
    time.sleep(3)
    is_checked = driver.find_element_by_id(
        "recaptcha-anchor").get_attribute("aria-checked")
    print("reCAPTCHA was clicked. Validated? {}".format(is_checked))
    if is_checked == "false":
        driver.switch_to_default_content()
        frames = driver.find_elements_by_tag_name("iframe")
        # The last iframe on the page holds the image/audio challenge.
        driver.switch_to_frame(frames[-1])
        print("Diving into the image frame.")
        time.sleep(2)
        audio_button = driver.find_element_by_id("recaptcha-audio-button")
        audio_button.click()
        print("Clicked on the audio button.")
        time.sleep(3)
        self._submit_audio_answer(driver)
        # BUG FIX: `EC.invisibility_of_element_located(...)` returns a
        # (truthy) condition object, so `while not EC...(...)` was always
        # False and the retry loop never executed.  The condition must be
        # *called* with the driver to be evaluated.
        while not EC.invisibility_of_element_located(
                (By.CLASS_NAME, "rc-audiochallenge-error-message"))(driver):
            self._submit_audio_answer(driver)
        time.sleep(2)
    driver.switch_to_default_content()
    return driver

def _submit_audio_answer(self, driver):
    """Download the current audio challenge, transcribe it and submit it."""
    audio_download_link = driver.find_element_by_class_name(
        "rc-audiochallenge-tdownload-link").get_attribute('href')
    speech = Speech()
    audio_result = speech.speech_to_text(audio_download_link)
    audio_result_input = driver.find_element_by_id("audio-response")
    audio_result_input.send_keys(audio_result)
    time.sleep(3)
    verify_button = driver.find_element_by_id("recaptcha-verify-button")
    verify_button.click()
def get_speech(self, rm='', parti='', anftyp='', antal=100):
    """Fetch speeches ("anföranden") matching the given filters.

    :param rm: parliamentary year filter.
    :param parti: party code filter.
    :param anftyp: speech-type filter.
    :param antal: maximum number of results to request.
    :return: list of Speech objects (possibly empty).
    """
    query = {
        'rm': rm,
        'parti': parti,
        'anftyp': anftyp,
        'utformat': 'json',
        'sz': antal,
    }
    data = self._get(self.url, 'anforandelista', query)

    speeches = []
    # Special case: Folkpartiet (FP) renamed itself to Liberalerna (L), so a
    # query for L must also pull in results filed under the old FP code.
    if parti == Party.L.name:
        speeches += self.get_speech(rm=rm, parti='FP', anftyp=anftyp,
                                    antal=antal)

    if not data:
        return speeches

    count = data['@antal']
    if count == '0':
        logging.warning(f'No data for {rm}, {parti}, {anftyp}')
    elif count == '1':
        # A single hit comes back as a bare object, not a one-element list.
        speeches.append(Speech(data['anforande']))
    else:
        speeches.extend(Speech(item) for item in data['anforande'])
    return speeches
def key_control(self):
    """Poll pygame events and dispatch keyboard commands to the robot.

    Number keys trigger high-level actions (shutdown, restart, speech,
    dance, sound effects); arrow keys drive the motors while held
    (start on KEYDOWN, stop on KEYUP).
    """
    for event in pygame.event.get():
        # Power key or "0": shut the system down.
        if event.type == pygame.KEYDOWN and event.key == pygame.K_POWER:
            self.shutdown()
        if event.type == pygame.KEYDOWN and event.key == pygame.K_0:
            self.shutdown()
        # "9": clean up GPIO and restart via the start script.
        if event.type == pygame.KEYDOWN and event.key == pygame.K_9:
            self.display_text('Restarting raspie...')
            PyMove().gpio_cleanup()
            time.sleep(2)
            subprocess.call(['.././start.sh'])
            sys.exit()
        # "1": launch the autopilot.
        # NOTE(review): `autopilot_process` is not defined in this method —
        # presumably a module-level handle; confirm it exists at call time.
        if event.type == pygame.KEYUP and event.key == pygame.K_1:
            self.run_autopilot()
            autopilot_process.start()
        # "8": release the GPIO pins.
        if event.type == pygame.KEYDOWN and event.key == pygame.K_8:
            print 'Cleaning up gpio'
            PyMove().gpio_cleanup()
        # "2": spoken self-introduction.
        if event.type == pygame.KEYUP and event.key == pygame.K_2:
            text = "Good Morning! My name is Shadow. I am prototype of home asistant robot. I was programmed to listen to your commands."
            self.display_text(text)
            Speech().create_voice(text)
        # "3": announce and perform a short dance (each motor runs 1 second).
        if event.type == pygame.KEYUP and event.key == pygame.K_3:
            text = "Let's dance!"
            speech = Speech()
            speech.create_voice(text)
            self.display_text(text)
            PyMove().run_left_start()
            time.sleep(1)
            PyMove().run_left_stop()
            PyMove().run_right_start()
            time.sleep(1)
            PyMove().run_right_stop()
            PyMove().run_up_start()
            time.sleep(1)
            PyMove().run_up_stop()
            PyMove().run_down_start()
            time.sleep(1)
            PyMove().run_down_stop()
        # "4": play an R2D2 sound effect.
        if event.type == pygame.KEYDOWN and event.key == pygame.K_4:
            self.play_sound('sounds/Very_Excited_R2D2.mp3')
        # Arrow keys: run the corresponding motor while the key is held.
        if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
            PyMove().run_up_start()
        elif event.type == pygame.KEYUP and event.key == pygame.K_UP:
            PyMove().run_up_stop()
        if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
            PyMove().run_down_start()
        elif event.type == pygame.KEYUP and event.key == pygame.K_DOWN:
            PyMove().run_down_stop()
        if event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:
            PyMove().run_left_start()
        elif event.type == pygame.KEYUP and event.key == pygame.K_LEFT:
            PyMove().run_left_stop()
        if event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:
            PyMove().run_right_start()
        elif event.type == pygame.KEYUP and event.key == pygame.K_RIGHT:
            PyMove().run_right_stop()
def main():
    """Main loop of TEDiot: wait for the button, then act on spoken commands.

    Records audio while the button (GPIO 18) is pressed, runs speech-to-text,
    and either plays matching audio, updates the cloud via MQTT, or reports
    an unknown command.
    """
    ted = TEDrecord()
    speech = Speech()
    tedmqtt = TEDMQTT()
    print("TEDiot is starting...press the button to start talking...")
    while True:
        # Button on GPIO pin 18 appears to be active-low: pressed == False.
        input_state = GPIO.input(18)
        if input_state == False:
            print("Now TEDiot is listening you...")
            ted.startRecording()
            audioResult = speech.speechToText()
            # Ignore new commands while a playback is already in progress.
            if not (ted.isPlaying):
                # "play" keywords: start playback plus a watcher thread for the button.
                if any(word in audioResult for word in speech.keyWordsPlay):
                    ted.isPlaying = True
                    print("Starting button thread...")
                    fileLog(audioResult)
                    t1 = Thread(target=buttonThread, args=(ted, speech))
                    t1.start()
                    ted.playWAV(audioResult)
                    #t1 = Thread(target=ted.playWAV(audioResult))
                # "update" keywords: push the current state to the cloud over MQTT.
                elif any(word in audioResult for word in speech.keyWordsUpdate):
                    print("Updating the cloud...")
                    tedmqtt.update()
                else:
                    print("Command not found!")
def populateSpeech(self):
    """Build the Kodi directory listing for the speech (text-to-speech) menu.

    Adds the "speech input" and "save phrase" entries, a blank separator
    line, then one entry per saved phrase (each with a context-menu item to
    remove it), and finally closes the directory listing.
    """
    url = self._build_url({'mode': MenuNavigator.COMMAND_SPEECH_INPUT})
    li = xbmcgui.ListItem(__addon__.getLocalizedString(32200), iconImage=__icon__)
    li.addContextMenuItems([], replaceItems=True)  # Clear the Context Menu
    self._addPlayerToContextMenu(li)  # Add the Sonos player to the menu
    xbmcplugin.addDirectoryItem(handle=self.addon_handle, url=url, listitem=li, isFolder=False)

    url = self._build_url({'mode': MenuNavigator.COMMAND_SPEECH_SAVE})
    li = xbmcgui.ListItem(__addon__.getLocalizedString(32203), iconImage=__icon__)
    li.addContextMenuItems([], replaceItems=True)  # Clear the Context Menu
    self._addPlayerToContextMenu(li)  # Add the Sonos player to the menu
    xbmcplugin.addDirectoryItem(handle=self.addon_handle, url=url, listitem=li, isFolder=False)

    # Add a blank line before the filters
    li = xbmcgui.ListItem("", iconImage=__icon__)
    li.addContextMenuItems([], replaceItems=True)
    xbmcplugin.addDirectoryItem(handle=self.addon_handle, url="", listitem=li, isFolder=False)

    # Create the speech class (not going to call the Sonos system, so no need
    # for the device)
    speech = Speech()
    phrases = speech.loadSavedPhrases()
    del speech

    # Loop through all the phrases and add them to the screen
    for phrase in phrases:
        url = self._build_url({'mode': 'action', 'action': ActionManager.ACTION_SPEECH_SAY_PHRASE, 'itemId': phrase})
        li = xbmcgui.ListItem(phrase, iconImage=__icon__)
        # Add the remove button to the context menu
        cmd = self._build_url({'mode': 'action', 'action': ActionManager.ACTION_SPEECH_REMOVE_PHRASE, 'itemId': phrase})
        ctxtMenu = []
        ctxtMenu.append((__addon__.getLocalizedString(32204), 'RunPlugin(%s)' % cmd))
        li.addContextMenuItems(ctxtMenu, replaceItems=True)  # Clear the Context Menu
        xbmcplugin.addDirectoryItem(handle=self.addon_handle, url=url, listitem=li, isFolder=False)

    xbmcplugin.endOfDirectory(self.addon_handle)
def command_autopilot(self):
    """Announce and launch the autopilot background process.

    :return: True once the process has been started.
    """
    # Fixed typo in the spoken text ("Runing" -> "Running").
    text = 'Running autopilot!'
    speech = Speech()
    speech.create_voice(text)
    process = RaspieAutopilotProcess()
    process.start()
    return True
def __init__(self):
    """Initialize brain class of Sara.

    Loads the default configuration (overridden by ``user.json`` when
    present), then brings up speech output, localized phrases, help,
    database, assistant, backups, hotkeys and the speech recognizer.
    """
    self.log = logging.getLogger()
    self.log.info('initialize brain...')
    self.config = configs.default_obj()
    # A user.json next to the program overrides the shipped defaults.
    if os.path.exists('user.json'):
        self.config = configs.update(self.config, 'user.json')
    self.speech = Speech(self.config.speech)
    self.speech.speak("Load language: " + self.config.language.name[self.config.language.code])
    self.phrases = configs.language_obj(self.config.language.languages_dir, self.config.language.code)
    self.help = Help(self.config.language, self.phrases)
    self.speech.speak(self.phrases.general.start)
    # Shared mutable state consulted by the hotkey and recognizer threads.
    self.generals = {'quit': False, 'text': '', 'arg': None}
    self.bd = Databases(self.config.sqlite)
    self.assist = Assistant(self.speech, self.config, self.phrases, self.bd)
    # Simplified the redundant `True if ... else False` conditional; the
    # setting is stored as the string "true"/"false".
    if self.config.general.setup == "true":
        self.assist.setup()
    self.backups = Backups(self.config.backups, self.bd)
    self.hotkeys = Hotkeys(self.config.hotkeys, self.generals)
    self.recognizer = Recognizer(self.config.recognition, self.config.language.code, self.generals, self.speech, self.phrases.recognition)
def sayPhrase(self, phrase):
    """Speak the given phrase through the Sonos speaker, if one was found."""
    # Without a discovered Sonos device there is nothing to play on.
    if self.sonosDevice is None:
        return
    # Wrap the device in the speech helper and let it announce the text.
    speech = Speech(self.sonosDevice)
    speech.say(phrase)
    del speech
def __init__(self): self.commans = (Play(), Variable(), Speech(), Info()) #, Sinoptik()) self.db = DBConnector() # Очищаем список команд. Список не актуален. self.db.IUD("delete from core_execute") self.last_processed_ID = -1 self.db.commit() self.run()
def command_weatcher(self):
    """Fetch the weather forecast and read today's summary aloud.

    :return: True after the forecast has been spoken.
    """
    weather = Weather()
    # BUG FIX: the original called `weatcher.check_weather()` (an undefined
    # name, a guaranteed NameError), discarded the result, and then indexed
    # the Weather *object* instead of the returned forecast data.
    weather_data = weather.check_weather()
    text = weather_data['forecast']['txt_forecast']['forecastday'][0][
        'fcttext_metric']
    speech = Speech()
    speech.create_voice(text)
    return True
def __init__(self):
    """Wire up the kiosk components: speech output, message box and
    (optionally) a browser service."""
    # How long on-screen messages stay visible, from global config.
    self._messageDuration = Config.MESSAGE_DURATION
    # Speech assets are loaded from the local "audio/" directory.
    self._speech = Speech("audio/")
    # Set when video playback starts; None means nothing is playing yet.
    self._startPlayingVideoTime = None
    self._messageBox = MessageBox()
    # The browser service is only created when enabled in the config.
    if Config.ENABLE_BROWSER:
        print("Initing browser intent")
        self._browserService = BrowserService()
def main():
    """Record one utterance, run recognition and hand the result to manege."""
    manege.init()
    s = Speech()
    s.record()
    dat = s.get_result()
    # `is not None` replaces the unidiomatic `!= None`: identity comparison
    # is the correct way to test for "no recognition result".
    if dat is not None:
        print(dat)
        manege.main(dat)
def load_modules(self):
    """Instantiate all robot capability modules on the shared session.

    Each module wraps one capability (speech, motion, tablet, vision,
    waving detection, audio, recognition, system) and is bound to
    ``self.session``.
    """
    self._speech = Speech(self.session)
    self._motion = Motion(self.session)
    self._tablet = Tablet(self.session)
    # Face detection also receives a reference back to this owner object.
    self._face_detection = FaceDetection(self.session, self)
    self._wave_detection = WavingDetection(self.session)
    self._audio_player = AudioPlayer(self.session)
    self._speech_recognition = SpeechRecognition(self.session)
    self._system = System(self.session)
def test_intervals_overlapping_for_included_interval(self):
    """A word interval fully contained in the query interval is returned as-is."""
    word_intervals = [WordInterval(0, 14, "woooow")]
    speech = Speech(path_to_wav="",
                    interval=Interval(0, 15.5),
                    word_intervals=word_intervals)
    query = Interval(0, 14)
    self.assertEqual(intersecting_utterances(speech, query),
                     [Interval(0, 14)])
def __init__(self, files):
    """Load the given speech files into class-level corpus state.

    :param files: fileids (relative to ./data/speeches) to include.
    """
    # NOTE(review): everything below is assigned on the *class*
    # (Corpus.attr), not the instance — all instances share this state;
    # confirm that is intended.
    Corpus.corpus = PlaintextCorpusReader('./data/speeches', files)
    Corpus.speech = Speech(self.corpus.raw(), self.corpus.words(), self.corpus.sents(), self.corpus.paras(), None, None, None, None)
    Corpus.speeches = None
    # File names are "<year>.txt"; keep the years as ints.
    Corpus.years = [
        int(year.split('.')[0]) for year in self.corpus.fileids()
    ]
    Corpus.complementary = None
def __init__(self, transition_model, observation_model=None, states=None):
    """Set up the filter's models and its initial belief distribution.

    :param transition_model: square state-transition matrix; its first
        dimension defines the number of states.
    :param observation_model: optional observation model.
    :param states: optional initial belief vector; when omitted the belief
        starts uniform over all states.
    """
    self.transition_model = transition_model
    self.observation_model = observation_model
    self.states_number = transition_model.shape[0]
    if states is not None:
        self.belief = states
    else:
        # No prior supplied: start from a uniform distribution.
        self.belief = np.zeros((self.states_number, 1))
        self.belief.fill(1. / self.states_number)
    self.speech = Speech()
def __init__(self):
    """Wire up NLG, speech and actions, and ensure the unknown-commands log exists."""
    self.nlg = NLG(user_name=self.my_name)
    self.speech = Speech(launch_phrase=self.launch_phrase,
                         debugger_enabled=self.debugger_enabled)
    self.actions = Actions(self.location)
    # Create the CSV used to log unrecognized commands (header only) the
    # first time the assistant runs.  `not os.path.isfile(...)` replaces the
    # unidiomatic `== False` comparison.
    if not os.path.isfile('unknown_commands.csv'):
        with open('unknown_commands.csv', 'w') as csvfile:
            writer = csv.DictWriter(csvfile,
                                    fieldnames=self.unknown_fieldnames)
            writer.writeheader()
def get_audio():
    """Flask view: listen for a spoken filter name and switch the camera filter.

    Side effects: rebinds the module-level ``choosenfilter`` and
    ``filtervideostream`` globals used by the video streaming route.
    """
    # Stores what user said in var
    var = Speech().audio()
    # Var goes into find filter function and returns the found filter into choosenfilter
    global choosenfilter
    choosenfilter = Filter().find_filter(var)
    user_filter = Filter().get_name()
    # Recreate the camera stream with the newly selected filter.
    global filtervideostream
    filtervideostream = FilterCamera(choosenfilter)
    curr_string = "You are currently using " + str(user_filter) + "!"
    return render_template('filters.html', message=curr_string)
def speech_window(self):
    """Open the speech tool in a new top-level window, hiding the main one."""
    from speech import Speech
    # BUG FIX: the window was stored as `self.speech_window`, which shadowed
    # this very method on the instance — a second invocation would try to
    # *call* the Toplevel widget.  Store the window under a separate
    # attribute instead.  (If other code reads `self.speech_window` as an
    # attribute, update those references too.)
    speech_win = tk.Toplevel()
    self.speech_win = speech_win
    main_window.withdraw()
    new_window = Window()
    new_window.center_window(speech_win, 900, 690)
    self.app = Speech(speech_win)
    speech_win.protocol(
        "WM_DELETE_WINDOW",
        lambda: self.iconify_main_panel(speech_win))
def __init__(self):
    """Set up the dictionary assistant.

    The project's Speech module handles all voice I/O and PyDictionary is
    the word-lookup backend; the Universal, Meaning, Synonym and Antonym
    helpers all share the same Speech instance.
    """
    self.speech = Speech()
    self.dictionary = PyDictionary()
    self.universal = Universal(self.speech)
    self.meaning = Meaning(self.speech)
    self.synonym = Synonym(self.speech)
    self.antonym = Antonym(self.speech)
def __init__(self, name=None, speech_input=False, facebook_input=False):
    """Initialize the assistant's I/O channels and external API clients.

    :param name: optional display name for the assistant.
    :param speech_input: when True, input comes from speech recognition.
    :param facebook_input: when True, responses are collected in a list
        (``self.facebook_response``) instead of being delivered directly.
    """
    self.phrases = Phrases()
    self.speech = Speech()
    self.knowledge = Knowledge(weather_api_token=weather_api_token)
    self.name = name
    self.facebook_input = facebook_input
    if self.facebook_input:
        # Responses destined for Facebook are accumulated here.
        self.facebook_response = list()
    self.speech_input = speech_input
    # SECURITY NOTE(review): API credentials are hard-coded below; they
    # should be moved to configuration/environment variables and the
    # exposed keys revoked.
    self.witai = Wit("S73IKQDWJ22OJMOSD6AOT4CSJOWXIPX6")
    self.fs = Fatsecret("90fe184a283449ed8a83e35790c04d65", "054e80b2be154337af191be2c9e11c28")
    self.translator = Translator()
def test_intervals_overlapping_for_merged_non_silent_intervals(self):
    """Adjacent non-silent words ("#" counts as a word here) merge into one utterance."""
    words = [
        WordInterval(0.0, 14.0, "#"),
        WordInterval(14.0, 14.5, "hi"),
        WordInterval(14.5, 15.5, "frank"),
    ]
    speech = Speech(path_to_wav="",
                    interval=Interval(0, 15.5),
                    word_intervals=words)
    result = hybrid_intersecting_utterances(speech, Interval(13.0, 15.0))
    self.assertItemsEqual(result, [Interval(14.0, 15.5)])
def dispatch(self):
    """Validate the request params, run text-to-speech and render a response.

    Requires "id", "type" and "text" in ``self.params``; returns a 400
    render when any is missing or when speech synthesis fails.
    """
    # `key not in dict` replaces the long-deprecated dict.has_key().
    required = ("id", "type", "text")
    if any(key not in self.params for key in required):
        return Render.render_400()
    try:
        pa = self.params
        speech_obj = Speech(pa["id"], pa["text"], pa["lan"], pa["type"])
        speech_obj.speak()
        # response back to client
        response = Render(speech_obj)
        return response.render()
    except Exception as e:
        # `except ... as e` replaces the Python-2-only comma syntax
        # (works on Python 2.6+ and 3).
        return Render.render_400(e)
def test_intervals_overlapping_for_merged_non_silent_intervals(self):
    """Leading silence is skipped; the adjacent spoken words merge into one utterance."""
    words = [
        WordInterval(0, 14.0, WordInterval.SILENCE_WORD),
        WordInterval(14.0, 14.5, "hi"),
        WordInterval(14.5, 15.5, "frank"),
    ]
    speech = Speech(path_to_wav="",
                    interval=Interval(0, 15.5),
                    word_intervals=words)
    query = Interval(13.0, 16.0)
    self.assertItemsEqual(intersecting_utterances(speech, query),
                          [Interval(14.0, 15.5)])
def __init__(self):
    """Initialize running class.

    Reads settings.ini and the pickled phrase set, starts speech output and
    pygame, builds the board/player, and resumes from autosave.sav when one
    exists.
    """
    # Thresholds used elsewhere in the class — meaning not evident here;
    # TODO confirm what LOW/MIDLE bound.
    self.__LOW = 33
    self.__MIDLE = 27
    self.config = ConfigParser()
    self.config.read('settings.ini')
    self.size_x = self.config.getint('screen', 'size_x')
    self.size_y = self.config.getint('screen', 'size_y')
    self.difficult = self.config.get('board', 'difficult')
    # Load the phrase set for the configured UI language.
    with open('languages.dat', 'rb') as lang_file:
        self.phrases = pickle.load(lang_file)[self.config.get(
            'total', 'language')]
    self.speech = Speech(self.config)
    self.speech.speak(self.phrases['start'])
    pygame.init()
    pygame.font.init()
    self.screen = pygame.display.set_mode((self.size_x, self.size_y))
    pygame.display.set_caption('Sudoku')
    self.board = Board(self.config, self.screen)
    self.player = Player(self.board, self.speech, self.phrases)
    self.game_over = True
    self.win = False
    # Map both the number-row (K_0..K_9) and keypad (K_KP0..K_KP9) key
    # names to their digit values.
    self.handle_numbers = {'K_' + str(num): num for num in range(10)}
    self.handle_numbers.update(
        {'K_KP' + str(num): num for num in range(10)})
    self.fontObj = pygame.font.SysFont('arial', 50)
    self.clock = pygame.time.Clock()
    self.gen = Generator()
    self.new_game()
    # Resume from the autosave if one exists; a missing file is expected
    # on a fresh start and deliberately ignored.
    try:
        save_file = open('autosave.sav', 'rb')
    except IOError as e:
        pass
    else:
        with save_file:
            data = pickle.load(save_file)
            self.grid = data['grid']
            self.origin = data['origin']
            self.board.cells = data['cells']
            # Re-render every non-empty cell restored from the save.
            for cell in self.board.cells:
                if 0 != cell.status:
                    cell.set_text()
            self.speech.speak(self.phrases['load'])
            self.player.speak()
def __init__(self, config_provider, disk, display, replay):
    """Store collaborators and start the camera and speech-recognition front-end.

    :param config_provider: source of runtime configuration.
    :param disk: persistence backend.
    :param display: output display.
    :param replay: replay source/sink.
    """
    self.config_provider = config_provider
    self.disk = disk
    self.display = display
    self.replay = replay
    # cameras
    self.cameras = self._create_cameras()
    self.camera_number = 0
    # speech recognition — starts listening immediately
    self.speech = Speech()
    self.speech.start()
def __init__(self, filename):
    """Load configuration and set up the recognizer, microphone and clients.

    :param filename: path to the YAML configuration file.
    """
    # BUG FIX: the config file handle was never closed and yaml.load() was
    # called without an explicit Loader (unsafe, and an error on modern
    # PyYAML).  safe_load assumes the config uses plain YAML — confirm no
    # custom tags are required.
    with open(filename) as conf_file:
        self._conf = yaml.safe_load(conf_file)
    self._rec = sr.Recognizer()
    self._micro = sr.Microphone(sample_rate=16000)
    # Calibrate the recognizer's energy threshold against background noise.
    with self._micro as source:
        self._rec.adjust_for_ambient_noise(source)
    self._dcs = DcsClient(self._conf['dcs'])
    self._player = PlayerManager()
    self._speech = Speech(self._conf['aip'])
def listen_commands(self): text = "How I can help You." speech = Speech() speech.create_voice(text) r = SpeechRecognizer() command = (r.recognize()).lower() print 'Recognized command: {0}'.format(command) if 'weatcher' in command: self.command_weatcher() if 'autopilot' in command: self.command_autopilot() if 'dance' in command: self.command_dance() if 'exterminate' in command: self.command_exterminate()
def __init__(self, files):
    """Build the corpus from the given speech files (or all files when empty).

    :param files: fileids under ./data/speeches, or a falsy value to load
        every file.
    """
    pattern = files if files else '.*'
    self.corpus = PlaintextCorpusReader('./data/speeches', pattern)
    self.speech = Speech(self.corpus.raw(), self.corpus.words(),
                         self.corpus.sents(), self.corpus.paras(),
                         None, None, None, None)
    self.speeches = build_speeches_dict(self.corpus)
    # File names are "<year>.txt"; keep the years as ints.
    self.years = [int(year.split('.')[0])
                  for year in self.corpus.fileids()]
    # Years present on disk but excluded from this corpus selection.
    complementary_years = list(
        set(os.listdir("./data/speeches")) -
        set([str(years) + '.txt' for years in self.years]))
    if not files:
        self.complementary = None
        self.unique_words = None
    else:
        self.complementary = ComplementaryCorpus(complementary_years)
        # PERF FIX: membership test against a set is O(1) per token instead
        # of scanning the complementary token list for every word.
        complementary_tokens = set(self.complementary.speech.tokens)
        self.unique_words = [word for word in self.speech.tokens
                             if word not in complementary_tokens]
def get_schedule_info(self):
    """Ask the user for day, hour and reminder text, then store the task."""
    questions = [
        "Please, tell me the day?",
        "now, the exactly hour",
        "what would you like to remember?"
    ]
    # BUG FIX: the original wrapped the loop in a pointless `while True:`
    # with an unconditional break, and indexed `questions[i]` instead of
    # using the loop variable.
    schedule_data = []
    for question in questions:
        Text_To_Speech().translate_and_play(question)
        schedule_data.append(str(Speech().get_speech()))
    self.set_new_task(schedule_data)