def playHand(hand, wordList, n): score=0 s=0 while calculateHandlen(hand)>0: print 'Current Hand: ', displayHand(hand) print 'Enter word, or a "." to indicate that you are finished: ' say1=('Enter word, or a dot to indicate that you are finished: ') #print (say1) engine=pyttsx.init() engine.say(say1) engine.runAndWait() engine.stop() del engine word=raw_input() if word=='.': break else: if isValidWord(word, hand, wordList)==False: say1=('Invalid word, please try again.') print (say1) engine=pyttsx.init() engine.say(say1) engine.runAndWait() engine.stop() del engine else: s=getWordScore(word, n) score+=s hand=updateHand(hand, word) say1=(word+' earned '+(str)(s)+' points. Total: '+str(score)+' points') print (say1) engine=pyttsx.init() engine.say(say1) engine.runAndWait() engine.stop() del engine if calculateHandlen(hand)==0: break if word=='.': say1=('Goodbye! Total score: '+str(score)+' points.') print (say1) engine=pyttsx.init() engine.say(say1) engine.runAndWait() engine.stop() del engine time.sleep(0.5) say1=('Press r to replay the hand and then c to compare your score with the computer') print (say1) engine=pyttsx.init() engine.say(say1) engine.runAndWait() engine.stop() del engine time.sleep(0.5) else: print 'Run out of letters. Total score: '+str(score)+' points.'
def sayText(text):
    """Speak *text* aloud; on any failure speak an error prompt instead.

    The fallback message is used when pyttsx cannot render the input
    (e.g. OCR produced garbage characters).
    """
    try:
        engine = pyttsx.init()
        engine.setProperty('rate', 150)
        engine.say(text)
        engine.runAndWait()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any engine/driver error triggers the fallback.
        engine = pyttsx.init()
        engine.setProperty('rate', 150)
        engine.say("The image contains garbage text. Please upload another image.")
        engine.runAndWait()
def wordc_voice():
    """Speak the dictionary definition of the looked-up word.

    Reads the module-level globals `word_new` (the word) and `val33`
    (its meaning) — TODO confirm both are set before this is called.
    """
    engine = pyttsx.init()
    engine.setProperty('rate', 180)
    # Dropped the unused `voices = engine.getProperty('voices')` lookup.
    engine.say("The meaning of the word " + word_new + " is")
    engine.say(val33)
    engine.runAndWait()
def iepc():
    """Launch PyCharm Community Edition and announce the launch aloud."""
    os.startfile(r"C:\Program Files (x86)\JetBrains\PyCharm Community Edition 4.5.3\bin\pycharm.exe")
    engine = pyttsx.init()
    engine.setProperty('rate', 180)
    # Dropped the unused `voices` lookup; fixed the "JetBeans" typo
    # in the spoken announcement.
    engine.say("Starting JetBrains in a moment")
    engine.runAndWait()
def steamc():
    """Launch the Steam client and announce the launch aloud."""
    # Raw string: Windows paths with backslashes are fragile in ordinary
    # string literals (any segment starting with a valid escape breaks).
    os.startfile(r"C:\Program Files (x86)\Steam\Steam.exe")
    engine = pyttsx.init()
    engine.setProperty('rate', 180)
    # Dropped the unused `voices = engine.getProperty('voices')` lookup.
    engine.say("Starting Steam in a moment")
    engine.runAndWait()
def chromec():
    """Launch Google Chrome and announce the launch aloud."""
    # Raw string for the Windows path (see steamc for rationale).
    os.startfile(r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe")
    engine = pyttsx.init()
    engine.setProperty('rate', 180)
    # Dropped the unused `voices = engine.getProperty('voices')` lookup.
    engine.say("Starting Google Chrome in a moment")
    engine.runAndWait()
def speak(str):
    """Print the given text, then read it aloud slightly above default rate.

    NOTE: the parameter name shadows the builtin `str`; kept unchanged
    for interface compatibility with existing callers.
    """
    print(str)
    tts_engine = pyttsx.init()
    current_rate = tts_engine.getProperty('rate')
    tts_engine.setProperty('rate', current_rate + 0.5)
    tts_engine.say(str)
    tts_engine.runAndWait()
def main():
    """Speak the first command-line argument; do nothing if none given."""
    if len(sys.argv) < 2:
        return
    message = sys.argv[1]
    engine = pyttsx.init()
    engine.say(message)
    engine.runAndWait()
def sap(thistext):
    """Speak *thistext* 50 wpm slower than the default rate, then print it."""
    voice = pyttsx.init()
    voice.setProperty('rate', voice.getProperty('rate') - 50)
    voice.say(thistext)
    print(thistext)
    voice.runAndWait()
def speak_ICAO(text):
    """Spell *text* out loud using the phonetic words produced by make_msg."""
    engine = pyttsx.init()
    engine.setProperty('rate', 120)
    for phonetic_word in make_msg(text):
        engine.say(phonetic_word)
        engine.runAndWait()
def say_pyttsx(self, txt): txt = random.choice(txt) print 'HAL >>> ' + txt speech = pyttsx.init() speech.say(txt) speech.runAndWait() del speech
def __init__(self):
    """Set up pygame, audio, the TTS engine, walls and game state, then
    speak the introduction for this audio-driven Pacman-style game."""
    pygame.init()
    # Small mixer buffer keeps sound latency low — the game is played by ear.
    pygame.mixer.init(buffer=1024)
    pygame.mixer.set_num_channels(13)
    pygame.mixer.set_reserved(13)
    # Text-to-speech engine, reused for every spoken message.
    self.engine = pyttsx.init()
    # Sound effects for game events.
    self.eating_sound = pygame.mixer.Sound("sounds/eat.wav")
    self.hit_sound = pygame.mixer.Sound("sounds/ghost_hit.wav")
    self.bump_sound = pygame.mixer.Sound("sounds/bump.wav")
    # Channel 10 is dedicated to one-off sounds, full volume on both sides.
    self.misc_sound_channel = pygame.mixer.Channel(10)
    self.misc_sound_channel.set_volume(1.0, 1.0)
    self.screen = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
    self.font = pygame.font.SysFont(pygame.font.get_default_font(), 72)
    self.key_held_down = False
    self.pacman_visible = True
    # Playfield border rectangles (WIDTH/HEIGHT/WALL_THICKNESS are module constants).
    self.leftWall = pygame.Rect(0, 0, WALL_THICKNESS, HEIGHT)
    self.topWall = pygame.Rect(0 + WALL_THICKNESS, 0, WIDTH - WALL_THICKNESS, WALL_THICKNESS)
    self.rightWall = pygame.Rect(WIDTH - WALL_THICKNESS, 0 + WALL_THICKNESS, WALL_THICKNESS, HEIGHT - WALL_THICKNESS)
    self.bottomWall = pygame.Rect(0 + WALL_THICKNESS, HEIGHT - WALL_THICKNESS, WIDTH - 2 * WALL_THICKNESS, WALL_THICKNESS)
    # Initial game state.
    self.level = 1
    self.score = 0
    self.lives = 4
    # Spoken introduction, played before the game loop starts.
    self.engine.say('Welcome to Fruit Man! You are wandering in a field and very hungry.')
    self.engine.say('Try to collect all of the fruits using the sounds in your ears, but')
    self.engine.say('don\'t hit the ghosts! They want to eat you as well. Good luck!')
    self.engine.runAndWait()
    self.gameRunning = True
    self.gameJustEnded = False
def talk(text):  # Talk to the user
    """Read *text* aloud to the user slowly and at full volume."""
    engine = pyttsx.init()
    engine.setProperty('rate', 100)
    # pyttsx 'volume' is a float in [0.0, 1.0]; the original passed 100,
    # which drivers clamp to maximum — make the intent explicit.
    engine.setProperty('volume', 1.0)
    engine.say(text)
    engine.runAndWait()
def TTS(string):
    """Read the given word aloud.  (Docstring translated from Korean.)"""
    speech = pyttsx.init()
    speech.say(string)
    speech.runAndWait()
def gest_callback(self, gesture):
    """Handle an incoming gesture message (payload in gesture.data):
    beep while a new gesture is being confirmed, then speak its name."""
    # if different gesture is being detected, begin (reset) the timer
    if gesture.data != self.current_gesture:
        self.last_message_time = self.millis()
    # Calculate time elapsed.
    # NOTE(review): computed AFTER the reset above, so delta_time is ~0
    # whenever the gesture just changed — confirm that is intended.
    delta_time = self.millis() - self.last_message_time
    # if no gesture is detected or an old gesture is detected, cut music and update current gesture
    if gesture.data == '' or (self.previous_gesture == gesture.data):
        self.current_gesture = ""
        pygame.mixer.music.stop()
    # Else if time has not elapsed and a new gesture is detected, play music. Also, update current gesture
    elif (delta_time < self.match_time) and (self.previous_gesture != gesture.data):
        if not pygame.mixer.music.get_busy():
            pygame.mixer.music.load(self.audio_path)
            pygame.mixer.music.play()
        self.current_gesture = gesture.data
    # Else if time has elapsed and we have a new gesture, stop beeping, play voice, and update gestures and time.
    elif (gesture.data != self.previous_gesture) and (delta_time >= self.match_time):
        engine = pyttsx.init('espeak')
        engine.setProperty('rate', 100)
        while engine.isBusy():
            pygame.mixer.music.stop()
        # NOTE(review): pyttsx say()'s second argument is an utterance
        # *name*, not an encoding — "utf-8" here is likely a mistake.
        engine.say(unicode(str(gesture.data)), "utf-8")
        engine.runAndWait()
        self.previous_gesture = gesture.data
        # NOTE(review): self-assignment is a no-op; probably meant
        # `self.last_message_time = self.millis()` — confirm.
        self.last_message_time = self.last_message_time
def main(speech):
    """Speak *speech* using the 'en-wims' voice at 150 words per minute."""
    tts = pyttsx.init()
    tts.setProperty('rate', 150)
    tts.setProperty('voice', 'en-wims')
    tts.say(speech)
    tts.runAndWait()
def tts(self, text): print '* tts:', text engine = pyttsx.init() engine.setProperty('voice', self.tts_voice_id) engine.setProperty('rate', self.tts_voice_rate) engine.say(text) engine.runAndWait()
def __init__(self, language = 'fr', rate = 120):
    """Initialize the speech engine.  (Docs translated from French.)

    language -- voice identifier, e.g. 'fr'
    rate     -- speech rate in words per minute
    """
    engine = pyttsx.init()
    engine.setProperty('rate', rate)
    engine.setProperty('voice', language)
    self.engine = engine
def __init__(self, language="en-US", name=""):
    """Set up the TTS engine and a tuned speech recognizer.

    NOTE(review): `sr.Recognizer(language)` is an old speech_recognition
    API — newer versions take the language at recognize time; confirm
    the pinned library version.
    """
    self.engine = tts.init()
    self.language = language
    self.recognizer = sr.Recognizer(language)
    # Shorter pause threshold -> snappier end-of-phrase detection;
    # raised energy threshold ignores quiet background noise.
    self.recognizer.pause_threshold = 0.5
    self.recognizer.energy_threshold = 2500
    self.name = name
def feedback():
    """Confirm aloud that the message was sent, using a Scottish voice."""
    engine = pyttsx.init()
    engine.setProperty("rate", 110)
    # Dropped the unused `voices = engine.getProperty("voices")` call;
    # the voice is selected directly by name.
    engine.setProperty("voice", "english-scottish")
    engine.say("Message Sent")
    engine.runAndWait()
def __init__(self):
    """Create a pyttsx engine and snapshot its current speech properties."""
    import pyttsx
    self.engine = pyttsx.init()
    # Cache the engine's optional properties for later inspection/use.
    self.rate = self.engine.getProperty('rate')
    self.voices = self.engine.getProperty('voices')
    self.volume = self.engine.getProperty('volume')
def __init__(self, phi):
    """Worker thread owning a slowed-down voice engine and a stop event."""
    threading.Thread.__init__(self)
    self.voice_engine = pyttsx.init()
    slower_rate = self.voice_engine.getProperty('rate') - 50
    self.voice_engine.setProperty('rate', slower_rate)
    self._stopevent = threading.Event()
    self.phi = phi
def say_something():
    """Smoke-test helper: speak one hard-coded French sentence."""
    voice = pyttsx.init()
    voice.say('Franchement, Precise Sad se ne vaut pas une bonne choucroute')
    voice.runAndWait()
def main(): # use sys.argv if needed print 'running speech-test.py...' engine = pyttsx.init() str = "I speak. Therefore. I am. " engine.say(str) engine.runAndWait()
def speak(self, sentence):
    """Speak *sentence* (a string, or a list of strings joined by spaces).

    Raises VoiceException for any other input type.  The utterance is
    serialized under the instance voice lock; the engine is created and
    torn down per call.
    """
    import pyttsx
    with self._voice_lock:
        engine = pyttsx.init()
        if isinstance(sentence, list):
            sentence = " ".join(sentence)
        elif not isinstance(sentence, str):
            raise VoiceException("Wrong sentence type: " + str(type(sentence)))
        engine.setProperty('volume', self._volume)
        engine.setProperty('rate', self._rate)
        engine.say(sentence)
        engine.runAndWait()
        engine.stop()
        del engine
def ReadTheStringAloud(voiceName,voiceRate,speechString): ''' Says speechString, using the default voice or voiceName voice Positive voiceRate is faster; Negative voiceRate is slower ''' try: import pyttsx engine = pyttsx.init() except ImportError: print 'I did not find the pyttsx text to speech resources!' print str(err) usage() sys.exit(2) try: engine.setProperty('rate', engine.getProperty('rate')+int(voiceRate)) except Error: print ('I did not understand the rate!') usage() sys.exit(2) voices = engine.getProperty('voices') for voice in voices: if voice.name == voiceName: engine.setProperty('voice', voice.id) break engine.say(speechString) engine.runAndWait()
def Speak(phrase):
    """Read *phrase* aloud at 175 words per minute."""
    engine = pyttsx.init()
    # Dropped the unused `voices = engine.getProperty("voices")` lookup.
    engine.setProperty("rate", 175)
    # NOTE(review): pyttsx expects a voice *id* string here; the integer 2
    # likely has no effect — perhaps voices[2].id was intended. Confirm.
    engine.setProperty("voice", 2)
    engine.say(phrase)
    engine.runAndWait()
def launch(self):
    """Read this phenomenon's speech (self.discours) aloud at 150 wpm.
    (Comments translated from French.)"""
    reader = pyttsx.init()
    reader.setProperty("rate", 150)
    reader.say(self.discours)
    reader.runAndWait()
def speakToMe(what, **justwith):
    """Speak *what*, optionally tuning the voice via keyword options.

    Supported keys in justwith:
      "speed":  "slow" | "medium" | "fast"
      "faster": int (1-10), added to the current rate
      "volume": "low" | "medium" | "loud"
      "louder": float (0.01-0.1), added to the current volume
    """
    defspeed = {"slow": 0.01, "medium": 80, "fast": 210}
    defvolume = {"low": 0.3, "medium": 0.5, "loud": 1}
    engine = pyttsx.init("espeak")
    engine.setProperty('languages', 'zh')
    # Bug fix: **justwith is always a dict (never None), so the original
    # `justwith == None` branch was dead; and indexing a missing key
    # raised KeyError — use .get() so every option is truly optional.
    if justwith.get('speed') is not None:
        engine.setProperty('rate', defspeed[justwith['speed']])
    if justwith.get('volume') is not None:
        engine.setProperty('volume', defvolume[justwith['volume']])
    if justwith.get('faster') is not None and isinstance(justwith['faster'], int):
        engine.setProperty('rate', engine.getProperty('rate') + justwith['faster'])
    if justwith.get('louder') is not None and isinstance(justwith['louder'], float):
        engine.setProperty('volume', engine.getProperty('volume') + justwith['louder'])
    engine.say(what)
    engine.runAndWait()
def play_report():
    """Morning report: speak a greeting, the weather and a quote while
    stepping three Hue bulbs through colors.  Returns True if aborted via
    the GLOBAL_STOP flag, False after a complete run."""
    # COM must be initialized on this thread before pyttsx (SAPI) is used.
    pythoncom.CoInitialize()
    engine = pyttsx.init()
    rate = engine.getProperty('rate')
    engine.setProperty('rate', rate - 10)  # slightly slower than default
    time.sleep(2)
    if GLOBAL_STOP:
        return True
    engine.say('Good Morning... Uhh run... I hope you had a good sleep')
    engine.runAndWait()
    # Wake-up blue across the three bulbs.
    HueCommand(1, [BulbState(1, {'on': True, 'bri': 200, 'sat': 200, 'hue': 46920, 'transitiontime': 20}),
                   BulbState(2, {'on': True, 'bri': 200, 'sat': 200, 'hue': 46920, 'transitiontime': 20}),
                   BulbState(3, {'on': True, 'bri': 200, 'sat': 200, 'hue': 46920, 'transitiontime': 20})]).execute()
    if GLOBAL_STOP:
        return True
    engine.say(weather())
    engine.runAndWait()
    # Shift hue while the quote plays.
    HueCommand(1, [BulbState(1, {'on': True, 'bri': 200, 'sat': 175, 'hue': 25000, 'transitiontime': 20}),
                   BulbState(2, {'on': True, 'bri': 200, 'sat': 175, 'hue': 25000, 'transitiontime': 20}),
                   BulbState(3, {'on': True, 'bri': 200, 'sat': 175, 'hue': 25000, 'transitiontime': 20})]).execute()
    if GLOBAL_STOP:
        return True
    engine.say(quote())
    engine.runAndWait()
    if GLOBAL_STOP:
        return True
    # Deactivate: final goodbye with a slow color fade.
    engine.say("I hope you have a good day... Uhh run")
    HueCommand(1, [BulbState(1, {'on': True, 'bri': 200, 'sat': 175, 'hue': 12750, 'transitiontime': 50}),
                   BulbState(2, {'on': True, 'bri': 200, 'sat': 175, 'hue': 12750, 'transitiontime': 50}),
                   BulbState(3, {'on': True, 'bri': 200, 'sat': 175, 'hue': 12750, 'transitiontime': 50})]).execute()
    engine.runAndWait()
    return False
def say(tosay):
    """Speak *tosay* with the US English voice at 120 wpm."""
    speaker = speak.init()
    speaker.setProperty('rate', 120)
    speaker.setProperty('voice', 'english-us')
    speaker.say(tosay)
    # runAndWait() returns None; dropped the pointless `a =` binding.
    speaker.runAndWait()
def process(find_meaning=False):
    """Locate the orange pointer cue in image.jpg, crop the word it points
    at, OCR it, optionally look up its meaning, and read the result aloud.

    find_meaning -- when True, query the Pearson dictionary API.
    Side effects: writes debug images step0..step3.jpg and Word.jpg.
    Depends on module globals orangeLower/orangeUpper and helpers
    clean()/spell().
    """
    frame = cv2.imread('image.jpg')
    frame = imutils.resize(frame, width=2000)
    img = imutils.resize(frame, width=2000)
    # HSV and threshold: isolate the orange pointer cue.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, orangeLower, orangeUpper)
    cv2.imwrite("step0.jpg", mask)
    # Erosion and dilation clean up the mask.
    # Make this dynamic?
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=5)
    # Detect cue contour.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    x_main = 0
    y_main = 0
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if radius > 5:
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            Ys = [i[1] for i in box]
            # Top edge of the cue box; candidate words must sit above it.
            y_main = min(Ys)
            cv2.drawContours(frame, [box], 0, (0, 255, 0), 2)
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
    # Word detection starts here.
    image = cv2.medianBlur(img, 7)
    th = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    th1 = cv2.adaptiveThreshold(th, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY_INV, 11, 2)
    cv2.imwrite("step1.jpg", th1)
    # Erode and dilate so each word merges into one blob.
    th1 = cv2.erode(th1, None, iterations=1)
    th1 = cv2.dilate(th1, None, iterations=6)
    cv2.imwrite('step2.jpg', th1)
    im2, cnts, hierarchy = cv2.findContours(th1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Getting the desired word out of all contours: the blob above the cue
    # whose centroid is closest to the cue center (x, y).
    word_x = 0
    word_y = 0
    dist = 99999999
    fin_label = -1
    final_cnt = None
    for c in cnts:
        # Skip blobs far too large or too small to be a word.
        if cv2.contourArea(c) > 100000 or cv2.contourArea(c) < 500:
            continue
        M = cv2.moments(c)
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(frame, [box], 0, (0, 255, 0), 2)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        if (center[1] < y_main and (x - center[0])**2 + (y - center[1])**2 < dist):
            #print center[0], center[1]
            dist = (x - center[0])**2 + (y - center[1])**2
            final_cnt = c
    # This is the bounding rect on image.
    rect = cv2.minAreaRect(final_cnt)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
    frame = imutils.resize(frame, width=600)
    cv2.imwrite('step3.jpg', frame)
    # Extracting and saving the word here: deskew via the rotated rect.
    W = rect[1][0]
    H = rect[1][1]
    Xs = [i[0] for i in box]
    Ys = [i[1] for i in box]
    x1 = min(Xs)
    x2 = max(Xs)
    y1 = min(Ys)
    y2 = max(Ys)
    angle = rect[2]
    if angle < -45:
        angle += 90
    # Center of rectangle in source image
    center = ((x1 + x2) / 2, (y1 + y2) / 2)
    # Size of the upright rectangle bounding the rotated rectangle
    size = (x2 - x1, y2 - y1)
    M = cv2.getRotationMatrix2D((size[0] / 2, size[1] / 2), angle, 1.0)
    # Cropped upright rectangle
    cropped = cv2.getRectSubPix(img, size, center)
    cropped = cv2.warpAffine(cropped, M, size)
    croppedW = H if H > W else W
    croppedH = H if H < W else W
    # Final cropped & rotated rectangle
    croppedRotated = cv2.getRectSubPix(cropped, (int(croppedW), int(croppedH)),
                                       (size[0] / 2, size[1] / 2))
    cv2.imwrite("Word.jpg", croppedRotated)
    # Using OCR here (-psm 8: treat the image as a single word).
    word = pytesseract.image_to_string(Image.open('Word.jpg'), config='-psm 8')
    word = clean(word)
    print "Detected :", word
    print "Corrected :", spell(word)
    word = spell(word)
    # Using dictionary API here.
    meaning = ""
    try:
        if find_meaning:
            r = requests.get(
                "http://api.pearson.com/v2/dictionaries/ldoce5/entries?headword=" + word)
            result = json.loads(r.text)
            meaning = result["results"][0]["senses"][0]["definition"][0]
    # NOTE(review): bare except hides network/JSON/schema errors alike.
    except:
        meaning = "Trouble getting meaning"
    print meaning + "\n\n"
    # Read aloud here.
    engine = pyttsx.init()
    engine.say("Word. " + word + ". Meaning. " + meaning)
    engine.runAndWait()
winName = "Movement Indicator" cv2.namedWindow(winName, cv2.WINDOW_NORMAL) currentworkingdirectory = str(os.getcwd()).replace("\\", "\\") # Read three images first: frame = cam.read()[1] gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) gray = cv2.GaussianBlur(gray, (21, 21), 0) firstFrame = gray #speech_engine = pyttsx.init('sapi5') #speech_engine.setProperty('rate', 150) speech_engine = pyttsx.init() speech_engine.setProperty('rate', 160) def speak(text): speech_engine.say(text) speech_engine.runAndWait() recognizer = speech_recognition.Recognizer() def listen(): zresult = "" with speech_recognition.Microphone() as source: recognizer.adjust_for_ambient_noise(source)
# Assistant configuration and spoken-output helpers.
username = "******"
mode = 0
# Human-readable names for each mode index (see table below).
mode_print = [
    'general', 'web browser', 'turbo c', 'python', 'smart', 'ultimate',
    'web development'
]
#--------------------------Defined modes-----------------------------
# 0 = General ( Default )
# 1 = Webbrowser ( easy Internet access)
# 2 = programming mode ( turbo c)
# 3 = python mode ( IDLE )
# 4 = smart mode ( command run )
# 5 = ultimate mode ( Assistant mode )
# 6 = webdevelopment ( brackets editor )
#--------------------------TALKING FUNCTION--------------------------
# Offline voice: a single shared pyttsx engine instance.
rey1 = pyttsx.init()


def engine2(text):
    # Speak *text* through the local pyttsx engine (offline path).
    rey1.say(text)
    rey1.runAndWait()


# phase 2
mixer.init()


def engine(text):
    # Speak *text* via Google TTS: render to mp3, then play with mpg321
    # (online path; needs network access and the mpg321 binary).
    tts = gTTS(text, lang='en')
    tts.save("response.mp3")
    os.system("mpg321 response.mp3")
def __init__(self, command):
    """Create the TTS engine, store *command*, and dispatch it at once."""
    self.engine = pyttsx.init()
    self.command = command
    self.command_analyzer()
facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>] ''' # Python 2/3 compatibility from __future__ import print_function import numpy as np import cv2 as cv # local modules from video import create_capture from common import clock, draw_str import pyttsx import time import datetime engine = pyttsx.init() # Python TTs for the text to speech capability WorkDay = [] today = datetime.date.today() WorkDay.append(today) x1 = 0 x2 = 0 y1 = 0 y2 = 0 # Valiable added into the system for the function that will be tracking Facename = " " positionX = 0 positionY = 0 def detect(img, cascade): rects = cascade.detectMultiScale(img,
from std_msgs.msg import String
from baxter_core_msgs.msg import CollisionDetectionState

rospy.init_node('speechControl')

# Global Variables
rawCommand = ""         # last raw voice command received
collisionState = False  # latest collision flag from Baxter

# Baxter limb and gripper interfaces.
lLimb = baxter.Limb('left')
rLimb = baxter.Limb('right')
lGripper = baxter.Gripper('left')
rGripper = baxter.Gripper('right')

# Text to speech engine
t2s = pyttsx.init()
voices = t2s.getProperty('voices')
t2s.setProperty('voice', 'english')
"""
for voice in voices:
    print(voice.id)
"""
t2s.setProperty('rate', 150)

################## Definitions #####################


# clamping function to constrain arm movement
def clamp(n, minn, maxn):
    # Return n limited to the inclusive range [minn, maxn].
    return max(min(maxn, n), minn)
def Lire():
    """Copy the entry text into the label, then read it aloud."""
    reader = pyttsx.init()
    lebel['text'] = var_texte.get()
    reader.say(var_texte.get())
    reader.runAndWait()
def setup(self):
    """Create the speech engine and slow it to 130 words per minute."""
    tts = pyttsx.init()
    tts.setProperty('rate', 130)
    self.engine = tts
def speech(script):
    """Read *script* aloud, 20 wpm slower than the engine's default rate."""
    engine = pyttsx.init()
    default_rate = engine.getProperty('rate')
    engine.setProperty('rate', default_rate - 20)
    engine.say(script)
    engine.runAndWait()
"""Greeting script: wait three seconds, then speak a good-morning message."""
import pyttsx
import time

jarvis = pyttsx.init()
time.sleep(3)
jarvis.say(
    "Good morning Madam. As the species of H**o sapiens say. Rise and shine. Have a wonderful day Miss Parvarthy. Sincerely, Jarvis."
)
jarvis.runAndWait()
def tts(text):
    """Speak *text* with the engine's default settings."""
    voice = pyttsx.init()
    voice.say(text)
    voice.runAndWait()
#!/usr/bin/python import speech_recognition as sr import pyttsx import time import os r = sr.Recognizer() m = sr.Microphone() e = pyttsx.init() try: print("A moment of silence, please...") with m as source: r.adjust_for_ambient_noise(source) print("Set minimum energy threshold to {}".format(r.energy_threshold)) print("Say something!") with m as source: audio = r.listen(source) print("Got it! Now to recognize it...") try: value = r.recognize_google(audio) if str is bytes: output = (u"I think you said: {}".format(value).encode("utf-8")) print(output) else: output = ("I think you said: {}".format(value)) print(output) e.say(output)
import pyttsx

# see http://pyttsx.readthedocs.org/en/latest/engine.html#pyttsx.init
speech_engine = pyttsx.init('espeak')
speech_engine.setProperty('rate', 150)

"""
Purpose: This uses linux's espeak to read text back to the user and will print the string as verification
input: text - string to read
Output: nothing
"""


def speak(text):
    # Queue the text, speak it, then echo it to stdout as verification.
    speech_engine.say(text)
    speech_engine.runAndWait()
    print text
def voice(label):
    """Announce the detected label(s) aloud."""
    engine = pyttsx.init()
    # Fixed the spoken message: "Lables found" -> "Labels found ", and
    # added the missing space before the label text.
    engine.say("Labels found " + label)
    engine.runAndWait()
# ------------------- ## NOTE #If the module is not found #Download from the link below #https://github.com/Ishanvaid9/pyttsx-files or use --- pip install pyttsx3 --- # #Save as the Above file as pyttsx #So that it wont show the module not found error # #Save as in the location #Lib\site-packages\ # ------------------- import pyttsx # import pyttsx3 engine = pyttsx.init() # engine = pyttsx3.init() voices = engine.getProperty('voices') engine.setProperty('voice', voices[1].id) # print(voices[1].id) engine.setProperty('rate', 150) #engine.say("Hello, How are you ?") engine.runAndWait() def speak(str): engine.say(str) engine.runAndWait() speak("Hello, What's going on")
def __init__(self):
    """Initialize the SAPI5 speech engine with the first available voice."""
    super().__init__()
    self.engine = pyttsx.init('sapi5')
    self.voices = self.engine.getProperty('voices')
    first_voice = self.voices[0].id
    self.engine.setProperty('voice', first_voice)
#coding:utf-8 import sys reload(sys) sys.setdefaultencoding('utf8') import pyttsx import xlrd engine = pyttsx.init() rate = engine.getProperty('rate') engine.setProperty('rate', rate - 80) data = xlrd.open_workbook('words.xlsx') table = data.sheets()[0] print '输入你需要的行数,退出输入q' while True: i = input('你要念第几行?') engine.say(table.cell_value(i - 1, 0)) engine.runAndWait() engine.endLoop()
def say(word):
    """Pronounce *word* using the module-level RATE setting."""
    voice = pyttsx.init()
    voice.setProperty('rate', RATE)
    voice.say(word)
    voice.runAndWait()
def __init__(self):
    """Create and hold a pyttsx speech engine for this instance."""
    self.engine = pyttsx.init()
def tts(data):
    """Speak *data* very slowly (60 words per minute)."""
    reader = pyttsx.init()
    reader.setProperty('rate', 60)
    reader.say(data)
    reader.runAndWait()
def __init__(self):
    """Build the speech engine, populate lookup dicts, announce startup."""
    self.engine = pyttsx.init()
    # Bug fix: `self.createDicts` was a bare attribute access (a no-op);
    # the method was never actually invoked.
    self.createDicts()
    self.engine.setProperty('rate', 160)
    self.engine.say('Initializing')
    self.engine.runAndWait()
#text = r.recognize_google(audio, language='en-US') text = r.recognize_google(audio, language='en-GB') # text = r.recognize_ibm(audio,"*****@*****.**", "shafay12332100s") # r.re # r.re print("You said: " + text) # self.total_saying = text # self.process_text_input(self.total_saying) return text # returning the text which has been inputed. except sr.UnknownValueError: print("Google Speech Recognition could not understand audio") except sr.RequestError as e: print( "Could not request results from Google Speech Recognition service; {0}" .format(e)) def speak(text): engine.say(text) engine.runAndWait() if __name__ == '__main__': while True: engine = pt.init( ) # Creates the engine and then intializes the engine on the modeule of the data text = speech_to_text() speak(text)
# Globals shared with other handlers in this script.
# NOTE(review): `global` at module level is a no-op — confirm these were
# meant to live inside a function.
global t3
global t6
global cv2

main = Tk()  # creates root window
# all components of the window will come here
main.title(" Naagarik Seva ")
root = Frame(main)
width = 600
height = 400
#background_image=Tk.PhotoImage(file="C:\Python27\imgres.jpg")
#background_label = Tk.Label(root, image=background_image)
#background_label.place(x=0, y=0, relwidth=1, relheight=1)

# Spoken welcome via the Windows SAPI5 voice.
engine = pyttsx.init('sapi5')
engine.say("Namaskaar !")
# NOTE(review): cv2.waitKey used as a delay between utterances — confirm intent.
cv2.waitKey(2)
engine.say("Welcome, Please Fill your details! ")
engine.runAndWait()

# Page heading, underlined.
label_head = Label(root, text="Collector Office,Sangli.",
                   font=("Lucida Fax", 23), fg="cyan", bg="black")
f = tkFont.Font(label_head, label_head.cget("font"))
f.configure(underline=True)
label_head.grid(row=1, column=2)
#label_head.configure(font=f)
#label_head.pack()
def __init__(self):
    """Create a speech engine configured to talk slowly (70 wpm)."""
    tts = pyttsx.init()
    tts.setProperty('rate', 70)
    self.engine = tts
def Jarvis(self, text):
    """Speak *text*; self.onEnd fires when the utterance finishes.

    Uses startLoop(), so the engine's event loop keeps running after this
    call returns (it is ended elsewhere, by the onEnd callback).
    """
    spoken = str(text)
    self.string = spoken
    engine = pyttsx.init()
    self.engine = engine
    engine.connect('finished-utterance', self.onEnd)
    engine.say(spoken)
    engine.startLoop()
def speak(*args, **kwargs):
    # Speak the module-level `inp` string at 150 wpm.
    # NOTE(review): *args/**kwargs are accepted but ignored, and the text
    # comes from the global `inp` — confirm this is intentional.
    speaker = pyttsx.init()
    speaker.setProperty('rate', 150)
    speaker.say(inp)
    speaker.runAndWait()
# Braille description for every supported character.  Recorded clips follow
# the pattern "<char>_en.mp3", so 36 copy-pasted if-blocks collapse into
# one lookup table plus a single code path.
_BRAILLE_MESSAGES = {
    'A': "Letter A, dot 1.",
    'B': "Letter B, dots 1 and 2.",
    'C': "Letter C, dots 1 and 4.",
    'D': "Letter D, dots 1, 4 and 5.",
    'E': "Letter E, dots 1 and 5.",
    'F': "Letter F, dots 1, 2 and 4.",
    'G': "Letter G, dots 1, 2, 4 and 5.",
    'H': "Letter H, dots 1, 2 and 5.",
    'I': "Letter I, dots 2 and 4.",
    'J': "Letter J, dots 2, 4 and 5.",
    'K': "Letter K, dots 1 and 3.",
    'L': "Letter L, dots 1, 2 and 3.",
    'M': "Letter M, dots 1, 3 and 4.",
    'N': "Letter N, dots 1, 3, 4 and 5.",
    'O': "Letter O, dots 1, 3 and 5.",
    'P': "Letter P, dots 1, 2, 3 and 4.",
    'Q': "Letter Q, dots 1, 2, 3, 4 and 5.",
    'R': "Letter R, dots 1, 2, 3 and 5.",
    'S': "Letter S, dots 2, 3 and 4.",
    'T': "Letter T, dots 2, 3, 4 and 5.",
    'U': "Letter U, dots 1, 3 and 6.",
    'V': "Letter V, dots 1, 2, 3 and 6.",
    'W': "Letter W, dots 2, 4, 5 and 6.",
    # Fixed the duplicated "dots dots" typo in the original X message.
    'X': "Letter X, dots 1, 3, 4 and 6.",
    'Y': "Letter Y, dots 1, 3, 4, 5 and 6.",
    'Z': "Letter Z, dots 1, 3, 5 and 6.",
    '1': "Number 1, dot 1.",
    '2': "Number 2, dots 1 and 2.",
    '3': "Number 3, dots 1 and 4.",
    '4': "Number 4, dots 1, 4 and 5.",
    '5': "Number 5, dots 1 and 5.",
    '6': "Number 6, dots 1, 2, and 4.",
    '7': "Number 7, dots 1, 2, 4 and 5.",
    '8': "Number 8, dots 1, 2 and 5.",
    '9': "Number 9, dots 2 and 4.",
    '0': "Number 0, dots 2, 4 and 5.",
}


def msg_definer(character):
    """Play or speak the Braille dot pattern for *character* (A-Z, 0-9).

    Prefers the pre-recorded "<char>_en.mp3" clip under __PATH__; when the
    clip is missing, falls back to pyttsx text-to-speech.

    Returns the message string only on the TTS fallback path.  Returns
    None after mp3 playback and for unsupported characters — matching the
    original code, whose if-chain fell through without returning after
    playing a clip.
    """
    character = character.upper()
    message = _BRAILLE_MESSAGES.get(character)
    if message is None:
        return None  # unsupported character: original also returned None
    clip = __PATH__ + character + "_en.mp3"
    if os.path.isfile(clip):
        vlc.MediaPlayer(clip).play()
        return None  # original did not return the message after playback
    engine = pyttsx.init()
    engine.say(message)
    engine.runAndWait()
    return message
def setUp(self):
    """Create a non-debug pyttsx engine for each test case."""
    self.engine = pyttsx.init(debug=False)
def speak(x):
    """Say *x* out loud on a freshly created engine."""
    announcer = pyttsx.init()
    announcer.say(x)
    announcer.runAndWait()