def startEngine(self):
    """Initialise the pyttsx3 engine and spin up the speech event-loop thread.

    On failure, the error is routed to the runtime debug logger instead of
    propagating, so the rest of the screen reader keeps running.
    """
    try:
        import pyttsx3
        # Honour an explicitly configured driver module, else let pyttsx3 pick.
        self._engine = pyttsx3.init(self.module) if self.module != '' else pyttsx3.init()
        self.eventLoopThread = Thread(target=self.eventLoop)
        self._isInitialized = True
        self.eventLoopThread.start()
    except Exception as err:
        self.env['runtime']['debug'].writeDebugOut(
            'SpeechDriver:initialize:' + str(err), debug.debugLevel.ERROR)
def say_something(something):
    """Speak *something* aloud on a throwaway text-to-speech engine.

    BUG FIX: the original called ``pyttsx.init()`` — the legacy Python-2-only
    ``pyttsx`` package — while every other snippet in this file uses pyttsx3.
    """
    import pyttsx3  # local import keeps the fix self-contained in this function
    engine = pyttsx3.init()
    engine.setProperty('rate', 150)  # by default the rate is 200
    # NOTE(review): speak_volume is a module-level global — confirm it is
    # defined before this function is first called.
    engine.setProperty('volume', speak_volume)
    engine.say(something)
    engine.runAndWait()
    del engine  # drop the engine so a fresh one is built on the next call
def keyPressEvent(self, event):
    """Qt key handler: Escape clears the fields, Enter runs the search,
    and Ctrl followed by S / D (within 1 second) triggers update / speech."""
    # Escape: clear the word / meaning / explanation text fields
    if event.key() == QtCore.Qt.Key_Escape:
        self.txtWord.clear()
        self.txtMean.clear()
        self.txtExplain.clear()
        self.DeleteKeyBuffer()
    # Enter: run the search command
    elif event.key() == QtCore.Qt.Key_Return or event.key() == QtCore.Qt.Key_Enter:
        self._PushSearch()
        self.DeleteKeyBuffer()
    # Control modifier: remember it, auto-expire the buffer after 1 second
    elif event.key() == QtCore.Qt.Key_Control:
        self.keyBuffer = event.key()
        # NOTE(review): 'tr' is presumably the threading module — confirm.
        tr.Timer(1, self.DeleteKeyBuffer).start()
    elif self.keyBuffer == QtCore.Qt.Key_Control:
        if event.key() == QtCore.Qt.Key_S:
            self._PushUpdate()
        elif event.key() == QtCore.Qt.Key_D:
            # Ctrl+D: speak the current word on a one-shot pyttsx3 engine
            if not self.speechEngine:
                self.speechEngine = pyttsx3.init()
            text = ' ' + self.txtWord.text()
            self.speechEngine.say(text)
            self.speechEngine.runAndWait()
            self.speechEngine = None
    else:
        # Any other key: forget the pending Control press
        self.DeleteKeyBuffer()
def create(self):
    """Create the pyttsx3 engine for this object and slow speech to 120 wpm.

    :return: Nothing to return.
    """
    tts = pyttsx3.init()
    tts.setProperty('rate', 120)
    self.engine = tts
def speak(msg):
    """Speak *msg* with the espeak 'english+f1' voice.

    Prints every installed voice first (diagnostic output kept from the
    original implementation).
    """
    engine = pyttsx3.init()
    for voice in engine.getProperty('voices'):
        print(voice)
    engine.setProperty('voice', 'english+f1')
    engine.say(msg)
    engine.runAndWait()
def speaker(self) -> pyttsx3.engine.Engine:
    """The espeak speaker, created lazily on first access."""
    cached = self._speaker
    if cached is None:
        cached = pyttsx3.init()
        self._speaker = cached
    return cached
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import img_to_array
import imutils
import cv2  # NOTE(review): duplicate import of cv2 (harmless but redundant)
from tensorflow.keras.models import load_model
import numpy as np  # NOTE(review): duplicate import of numpy (harmless but redundant)
import time
import pyttsx3

# Text-to-speech setup: Windows SAPI5 backend, second installed voice.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# assumes at least two voices are installed — TODO confirm
engine.setProperty('voice', voices[1].id)

# Function to speak
def speak(audio):
    """Queue *audio* on the shared engine and block until it is spoken."""
    engine.say(audio)
    engine.runAndWait()

global flag  # NOTE(review): 'global' at module level is a no-op
dd = 0  # not used
flag = 0

def read_exp(emot):
    """Print the currently detected emotion."""
    print("You are currently feeling", emot)

def main():
# for page in range(0,size): # pageno=pdf.getPage(page) # print("page is ",page) # print("pageno is ",pageno) # text=pageno.extractText() # speak.say(text) # print("saying") # speak.runAndWait() # b=input("enter choice") def onStart(name): print('starting', name) def onWord(name, location, length): print('word', name, location, length) def onEnd(name, completed): print('finishing', name, completed) engine = pyttsx3.init() engine.connect('started-utterance', onStart) engine.connect('started-word', onWord) engine.connect('finished-utterance', onEnd) engine.say('The quick brown fox jumped over the lazy dog.') # engine.runAndWait() # print("entering atomic") # for page in range(0,1): # pageno=pdf.getPage(page) # print("page is ",page) # # print("pageno is ",pageno) # text=pageno.extractText() # with open("logs1.txt","a") as f: # f.writelines(text) # speak.say(text)
fh.close() except Exception as e: speak('Fetching of email and passsword failed') songs = [] try: #set chrome as default browser webbrowser.register( 'chrome', None, webbrowser.BackgroundBrowser( "C://Program Files (x86)//Google//Chrome//Application//chrome.exe") ) except Exception as e: speak('Something went wronng!') engine = pyttsx3.init( 'sapi5') # initializing speech application programming interface voices = engine.getProperty('voices') # get existing voices engine.setProperty('voices', voices[1].id) # choose any voice from an existing ones def speak(audio): #function to speak to user engine.say(audio) engine.runAndWait() def wishMe(): # function to wish user hour = int(datetime.datetime.now().hour) if hour < 12 and hour >= 0:
import pickle
from lxml import etree
import pyttsx3

# Load a previously pickled HTML page.
with open('html.pkl', 'rb') as handle:
    html = pickle.load(handle)

# Collect every text node under the weather_info definition list.
data = etree.HTML(html)
weather_text = data.xpath('//dl[@class="weather_info"]//text()')
content = ''
for text in weather_text:
    content += text

weather = pyttsx3.init()
# BUG FIX: the original called say() with no argument (TypeError) and never
# called runAndWait(), so nothing was ever spoken.
weather.say(content)
weather.runAndWait()
import pyttsx3

# Ask the user for a number and read it back aloud.
playAudio = pyttsx3.init()
user_entry = input("Enter a number: ")
playAudio.say(user_entry)
playAudio.runAndWait()
def Voice():
    """Voice calculator loop: listen for "<a> <op> <b>", compute, and speak
    the answer, mirroring the result into the Tk 'equation' variable."""
    engineio = pyttsx3.init()
    voices = engineio.getProperty('voices')
    engineio.setProperty('rate', 130)
    engineio.setProperty('voice', voices[0].id)

    def speak(text):
        # Speak *text* on the shared engine and block until done.
        engineio.say(text)
        engineio.runAndWait()

    # NOTE(review): the engine is re-initialised here; the configured engine
    # above (rate/voice) is discarded.
    engineio = pyttsx3.init()
    voices = engineio.getProperty('voices')
    r = sr.Recognizer()
    #r.energy_threshold = 4000
    mic = sr.Microphone()
    while 1:
        with mic as source:
            print('Listening...')
            text = 'Listening...'
            speak(text)
            audio = r.listen(source)
            #r.adjust_for_ambient_noise(source, duration=0.5)
            #r.dynamic_energy_threshold = True
        # listen to the source
        try:
            # use recognizer to convert our audio into text part.
            text = r.recognize_google(audio)
            print("You said : {}".format(text))
        except:
            # In case of voice not recognized clearly
            print("Sorry could not recognize your voice")
            speak("Could not understand")
        if text == 'exit':
            equation.set('Good Bye :D')
            root.update_idletasks()
            speak("Good Bye")
            # NOTE(review): stray expression 'e' — raises NameError at
            # runtime; looks like leftover debris that should be deleted.
            e
            break
            #sys.exit("Thankyou !")
        elif text == "clear":
            equation.set(" ")
        # Parse "<number> <operator> <number>" out of the recognised text.
        a = ''
        b = ''
        i = 0
        j = 0
        c = len(text)
        # First operand: characters up to the first space.
        while text[j] != ' ':
            a = a+text[j]
            j = j+1
        j = j+1
        # Operator char: '+', '-', 'x'/'X', '/', '^', or 'd' for "divided by".
        ch = text[j]
        if ch == 'd':
            # Skip over the words "divided by".
            while text[j] != 'y':
                j = j+1
            #j=j+1
            j = j+2
        # Second operand: remainder of the string.
        while j != c:
            b = b+text[j]
            j = j+1
        a = float(a)
        b = float(b)
        ans = 0
        if ch == '+':
            ans = a+b
            equation.set(ans)
            root.update_idletasks()
        elif ch == '-':
            ans = a-b
            equation.set(ans)
            root.update_idletasks()
        elif ch == 'x' or ch == 'X':
            ans = a*b
            equation.set(round(ans, 2))
            root.update_idletasks()
        elif ch == '/' or ch == 'd':
            if b != 0:
                ans = a/b
                equation.set(round(ans, 2))
                root.update_idletasks()
            else:
                speak("Not defined")
                ans = 0
                #equation.set(round(ans,2))
                root.update_idletasks()
        elif ch == '^':
            ans = math.pow(a, b)
            equation.set(ans)
            root.update_idletasks()
        else:
            ans = "Invalid Command"
            # NOTE(review): round() on a str raises TypeError here, and the
            # final speak() below would fail the same way.
            print(round(ans, 2))
            equation.set(ans)
        speak("The answer is {}".format(round(ans, 2)))
def say_stuff(stuff_to_say):
    """Speak the string form of *stuff_to_say* on a fresh engine."""
    tts = pyttsx3.init()
    tts.say(str(stuff_to_say))
    tts.runAndWait()
def speak(audio):
    """Echo *audio* to stdout and speak it via a fresh SAPI5 engine."""
    print(audio)
    voice_engine = pyttsx3.init('sapi5')
    voice_engine.say(audio)
    voice_engine.runAndWait()
# Watson Assistant setup — the placeholder strings must be replaced with real
# credentials before running.
authenticator = IAMAuthenticator('WATSON ASSISTANT API KEY HERE')
assistant = AssistantV2(version='ASSISTANT VERSION DATE HERE',
                        authenticator=authenticator)
assistant.set_service_url('WATSON SERVICE URL HERE')
assistant_id = 'WATSON ASSISTANT ID HERE'
session = assistant.create_session(assistant_id)
# Grab the session's ID by slicing its string representation.
# NOTE(review): fragile — depends on the exact repr layout; confirm against
# the SDK (session.get_result() usually exposes 'session_id' directly).
session_id = (str(session)[41:77])  # grab the session's ID
input("Press Enter when ready...")
# os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "FILE DIR HERE"
# client = texttospeech.TextToSpeechClient()

# Local text-to-speech: slightly slower rate, full volume.
tts = pyttsx3.init()
rate = tts.getProperty("rate")
tts.setProperty("rate", 150)
volume = tts.getProperty("volume")
tts.setProperty("volume", 1)

quit_check = False
while quit_check is not True:
    # Fresh recognizer per iteration with a fixed energy threshold.
    r = sr.Recognizer()
    r.dynamic_energy_threshold = False
    r.energy_threshold = 400
    with sr.Microphone() as source:
        print("Talk into mic now")
        audio = r.listen(source)
    try:
import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import smtplib
import webbrowser as wb
import os
import pyautogui
import psutil
import pyjokes
from covid import Covid
from quotes import Quotes
import pywhatkit as kit

# Shared engine: second installed voice.
en = pyttsx3.init()
voices = en.getProperty('voices')
en.setProperty('voice', voices[1].id)

def speak(audio):
    """Queue *audio* on the shared engine and block until spoken."""
    en.say(audio)
    en.runAndWait()

speak("hey boss this is friday and I am ai assistant")

def time():
    """Announce the current time in 24-hour format.

    BUG FIX: the original formatted with %I (12-hour clock) while announcing
    "24 hrs format"; %H matches the announcement.
    """
    Time = datetime.datetime.now().strftime("%H:%M:%S")
    speak("time in 24 hrs format and is")
    speak(Time)
import pyautogui as p #for controlling mouse and keyboard virtually import webbrowser as w #for opening web.whatsapp.com import requests #for webscraping from bs4 import BeautifulSoup #for webscraping import time import tkinter #for appending and getting words to/from clipboard import random import wikipedia as wk #for info in a particular topic import re #"Tell me about xyz" For extracting xyz from sentence from urllib.request import urlopen import pyttsx3 #for text-to-speech, optional eng = pyttsx3.init() eng.setProperty('rate', 120) eng.setProperty('volume', 1) lastwrd = "Well" counter1 = 0 counter2 = 0 counter3 = 0 counter4 = 0 counter5 = 0 choce = [ "God", #common prefixes "Mannn! I have already told you", "Come on, I already told you", "Do I need to say again", "I think I have told you once before" ] def send(msg): #defining the send function
def init_tts():
    """Create and return a fresh pyttsx3 engine."""
    return pyttsx3.init()
class Assistance:
    """Voice-controlled assistant ("Lucy") built on pyttsx3, SpeechRecognition,
    winsound cues, and a handful of web services."""

    # Class-level shared engine: every instance speaks through this one.
    lucy = pyttsx3.init()
    lucy.setProperty('rate', 150)

    def am_pm(self):
        """Return 'AM' or 'PM' for the current local hour."""
        hour = int(time.strftime('%H'))
        if int(hour) < 12:
            return 'AM'
        else:
            return "PM"

    def hi(self):
        """Greet the user according to the time of day."""
        hour = int(time.strftime('%H'))
        if hour < 12:
            self.lucy.say('Hi User, Good morning')
            self.lucy.runAndWait()
        elif hour < 18:
            self.lucy.say('Hi User, Good afternoon')
            self.lucy.runAndWait()
        else:
            self.lucy.say('Hi User, Good evening')
            self.lucy.runAndWait()

    def command(self):
        """Listen on the microphone and return the recognised text, lower-cased.

        Recurses until Google speech recognition succeeds.
        """
        r = sr.Recognizer()
        with sr.Microphone() as user_command:
            r.pause_threshold = 1
            r.adjust_for_ambient_noise(user_command, duration=0.2)
            cmd_audio = r.listen(user_command, phrase_time_limit=5)
        try:
            cmd_text = r.recognize_google(cmd_audio, language='en-US')
            return cmd_text.lower()
        except sr.UnknownValueError:
            cmd_text = self.command()
            return cmd_text.lower()

    def bye(self):
        """Say goodbye."""
        self.lucy.say('bye bye user, have a nice day')
        self.lucy.runAndWait()

    def introduce(self):
        """Introduce the assistant."""
        self.lucy.say('Hi, i am Lucifer.')
        self.lucy.say('you can call me lucy')
        self.lucy.say('I am voice controlled virtual assistant')
        self.lucy.say('I was born on 01/01/2020')
        self.lucy.runAndWait()

    def calculate(self):
        """Ask for an arithmetic expression, evaluate it, and speak the result.

        NOTE(review): eval() on recognised speech is unsafe for untrusted input.
        """
        self.lucy.say('what you want me to calculate')
        self.lucy.runAndWait()
        winsound.PlaySound('Sounds/init.wav', winsound.SND_ASYNC)
        cmd_text = self.command()
        if cmd_text is not None:
            a = parser.expr(cmd_text).compile()
            result = eval(a)
            self.lucy.say('The answer is {}'.format(result))
            self.lucy.runAndWait()

    def greetings(self):
        """Respond to "how are you"-style small talk."""
        self.lucy.say('I am good, Thanks for asking')
        self.lucy.runAndWait()

    def name(self):
        # NOTE(review): no runAndWait() here — the utterance stays queued
        # until some later runAndWait() call pumps the engine.
        self.lucy.say('My name is Lucifer, you can call me lucy')

    def maker(self):
        # NOTE(review): "created my Aayu" is likely a typo for "created by".
        self.lucy.say('I was created my Aayu')
        self.lucy.say('You might know him as, Aayush kumar')
        self.lucy.runAndWait()

    def cant_answer(self):
        """Fallback response for unanswerable queries."""
        self.lucy.say("I can't answer that")
        self.lucy.runAndWait()

    def open_website(self):
        """Ask for a site name and open https://www.<name>.com/ in the browser."""
        self.lucy.say('Which website would you like to open')
        self.lucy.runAndWait()
        winsound.PlaySound('Sounds/init.wav', winsound.SND_ASYNC)
        cmd_text = self.command()
        if cmd_text is not None:
            new_cmd = (str(cmd_text).replace('.com', '')).strip()
            self.lucy.say('Opening {}'.format(new_cmd))
            self.lucy.runAndWait()
            webbrowser.open('https://www.{}.com/'.format(new_cmd))

    def time(self):
        """Speak the current time in 12-hour form with AM/PM."""
        hour = int(time.strftime('%H'))
        if hour - 12 == 0 or hour - 12 == -12:
            # Midnight / noon both read as 12.
            self.lucy.say(('The time is, {}:{} {}'.format(12, time.strftime('%M'),
                                                          self.am_pm())))
            self.lucy.runAndWait()
        elif hour > 12:
            self.lucy.say(('The time is, {}:{} {}'.format((hour - 12),
                                                          time.strftime('%M'),
                                                          self.am_pm())))
            self.lucy.runAndWait()
        else:
            self.lucy.say(('The time is, {}:{} {}'.format(hour, time.strftime('%M'),
                                                          self.am_pm())))
            self.lucy.runAndWait()

    def date(self):
        """Speak today's date."""
        now = datetime.now()
        self.lucy.say(('the date is, {}'.format(now.strftime("%A %d. %B %Y"))))
        self.lucy.runAndWait()

    def control_panel(self):
        """Open the Windows control panel."""
        self.lucy.say('Opening control panel')
        self.lucy.runAndWait()
        os.system('control.exe')

    def command_prompt(self):
        """Open a Windows command prompt."""
        self.lucy.say('Opening command prompt')
        self.lucy.runAndWait()
        os.system('start cmd')

    def notepad(self):
        """Open Notepad."""
        self.lucy.say('Opening notepad')
        self.lucy.runAndWait()
        os.system('notepad')

    def chrome(self):
        """Open Google Chrome."""
        self.lucy.say('Opening chrome')
        self.lucy.runAndWait()
        os.system('start chrome')

    def joke(self):
        """Fetch a dad joke from icanhazdadjoke.com and read it out."""
        response = requests.get('https://icanhazdadjoke.com/',
                                headers={"Accept": "application/json"})
        if response.status_code == requests.codes.ok:
            self.lucy.say(str(response.json()['joke']))
            self.lucy.runAndWait()
        else:
            self.lucy.say('oops!I ran out of jokes')
            self.lucy.runAndWait()

    def weather(self):
        """Read out current conditions for Indore (OpenWeatherMap sample key)."""
        response = requests.get(
            "https://openweathermap.org/data/2.5/weather?q=Indore,in&appid=b6907d289e10d714a6e88b30761fae22"
        )
        data = response.json()
        desc = data['weather'][0]['description']
        city = data['name']
        temp = data['main']['temp']
        if response.status_code == requests.codes.ok:
            self.lucy.say("It's currently {} and {} degree in {} ".format(
                desc, temp, city))
            self.lucy.runAndWait()

    def temperature(self):
        """Read out the current temperature for Indore."""
        response = requests.get(
            "https://openweathermap.org/data/2.5/weather?q=Indore,in&appid=b6907d289e10d714a6e88b30761fae22"
        )
        data = response.json()
        temp = data['main']['temp']
        if response.status_code == requests.codes.ok:
            self.lucy.say("It's {} degree outside ".format(temp))
            self.lucy.runAndWait()

    def slow(self):
        """Apologise when processing takes too long."""
        self.lucy.say('I am trying my best, sorry for the delay')
        self.lucy.runAndWait()

    def thanks(self):
        """Respond to thanks."""
        self.lucy.say('My pleasure')
        self.lucy.runAndWait()

    def play(self):
        """Ask for a song name and open a YouTube search for it."""
        self.lucy.say('Which song would you like to play')
        self.lucy.runAndWait()
        winsound.PlaySound('Sounds/init.wav', winsound.SND_ASYNC)
        cmd_text = self.command()
        if cmd_text is not None:
            url = "https://www.youtube.com/results?search_query=" + cmd_text.replace(
                ' ', '+')
            webbrowser.open(url)

    def sorry(self):
        """Fallback when the command was not understood."""
        self.lucy.say("Sorry, I didn't get you.")
        self.lucy.runAndWait()

    def wikipedia(self):
        """Answer a query from Wikipedia: read three sentences, offer the rest."""
        self.lucy.say('What you want to know')
        self.lucy.runAndWait()
        winsound.PlaySound('Sounds/init.wav', winsound.SND_ASYNC)
        cmd_text = self.command()
        self.lucy.say('Let me see, what i can find')
        self.lucy.runAndWait()
        wiki_data = wikipedia.summary(cmd_text).split('.')
        self.lucy.say('.'.join(wiki_data[:3]))
        self.lucy.runAndWait()
        self.lucy.say('Do you want me to read more')
        self.lucy.runAndWait()
        winsound.PlaySound('Sounds/init.wav', winsound.SND_ASYNC)
        cmd_text = self.command()
        if 'yes' in cmd_text and cmd_text is not None:
            self.lucy.say('.'.join(wiki_data[3:]))
            self.lucy.runAndWait()
        else:
            self.lucy.say('Okay')
            self.lucy.runAndWait()

    def capture(self):
        """Take one webcam photo and save it under Capture_images/."""
        video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        ret, frame = video_capture.read(0)
        winsound.PlaySound('Sounds/click.wav', winsound.SND_ASYNC)
        cv2.flip(frame, 1, frame)
        now = datetime.now()
        cv2.imwrite(('Capture_images/img{}{}.jpg'.format(
            now.strftime('%d_%m_%y_'), time.strftime('%H_%M'))), frame)
        time.sleep(1)
        cv2.destroyAllWindows()
def __init__(self, parent):
    """Build the ground-control GUI: ROS node, telemetry labels, arm/mission
    buttons, map canvas, gripper and land controls, speech engine, and the
    initial path plan."""
    tk.Frame.__init__(self, parent)
    rospy.init_node("Ground_control")
    self.parent = parent
    # Position info
    self.home_alt = 0
    self.position = tk.StringVar()
    tk.Label(parent, textvariable=self.position, justify=tk.LEFT).grid(row=0, column=1)
    self.position.set("Last heard: \nPosition: \nAltitude: ")
    self.last_heard = 0
    self.time_last_heard_pos = 0
    self.lat = 0
    self.lon = 0
    self.alt = 0
    self.fly_alt = 32  # default flight altitude
    self.ground_speed = 0
    self.heading = 0.0
    rospy.Subscriber('/mavlink_pos', mavlink_lora_pos, self.pos_callback)
    rospy.Subscriber('/mavlink_rx', mavlink_lora_msg, self.on_mavlink_msg)
    self.update_position()
    # Path draw parameter
    self.drone_collision_time_out = 120
    self.land_now_flag = False
    self.path = [[0, 0], [0, 0]]  # point 1, point 2
    self.path_old = [[0, 0], [0, 0]]
    self.path_draw = [[0, 0], [0, 0]]  # path[0] all x's path[1] all y's
    self.obstacles = [[[0, 0], [0, 0]], [[0, 0]]]
    self.obstacles_zone = [[[0, 0], [0, 0]], [[0, 0]]]
    # Per-drone telemetry rows: [id, then 8 numeric fields].
    self.drone_list = [['G1', 0, 0, 0, 0, 0, 0, 0, 0],
                       ['G2', 0, 0, 0, 0, 0, 0, 0, 0],
                       ['G3', 0, 0, 0, 0, 0, 0, 0, 0],
                       ['G4', 0, 0, 0, 0, 0, 0, 0, 0]]
    self.target = [10.32345549979534, 55.47129930519026]
    self.path_x_start = 0
    self.path_y_start = 0
    # Named target zones as [lat, lon, label].
    self.target_zones = [[55.4718040079795, 10.32307768478619, "A"],
                         [55.47167975123916, 10.32365770570652, "B"],
                         [55.47206847655581, 10.32370825563272, "C"],
                         [55.47173827754129, 10.32443766787085, "D"],
                         [55.47228089795564, 10.32437208048389, "E"],
                         [55.47179993860693, 10.32493302591576, "F"],
                         [55.47130059193550, 10.32451701524021, "G"],
                         [55.47129930519026, 10.32345549979534, "H"]]
    self.status_msg = "Initialise"
    self.current_target_zone = "H"
    self.last_target_zone = "G"
    self.current_zone = "G"
    # Attitude info and update home positions.
    self.attitude = tk.StringVar()
    tk.Label(parent, textvariable=self.attitude, justify=tk.LEFT).grid(row=0, column=2)
    self.attitude.set("Dist2Home: \nYaw: \nPitch: \nROLL: ")
    self.home_lat = 0
    self.home_lon = 0
    self.last_lat = 0
    self.last_lon = 0
    self.last_alt = 0
    self.first_run_attitude_flag = True
    self.update_attitude()
    self.button_height = 7
    self.button_width = 12
    # Battery info
    self.battery_status = tk.StringVar()
    tk.Label(parent, textvariable=self.battery_status,
             justify=tk.LEFT).grid(sticky=tk.N, row=4, column=1)
    rospy.Subscriber('/mavlink_status', mavlink_lora_status, self.status_callback)
    self.battery_status.set("Battery voltage: ")
    self.battery_volt = 0.0
    self.update_battery()
    # With-in geo fence
    self.geo_fence_status = tk.StringVar()
    self.geo_fence_label = tk.Label(parent, textvariable=self.geo_fence_status,
                                    justify=tk.LEFT)
    self.geo_fence_label.grid(sticky=tk.N, row=4, column=2)
    self.geo_fence_status.set("Geo fence: ")
    self.update_geo_fence_status()
    # Arm drone
    self.arm_drone_button = tk.Button(parent, text="Arm", command=self.click_arm,
                                      height=self.button_height,
                                      width=self.button_width)
    self.arm_drone_button.grid(sticky=tk.N, row=0, column=0)
    self.arm_drone_button_clicked = False
    self.start_mission_button_clicked = False
    self.slide_min = 0
    self.slide_max = 100
    # Slider acts as an arm-confirmation safety interlock.
    self.arm_drone_slider = tk.Scale(parent, from_=0, to=100,
                                     label="Slide to confirm arm",
                                     orient=tk.HORIZONTAL, length=200, width=25)
    self.arm_drone_slider.bind("<ButtonRelease-1>", self.arm_slider)
    self.arm_drone_slider.grid(sticky=tk.N, row=5, column=2, columnspan=1)
    # Set home
    set_home_button = tk.Button(parent, text="Set home",
                                command=self.click_set_home,
                                height=self.button_height, width=self.button_width)
    set_home_button.grid(sticky=tk.N, row=3, column=0)
    # Upload mission
    self.upload_mission_button = tk.Button(parent, text="Upload mission",
                                           command=self.click_upload_mission,
                                           justify=tk.LEFT,
                                           height=self.button_height,
                                           width=self.button_width)
    self.upload_mission_button.grid(sticky=tk.N, row=1, column=0)
    # Update mission
    self.update_mission_button = tk.Button(parent, text="Update mission",
                                           command=self.click_update_mission,
                                           justify=tk.LEFT,
                                           height=self.button_height,
                                           width=self.button_width)
    self.update_mission_button.grid(sticky=tk.N, row=5, column=3)
    self.upload_mission_node = UploadMissionNode()
    mavlink_mission_ack_sub = rospy.Subscriber("mavlink_interface/mission/ack",
                                               mavlink_lora_mission_ack,
                                               self.on_ack_received_callback)
    self.waiting_for_mission_ack = 0
    # Start mission:
    self.start_mission_button = tk.Button(parent, text="Start mission",
                                          command=self.click_start_mission,
                                          justify=tk.LEFT,
                                          height=self.button_height,
                                          width=self.button_width)
    self.start_mission_button.grid(sticky=tk.N, row=2, column=0)
    self.mavlink_start_mission_pub = rospy.Publisher(
        '/mavlink_interface/command/start_mission',
        mavlink_lora_command_start_mission, queue_size=0)
    # Update map:
    self.map_fig = get_map_location(self.lat, self.lon)
    # self.map_fig.suptitle('Drone placement:', fontsize=10)
    self.airside_img = mpimg.imread("gc_functions/airfield.png")
    self.canvas = FigureCanvasTkAgg(self.map_fig, master=self.parent)
    self.canvas.draw()
    self.canvas.callbacks.connect('button_press_event', self.set_target_on_click)
    self.canvas.get_tk_widget().grid(sticky=tk.N, row=1, column=1,
                                     rowspan=3, columnspan=2)
    # pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    self.update_map()
    # Gripper button
    # NOTE(review): labels and callbacks look swapped — "Gripper open" calls
    # close_gripper and "Gripper close" calls open_gripper. Confirm wiring.
    self.gripper_node = GripperNode()
    self.gripper_b1 = tk.Button(parent, text="Gripper open",
                                command=self.gripper_node.close_gripper,
                                justify=tk.LEFT, height=self.button_height,
                                width=self.button_width)
    self.gripper_b1.grid(sticky=tk.N, row=4, column=0)
    self.gripper_b2 = tk.Button(parent, text="Gripper close",
                                command=self.gripper_node.open_gripper,
                                justify=tk.LEFT, height=self.button_height,
                                width=self.button_width)
    self.gripper_b2.grid(sticky=tk.N, row=5, column=0)
    # DroneID setup, not visual!
    self.sim = True
    self.status_toggle_button = tk.Button(parent, text="Sim On",
                                          command=self.toggle_sim_status,
                                          justify=tk.LEFT,
                                          height=self.button_height,
                                          width=self.button_width)
    self.status_toggle_button.grid(sticky=tk.N, row=4, column=3)
    self.drone_id_node = DroneIDNode()
    self.drone_id_func()
    # Land at spot
    land_pub_topic = '/fc/mavlink_interface/command/land'
    self.land_pub = rospy.Publisher(land_pub_topic, mavlink_lora_command_land,
                                    queue_size=0)
    self.land_button = tk.Button(parent, text="Land", command=self.land,
                                 justify=tk.LEFT, height=self.button_height,
                                 width=self.button_width*3)
    self.land_button.grid(sticky=tk.W, row=5, column=1)
    # Speech setup
    self.engine = pyttsx3.init("espeak")
    self.engine.setProperty('rate', 160)
    self.say("Ground station f*****g ready!")
    # Set home
    self.click_set_home()
    # Path planning
    self.this_drone_id = 0
    self.plan_path()
# -*- coding: utf-8 -*- from chatterbot import ChatBot import pyttsx3 #intializing speach module voice = pyttsx3.init() # Create a new instance of a ChatBot bot = ChatBot( 'Default Response Example Bot', storage_adapter='chatterbot.storage.JsonFileStorageAdapter', #logic_adapters=[ # { # 'import_path': 'chatterbot.logic.BestMatch' # }, # { # 'import_path': 'chatterbot.logic.LowConfidenceAdapter', # 'threshold': 0.65, # 'default_response': 'I am sorry, but I do not understand.' # } #], trainer='chatterbot.trainers.ListTrainer', #trainer='chatterbot.trainers.ChatterBotCorpusTrainer', silence_performance_warning=True) # Train the chat bot with a few responses #bot.train([ # 'how are you', # 'I am good', # 'Thats great', # 'what are up to lately',
def speak(text):
    """Say *text* aloud and block until playback completes."""
    narrator = pyttsx3.init()
    narrator.say(text)
    narrator.runAndWait()
def __init__(self):
    """Create the text-to-speech engine used by this instance."""
    self.engine = pyttsx3.init()
#import packages import pyttsx3 import PyPDF2 book = open('Introduction_to_Machine_Learning.pdf', 'rb') #read PDF pdfReader = PyPDF2.PdfFileReader(book) pages = pdfReader.numPages #read pages in PDF #print(pages) #no of pages in PDF speaker = pyttsx3.init() #create speaker #page = pdfReader.getPage(9) #read single page for num in range(8, pages): #read certain range of pdf page = pdfReader.getPage(num) text = page.extractText() speaker.say(text) speaker.runAndWait()
def speak(audio):
    """Speak *audio* on a freshly created engine."""
    voice = pyttsx3.init()
    voice.say(audio)
    voice.runAndWait()
import pyaudio import wikipedia import webbrowser import pywhatkit as kit import os import smtplib from email.message import EmailMessage import pywhatkit as whatsapp import requests from newsapi import NewsApiClient import random import serial ser = serial.Serial('COM6', 9600) computer = pyttsx3.init( 'sapi5' ) # sapi5 is the microsoft speech api . This computer will only speak with us voices = computer.getProperty( 'voices') # we get the list of voices [boy voice,girl voice] #print(voices[1].id) we get the women voice #print(voices[0].id) we get the men voice computer.setProperty( 'voice', voices[1].id) # here we set the voice , either of a girl or a boy def speak(audio): # by this function,we can make the computer to speak computer.say(audio) computer.runAndWait() def wishme():
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
import pyttsx3 as p
import speech_recognition as sr
from webdriver_manager.chrome import ChromeDriverManager

# Launch Chrome via webdriver-manager (downloads a matching chromedriver).
driver = webdriver.Chrome(ChromeDriverManager().install())
# NOTE(review): this Microphone is constructed and immediately discarded —
# it has no lasting effect.
sr.Microphone(device_index=1)
r = sr.Recognizer()

# Voice configuration: slightly slower rate, 8th installed voice.
engine = p.init()
engine.setProperty('rate', 165)
voices = engine.getProperty('voices')
# assumes at least 8 voices are installed — TODO confirm
engine.setProperty('voice', voices[7].id)

def send_msg():
    """Dictate and send a WhatsApp Web message (definition continues beyond
    this chunk)."""
    # NOTE(review): opens a second browser, shadowing the module-level driver.
    driver = webdriver.Chrome('chromedriver.exe')
    driver.get("https://web.whatsapp.com/")
    wait = WebDriverWait(driver, 500)
    c = 0
    while c != 1:
        with sr.Microphone() as source:
            engine.say("Who do you want to send the message to?")
            engine.runAndWait()
            r.adjust_for_ambient_noise(source)
def talk(text, x):
    """Speak *text* using the installed voice at index *x*."""
    tts = pyttsx3.init()
    tts.setProperty('voice', tts.getProperty('voices')[x].id)
    tts.say(text)
    tts.runAndWait()
def __init__(self):
    """Set up the TTS engine and announce that the speaking module works."""
    engine = pyttsx3.init()
    engine.setProperty('volume', 0.9)
    self._engine = engine
    self._rate = engine.getProperty('rate')
    self.speak_in_english(("Hi, this is robot. Speaking module is working."))
    self._engine.runAndWait()
def speak(audio):
    """Speak *audio* with the first installed SAPI5 voice."""
    engine = pyttsx3.init('sapi5')
    first_voice = engine.getProperty('voices')[0]
    engine.setProperty('voice', first_voice.id)
    engine.say(audio)
    engine.runAndWait()
import pyttsx3

filename = "output.wav"

engine = pyttsx3.init()  #object creation
voices = engine.getProperty('voices')  #getting details of current voice
#engine.setProperty('voice', voices[0].id)  #changing index, changes voices. 0 for male
engine.setProperty('voice', voices[1].id)  #changing index, changes voices. 1 for female

text = input('Enter the text : ')
engine.say(text)
engine.runAndWait()

#Saving File
# NOTE(review): save_to_file only queues the job — without a following
# runAndWait() the wav file is never written ("yet not implemented" per author).
engine.save_to_file("Hello", filename)  #yet not implemented
import speech_recognition
import pyttsx3
from datetime import date, datetime

# Simple voice assistant loop: listen, recognise, pick a canned reply.
ai_ear = speech_recognition.Recognizer()
ai_mouth = pyttsx3.init()
ai_brain = ""

while True:
    with speech_recognition.Microphone() as mic:
        print("Ai:I'm listening")
        audio = ai_ear.listen(mic)
    print("Ai:...")
    try:
        you = ai_ear.recognize_google(audio)
    except Exception:
        # BUG FIX: the original used 'you == ""' (a no-op comparison), which
        # left 'you' undefined after a failed recognition and crashed below.
        you = ""
    print("You: " + you)
    if you == "":
        ai_brain = " i can't hear you, try again,please!"
    elif "hello" in you:
        ai_brain = "Hello Bella"
    elif "today" in you:
        today = date.today()
        # BUG FIX: %D is not a portable strftime code; %d is day-of-month.
        ai_brain = today.strftime("%B %d, %Y")
    elif "time" in you:
        now = datetime.now()
        ai_brain = now.strftime("%H hours %M minutes %S seconds")
    elif "Who are you?" in you:
        # NOTE(review): Google recognition rarely returns punctuation or this
        # capitalisation, so this branch may never match — confirm.
        ai_brain = "Your friend!"
import PyPDF2
import pyttsx3

# Open the sample PDF and report its page count.
infile = open('Related/sample.pdf', 'rb')
pdfReader = PyPDF2.PdfFileReader(infile)
num_Pages = pdfReader.numPages
print(num_Pages)

start = pyttsx3.init()
print("Playing audio..")

# Read every page aloud, one page per utterance.
for page_index in range(0, num_Pages):
    page = pdfReader.getPage(page_index)
    text = page.extractText()
    start.say(text)
    start.runAndWait()
import pyttsx3 as py

# Shared engine: third installed voice, slowed-down rate.
engine = py.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[2].id)
engine.setProperty('rate', 130)

def spk(st):
    """Speak the string *st* and wait for playback to finish."""
    engine.say(st)
    engine.runAndWait()
import pyttsx3

engine = pyttsx3.init()
# BUG FIX: say() returns None, so the original chained call
# engine.say("Hello World").runAndWait() raised AttributeError and the
# trailing runAndWait() was never reached. The calls must be separate.
engine.say("Hello World")
engine.runAndWait()
import subprocess
import sys
import pyttsx3
from pytube import YouTube

# NOTE(review): 'requests' and 'ssl' are used below but not imported in this
# section — presumably imported earlier in the file; confirm.
requests.packages.urllib3.disable_warnings()
try:
    _create_unverified_https_context = ssl._create_unverified_context
# BUG FIX: the original wrote "except 'AttributeError':" — catching a string
# never matches in Python 3 (and raises TypeError if anything is thrown).
except AttributeError:
    pass
else:
    ssl._create_default_https_context = _create_unverified_https_context

# Normalised from the original's triple-quote literal; same runtime value.
headers = {'user-agent': 'Chrome/53.0.2785.143'}
#speak=wicl.Dispatch("SAPI.SpVoice")
speak = pyttsx3.init()

def events(frame, put):
    """Classify the recognised text *put* against keyword groups (definition
    continues beyond this chunk)."""
    identity_keywords = ["who are you", "who r u", "what is your name"]
    youtube_keywords = ("play ", "stream ", "queue ")
    launch_keywords = ["open ", "launch "]
    search_keywords = ["search "]
    wikipedia_keywords = ["wikipedia ", "wiki "]
    location_keywords = ["locate", "spot"]
    # could or cool or good
    check_keywords = ["what", "when", "was", "how", "has", "had", "should",
                      "would", "can", "could", "cool", "good"]
    download_music = ("download ", "download music ")
    search_pc = ("find ", "lookfor ")
    close_keywords = ("close ", "over ", "stop ", "exit ")
    link = put.split()
    #Add note
import pyttsx3
speaker = pyttsx3.init()
import PyPDF2

# NOTE(review): this first utterance is queued BEFORE the voice is changed,
# and voices[2] assumes at least three installed voices — confirm both.
speaker.say("I'm Ready to read your book")
voices = speaker.getProperty("voices")
speaker.setProperty("voice", voices[2].id)
speaker.runAndWait()

# Open the PDF (hard-coded local path) and read it page by page.
book = open(
    r"C:\Users\Prj Rock's\Documents\books abroad authors\EDC\Neamen solid state device gate.pdf",
    "rb")
pdfReader = PyPDF2.PdfFileReader(book)
page = pdfReader.numPages  # total page count
print(page)
for num in range(page):
    singlepage = pdfReader.getPage(num)
    text = singlepage.extractText()
    speaker.say(text)
    speaker.runAndWait()
#canvas.delete(ALL) canvas.create_image(0, 0, image=frame, anchor=NW) canvas.update() time.sleep(0.1) if flag == False: canvas.create_image(0, 0, image=img1, anchor=NW) canvas.update() break def main(): speak("Hello everyone! How Are you, I am fine Thankyou. Take Care") transition2() engine = pyttsx3.init('sapi5') # Windows voices = engine.getProperty('voices') engine.setProperty('voice', voices[0].id) root = Tk() root.title("Transition") frames = [] for i in range(15): filename = "{}.gif".format(i) frames.append(PhotoImage(file=filename)) #frames = [PhotoImage(file='chatgif2.gif',format = 'gif -index %i' %(i)) for i in range(15)] canvas = Canvas(root, width=1067, height=600) canvas.pack() img1 = PhotoImage(file='chatgif5.gif') canvas.create_image(0, 0, image=img1, anchor=NW)
import webbrowser #simply calling the open() function from this module will do the right thing import os #The OS module provides a way of using operating system dependent functionality. import smtplib #The smtplib module defines an SMTP client session object that can be #used to send mail to any Internet machine with an SMTP or ESMTP listener daemon. import pyaudio # PyAudio provides Python bindings for PortAudio, the cross-platform audio I/O library. # With PyAudio, you can easily use Python to play and record audio on a variety of platforms import json # json = The json module provides an API similar to pickle for converting in-memory Python objects to a serialized representation # known as JavaScript Object Notation (JSON) # is probably most widely used for communicating between the web server and client in an AJAX application, # but is not limited to that problem domain. engine = pyttsx3.init( 'sapi5' ) #Microsoft Speech API (SAPI5) is the technology for voice recognition and synthesis voices = engine.getProperty('voices') engine.setProperty('voice', voices[1].id) def speak(audio): '''This is a function used by the jarvis to speak ''' engine.say(audio) engine.setProperty( 'rate', 150) #makes the voice slower, default value is 200 words per minutes engine.runAndWait() def wishMe():
def runPtts(txt):
    """Speak *txt* using a temporary pyttsx3 engine."""
    voice = pyttsx3.init()
    voice.say(txt)
    voice.runAndWait()
def __init__(self, query_words, voice_rate=120):
    """Remember the query words and speaking rate, and build the TTS engine."""
    self.engine = pyttsx3.init()
    self.query_words = query_words
    self.rate = voice_rate