Example #1
def speak(text):
    engineio = pyttsx3.init()
    voices = engineio.getProperty('voices')
    engineio.setProperty('rate', 130)  # here you can set the speech rate
    engineio.setProperty('voice', voices[0].id)
    engineio.say(text)
    engineio.runAndWait()
def identifyTrafficSign(image):
    '''
    In this function we select some ROIs in which we expect the sign's arrow parts to appear.
    If an ROI has more active pixels than the threshold we mark it as 1, otherwise 0.
    After passing through all four regions, we compare the tuple of ones and zeros with the keys in the SIGNS_LOOKUP dictionary.
    '''

    # define the dictionary of signs segments so we can identify
    # each signs on the image
    SIGNS_LOOKUP = {
        (1, 0, 0, 1): 'Turn Right', # turnRight
        (0, 0, 1, 1): 'Turn Left', # turnLeft
        (0, 1, 0, 1): 'Move Straight', # moveStraight
        (1, 0, 1, 1): 'Turn Back', # turnBack
    }

    THRESHOLD = 150
    
    image = cv2.bitwise_not(image)
    # (roiH, roiW) = roi.shape
    #subHeight = thresh.shape[0]/10
    #subWidth = thresh.shape[1]/10
    (subHeight, subWidth) = np.divide(image.shape, 10)
    subHeight = int(subHeight)
    subWidth = int(subWidth)

    # mark the ROIs borders on the image
    cv2.rectangle(image, (subWidth, 4*subHeight), (3*subWidth, 9*subHeight), (0,255,0),2) # left block
    cv2.rectangle(image, (4*subWidth, 4*subHeight), (6*subWidth, 9*subHeight), (0,255,0),2) # center block
    cv2.rectangle(image, (7*subWidth, 4*subHeight), (9*subWidth, 9*subHeight), (0,255,0),2) # right block
    cv2.rectangle(image, (3*subWidth, 2*subHeight), (7*subWidth, 4*subHeight), (0,255,0),2) # top block

    # extract the 4 ROIs from the thresholded sign image
    leftBlock = image[4*subHeight:9*subHeight, subWidth:3*subWidth]
    centerBlock = image[4*subHeight:9*subHeight, 4*subWidth:6*subWidth]
    rightBlock = image[4*subHeight:9*subHeight, 7*subWidth:9*subWidth]
    topBlock = image[2*subHeight:4*subHeight, 3*subWidth:7*subWidth]

    # compute the average pixel intensity (0-255) of each ROI
    leftFraction = np.sum(leftBlock)/(leftBlock.shape[0]*leftBlock.shape[1])
    centerFraction = np.sum(centerBlock)/(centerBlock.shape[0]*centerBlock.shape[1])
    rightFraction = np.sum(rightBlock)/(rightBlock.shape[0]*rightBlock.shape[1])
    topFraction = np.sum(topBlock)/(topBlock.shape[0]*topBlock.shape[1])

    segments = (leftFraction, centerFraction, rightFraction, topFraction)
    segments = tuple(1 if segment > THRESHOLD else 0 for segment in segments)

    cv2.imshow("Warped", image)
    if segments in SIGNS_LOOKUP:
        k = SIGNS_LOOKUP[segments]
        print(k)

        speak(k)  # engineio is local to speak(), so go through the helper defined above

        return k
    else:
        return None
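A minimal sketch of how the two functions above might be driven, assuming the input is an already-cropped sign image that gets binarized before the ROI lookup; the file name and the Otsu thresholding step are assumptions, not the original project's pipeline:

import cv2
import numpy as np
import pyttsx3

# Hypothetical driver: load a cropped sign image, binarize it, then classify it.
frame = cv2.imread('sign.png', cv2.IMREAD_GRAYSCALE)  # illustrative file name
_, thresh = cv2.threshold(frame, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

sign = identifyTrafficSign(thresh)
if sign is not None:
    print('Detected:', sign)

cv2.waitKey(0)
cv2.destroyAllWindows()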
Example #3
def speak():
    """SPEAK is the list that holds the input strings; it is defined as a global variable."""
    engineio = pyttsx3.init()  # initialize the engine
    voices = engineio.getProperty('voices')
    engineio.setProperty('rate', 130)  # this controls the speech rate
    engineio.setProperty('voice', voices[0].id)

    if len(SPEAK):
        engineio.say(SPEAK[len(SPEAK) - 1])  # speak the most recently entered value
        engineio.runAndWait()
        data = SPEAK[0]
        if data != SPEAK[len(SPEAK) - 1]:  # if the two values differ, a new value has arrived, so delete the old one
            del SPEAK[0]
    else:
        engineio.say("Please enter the English or Chinese text you want to translate")
        engineio.runAndWait()
        sys.exit()
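A minimal driver for the snippet above, assuming SPEAK is a module-level list that the rest of the project appends user input to; everything below is illustrative:

import sys
import pyttsx3

SPEAK = []  # global list of input strings that speak() reads from (assumed by the snippet above)

SPEAK.append('hello')
speak()  # speaks the most recent entry, 'hello'
SPEAK.append('world')
speak()  # speaks 'world' and drops the stale 'hello' from the front of the list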
Example #4
        to_confirm = preferences
        return 'ask confirmation', preferences

    #default new state if we did not recognise the input / old state combination
    return 'ask to repeat', preferences

print('Done!\n')

while True:
    #give some output to the user
    output = generateOutput(currentstate)

    #text to speech output is available
    if textToSpeech:
        engineio = pyttsx3.init()
        engineio.say(output)
        engineio.runAndWait()

    #prints info for debugging purposes
    if verbose:
        print(currentstate)
        print(preferences)

    print(output)

    #presents user with info about remaining number of utterances
    if maxUtterances:
        if utterancecount != utterancemax:
            print('[', utterancemax - utterancecount, 'sentences left ]')

    #end program if we're done
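The loop above is cut off and leans on names defined elsewhere in the project; a minimal set of stand-ins that would let the fragment run is sketched below (every name and value is an assumption, and the fragment's termination check is not shown, so the loop as given never ends):

import pyttsx3

# Assumed stand-ins for the project-level state the dialogue loop relies on.
currentstate = 'welcome'
preferences = {}
textToSpeech = True         # enables the pyttsx3 branch inside the loop
verbose = False             # extra debug printing of state and preferences
maxUtterances = True        # whether the remaining-utterance budget is announced
utterancecount, utterancemax = 0, 10

def generateOutput(state):
    """Hypothetical stub: map a dialogue state to a prompt for the user."""
    return 'Hello! How can I help you?' if state == 'welcome' else 'Could you repeat that?'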
Example #5
def speak(text):
    engineio.say(text)
    engineio.runAndWait()
Example #6
def speak(text):
    engineio.say(text)  # speaks the given string
    engineio.runAndWait()  # runs and waits for it to finish
Example #7
File: main.py | Project: timkoeppel/glados
def GLaDOS(audio):
    """Speaks the audio passed as argument, line by line."""
    print(audio)
    for line in audio.splitlines():
        engineio.say(line)
        engineio.runAndWait()
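A minimal way to exercise GLaDOS, assuming a module-level pyttsx3 engine named engineio as in the other snippets; the sample text is illustrative:

import pyttsx3

engineio = pyttsx3.init()  # module-level engine that GLaDOS() uses

GLaDOS("Hello, and again, welcome.\nPlease proceed to the next chamber.")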
Example #8
def speak(text):  #Function to speak text
    engineio.say(text)
    engineio.runAndWait()
            cv2.putText(
                        img, 
                        str(id), 
                        (x+5,y-5), 
                        font, 
                        1, 
                        (255,255,255), 
                        2
                    )
        
      
    
    cv2.imshow('camera',img) 
    k = cv2.waitKey(10) & 0xff 
    if k == 27:
        break
    
engineio.say('Thank you {0}'.format(id))
engineio.runAndWait()
print("\n Thank you for the test.")
cam.release()
cv2.destroyAllWindows()
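The middle of this example is cut out of a larger face-recognition loop, so names like cam, img, id, x, y, and font come from code that is not shown; the sketch below reconstructs the kind of loop it appears to be lifted from, with every detail an assumption:

import cv2
import pyttsx3

engineio = pyttsx3.init()
font = cv2.FONT_HERSHEY_SIMPLEX
detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

cam = cv2.VideoCapture(0)
id = 'unknown'  # stand-in for the recognizer's predicted label
while True:
    ok, img = cam.read()
    if not ok:
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in detector.detectMultiScale(gray, 1.3, 5):
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
    cv2.imshow('camera', img)
    if cv2.waitKey(10) & 0xff == 27:  # ESC quits, as in the fragment above
        break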
Example #10
import pyttsx3

import speech_recognition as sr

r = sr.Recognizer()
engineio = pyttsx3.init()

engineio.say('say something.')
engineio.runAndWait()
with sr.Microphone(device_index=6, sample_rate=16000,
                   chunk_size=1024) as source:  # device_index is machine-specific
    audio = r.listen(source)
    try:
        print("You said " + r.recognize_google(audio))
    except sr.UnknownValueError:
        print("Could not understand audio")
    except sr.RequestError as e:
        print("Could not request results; {0}".format(e))
Example #11
def speak(audio):
    engineio.say(audio)
    engineio.runAndWait()
Example #12
import pyttsx3
import time

engineio = pyttsx3.init()
voices = engineio.getProperty('voices')
print(voices)
engineio.setProperty('rate', 100)
engineio.setProperty('voice', voices[1].id)

with open("HandsOn_ModuleB.txt", "rt") as file:
    for line in file:
        for word in line.split():
            engineio.say(word)  # queues each word; nothing is spoken yet

engineio.runAndWait()  # plays the whole queued sequence
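voices[1] only exists if the system has at least two installed voices; a small sketch for inspecting what is available before picking an index (the printed names depend on the OS):

import pyttsx3

engineio = pyttsx3.init()
for index, voice in enumerate(engineio.getProperty('voices')):
    print(index, voice.id, voice.name)  # pick an index that exists on your system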