Example no. 1
0
def reactToTheMistake2( pose, wordsBefore, wordsAfter, pause, factorSpeed = 1.0):
	""" If the keys pressed are due to detection of mistake, the robot reacts.

	Suspends the blinking/idle background behaviours, plays *pose* via
	(asynchronous) Bezier interpolation while speaking *wordsBefore*, shows a
	"happy" eye emotion, speaks *wordsAfter* (blocking), then looks back at
	the book and restores breathing, blinking and idle movement.

	Args:
		pose: object exposing .names/.times/.keys for angleInterpolationBezier.
		wordsBefore: text spoken asynchronously before the eye emotion.
		wordsAfter: text spoken synchronously after the eye emotion.
		pause: currently unused — the corresponding sleep was disabled.
		factorSpeed: multiplier applied to the pose timing (default 1.0).
	"""
	# Stop autonomous behaviours so they do not fight the reaction motion.
	blinkingModeOFF()
	idleMovementModeOFF()
	motionProxy.setBreathEnabled('Arms', False)

	emotionReaction = EYEEMOTIONS(proxy)

	times = changeSpeed(pose.times, factorSpeed)
	# post.* runs asynchronously; the returned task id is intentionally not awaited.
	motionProxy.post.angleInterpolationBezier(pose.names, times, pose.keys)
	story.post.say(wordsBefore)
	emotionReaction.set_emotion("happy")
	turn_on_eye()
	story.say(wordsAfter)
	pitch_angle = 0.3
	LookAtTheBook(pitch_angle)
	motionProxy.setBreathEnabled('Arms', True)
	blinkingModeON("ON")
	idleMovementModeON("ON")
Example no. 2
0
def reactToTheMistake(emotion,
                      pose,
                      wordsBefore,
                      wordsAfter,
                      pause,
                      factorSpeed=1.0):
    """ If the keys pressed are due to detection of mistake, the robot reacts.

    Plays *pose* via asynchronous Bezier interpolation while speaking
    *wordsBefore*, shows the requested eye *emotion*, speaks *wordsAfter*
    (blocking), then looks back at the book and re-enables arm breathing.

    Args:
        emotion: eye-emotion name passed to EYEEMOTIONS.set_emotion.
        pose: object exposing .names/.times/.keys for angleInterpolationBezier.
        wordsBefore: text spoken asynchronously before the eye emotion.
        wordsAfter: text spoken synchronously after the eye emotion.
        pause: currently unused — the corresponding sleep was disabled.
        factorSpeed: multiplier applied to the pose timing (default 1.0).
    """
    motionProxy.setBreathEnabled('Arms', False)

    emotionReaction = EYEEMOTIONS(proxy)

    times = changeSpeed(pose.times, factorSpeed)
    # post.* runs asynchronously; the returned task id is intentionally not awaited.
    motionProxy.post.angleInterpolationBezier(pose.names, times, pose.keys)
    story.post.say(wordsBefore)
    emotionReaction.set_emotion(emotion)
    turn_on_eye()
    story.say(wordsAfter)
    pitch_angle = 0.5
    LookAtTheBook(pitch_angle)
    motionProxy.setBreathEnabled('Arms', True)
def reactWithSpecificPoses(emotion, pose, pause, wordsAfter , factorSpeed = 1.0):
	"""Play *pose* with a growl sound effect and the requested eye emotion.

	Plays *pose* via asynchronous Bezier interpolation, starts the growl
	audio clip, shows the eye *emotion*, then looks back at the book and
	re-enables arm breathing.

	Args:
		emotion: eye-emotion name passed to EYEEMOTIONS.set_emotion.
		pose: object exposing .names/.times/.keys for angleInterpolationBezier.
		pause: currently unused — the corresponding sleep was disabled.
		wordsAfter: currently unused — the corresponding say() was disabled.
		factorSpeed: multiplier applied to the pose timing (default 1.0).
	"""
	motionProxy.setBreathEnabled('Arms', False)

	emotionReaction = EYEEMOTIONS(proxy)

	times = changeSpeed(pose.times, factorSpeed)
	# post.* runs asynchronously; the returned task id is intentionally not awaited.
	motionProxy.post.angleInterpolationBezier(pose.names, times, pose.keys)
	audioReactionProxy.post.playFile("/home/nao/audio/wav/monsterGrowl.wav")
	emotionReaction.set_emotion(emotion)
	pitch_angle = 0.3
	LookAtTheBook(pitch_angle)
	motionProxy.setBreathEnabled('Arms', True)
Example no. 4
0
def reactToBoredness(emotion, pose, pause, wordsAfter , factorSpeed = 1.0):
	"""Play a boredom-reaction pose, then optionally speak *wordsAfter*.

	Plays *pose* via asynchronous Bezier interpolation, looks back at the
	book, re-enables arm breathing, and finally speaks *wordsAfter*.

	Args:
		emotion: currently unused — the eye-emotion call was disabled.
		pose: object exposing .names/.times/.keys for angleInterpolationBezier.
		pause: currently unused — the corresponding sleep was disabled.
		wordsAfter: text spoken (blocking) at the end; skipped when None.
		factorSpeed: multiplier applied to the pose timing (default 1.0).
	"""
	motionProxy.setBreathEnabled('Arms', False)

	emotionReaction = EYEEMOTIONS(proxy)

	times = changeSpeed(pose.times, factorSpeed)
	# post.* runs asynchronously; the returned task id is intentionally not awaited.
	motionProxy.post.angleInterpolationBezier(pose.names, times, pose.keys)
	pitch_angle = 0.3
	LookAtTheBook(pitch_angle)
	motionProxy.setBreathEnabled('Arms', True)
	# Guard restored from the disabled check: don't hand None to say().
	if wordsAfter is not None:
		story.say(wordsAfter)
Example no. 5
0
def blinkingModeON(mode):
    """Start ("ON") or stop ("OFF") the self-rescheduling blink timer.

    On "ON", schedules this function to run again in 5 seconds via a
    threading.Timer stored in the global ``blinkThread``, then blinks the
    eyes once. On "OFF", turns the eyes off and cancels the pending timer.

    BUG FIX: the original rebound ``blinkThread`` to a brand-new, unstarted
    Timer on every call — so on "OFF" it cancelled the new timer while the
    previously scheduled one kept firing. Now the global is only rebound
    when a timer is actually started.
    """
    global blinkThread
    emotionReaction = EYEEMOTIONS(proxy)
    if mode == "ON":
        # Re-arm the timer chain, then blink once now.
        blinkThread = threading.Timer(5, blinkingModeON, [mode])
        blinkThread.start()
        emotionReaction.blink_eyes()

    if mode == "OFF":
        emotionReaction.turn_off_eye()
        # Cancel the timer scheduled by the last "ON" pass.
        blinkThread.cancel()
Example no. 6
0
    def __init__(self, proxy):
        """Keep an eye-emotion controller built on the given NAOqi proxy.

        Args:
            proxy: connection object handed to EYEEMOTIONS.
        """
        # Removed: unused local `tag = "=WordNum"` left over from editing.
        self.recordingReaction = EYEEMOTIONS(proxy)
Example no. 7
0
class RECORDANDTRANSCRIBE:
    """Record a short clip from the microphone and transcribe it with the
    Google Cloud Speech API (v1beta1 ``syncrecognize``).

    NOTE: the original ``recordTheOutput`` body mixed "spaces+tab" and
    pure-space indentation — a hard TabError under Python 3. Indentation is
    normalized to 4 spaces throughout; logic is unchanged.
    """

    def __init__(self, proxy):
        """Keep an eye-emotion controller built on the given NAOqi proxy."""
        # Removed: unused local `tag = "=WordNum"` left over from editing.
        self.recordingReaction = EYEEMOTIONS(proxy)

    def recordTheOutput(self, output):
        """Record 5 seconds of mono 16-bit 44.1 kHz audio to WAV file *output*.

        Shows the "listen" eye cue while recording and remembers the file
        name in ``self.fileName`` for ``transcribeTheOutput``.
        """
        CHUNK = 1024
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 44100
        RECORD_SECONDS = 5
        WAVE_OUTPUT_FILENAME = output
        self.fileName = output

        p = pyaudio.PyAudio()

        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK)

        print("* recording")
        self.recordingReaction.set_emotion("listen")

        frames = []

        for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
            data = stream.read(CHUNK)
            frames.append(data)

        print("* done recording")

        stream.stop_stream()
        stream.close()
        p.terminate()

        # try/finally so the WAV file is closed even if a setter raises.
        wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        try:
            wf.setnchannels(CHANNELS)
            wf.setsampwidth(p.get_sample_size(FORMAT))
            wf.setframerate(RATE)
            wf.writeframes(b''.join(frames))
        finally:
            wf.close()

    def getSpeechService(self):
        """Return an authorized Google Cloud Speech v1beta1 service handle."""
        credentials = GoogleCredentials.get_application_default().create_scoped(
            ['https://www.googleapis.com/auth/cloud-platform'])
        http = httplib2.Http()
        credentials.authorize(http)

        return discovery.build(
            'speech', 'v1beta1', http=http, discoveryServiceUrl=DISCOVERY_URL)

    def transcribeTheOutput(self):
        """Transcribe the audio file recorded by ``recordTheOutput``.

        Reads ``self.fileName``, base64-encodes it, and submits a blocking
        ``syncrecognize`` request. Returns the decoded JSON response.
        """
        with open(self.fileName, 'rb') as speech:
            speech_content = base64.b64encode(speech.read())

        service = self.getSpeechService()
        service_request = service.speech().syncrecognize(
            body={
                'config': {
                    'encoding': 'LINEAR16',  # raw 16-bit signed LE samples
                    'sampleRate': 44100,  # 16 khz
                    'languageCode': 'en-US',  # a BCP-47 language tag
                },
                'audio': {
                    'content': speech_content.decode('UTF-8')
                    }
                })
        response = service_request.execute()
        print(json.dumps(response))

        return response