Example #1
0
def main(responses):
    """Consume streaming speech-recognition responses and publish emotion actions.

    Publishes on 'tocabi/emotion' (Int64):
      * when an utterance starts, the action chosen for "speaking" state;
      * when the final transcript arrives, the emotion index classified from it.
    Stops entirely when the transcript contains "exit" or "quit".

    Args:
        responses: iterable of streaming recognition responses; each response
            exposes a ``results`` list whose entries have ``is_final`` and
            ``alternatives`` (with a ``transcript``) — presumably Google Cloud
            Speech streaming results (TODO: confirm against caller).
    """
    pub = rospy.Publisher('tocabi/emotion', Int64, queue_size=10)
    rospy.init_node('Emotion')
    prev_speaking_flag = -1  # -1 marks "no utterance in progress"
    prev_action = 1
    cur_action = 1
    D = Decision()
    for response in responses:

        if not response.results:
            continue

        result = response.results[0]

        # 1 once the utterance is final, 0 while speech is still in progress.
        # (Fixed: previous `0 if result.is_final == False else 1` anti-idiom.)
        cur_flag = int(result.is_final)
        if prev_speaking_flag != cur_flag and not result.is_final:
            # TODO: Modulize Action Part
            IS_SPEAKING = True
            cur_action = D.decide(IS_SPEAKING, "")
            print("Started Speaking!")
            if cur_action != prev_action:
                prev_action = cur_action

            prev_speaking_flag = cur_flag
            pub.publish(cur_action)

        if not result.alternatives:
            continue

        transcript = result.alternatives[0].transcript

        if result.is_final:
            IS_SPEAKING = False
            # TODO: Modulize Action Part
            cur_action = D.decide(IS_SPEAKING, transcript)
            # Fixed: these were Python 2 `print` statements, a SyntaxError
            # under Python 3 (the rest of the function uses print() calls).
            print("Finished speaking. Recognized sentence: '{}'".format(transcript))
            print("Classified Emotion Index: {}\n".format(cur_action))

            if cur_action != prev_action:
                prev_action = cur_action

            prev_speaking_flag = -1
            pub.publish(cur_action)
            # Exit recognition if any of the transcribed phrases could be one of ["exit", "quit"]
            if re.search(r"\b(exit|quit)\b", transcript, re.I):
                print("Exiting..")
                break
Example #2
0
def main(responses):
    """Consume streaming speech-recognition responses: voice commands + emotion.

    Publishes on:
      * 'tocabi/emotion' (Int64)           — classified emotion action index;
      * 'overlay_command' (String)         — overlay control keywords heard in
                                             the final transcript;
      * '/tocabi/dg/avatar/pose_calibration_flag' (Int8) — calibration step 1-5;
      * '/tocabi/dg/upperbodymodecommand' (Float32)      — 3 on "stop".
    Stops entirely when the transcript contains "exit" or "quit".

    Args:
        responses: iterable of streaming recognition responses; each response
            exposes a ``results`` list whose entries have ``is_final`` and
            ``alternatives`` (with a ``transcript``) — presumably Google Cloud
            Speech streaming results (TODO: confirm against caller).
    """
    pub = rospy.Publisher('tocabi/emotion', Int64, queue_size=10)
    overlay_control_pub = rospy.Publisher('overlay_command',
                                          String,
                                          queue_size=5)
    pose_calibration_pub = rospy.Publisher(
        '/tocabi/dg/avatar/pose_calibration_flag', Int8, queue_size=5)
    retargeting_sync_pub = rospy.Publisher('/tocabi/dg/upperbodymodecommand',
                                           Float32,
                                           queue_size=5)

    rospy.init_node('Emotion')
    prev_speaking_flag = -1  # -1 marks "no utterance in progress"
    prev_action = 1
    cur_action = 1
    D = Decision()
    for response in responses:

        if not response.results:
            continue

        result = response.results[0]

        # 1 once the utterance is final, 0 while speech is still in progress.
        # (Fixed: previous `0 if result.is_final == False else 1` anti-idiom.)
        cur_flag = int(result.is_final)
        if prev_speaking_flag != cur_flag and not result.is_final:
            # TODO: Modulize Action Part
            IS_SPEAKING = True
            cur_action = D.decide(IS_SPEAKING, "")
            print("Started Speaking!")
            if cur_action != prev_action:
                prev_action = cur_action

            prev_speaking_flag = cur_flag
            pub.publish(cur_action)

        if not result.alternatives:
            continue

        # Lower-cased so the keyword matching below is case-insensitive.
        transcript = result.alternatives[0].transcript.lower()

        if result.is_final:

            # NOTE(review): these are substring tests, so e.g. "background"
            # triggers "back" and "upload" triggers "up" — confirm intended.
            if "close" in transcript:
                overlay_control_pub.publish("close")
            if "open" in transcript:
                overlay_control_pub.publish("open")
            if "right" in transcript:
                overlay_control_pub.publish("right")
            if "left" in transcript:
                overlay_control_pub.publish("left")
            if "up" in transcript:
                overlay_control_pub.publish("up")
            if "down" in transcript:
                overlay_control_pub.publish("down")
            if "front" in transcript:
                overlay_control_pub.publish("front")
            if "back" in transcript:
                overlay_control_pub.publish("back")
            if "opacity" in transcript:
                # Whole transcript forwarded; the receiver presumably parses
                # the opacity value out of it.
                overlay_control_pub.publish(transcript)

            # Pose-calibration step: first matching digit wins. Substring
            # match, so "10" would trigger step 1 — TODO confirm acceptable.
            if "1" in transcript:
                pose_calibration_pub.publish(1)
            elif "2" in transcript:
                pose_calibration_pub.publish(2)
            elif "3" in transcript:
                pose_calibration_pub.publish(3)
            elif "4" in transcript:
                pose_calibration_pub.publish(4)
            elif "5" in transcript:
                pose_calibration_pub.publish(5)

            if "stop" in transcript:
                retargeting_sync_pub.publish(3)

            IS_SPEAKING = False
            # TODO: Modulize Action Part
            cur_action = D.decide(IS_SPEAKING, transcript)
            # Fixed: these were Python 2 `print` statements, a SyntaxError
            # under Python 3 (the rest of the function uses print() calls).
            print("Finished speaking. Recognized sentence: '{}'".format(transcript))
            print("Classified Emotion Index: {}\n".format(cur_action))

            if cur_action != prev_action:
                prev_action = cur_action

            prev_speaking_flag = -1
            pub.publish(cur_action)
            # Exit recognition if any of the transcribed phrases could be one of ["exit", "quit"]
            if re.search(r"\b(exit|quit)\b", transcript, re.I):
                print("Exiting..")
                break