Beispiel #1
0
 def test_chat(self):
     """Publish test phrases to the chatbot and record the interaction.

     For each phrase: publish it on '/chatbot_speech' while capturing
     camera and screen video, wait for the chatbot response on
     '/chatbot_responses', then rename the recordings (and the single
     TTS wav output) to include the sanitized request/response text.
     """
     import re
     # Raw string: '\W' is an invalid escape in a plain string literal
     # (DeprecationWarning today, an error in future Python versions).
     # The pattern strips every non-word character plus '_' so the text
     # is safe to embed in a filename.
     r = re.compile(r'[\W_]+')
     pub, msg_class = rostopic.create_publisher('/chatbot_speech',
                                                'chatbot/ChatMessage', True)
     words = ['Hi', 'How are you', 'What\'s your name']
     duration = 5
     queue = MessageQueue()
     queue.subscribe('/chatbot_responses', String)
     for word in words:
         cam_output = '%s/cam_%s.avi' % (self.output_video, r.sub('', word))
         screen_output = '%s/screen_%s.avi' % (self.output_video,
                                               r.sub('', word))
         # Record both feeds for the fixed duration while the message
         # is published inside the capture window.
         with capture_camera(cam_output, duration):
             with capture_screen(screen_output, duration):
                 pub.publish(msg_class(word, 100))
         msg = queue.get()
         # Rename recordings so the filename carries request + response.
         cam_output_new = '%s/cam_%s_%s.avi' % (
             self.output_video, r.sub('', word), r.sub('', msg.data))
         shutil.move(cam_output, cam_output_new)
         screen_output_new = '%s/screen_%s_%s.avi' % (
             self.output_video, r.sub('', word), r.sub('', msg.data))
         shutil.move(screen_output, screen_output_new)
         # Exactly one TTS wav file is expected per exchange.
         files = glob.glob('%s/*.wav' % self.tts_output)
         self.assertEqual(len(files), 1)
         shutil.move(files[0],
                     '%s/%s.wav' % (self.output_audio, r.sub('', msg.data)))
Beispiel #2
0
 def test_chat(self):
     """Publish test phrases to the chatbot and record the interaction.

     For each phrase: publish it on '/chatbot_speech' while capturing
     camera and screen video, wait for the chatbot response on
     '/chatbot_responses', then rename the recordings (and the single
     TTS wav output) to include the sanitized request/response text.
     """
     import re
     # Raw string: '\W' is an invalid escape in a plain string literal
     # (DeprecationWarning today, an error in future Python versions).
     # The pattern strips every non-word character plus '_' so the text
     # is safe to embed in a filename.
     r = re.compile(r'[\W_]+')
     pub, msg_class = rostopic.create_publisher(
         '/chatbot_speech', 'chatbot/ChatMessage', True)
     words = ['Hi', 'How are you', 'What\'s your name']
     duration = 5
     queue = MessageQueue()
     queue.subscribe('/chatbot_responses', String)
     for word in words:
         cam_output = '%s/cam_%s.avi' % (
             self.output_video, r.sub('', word))
         screen_output = '%s/screen_%s.avi' % (
             self.output_video, r.sub('', word))
         # Record both feeds for the fixed duration while the message
         # is published inside the capture window.
         with capture_camera(cam_output, duration):
             with capture_screen(screen_output, duration):
                 pub.publish(msg_class(word, 100))
         msg = queue.get()
         # Rename recordings so the filename carries request + response.
         cam_output_new = '%s/cam_%s_%s.avi' % (
             self.output_video, r.sub('', word), r.sub('', msg.data))
         shutil.move(cam_output, cam_output_new)
         screen_output_new = '%s/screen_%s_%s.avi' % (
             self.output_video, r.sub('', word), r.sub('', msg.data))
         shutil.move(screen_output, screen_output_new)
         # Exactly one TTS wav file is expected per exchange.
         files = glob.glob('%s/*.wav' % self.tts_output)
         self.assertEqual(len(files), 1)
         shutil.move(
             files[0], '%s/%s.wav' % (self.output_audio, r.sub('', msg.data)))
Beispiel #3
0
    def test(self):
        """Check a new face arrival triggers one of the configured emotions.

        Turns the behavior tree on, replays a prerecorded 'face' rosbag
        while capturing camera and screen video, and asserts that the
        emotion the blender API was asked to set is among the emotions
        listed under 'new_arrival_emotions' in the behavior config.
        """
        configured = self.behavior_config.get('emotion', 'new_arrival_emotions')
        expected_emotions = [name.strip() for name in configured.split(',')]

        publisher, message_cls = rostopic.create_publisher(
            '/behavior_switch', 'std_msgs/String', True)
        publisher.publish(message_cls('btree_on'))
        face_bag = get_rosbag_file('face')

        # Listen for the emotion command issued in response to the face.
        listener = create_msg_listener(
            '/blender_api/set_emotion_state', EmotionState, 10)
        listener.start()

        cam_file = '%s/cam_new_arrival_emotion.avi' % self.output_video
        screen_file = '%s/screen_new_arrival_emotion.avi' % self.output_video
        record_seconds = 5
        # Replay the bag while both recordings are running.
        with capture_camera(cam_file, record_seconds), \
                capture_screen(screen_file, record_seconds):
            playback = play_rosbag([face_bag, '-q'])
        playback.join()
        emotion_msg = listener.join()

        self.assertIn(emotion_msg.name, expected_emotions)
Beispiel #4
0
    def test(self):
        """Check a new face arrival triggers one of the configured emotions.

        Enables the behavior tree, plays back the 'face' rosbag under
        camera/screen capture, and asserts the emotion sent to
        '/blender_api/set_emotion_state' is one of the comma-separated
        'new_arrival_emotions' from the behavior config.
        """
        raw_emotions = self.behavior_config.get('emotion',
                                                'new_arrival_emotions')
        allowed = [item.strip() for item in raw_emotions.split(',')]

        switch_pub, switch_cls = rostopic.create_publisher(
            '/behavior_switch', 'std_msgs/String', True)
        switch_pub.publish(switch_cls('btree_on'))
        bag_path = get_rosbag_file('face')

        # Capture the emotion command triggered by the replayed face.
        emotion_listener = create_msg_listener(
            '/blender_api/set_emotion_state', EmotionState, 10)
        emotion_listener.start()

        capture_seconds = 5
        video_prefix = self.output_video
        cam_path = '%s/cam_new_arrival_emotion.avi' % video_prefix
        screen_path = '%s/screen_new_arrival_emotion.avi' % video_prefix
        with capture_camera(cam_path, capture_seconds):
            with capture_screen(screen_path, capture_seconds):
                replay = play_rosbag([bag_path, '-q'])
        replay.join()
        observed = emotion_listener.join()

        self.assertIn(observed.name, allowed)