示例#1
0
    def test_face_interaction1(self):
        """A new_face event should trigger the 'happy' emotion and a
        positive gesture.

        Publishes the available-emotion list and a face event, then
        checks the behavior tree emitted 'happy' plus at least one
        gesture from the configured positive_gestures.
        """
        positive_gestures = [
            x.strip() for x in self.behavior_config.get(
                    'gesture', 'positive_gestures').split(',')]

        pub, msg_class = rostopic.create_publisher(
            '/blender_api/available_emotion_states',
            'blender_api_msgs/AvailableEmotionStates', True)
        pub.publish(msg_class(['happy']))

        queue = MessageQueue()
        queue.subscribe('/blender_api/set_emotion_state', EmotionState)
        queue.subscribe('/blender_api/set_gesture', SetGesture)
        time.sleep(1)  # let subscriptions settle

        self.behavior_switch('btree_on')
        pub, msg_class = rostopic.create_publisher(
            '/camera/face_event', 'pi_face_tracker/FaceEvent', True)
        pub.publish(msg_class('new_face', 1))

        time.sleep(20)  # give the behavior tree time to react
        self.assertTrue(not queue.queue.empty())
        emotions = []
        gestures = []
        while not queue.queue.empty():
            msg = queue.get(timeout=1)
            if isinstance(msg, EmotionState):
                emotions.append(msg.name)
            elif isinstance(msg, SetGesture):
                gestures.append(msg.name)

        # BUG FIX: '|' (union) always contains 'happy' and is therefore
        # always truthy; use '&' (intersection) so the assertion actually
        # checks that 'happy' was emitted.
        self.assertTrue(set(['happy']) & set(emotions))
        self.assertTrue(set(positive_gestures) & set(gestures))
示例#2
0
    def test_face_interaction1(self):
        """Verify the btree reacts to a new face with 'happy' and a
        configured positive gesture."""
        positive_gestures = [
            x.strip() for x in self.behavior_config.get(
                'gesture', 'positive_gestures').split(',')
        ]

        pub, msg_class = rostopic.create_publisher(
            '/blender_api/available_emotion_states',
            'blender_api_msgs/AvailableEmotionStates', True)
        pub.publish(msg_class(['happy']))

        queue = MessageQueue()
        queue.subscribe('/blender_api/set_emotion_state', EmotionState)
        queue.subscribe('/blender_api/set_gesture', SetGesture)
        time.sleep(1)  # let subscriptions settle

        self.behavior_switch('btree_on')
        pub, msg_class = rostopic.create_publisher(
            '/camera/face_event', 'pi_face_tracker/FaceEvent', True)
        pub.publish(msg_class('new_face', 1))

        time.sleep(20)  # give the behavior tree time to react
        self.assertTrue(not queue.queue.empty())
        emotions = []
        gestures = []
        while not queue.queue.empty():
            msg = queue.get(timeout=1)
            if isinstance(msg, EmotionState):
                emotions.append(msg.name)
            elif isinstance(msg, SetGesture):
                gestures.append(msg.name)

        # BUG FIX: the union '|' is always non-empty (it contains
        # 'happy'), making the assert vacuous; intersection '&' checks
        # that 'happy' was actually published.
        self.assertTrue(set(['happy']) & set(emotions))
        self.assertTrue(set(positive_gestures) & set(gestures))
示例#3
0
    def test(self):
        """A recorded face event should trigger one of the configured
        new-arrival emotions."""
        configured = self.behavior_config.get(
            'emotion', 'new_arrival_emotions')
        new_arrival_emotions = [name.strip()
                                for name in configured.split(',')]
        pub, msg_class = rostopic.create_publisher(
            '/behavior_switch', 'std_msgs/String', True)
        pub.publish(msg_class('btree_on'))
        bag_file = get_rosbag_file('face')

        # Listen for the emotion the behavior tree sets in response.
        emo_msg_listener = create_msg_listener(
            '/blender_api/set_emotion_state', EmotionState, 10)
        emo_msg_listener.start()
        cam_output = '%s/cam_new_arrival_emotion.avi' % self.output_video
        screen_output = ('%s/screen_new_arrival_emotion.avi'
                         % self.output_video)
        duration = 5
        with capture_camera(cam_output, duration):
            with capture_screen(screen_output, duration):
                job = play_rosbag([bag_file, '-q'])
        job.join()
        emo_msg = emo_msg_listener.join()

        self.assertIn(emo_msg.name, new_arrival_emotions)
示例#4
0
 def behavior_switch(self, msg):
     """Publish *msg* on /behavior_switch once the target node is up."""
     # Poll until the node under test is actually running.
     while not self.runner.is_node_running(self.node):
         time.sleep(0.01)
     publisher, string_cls = rostopic.create_publisher(
         '/behavior_switch', 'std_msgs/String', True)
     publisher.publish(string_cls(msg))
示例#5
0
 def behavior_switch(self, msg):
     """Wait for the node to come up, then publish *msg* to the
     behavior-switch topic."""
     while not self.runner.is_node_running(self.node):
         time.sleep(0.01)  # busy-wait until the node is running
     topic = '/behavior_switch'
     pub, msg_class = rostopic.create_publisher(
         topic, 'std_msgs/String', True)
     pub.publish(msg_class(msg))
示例#6
0
 def test_chat(self):
     """Send chat phrases, record camera and screen while the bot
     answers, and archive the recordings and TTS audio under
     sanitized file names."""
     import re
     # BUG FIX: raw string so '\W' is a regex character class rather
     # than an invalid string escape (a warning/error on Python 3).
     # The pattern strips every non-word character for file names.
     r = re.compile(r'[\W_]+')
     pub, msg_class = rostopic.create_publisher(
         '/chatbot_speech', 'chatbot/ChatMessage', True)
     words = ['Hi', 'How are you', 'What\'s your name']
     duration = 5
     queue = MessageQueue()
     queue.subscribe('/chatbot_responses', String)
     for word in words:
         cam_output = '%s/cam_%s.avi' % (
             self.output_video, r.sub('', word))
         screen_output = '%s/screen_%s.avi' % (
             self.output_video, r.sub('', word))
         with capture_camera(cam_output, duration):
             with capture_screen(screen_output, duration):
                 pub.publish(msg_class(word, 100))
         msg = queue.get()
         # Rename recordings to include the sanitized response text.
         cam_output_new = '%s/cam_%s_%s.avi' % (
             self.output_video, r.sub('', word), r.sub('', msg.data))
         shutil.move(cam_output, cam_output_new)
         screen_output_new = '%s/screen_%s_%s.avi' % (
             self.output_video, r.sub('', word), r.sub('', msg.data))
         shutil.move(screen_output, screen_output_new)
         files = glob.glob('%s/*.wav' % self.tts_output)
         self.assertEqual(len(files), 1)  # exactly one TTS file expected
         shutil.move(
             files[0], '%s/%s.wav' % (self.output_audio, r.sub('', msg.data)))
示例#7
0
 def test_chat(self):
     """Drive the chatbot with a few phrases, recording camera/screen
     for each and saving the generated TTS audio."""
     import re
     # BUG FIX: use a raw string for the regex; '[\W_]+' relies on an
     # invalid escape sequence under Python 3.
     r = re.compile(r'[\W_]+')
     pub, msg_class = rostopic.create_publisher('/chatbot_speech',
                                                'chatbot/ChatMessage', True)
     words = ['Hi', 'How are you', 'What\'s your name']
     duration = 5
     queue = MessageQueue()
     queue.subscribe('/chatbot_responses', String)
     for word in words:
         cam_output = '%s/cam_%s.avi' % (self.output_video, r.sub('', word))
         screen_output = '%s/screen_%s.avi' % (self.output_video,
                                               r.sub('', word))
         with capture_camera(cam_output, duration):
             with capture_screen(screen_output, duration):
                 pub.publish(msg_class(word, 100))
         msg = queue.get()
         # Append the sanitized response text to each recording's name.
         cam_output_new = '%s/cam_%s_%s.avi' % (
             self.output_video, r.sub('', word), r.sub('', msg.data))
         shutil.move(cam_output, cam_output_new)
         screen_output_new = '%s/screen_%s_%s.avi' % (
             self.output_video, r.sub('', word), r.sub('', msg.data))
         shutil.move(screen_output, screen_output_new)
         files = glob.glob('%s/*.wav' % self.tts_output)
         self.assertEqual(len(files), 1)  # exactly one TTS file expected
         shutil.move(files[0],
                     '%s/%s.wav' % (self.output_audio, r.sub('', msg.data)))
示例#8
0
 def check_msg(self, msg_str, expects):
     """Publish a yaml-encoded pau message and assert the combined
     joint state matches *expects* for 'position' and 'name'."""
     pub, msg_class = rostopic.create_publisher(
         '/blender_api/get_pau', 'pau2motors/pau', True)
     msg = msg_class()
     # safe_load: plain yaml.load without a Loader is deprecated and
     # unsafe; safe_load is sufficient for plain message yaml.
     fill_message_args(msg, yaml.safe_load(msg_str))
     pub.publish(msg)
     msg = wait_for_message('/joint_states_combined', JointState, 2)
     for key in ['position', 'name']:
         self.assertListEqual(list(getattr(msg, key)), expects[key])
     pub.unregister()
示例#9
0
 def check_msg(self, msg_str, expects):
     """Round-trip a pau message (from yaml) through the motor pipeline
     and verify the resulting joint state fields."""
     pub, msg_class = rostopic.create_publisher('/blender_api/get_pau',
                                                'pau2motors/pau', True)
     msg = msg_class()
     # BUG FIX: yaml.load without an explicit Loader is deprecated and
     # unsafe; safe_load handles plain message yaml.
     fill_message_args(msg, yaml.safe_load(msg_str))
     pub.publish(msg)
     msg = wait_for_message('/joint_states_combined', JointState, 2)
     for key in ['position', 'name']:
         self.assertListEqual(list(getattr(msg, key)), expects[key])
     pub.unregister()
示例#10
0
 def test_btree_on(self):
     """With the behavior tree off, a face event must emit no gestures."""
     wait_secs = 30
     gesture_queue = MessageQueue()
     gesture_queue.subscribe('/blender_api/set_gesture', SetGesture)
     time.sleep(1)  # let the subscription settle
     self.behavior_switch('btree_off')
     pub, msg_class = rostopic.create_publisher(
         '/camera/face_event', 'pi_face_tracker/FaceEvent', True)
     pub.publish(msg_class('new_face', 1))
     # Wait long enough for any (unwanted) reaction to show up.
     time.sleep(wait_secs)
     self.assertTrue(gesture_queue.queue.empty())
def main():
    """Publish a yaml-defined message to a topic and optionally print a
    response message.

    Command line: -f yaml file, -t topic, optional --topic-type,
    -r response topic and -w wait timeout.
    """
    argv = sys.argv

    from optparse import OptionParser
    parser = OptionParser(usage="usage: %prog list [/namespace]", prog=NAME)
    parser.add_option("-f",
                      dest="filename",
                      default=None,
                      help="Name of yaml file name of options")
    parser.add_option("-w",
                      dest="timeout",
                      default=3,
                      type="float",
                      help="time to wait for server and response")
    parser.add_option("-t", dest="topicname", default=None)
    parser.add_option("-r", dest="responsetopic", default=None)
    parser.add_option("--topic-type", dest="topictype", default=None)

    (options, args) = parser.parse_args(argv)
    if not options.filename or not options.topicname:
        rospy.logfatal("No yaml file name given")
        sys.exit(-1)

    topictype, topicname, fn = rostopic.get_topic_type(options.topicname,
                                                       False)

    if not topictype:
        # BUG FIX: rospy has no 'logerror'; the correct API is 'logerr'.
        rospy.logerr("Topic not currently subscribed %s" % topicname)

    if not options.topictype and not topictype:
        exit(-1)

    # BUG FIX: 'is not' compares object identity; use '!=' to compare
    # the type-name strings by value.
    if options.topictype and topictype and options.topictype != topictype:
        rospy.logerr(
            "Topic type does not agree: topic %s, given type: %s, detected type: %s"
            % (topicname, options.topictype, topictype))
        exit(-1)

    if options.topictype:
        topictype = options.topictype

    pub, msg_class = rostopic.create_publisher(options.topicname, topictype,
                                               False)

    rostopic.stdin_publish(pub, msg_class, 1, True, options.filename, True)

    if options.responsetopic:
        subtype, responsetopic, fn = rostopic.get_topic_class(
            options.responsetopic, False)
        msg = rospy.wait_for_message(options.responsetopic, subtype,
                                     options.timeout)
        # BUG FIX: print inside the branch -- 'msg' is only bound when a
        # response topic was requested; outside it raised NameError.
        print(msg)
示例#12
0
 def test_btree_on(self):
     """A new_face event should produce no gestures while the behavior
     tree is switched off."""
     queue = MessageQueue()
     queue.subscribe('/blender_api/set_gesture', SetGesture)
     time.sleep(1)
     self.behavior_switch('btree_off')
     face_pub, face_msg = rostopic.create_publisher(
         '/camera/face_event', 'pi_face_tracker/FaceEvent', True)
     face_pub.publish(face_msg('new_face', 1))
     time.sleep(30)  # allow time for any unwanted reaction
     self.assertTrue(queue.queue.empty())
def main():
    """Publish a yaml-defined message to a topic; optionally wait for
    and print a response message (-r/-w options)."""
    argv = sys.argv

    from optparse import OptionParser
    parser = OptionParser(usage="usage: %prog list [/namespace]", prog=NAME)
    parser.add_option("-f",
                      dest="filename", default=None,
                      help="Name of yaml file name of options")
    parser.add_option("-w",
                      dest="timeout", default=3, type="float",
                      help="time to wait for server and response")
    parser.add_option("-t",
                      dest="topicname", default=None)
    parser.add_option("-r",
                      dest="responsetopic", default=None)
    parser.add_option("--topic-type",
                      dest="topictype", default=None)

    (options, args) = parser.parse_args(argv)
    if not options.filename or not options.topicname:
        rospy.logfatal("No yaml file name given")
        sys.exit(-1)

    topictype, topicname, fn = rostopic.get_topic_type(options.topicname,
                                                       False)

    if not topictype:
        # BUG FIX: 'rospy.logerror' does not exist; use 'rospy.logerr'.
        rospy.logerr("Topic not currently subscribed %s" % topicname)

    if not options.topictype and not topictype:
        exit(-1)

    # BUG FIX: '!=' instead of 'is not' -- string comparison by value,
    # not object identity.
    if options.topictype and topictype and options.topictype != topictype:
        rospy.logerr(
            "Topic type does not agree: topic %s, given type: %s, detected type: %s"
            % (topicname, options.topictype, topictype))
        exit(-1)

    if options.topictype:
        topictype = options.topictype

    pub, msg_class = rostopic.create_publisher(options.topicname, topictype,
                                               False)

    rostopic.stdin_publish(pub, msg_class, 1, True, options.filename, True)

    if options.responsetopic:
        subtype, responsetopic, fn = rostopic.get_topic_class(
            options.responsetopic, False)
        msg = rospy.wait_for_message(options.responsetopic, subtype,
                                     options.timeout)
        # BUG FIX: 'msg' only exists in this branch; printing outside it
        # raised NameError when -r was not given.
        print(msg)
示例#14
0
 def test_long_viseme(self):
     """Stream a long recorded viseme sequence and check blender survives."""
     filename = get_rosbag_file("long_viseme")
     pub, msg_class = rostopic.create_publisher(
         "/blender_api/queue_viseme", "blender_api_msgs/Viseme", True)
     bag = rosbag.Bag(filename)
     duration = bag.get_end_time() - bag.get_start_time()
     fps = bag.get_message_count() / float(duration)
     # Replay ten times faster than the recorded message rate.
     wait = 1.0 / fps / 10
     for topic, msg, _ in rosbag_msg_generator(filename):
         pub.publish(msg)
         time.sleep(wait)
     # Blender should still be registered and its process alive.
     self.assertIn("/blender_api", rosnode.get_node_names())
     active = self.runner.pm.get_active_names()
     self.assertTrue(any("blender_api" in name for name in active))
示例#15
0
    def test_face_tf(self):
        pub, msg_class = rostopic.create_publisher('/camera/face_locations',
                                                   'pi_face_tracker/Faces',
                                                   True)
        msg = msg_class()
        msg_str = """faces:
- id: 1
  point:
    x: 0.76837966452
    y: -0.132697071241
    z: 0.0561996095957
  attention: 0.990000009537
- id: 2
  point:
    x: 0.807249662369
    y: -0.00428759906469
    z: 0.035407830253
  attention: 0.990000009537"""
        fill_message_args(msg, [yaml.load(msg_str)])
        pub.publish(msg)
        msg = wait_for_message('/tf', tfMessage, 5)
        expect_str = """
transforms:
  -
    header:
      seq: 0
      stamp:
        nsecs: 752310037
      frame_id: camera
    child_frame_id: face_base1
    transform:
      translation:
        x: 0.76837966452
        y: -0.132697071241
        z: 0.0561996095957
      rotation:
        x: 0.0
        y: 0.0
        z: 0.0
        w: 1.0
"""
        expect = tfMessage()
        fill_message_args(expect, [yaml.load(expect_str)])
        #self.assertEqual(msg.transforms[0].child_frame_id,
        #    expect.transforms[0].child_frame_id)
        self.assertEqual(msg.transforms[0].transform,
                         expect.transforms[0].transform)
        pub.unregister()
    def test_left_pololu_message(self):
        """Flood the left pololu board with eyelid commands and drain
        the serial replies."""
        reader = PololuSerialReader('%s/left_pololu1' % CWD)
        topic = '/han/left/command'
        pub, msg_class = rostopic.create_publisher(
            topic, 'ros_pololu/MotorCommand', True)

        time.sleep(3)  # wait for pololu to be connected

        loop = 100
        for i in range(loop):
            pub.publish(msg_class('EyeLid_L_L', 0.1, 2.0, 2.0))
            pub.publish(msg_class('EyeLid_U_L', 0.1, 2.0, 2.0))

        # 2 commands per iteration, 3 serial messages each -- presumably;
        # verify against PololuSerialReader if the protocol changes.
        for i in range(3*2*loop):
            # BUG FIX: parenthesized print works on Python 2 and 3; also
            # rename 'id' to avoid shadowing the builtin.
            motor_id, cmd, value = reader.read()
            print("set motor %s %s value %s" % (motor_id, cmd, value))
    def test_middle_pololu_message(self):
        """Flood the middle pololu board with brow commands and drain
        the serial replies."""
        reader = PololuSerialReader('%s/middle_pololu1' % CWD)
        topic = '/han/middle/command'
        pub, msg_class = rostopic.create_publisher(
            topic, 'ros_pololu/MotorCommand', True)

        time.sleep(3) # wait for pololu to be connected

        loop = 100
        for i in range(loop):
            pub.publish(msg_class('Brow_Inner_L', 0.1, 2.0, 2.0)) #1
            pub.publish(msg_class('Inner_Brow_R_UD', 0.1, 2.0, 2.0)) #4

        for i in range(3*2*loop):
            # BUG FIX: print as a function (valid on Python 2 and 3) and
            # avoid shadowing the builtin 'id'.
            motor_id, cmd, value = reader.read()
            print("set motor %s %s value %s" % (motor_id, cmd, value))
示例#18
0
    def test_face_tf(self):
        pub, msg_class = rostopic.create_publisher(
            '/camera/face_locations', 'pi_face_tracker/Faces', True)
        msg = msg_class()
        msg_str = """faces:
- id: 1
  point:
    x: 0.76837966452
    y: -0.132697071241
    z: 0.0561996095957
  attention: 0.990000009537
- id: 2
  point:
    x: 0.807249662369
    y: -0.00428759906469
    z: 0.035407830253
  attention: 0.990000009537"""
        fill_message_args(msg, [yaml.load(msg_str)])
        pub.publish(msg)
        msg = wait_for_message('/tf', tfMessage, 5)
        expect_str = """
transforms:
  -
    header:
      seq: 0
      stamp:
        nsecs: 752310037
      frame_id: camera
    child_frame_id: face_base1
    transform:
      translation:
        x: 0.76837966452
        y: -0.132697071241
        z: 0.0561996095957
      rotation:
        x: 0.0
        y: 0.0
        z: 0.0
        w: 1.0
"""
        expect = tfMessage()
        fill_message_args(expect, [yaml.load(expect_str)])
        #self.assertEqual(msg.transforms[0].child_frame_id,
        #    expect.transforms[0].child_frame_id)
        self.assertEqual(msg.transforms[0].transform,
            expect.transforms[0].transform)
        pub.unregister()
示例#19
0
 def test_emotion_state(self):
     """Record a short clip of the first available emotion state and
     concatenate the result."""
     cmd = "rostopic echo -n1 /blender_api/available_emotion_states"
     available_emotions = parse_msg(run_shell_cmd(cmd, True))[:1]
     pub, msg_class = rostopic.create_publisher(
         "/blender_api/set_emotion_state", "blender_api_msgs/EmotionState", True
     )
     timeout = 2
     videos = []
     for emotion in available_emotions:
         video = "%s/emotion-%s.avi" % (self.output_dir, emotion)
         with capture_screen(video, timeout):
             pub.publish(msg_class(emotion, 1, Duration(1, 0)))
         add_text_to_video(video)
         videos.append(video)
     concatenate_videos(videos, "%s/emotions.avi" % self.output_dir, True)
     pub.unregister()
示例#20
0
 def test_gesture(self):
     """Record clips for the first two gestures (skipping 'all') and
     join them into one video."""
     gestures = parse_msg(run_shell_cmd(
         "rostopic echo -n1 /blender_api/available_gestures", True))[:2]
     pub, msg_class = rostopic.create_publisher(
         "/blender_api/set_gesture", "blender_api_msgs/SetGesture", True)
     clip_secs = 2
     clips = []
     for gesture in gestures:
         if gesture == "all":
             continue  # 'all' is a meta-gesture, not a single clip
         clip = "%s/gesture-%s.avi" % (self.output_dir, gesture)
         with capture_screen(clip, clip_secs):
             pub.publish(msg_class(gesture, 1, 1, 1))
         add_text_to_video(clip)
         clips.append(clip)
     concatenate_videos(clips, "%s/gestures.avi" % self.output_dir, True)
     pub.unregister()
示例#21
0
 def test_viseme(self):
     """Record a clip for the first available viseme (skipping any
     'old' variants) and join the clips."""
     visemes = parse_msg(run_shell_cmd(
         "rostopic echo -n1 /blender_api/available_visemes", True))[:1]
     pub, msg_class = rostopic.create_publisher(
         "/blender_api/queue_viseme", "blender_api_msgs/Viseme", True)
     clip_secs = 2
     clips = []
     for viseme in visemes:
         if "old" in viseme:
             continue
         clip = "%s/viseme-%s.avi" % (self.output_dir, viseme)
         with capture_screen(clip, clip_secs):
             pub.publish(msg_class(
                 viseme, Duration(0, 0), Duration(0, 5 * 1e8), 0.1, 0.8, 1))
         add_text_to_video(clip)
         clips.append(clip)
     concatenate_videos(clips, "%s/viseme.avi" % self.output_dir, True)
     pub.unregister()
示例#22
0
    def test_middle_pololu_message(self):
        """Publish many brow motor commands to the middle pololu board
        and drain all serial replies."""
        reader = PololuSerialReader('%s/middle_pololu1' % CWD)
        topic = '/han/middle/command'
        pub, msg_class = rostopic.create_publisher(topic,
                                                   'ros_pololu/MotorCommand',
                                                   True)

        time.sleep(3)  # wait for pololu to be connected

        loop = 100
        for i in range(loop):
            pub.publish(msg_class('Brow_Inner_L', 0.1, 2.0, 2.0))  #1
            pub.publish(msg_class('Inner_Brow_R_UD', 0.1, 2.0, 2.0))  #4

        for i in range(3 * 2 * loop):
            # BUG FIX: parenthesized print works under Python 2 and 3;
            # 'motor_id' avoids shadowing the builtin 'id'.
            motor_id, cmd, value = reader.read()
            print("set motor %s %s value %s" % (motor_id, cmd, value))
示例#23
0
    def test_left_pololu_message(self):
        """Publish many eyelid motor commands to the left pololu board
        and drain all serial replies."""
        reader = PololuSerialReader('%s/left_pololu1' % CWD)
        topic = '/han/left/command'
        pub, msg_class = rostopic.create_publisher(topic,
                                                   'ros_pololu/MotorCommand',
                                                   True)

        time.sleep(3)  # wait for pololu to be connected

        loop = 100
        for i in range(loop):
            pub.publish(msg_class('EyeLid_L_L', 0.1, 2.0, 2.0))
            pub.publish(msg_class('EyeLid_U_L', 0.1, 2.0, 2.0))

        for i in range(3 * 2 * loop):
            # BUG FIX: print-as-function is valid on Python 2 and 3;
            # 'motor_id' avoids shadowing the builtin 'id'.
            motor_id, cmd, value = reader.read()
            print("set motor %s %s value %s" % (motor_id, cmd, value))
示例#24
0
 def test_gesture(self):
     """Capture one short clip per available gesture and concatenate
     them into a single video."""
     available_gestures = parse_msg(run_shell_cmd(
         'rostopic echo -n1 /blender_api/available_gestures', True))
     pub, msg_class = rostopic.create_publisher(
         '/blender_api/set_gesture', 'blender_api_msgs/SetGesture', True)
     record_secs = 2
     clips = []
     for gesture in available_gestures:
         if gesture == 'all':
             continue  # skip the aggregate pseudo-gesture
         clip = '%s/gesture-%s.avi' % (self.output_dir, gesture)
         with capture_screen(clip, record_secs):
             pub.publish(msg_class(gesture, 1, 1, 1))
         add_text_to_video(clip)
         clips.append(clip)
     ofile = '%s/gestures.avi' % self.output_dir
     concatenate_videos(clips, ofile, True)
     pub.unregister()
示例#25
0
 def test_emotion_state(self):
     """Capture one short clip per available emotion state and join
     them into a single video."""
     available_emotions = parse_msg(run_shell_cmd(
         'rostopic echo -n1 /blender_api/available_emotion_states', True))
     pub, msg_class = rostopic.create_publisher(
         '/blender_api/set_emotion_state', 'blender_api_msgs/EmotionState',
         True)
     record_secs = 2
     clips = []
     for emotion in available_emotions:
         clip = '%s/emotion-%s.avi' % (self.output_dir, emotion)
         with capture_screen(clip, record_secs):
             pub.publish(msg_class(emotion, 1, Duration(1, 0)))
         add_text_to_video(clip)
         clips.append(clip)
     ofile = '%s/emotions.avi' % self.output_dir
     concatenate_videos(clips, ofile, True)
     pub.unregister()
示例#26
0
 def test_viseme(self):
     """Capture one short clip per available viseme (skipping 'old'
     variants) and join them into a single video."""
     available_visemes = parse_msg(run_shell_cmd(
         'rostopic echo -n1 /blender_api/available_visemes', True))
     pub, msg_class = rostopic.create_publisher(
         '/blender_api/queue_viseme', 'blender_api_msgs/Viseme', True)
     record_secs = 2
     clips = []
     for viseme in available_visemes:
         if 'old' in viseme:
             continue  # legacy visemes are not recorded
         clip = '%s/viseme-%s.avi' % (self.output_dir, viseme)
         with capture_screen(clip, record_secs):
             pub.publish(msg_class(
                 viseme, Duration(0, 0), Duration(0, 5 * 1e8), 0.1, 0.8, 1))
         add_text_to_video(clip)
         clips.append(clip)
     ofile = '%s/viseme.avi' % self.output_dir
     concatenate_videos(clips, ofile, True)
     pub.unregister()
示例#27
0
 def test_face_target(self):
     """Point the face target at a couple of positions, recording the
     screen for each, then join the clips."""
     pub, msg_class = rostopic.create_publisher(
         "/blender_api/set_face_target", "blender_api_msgs/Target", True)
     record_secs = 2
     targets = {
         "center": (1, 0, 0, 1),
         "right": (0, 1, 0, 1),
         "left": (0, -1, 0, 1),
         "up": (1, 0, 0.5, 1),
         "down": (1, 0, -0.5, 1),
     }
     clips = []
     for name in ["right", "center"]:
         clip = "%s/face-%s.avi" % (self.output_dir, name)
         with capture_screen(clip, record_secs):
             pub.publish(msg_class(*targets[name]))
         add_text_to_video(clip)
         clips.append(clip)
     concatenate_videos(clips, "%s/face.avi" % self.output_dir, True)
     pub.unregister()
示例#28
0
 def test_face_target(self):
     """Sweep the face target through five positions, recording the
     screen for each, then join the clips."""
     pub, msg_class = rostopic.create_publisher(
         '/blender_api/set_face_target', 'blender_api_msgs/Target', True)
     record_secs = 2
     # (x, y, z) targets relative to the camera frame -- TODO confirm axes.
     targets = {
         'center': (1, 0, 0),
         'right': (0, 1, 0),
         'left': (0, -1, 0),
         'up': (1, 0, 0.5),
         'down': (1, 0, -0.5)
     }
     clips = []
     for name in ['right', 'up', 'left', 'down', 'center']:
         clip = '%s/face-%s.avi' % (self.output_dir, name)
         with capture_screen(clip, record_secs):
             pub.publish(msg_class(*targets[name]))
         add_text_to_video(clip)
         clips.append(clip)
     ofile = '%s/face.avi' % self.output_dir
     concatenate_videos(clips, ofile, True)
     pub.unregister()
示例#29
0
 def test_face_target(self):
     """Move the face target through five positions, capturing a short
     clip of each, and concatenate the results."""
     pub, msg_class = rostopic.create_publisher(
         '/blender_api/set_face_target',
         'blender_api_msgs/Target', True)
     record_secs = 2
     targets = {
         'center': (1, 0, 0, 1),
         'right': (0, 1, 0, 1),
         'left': (0, -1, 0, 1),
         'up': (1, 0, 0.5, 1),
         'down': (1, 0, -0.5, 1),
     }
     clips = []
     for name in ['right', 'up', 'left', 'down', 'center']:
         clip = '%s/face-%s.avi' % (self.output_dir, name)
         with capture_screen(clip, record_secs):
             pub.publish(msg_class(*targets[name]))
         add_text_to_video(clip)
         clips.append(clip)
     concatenate_videos(clips, '%s/face.avi' % self.output_dir, True)
     pub.unregister()
示例#30
0
 def test_long_viseme(self):
     """Replay a long recorded viseme stream at 10x speed and verify
     the blender node survives."""
     filename = get_rosbag_file('long_viseme')
     pub, msg_class = rostopic.create_publisher('/blender_api/queue_viseme',
                                                'blender_api_msgs/Viseme',
                                                True)
     bag = rosbag.Bag(filename)
     span = bag.get_end_time() - bag.get_start_time()
     msg_rate = bag.get_message_count() / float(span)
     delay = 1.0 / msg_rate / 10  # 10x faster than the recorded rate
     for topic, msg, _ in rosbag_msg_generator(filename):
         pub.publish(msg)
         time.sleep(delay)
     # Blender must still be registered and its process still active.
     self.assertIn('/blender_api', rosnode.get_node_names())
     active = self.runner.pm.get_active_names()
     self.assertTrue(any('blender_api' in name for name in active))
示例#31
0
    def test(self):
        """Playing a recorded face bag should make the behavior tree set
        one of the configured new-arrival emotions."""
        raw = self.behavior_config.get('emotion', 'new_arrival_emotions')
        new_arrival_emotions = [item.strip() for item in raw.split(',')]
        pub, msg_class = rostopic.create_publisher(
            '/behavior_switch', 'std_msgs/String', True)
        pub.publish(msg_class('btree_on'))
        bag_file = get_rosbag_file('face')

        # Capture the emotion message the behavior tree publishes.
        emo_msg_listener = create_msg_listener(
            '/blender_api/set_emotion_state', EmotionState, 10)
        emo_msg_listener.start()
        cam_output = '%s/cam_new_arrival_emotion.avi' % self.output_video
        screen_output = ('%s/screen_new_arrival_emotion.avi'
                         % self.output_video)
        duration = 5
        with capture_camera(cam_output, duration):
            with capture_screen(screen_output, duration):
                job = play_rosbag([bag_file, '-q'])
        job.join()
        emo_msg = emo_msg_listener.join()

        self.assertIn(emo_msg.name, new_arrival_emotions)