def getPau():
    msg = paumsg.pau()
    head = api.getHeadData()
    msg.m_headRotation.x = head['x']
    msg.m_headRotation.y = head['y']
    msg.m_headRotation.z = -head['z']
    msg.m_headRotation.w = head['w']
    neck = api.getNeckData()
    msg.m_neckRotation.x = neck['x']
    msg.m_neckRotation.y = neck['y']
    msg.m_neckRotation.z = -neck['z']
    msg.m_neckRotation.w = neck['w']
    eyes = api.getEyesData()
    msg.m_eyeGazeLeftPitch = eyes['l']['p']
    msg.m_eyeGazeLeftYaw = eyes['l']['y']
    msg.m_eyeGazeRightPitch = eyes['r']['p']
    msg.m_eyeGazeRightYaw = eyes['r']['y']
    shapekeys = api.getFaceData()
    msg.m_coeffs = shapekeys.values()
    # Manage timeout for set_pau
    if api.pauTimeout < time.time():
        api.setAnimationMode(api.pauAnimationMode & ~api.PAU_ACTIVE)
    return msg
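# A minimal sketch of the flag-clearing pattern used in the timeout branch above:
# the PAU_ACTIVE bit is cleared from the animation-mode bitmask with "& ~flag".
# The flag and mode values here are illustrative, not the real blender_api constants.
PAU_ACTIVE = 0x04          # hypothetical bit flag
mode = 0x07                # hypothetical current mode with PAU_ACTIVE set
mode &= ~PAU_ACTIVE        # clear only the PAU_ACTIVE bit, keep the others
assert mode == 0x03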
def map_pau(self, trackMsg):
    # Final message to be sent. Currently only m_coeffs has to be remapped.
    msg = pau()
    # Eyes and head remain the same.
    msg.m_headRotation = copy.deepcopy(trackMsg.m_headRotation)
    # Axis correction: remap the tracker frame into the PAU frame.
    msg.m_headRotation.y = trackMsg.m_headRotation.x
    msg.m_headRotation.z = -trackMsg.m_headRotation.y
    msg.m_headRotation.x = -trackMsg.m_headRotation.z
    msg.m_eyeGazeLeftPitch = math.radians(trackMsg.m_eyeGazeLeftPitch)
    msg.m_eyeGazeLeftYaw = math.radians(trackMsg.m_eyeGazeLeftYaw)
    msg.m_eyeGazeRightYaw = math.radians(trackMsg.m_eyeGazeRightYaw)
    msg.m_eyeGazeRightPitch = math.radians(trackMsg.m_eyeGazeRightPitch)
    # Empty m_coeffs
    msg.m_coeffs = [0] * ShapekeyStore.getLength()
    # Map by shapekey names
    fs = trackMsg.m_coeffs
    for i, sk in enumerate(ShapekeyStore.getList()):
        if sk in blend_shape_names:
            msg.m_coeffs[i] = fs[blend_shape_names.index(sk)]
    # Outer brows are the same as the inner brows
    msg.m_coeffs[5] = msg.m_coeffs[3]
    msg.m_coeffs[6] = msg.m_coeffs[4]
    msg.m_coeffs[10] = msg.m_coeffs[8]
    msg.m_coeffs[11] = msg.m_coeffs[9]
    return msg
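# A minimal stand-alone sketch of the name-based coefficient remapping done in
# map_pau above, using small dummy lists in place of ShapekeyStore.getList()
# and blend_shape_names. All names and values below are illustrative only.
target_keys = ["brow_inner_UP.L", "brow_inner_UP.R", "lip-JAW.DN"]   # hypothetical target order
source_keys = ["lip-JAW.DN", "brow_inner_UP.L"]                      # hypothetical tracker order
source_vals = [0.7, 0.2]                                             # tracker coefficients

coeffs = [0.0] * len(target_keys)
for i, key in enumerate(target_keys):
    if key in source_keys:
        coeffs[i] = source_vals[source_keys.index(key)]
# coeffs -> [0.2, 0.0, 0.7]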
def test_motor2(self):
    msg = pau()
    msg.m_neckRotation.x = 1.0
    msg.m_neckRotation.y = 0.5
    msg.m_neckRotation.z = 0.0
    msg.m_neckRotation.w = 1.0
    sub = self.queue.subscribe('/pub_topic2_controller/command', Float64)
    rospy.sleep(2)
    self.check_msg(msg, 0.5)  # max = 0.5
    sub.unregister()
def setUp(self):
    self.sub = rospy.Subscriber("eyes_pau", pau, self.last_message)
    self.get_pau = rospy.Publisher("/blender_api/get_pau", pau, queue_size=10)
    self.last_msg = False
    time.sleep(1)
    msg = pau()
    msg.m_eyeGazeLeftYaw = 1
    self.get_pau.publish(msg)
    time.sleep(1)
    rospy.logwarn("Setup")
def test_motor1(self):
    msg = pau()
    msg.m_headRotation.x = 0.5
    msg.m_headRotation.y = 0.5
    msg.m_headRotation.z = 0.0
    msg.m_headRotation.w = 1.0
    sub = self.queue.subscribe('/pub_topic_controller/command', Float64)
    rospy.sleep(2)
    self.check_msg(msg, math.asin(0.5))
    sub.unregister()
def point_head(self, req):
    rospy.loginfo(
        "Point head (yaw: %s, pitch: %s, roll: %s)",
        req.yaw, req.pitch, req.roll
    )
    msg = pau()
    msg.m_headRotation = Quaternion(
        *Utils.Quat.fromInYZX(req.yaw, req.pitch, req.roll).params
    )
    self.pub_neck.publish(msg)
def neckrot(hx, hy, nx, ny):
    global neck
    msg = paumsg.pau()
    msg.m_headRotation.x = hx
    msg.m_headRotation.y = hy
    msg.m_headRotation.z = 0.0
    # Reconstruct w so the quaternion stays normalized: w = sqrt(1 - x^2 - y^2)
    msg.m_headRotation.w = math.sqrt(1.0 - hx*hx - hy*hy)
    msg.m_neckRotation.x = nx
    msg.m_neckRotation.y = ny
    msg.m_neckRotation.z = 0.0
    msg.m_neckRotation.w = math.sqrt(1.0 - nx*nx - ny*ny)
    neck.publish(msg)
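# A minimal sketch of a safe quaternion-w reconstruction for the case above
# (z = 0), clamping against small numerical overshoot in x and y. The helper
# name is hypothetical, not part of the original script.
import math

def quat_w_from_xy(x, y):
    """Return w = sqrt(max(0, 1 - x^2 - y^2)) so the quaternion stays valid."""
    return math.sqrt(max(0.0, 1.0 - x * x - y * y))

# Example: quat_w_from_xy(0.1, 0.2) -> ~0.9747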
def eyes_calib_send(pitch, yaw, delta):
    if delta > 0.2 or delta < -0.2:
        return
    msg = pau()
    msg.m_eyeGazeLeftPitch = pitch
    msg.m_eyeGazeLeftYaw = yaw
    msg.m_eyeGazeRightPitch = pitch
    msg.m_eyeGazeRightYaw = yaw
    msg.m_eyeGazeLeftPitch += delta
    msg.m_eyeGazeRightPitch += delta
    msg.m_eyeGazeLeftYaw += delta
    msg.m_eyeGazeRightYaw += delta
    pub.publish(msg)
    print("pub {}".format(msg))
def getPauFromMotors(motors):
    global msg
    msg = pau()
    msg.m_shapekeys = [-1.0] * len(ShapekeyStore._shkey_list)
    for m, v in motors.items():
        # Get the shapekey and the value
        cfg = get_cfg(m)
        if not cfg:
            print("No motor config {}".format(m))
            continue
        if cfg['function'] == 'weightedsum':
            weightedsum(cfg, v)
        if cfg['function'] == 'linear':
            linear(cfg, v)
    return msg
def getPau():
    msg = paumsg.pau()
    head = api.getHeadData()
    msg.m_headRotation.x = head['x']
    msg.m_headRotation.y = head['y']
    msg.m_headRotation.z = -head['z']
    msg.m_headRotation.w = head['w']
    eyes = api.getEyesData()
    msg.m_eyeGazeLeftPitch = eyes['l']['p']
    msg.m_eyeGazeLeftYaw = eyes['l']['y']
    msg.m_eyeGazeRightPitch = eyes['r']['p']
    msg.m_eyeGazeRightYaw = eyes['r']['y']
    shapekeys = api.getFaceData()
    msg.m_coeffs = shapekeys.values()
    return msg
def test_message_fwd(self):
    # If the node is active, pau messages should be forwarded from /blender_api to eyes_pau
    msg = pau()
    self.last_msg = False
    self.get_pau.publish(msg)
    time.sleep(0.1)
    self.assertTrue(self.last_msg != False, "Message not received")
    # Check whether the eye positions have been modified
    t = time.time()
    modified = False
    # Loop until the eye positions change or the timeout expires
    while time.time() - t < 3:
        self.get_pau.publish(msg)
        time.sleep(1)
        if self.last_msg.m_eyeGazeRightPitch != msg.m_eyeGazeRightPitch:
            modified = True
            break
    self.assertTrue(modified, "Eye positions were not adjusted")
def _build_msg(cls, expr_entry=None):
    """
    Takes an entry from the config file and returns a constructed ROS message.
    """
    msg = pau()
    msg.m_coeffs = [0] * cls.M_COEFFS_LEN
    # Allow creation of a neutral face without an expression yaml entry.
    if expr_entry is None:
        return msg
    for shapekey in expr_entry:
        index = ShapekeyStore.getIndex(shapekey)
        if index is None:
            rospy.logwarn("Invalid shapekey in expression data: %s", shapekey)
        else:
            msg.m_coeffs[index] = expr_entry[shapekey]
    return msg
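# A minimal stand-alone sketch of the shapekey -> m_coeffs fill in _build_msg
# above, using a plain dict in place of ShapekeyStore.getIndex(). The index
# mapping and the expression entry below are illustrative only.
shapekey_index = {"brow_center_UP": 0, "lips-smile.L": 1, "lips-smile.R": 2}  # hypothetical
expr_entry = {"lips-smile.L": 0.8, "lips-smile.R": 0.8, "bogus_key": 1.0}

coeffs = [0] * len(shapekey_index)
for shapekey, value in expr_entry.items():
    index = shapekey_index.get(shapekey)
    if index is None:
        print("Invalid shapekey in expression data: {}".format(shapekey))
    else:
        coeffs[index] = value
# coeffs -> [0, 0.8, 0.8]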
def talker():
    pub = rospy.Publisher('/blender_api/set_pau', pau, queue_size=10)
    rospy.init_node('telnet2pau', anonymous=True)
    HOST = sys.argv[1]
    PORT = 8888
    tn = telnetlib.Telnet(HOST, PORT)
    rospy.loginfo("Opened connection...")
    angle_range = 45.0
    tn.read_until("\n")
    while 1:
        msg = pau()
        msg.m_headRotation.x = 0
        msg.m_headRotation.y = 0
        msg.m_headRotation.z = 0
        msg.m_headRotation.w = 0
        msg.m_headTranslation.x = 0
        msg.m_headTranslation.y = 0
        msg.m_headTranslation.z = 0
        msg.m_neckRotation.x = 0
        msg.m_neckRotation.y = 0
        msg.m_neckRotation.z = 0
        msg.m_neckRotation.w = 0
        msg.m_eyeGazeLeftPitch = 0
        msg.m_eyeGazeLeftYaw = 0
        msg.m_eyeGazeRightPitch = 0
        msg.m_eyeGazeRightYaw = 0
        ## Read the frame
        frame_str = tn.read_until("\n").strip("\n")
        print(frame_str)
        frame = np.array(frame_str.split(","))
        frame = frame.astype(np.float)
        frame *= 0.01
        frame = np.power(frame, 2.0)
        frame = np.minimum(1.0, frame)
        ## Delete superfluous channels from the frame:
        cut_frame = np.delete(frame, np.s_[13:27], axis=0)
        msg.m_coeffs = cut_frame
        msg.m_shapekeys = [
            'brow_center_DN', 'brow_center_UP', 'brow_inner_DN.L', 'brow_inner_UP.L',
            'brow_inner_DN.R', 'brow_inner_UP.R', 'brow_outer_DN.L', 'brow_outer_UP.L',
            'brow_outer_DN.R', 'brow_outer_up.R', 'wince.L', 'wince.R',
            'lip-JAW.DN', 'eye-blink.LO.L', 'eye-flare.LO.L', 'eye-blink.LO.R',
            'eye-flare.LO.R', 'eye-blink.UP.L', 'eye-flare.UP.L', 'eye-blink.UP.R',
            'eye-flare.UP.R', 'lips-wide.L', 'lips-narrow.L', 'lips-wide.R',
            'lips-narrow.R', 'lips-frown.L', 'lips-frown.R', 'lip-DN.C.DN',
            'lip-DN.C.UP', 'lip-DN.L.DN', 'lip-DN.L.UP', 'lip-DN.R.DN',
            'lip-DN.R.UP', 'lips-smile.L', 'lips-smile.R', 'lip-UP.C.DN',
            'lip-UP.C.UP', 'lip-UP.L.DN', 'lip-UP.L.UP', 'lip-UP.R.DN',
            'lip-UP.R.UP', 'sneer.L', 'sneer.R'
        ]
        assert len(msg.m_shapekeys) == len(cut_frame)
        print("sending frame...")
        pub.publish(msg)
        if rospy.is_shutdown():
            break
from rospkg import RosPack

ROBOT = "sophia_body"

#import bpy
#rospy.init_node("expression_tools")
motors = rospy.get_param('/{}/motors'.format(ROBOT))
expressions = rospy.get_param('/{}/expressions'.format(ROBOT))
animations = rospy.get_param('/{}/animations'.format(ROBOT))

from pau2motors.msg import pau
from pau2motors import ShapekeyStore

rp = RosPack()
config_root = rp.get_path('robots_config')
with open(config_root + "/" + ROBOT + "/motors_settings.yaml", 'r') as stream:
    motors = yaml.load(stream)

msg = pau()
msg.m_shapekeys = [-1.0] * len(ShapekeyStore._shkey_list)


def get_motor_value(motor, relative):
    for m in motors:
        if m['name'] == motor:
            v = m['min'] + relative * (m['max'] - m['min'])
            return int(v)
    return -1


def get_cfg(motor):
    global motors
    for m in motors:
        if m['name'] == motor:
            return m
    return None
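# A minimal usage sketch for get_cfg / get_motor_value above, assuming each
# entry of motors_settings.yaml is a dict with 'name', 'min' and 'max' fields
# as those functions expect. The motor name 'Jaw' is hypothetical.
cfg = get_cfg('Jaw')
if cfg is not None:
    raw = get_motor_value('Jaw', 0.5)   # midpoint of the motor's range
    print("Jaw midpoint command: {}".format(raw))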
def point_head(self, req):
    msg = pau()
    msg.m_headRotation = Quaternion(
        *Utils.Quat.fromInYZX(req.roll, -req.yaw, -req.pitch).params
    )
    self.pub_neck.publish(msg)
def point_head(self, req):
    msg = pau()
    msg.m_headRotation = Quaternion(
        *Quat.Quat.fromInYZX(req.roll, -req.yaw, -req.pitch).params
    )
    self.pub_neck.publish(msg)
def HandleTimer(self, data):
    # This is the heart of the synthesizer: the lookat and eyecontact state
    # machines take care of where the robot is looking, and random expressions
    # and gestures are triggered to make it look more alive (like RealSense Tracker).
    ts = data.current_expected

    # ==== handle lookat
    if self.lookat == LookAt.IDLE:
        # no specific target, let Blender do its soma cycle thing
        pass
    elif self.lookat == LookAt.AVOID:
        # TODO: find out where there is no saliency, hand or face
        # TODO: head_focus_pub
        pass
    elif self.lookat == LookAt.SALIENCY:
        self.saliency_counter -= 1
        if self.saliency_counter == 0:
            self.InitSaliencyCounter()
            self.SelectNextSaliency()
        if self.current_saliency_ts != 0:
            cursaliency = self.saliencies[self.current_saliency_ts]
            self.UpdateGaze(cursaliency.direction)
    elif self.lookat == LookAt.HAND:
        # stare at hand
        if self.hand != None:
            self.UpdateGaze(self.hand.position)
    elif self.lookat == LookAt.AUDIENCE:
        self.audience_counter -= 1
        if self.audience_counter == 0:
            self.InitAudienceCounter()
            self.SelectNextAudience()
            # TODO: self.UpdateGaze()
    elif self.lookat == LookAt.SPEAKER:
        # TODO: look at the speaker, according to speaker ROI
        pass
    else:
        if self.lookat == LookAt.ALL_FACES:
            self.faces_counter -= 1
            if self.faces_counter == 0:
                self.InitFacesCounter()
                self.SelectNextFace()
        # take the current face
        if self.current_face_id != 0:
            curface = self.faces[self.current_face_id]
            face_pos = curface.position

            # ==== handle eyecontact (only for LookAt.ONE_FACE and LookAt.ALL_FACES)

            # calculate where left eye, right eye and mouth are on the current face
            left_eye_pos = Float32XYZ()
            right_eye_pos = Float32XYZ()
            mouth_pos = Float32XYZ()
            # all are 5cm in front of the center of the face
            left_eye_pos.x = face_pos.x - 0.05
            right_eye_pos.x = face_pos.x - 0.05
            mouth_pos.x = face_pos.x - 0.05
            left_eye_pos.y = face_pos.y + 0.03   # left eye is 3cm to the left of the center
            right_eye_pos.y = face_pos.y - 0.03  # right eye is 3cm to the right of the center
            mouth_pos.y = face_pos.y             # mouth is dead center
            left_eye_pos.z = face_pos.z + 0.06   # left eye is 6cm above the center
            right_eye_pos.z = face_pos.z + 0.06  # right eye is 6cm above the center
            mouth_pos.z = face_pos.z - 0.04      # mouth is 4cm below the center

            if self.eyecontact == EyeContact.IDLE:
                # look at center of the head
                self.UpdateGaze(face_pos)
            elif self.eyecontact == EyeContact.LEFT_EYE:
                # look at left eye
                self.UpdateGaze(left_eye_pos)
            elif self.eyecontact == EyeContact.RIGHT_EYE:
                # look at right eye
                self.UpdateGaze(right_eye_pos)
            elif self.eyecontact == EyeContact.BOTH_EYES:
                # switch between eyes back and forth
                self.eyes_counter -= 1
                if self.eyes_counter == 0:
                    self.InitEyesCounter()
                    if self.current_eye == 1:
                        self.current_eye = 0
                    else:
                        self.current_eye = 1
                # look at that eye
                if self.current_eye == 0:
                    cur_eye_pos = left_eye_pos
                else:
                    cur_eye_pos = right_eye_pos
                self.UpdateGaze(cur_eye_pos)
            elif self.eyecontact == EyeContact.TRIANGLE:
                # cycle between eyes and mouth
                self.eyes_counter -= 1
                if self.eyes_counter == 0:
                    self.InitEyesCounter()
                    if self.current_eye == 2:
                        self.current_eye = 0
                    else:
                        self.current_eye += 1
                # look at that eye
                if self.current_eye == 0:
                    cur_eye_pos = left_eye_pos
                elif self.current_eye == 1:
                    cur_eye_pos = right_eye_pos
                elif self.current_eye == 2:
                    cur_eye_pos = mouth_pos
                self.UpdateGaze(cur_eye_pos)

            # mirroring
            msg = pau()
            msg.m_coeffs = []
            msg.m_shapekeys = []
            if self.mirroring == Mirroring.EYEBROWS or self.mirroring == Mirroring.EYES or self.mirroring == Mirroring.MOUTH_EYEBROWS or self.mirroring == Mirroring.ALL:
                # mirror eyebrows
                left_brow = curface.left_brow
                right_brow = curface.right_brow
                msg.m_coeffs.append("brow_outer_UP.L")
                msg.m_shapekeys.append(left_brow)
                msg.m_coeffs.append("brow_inner_UP.L")
                msg.m_shapekeys.append(left_brow * 0.8)
                msg.m_coeffs.append("brow_outer_DN.L")
                msg.m_shapekeys.append(1.0 - left_brow)
                msg.m_coeffs.append("brow_outer_up.R")
                msg.m_shapekeys.append(right_brow)
                msg.m_coeffs.append("brow_inner_UP.R")
                msg.m_shapekeys.append(right_brow * 0.8)
                msg.m_coeffs.append("brow_outer_DN.R")
                msg.m_shapekeys.append(1.0 - right_brow)
            if self.mirroring == Mirroring.EYELIDS or self.mirroring == Mirroring.EYES or self.mirroring == Mirroring.MOUTH_EYELIDS or self.mirroring == Mirroring.ALL:
                # mirror eyelids
                eyes_closed = ((1.0 - curface.left_eyelid) + (1.0 - curface.right_eyelid)) / 2.0
                msg.m_coeffs.append("eye-blink.UP.R")
                msg.m_shapekeys.append(eyes_closed)
                msg.m_coeffs.append("eye-blink.UP.L")
                msg.m_shapekeys.append(eyes_closed)
                msg.m_coeffs.append("eye-blink.LO.R")
                msg.m_shapekeys.append(eyes_closed)
                msg.m_coeffs.append("eye-blink.LO.L")
                msg.m_shapekeys.append(eyes_closed)
            if self.mirroring == Mirroring.MOUTH or self.mirroring == Mirroring.MOUTH_EYEBROWS or self.mirroring == Mirroring.MOUTH_EYELIDS:
                # mirror mouth
                mouth_open = curface.mouth_open
                msg.m_coeffs.append("lip-JAW.DN")
                msg.m_shapekeys.append(mouth_open)
            if self.mirroring != Mirroring.IDLE:
                self.StartPauMode()
                self.setpau_pub.publish(msg)

    # start random gestures
    self.gesture_counter -= 1
    if self.gesture_counter == 0:
        self.InitGestureCounter()
        if self.animations != None:
            # list all gestures that would fire right now according to probability
            firing = []
            for g in self.animations[self.current_gestures_name]:
                if random.uniform(0.0, 1.0) <= g["probability"]:
                    firing.append(g)
            # start randomly from that list
            if len(firing) > 0:
                g = firing[random.randint(0, len(firing) - 1)]
                msg = SetGesture()
                msg.name = g["name"]
                msg.repeat = False
                msg.speed = random.uniform(g["speed_min"], g["speed_max"])
                msg.magnitude = random.uniform(g["magnitude_min"], g["magnitude_max"])
                self.gestures_pub.publish(msg)

    # start random expressions
    self.expression_counter -= 1
    if self.expression_counter == 0:
        self.InitExpressionCounter()
        if self.animations != None:
            # list all expressions that would fire right now according to probability
            firing = []
            for g in self.animations[self.current_expressions_name]:
                if random.uniform(0.0, 1.0) <= g["probability"]:
                    firing.append(g)
            # start randomly from that list
            if len(firing) > 0:
                g = firing[random.randint(0, len(firing) - 1)]
                msg = EmotionState()
                msg.name = g["name"]
                msg.magnitude = random.uniform(g["magnitude_min"], g["magnitude_max"])
                msg.duration = rospy.Duration(random.uniform(g["duration_min"], g["duration_max"]))
                self.expressions_pub.publish(msg)

    prune_before_time = ts - rospy.Duration.from_sec(self.keep_time)

    # flush faces dictionary, update current face accordingly
    to_be_removed = []
    for face in self.faces.values():
        if face.ts < prune_before_time:
            to_be_removed.append(face.cface_id)
    # remove the elements
    for key in to_be_removed:
        del self.faces[key]
        # make sure the selected face is always valid
        if self.current_face_id == key:
            self.SelectNextFace()

    # remove hand if it is too old
    if self.hand != None:
        if self.hand.ts < prune_before_time:
            self.hand = None

    # flush saliency dictionary
    to_be_removed = []
    for key in self.saliencies.keys():
        if key < prune_before_time:
            to_be_removed.append(key)
    # remove the elements
    for key in to_be_removed:
        del self.saliencies[key]
        # make sure the selected saliency is always valid
        if self.current_saliency_ts == key:
            self.SelectNextSaliency()

    # decay from FOCUSED to IDLE if hand was not seen for a while
    if self.state == State.FOCUSED and self.last_hand_ts < ts - rospy.Duration.from_sec(self.hand_state_decay):
        self.SetState(State.IDLE)
        self.UpdateStateDisplay()

    # decay from SPEAKING or LISTENING to IDLE
    if ((self.state == State.SPEAKING) or (self.state == State.LISTENING)) and self.last_talk_ts < ts - rospy.Duration.from_sec(self.face_state_decay):
        self.SetState(State.IDLE)
        self.UpdateStateDisplay()

    # have gaze or head follow head or gaze after a while
    if self.gaze_delay_counter > 0 and self.gaze_pos != None:
        self.gaze_delay_counter -= 1
        if self.gaze_delay_counter == 0:
            if self.gaze == Gaze.GAZE_LEADS_HEAD:
                self.SetHeadFocus(self.gaze_pos, self.gaze_speed)
                self.gaze_delay_counter = int(self.gaze_delay * self.synthesizer_rate)
            elif self.gaze == Gaze.HEAD_LEADS_GAZE:
                self.SetGazeFocus(self.gaze_pos, self.gaze_speed)
                self.gaze_delay_counter = int(self.gaze_delay * self.synthesizer_rate)

    # when speaking, sometimes look at all faces
    if self.state == State.SPEAKING:
        if self.lookat == LookAt.AVOID:
            self.all_faces_start_counter -= 1
            if self.all_faces_start_counter == 0:
                self.InitAllFacesStartCounter()
                self.SetLookAt(LookAt.ALL_FACES)
                self.UpdateStateDisplay()
        elif self.lookat == LookAt.ALL_FACES:
            self.all_faces_duration_counter -= 1
            if self.all_faces_duration_counter == 0:
                self.InitAllFacesDurationCounter()
                self.SetLookAt(LookAt.AVOID)
                self.UpdateStateDisplay()
#!/usr/bin/env python
import rospy
import socket
import math
import copy
import struct
import pprint
import ShapekeyStore

from pau2motors.msg import pau

trackMsg = pau()

LISTENING_PORT = 33433
#BINDING_ADDR = "127.0.0.1"  # Good for local work
BINDING_ADDR = ''  # Empty string means: bind to all network interfaces
BLOCK_ID_TRACKING_STATE = 33433  # According to faceshift docs

# Delay, in seconds, between modal timed updates after entering the modal command mode.
UPDATE_DELAY = 0.04

# These are the names of the FaceShift control channels
# Updated mapping for FaceShift 2014.1
blend_shape_names = [
    "eye-blink.UP.L",
    "eye-blink.UP.R",
    "02_EyeSquint_L",
    "03_EyeSquint_R",
    "04_EyeDown_L",
#!/usr/bin/env python
import rospy
import socket
import math
import copy
import struct
import pprint
import ShapekeyStore

from pau2motors.msg import pau

trackMsg = pau()

LISTENING_PORT = 33433
#BINDING_ADDR = "127.0.0.1"  # Good for local work
BINDING_ADDR = ''  # Empty string means: bind to all network interfaces
BLOCK_ID_TRACKING_STATE = 33433  # According to faceshift docs

# Delay, in seconds, between modal timed updates after entering the modal command mode.
UPDATE_DELAY = 0.04

# These are the names of the FaceShift control channels
# Updated mapping for FaceShift 2014.1
blend_shape_names = [
    "eye-blink.UP.L",
    "eye-blink.UP.R",
    "02_EyeSquint_L",
    "03_EyeSquint_R",
    "04_EyeDown_L",
    "05_EyeDown_R",
def talker():
    pub = rospy.Publisher('/blender_api/set_pau', pau, queue_size=10)
    rospy.init_node('csv2pau', anonymous=True)
    rate = rospy.Rate(120)  # 29fps approx. measure this exactly.
    data = np.loadtxt(sys.argv[1], delimiter=',')
    rospy.loginfo("loaded file with %d frames." % len(data))
    angle_range = 45.0
    for frame in data:
        msg = pau()
        msg.m_headRotation.x = frame[0]
        msg.m_headRotation.y = frame[1]
        msg.m_headRotation.z = frame[2]
        msg.m_headRotation.w = frame[3]
        msg.m_headTranslation.x = 0
        msg.m_headTranslation.y = 0
        msg.m_headTranslation.z = 0
        msg.m_neckRotation.x = 0
        msg.m_neckRotation.y = 0
        msg.m_neckRotation.z = 0
        msg.m_neckRotation.w = 0
        msg.m_eyeGazeLeftPitch = 0
        msg.m_eyeGazeLeftYaw = 0
        msg.m_eyeGazeRightPitch = 0
        msg.m_eyeGazeRightYaw = 0
        msg.m_coeffs = frame[16:]
        print("conf: %f speak: %f" % (frame[-2], frame[-1]))
        msg.m_shapekeys = [
            'eye-flare.UP.R', 'lips-narrow.L', 'lips-frown.R', 'eye-blink.UP.R',
            'lip-UP.L.UP', 'eye-blink.UP.L', 'lips-frown.L', 'lips-narrow.R',
            'eye-flare.UP.L', 'lip-DN.C.UP', 'eye-flare.LO.R', 'lip-DN.R.UP',
            'brow_inner_UP.R', 'brow_outer_UP.L', 'brow_inner_UP.L', 'eye-flare.LO.L',
            'brow_center_DN', 'lips-smile.R', 'lip-JAW.DN', 'lip-DN.R.DN',
            'wince.L', 'lips-smile.L', 'eye-blink.LO.R', 'lip-UP.R.UP',
            'lip-UP.C.DN', 'eye-blink.LO.L', 'brow_center_UP', 'lip-DN.L.DN',
            'lip-DN.L.UP', 'wince.R', 'sneer.L', 'lips-wide.L',
            'brow_outer_DN.R', 'lip-UP.R.DN', 'brow_inner_DN.L', 'brow_outer_up.R',
            'brow_inner_DN.R', 'lip-DN.C.DN', 'lip-UP.L.DN', 'brow_outer_DN.L',
            'lip-UP.C.UP', 'lips-wide.R', 'sneer.R'
        ]
        #msg.m_shapekeys = ['EyeBlink_L', 'EyeBlink_R', 'EyeSquint_L', 'EyeSquint_R', 'EyeDown_L', 'EyeDown_R', 'EyeIn_L', 'EyeIn_R', 'EyeOpen_L', 'EyeOpen_R', 'EyeOut_L', 'EyeOut_R', 'EyeUp_L', 'EyeUp_R', 'BrowsD_L', 'BrowsD_R', 'BrowsU_C_L', 'BrowsU_C_R', 'BrowsU_L', 'BrowsU_R', 'BrowsSqueeze_L', 'BrowsSqueeze_R', 'JawOpen', 'LipsTogether', 'JawLeft', 'JawRight', 'JawFwd', 'LipsUpperUp_L', 'LipsUpperUp_R', 'LipsLowerDown_L', 'LipsLowerDown_R', 'LipsUpperClose', 'LipsLowerClose', 'LipsLowerOpen', 'LipsUpperOpen', 'MouthSharpCornerPull_L', 'MouthSharpCornerPull_R', 'MouthSmile_L', 'MouthSmile_R', 'MouthDimple_L', 'MouthDimple_R', 'LipsStretch_L', 'LipsStretch_R', 'MouthFrown_L', 'MouthFrown_R', 'MouthPress_L', 'MouthPress_R', 'LipsPucker_L', 'LipsPucker_R', 'LipsFunnel_L', 'LipsFunnel_R', 'MouthLeft', 'MouthRight', 'ChinLowerRaise', 'ChinUpperRaise', 'Sneer_L', 'Sneer_R', 'Puff', 'CheekSquint_L', 'CheekSquint_R']
        rospy.loginfo("sending frame.")
        pub.publish(msg)
        rate.sleep()
        if rospy.is_shutdown():
            break
def talker():
    pub = rospy.Publisher('/blender_api/set_pau', pau, queue_size=10)
    rospy.init_node('csv2pau', anonymous=True)
    rate = rospy.Rate(120)  # 29fps approx. measure this exactly.
    data = np.loadtxt(sys.argv[1], delimiter=',')
    rospy.loginfo("loaded file with %d frames." % len(data))
    angle_range = 45.0
    for frame in data:
        msg = pau()
        msg.m_headRotation.x = frame[0]
        msg.m_headRotation.y = frame[1]
        msg.m_headRotation.z = frame[2]
        msg.m_headRotation.w = frame[3]
        msg.m_headTranslation.x = 0
        msg.m_headTranslation.y = 0
        msg.m_headTranslation.z = 0
        msg.m_neckRotation.x = 0
        msg.m_neckRotation.y = 0
        msg.m_neckRotation.z = 0
        msg.m_neckRotation.w = 0
        msg.m_eyeGazeLeftPitch = 0
        msg.m_eyeGazeLeftYaw = 0
        msg.m_eyeGazeRightPitch = 0
        msg.m_eyeGazeRightYaw = 0
        frame = np.power(frame, 2.0)
        msg.m_coeffs = frame[16:]
        msg.m_shapekeys = [
            'brow_center_DN', 'brow_center_UP', 'brow_inner_DN.L', 'brow_inner_UP.L',
            'brow_inner_DN.R', 'brow_inner_UP.R', 'brow_outer_DN.L', 'brow_outer_UP.L',
            'brow_outer_DN.R', 'brow_outer_up.R', 'wince.L', 'wince.R',
            'lip-JAW.DN', 'eye-blink.LO.L', 'eye-flare.LO.L', 'eye-blink.LO.R',
            'eye-flare.LO.R', 'eye-blink.UP.L', 'eye-flare.UP.L', 'eye-blink.UP.R',
            'eye-flare.UP.R', 'lips-wide.L', 'lips-narrow.L', 'lips-wide.R',
            'lips-narrow.R', 'lips-frown.L', 'lips-frown.R', 'lip-DN.C.DN',
            'lip-DN.C.UP', 'lip-DN.L.DN', 'lip-DN.L.UP', 'lip-DN.R.DN',
            'lip-DN.R.UP', 'lips-smile.L', 'lips-smile.R', 'lip-UP.C.DN',
            'lip-UP.C.UP', 'lip-UP.L.DN', 'lip-UP.L.UP', 'lip-UP.R.DN',
            'lip-UP.R.UP', 'sneer.L', 'sneer.R'
        ]
        rospy.loginfo("sending frame.")
        pub.publish(msg)
        rate.sleep()
        if rospy.is_shutdown():
            break
def talker():
    pub = rospy.Publisher('/blender_api/set_pau', pau, queue_size=10)
    rospy.init_node('csv2pau', anonymous=True)
    rate = rospy.Rate(120)  # 29fps approx. measure this exactly.
    file_name = sys.argv[1]
    file_type = file_name.split('_')[-1]  # e.g. 'mout.dat' or 'face.dat'
    data = np.loadtxt(file_name, delimiter=',')
    if file_name.endswith('_face.dat'):
        data = np.delete(data, np.s_[13:27], axis=1)
    rospy.loginfo("loaded file with %d frames." % len(data))
    phon = np.loadtxt(sys.argv[2], delimiter=',', dtype=str)
    angle_range = 45.0
    for frame in data:
        msg = pau()
        msg.m_headRotation.x = 0
        msg.m_headRotation.y = 0
        msg.m_headRotation.z = 0
        msg.m_headRotation.w = 0
        msg.m_headTranslation.x = 0
        msg.m_headTranslation.y = 0
        msg.m_headTranslation.z = 0
        msg.m_neckRotation.x = 0
        msg.m_neckRotation.y = 0
        msg.m_neckRotation.z = 0
        msg.m_neckRotation.w = 0
        msg.m_eyeGazeLeftPitch = 0
        msg.m_eyeGazeLeftYaw = 0
        msg.m_eyeGazeRightPitch = 0
        msg.m_eyeGazeRightYaw = 0
        if file_type == 'mout.dat':  # note: split('_')[-1] drops the leading underscore
            msg.m_coeffs = frame
            msg.m_shapekeys = [
                'lip-JAW.DN', 'lips-wide.L', 'lips-narrow.L', 'lips-wide.R',
                'lips-narrow.R', 'lips-frown.L', 'lips-frown.R', 'lip-DN.C.DN',
                'lip-DN.C.UP', 'lip-DN.L.DN', 'lip-DN.L.UP', 'lip-DN.R.DN',
                'lip-DN.R.UP', 'lips-smile.L', 'lips-smile.R', 'lip-UP.C.DN',
                'lip-UP.C.UP', 'lip-UP.L.DN', 'lip-UP.L.UP', 'lip-UP.R.DN',
                'lip-UP.R.UP'
            ]
        else:
            msg.m_coeffs = frame[16:]
            msg.m_shapekeys = [
                'brow_center_DN', 'brow_center_UP', 'brow_inner_DN.L', 'brow_inner_UP.L',
                'brow_inner_DN.R', 'brow_inner_UP.R', 'brow_outer_DN.L', 'brow_outer_UP.L',
                'brow_outer_DN.R', 'brow_outer_up.R', 'wince.L', 'wince.R',
                'lip-JAW.DN', 'eye-blink.LO.L', 'eye-flare.LO.L', 'eye-blink.LO.R',
                'eye-flare.LO.R', 'eye-blink.UP.L', 'eye-flare.UP.L', 'eye-blink.UP.R',
                'eye-flare.UP.R', 'lips-wide.L', 'lips-narrow.L', 'lips-wide.R',
                'lips-narrow.R', 'lips-frown.L', 'lips-frown.R', 'lip-DN.C.DN',
                'lip-DN.C.UP', 'lip-DN.L.DN', 'lip-DN.L.UP', 'lip-DN.R.DN',
                'lip-DN.R.UP', 'lips-smile.L', 'lips-smile.R', 'lip-UP.C.DN',
                'lip-UP.C.UP', 'lip-UP.L.DN', 'lip-UP.L.UP', 'lip-UP.R.DN',
                'lip-UP.R.UP', 'sneer.L', 'sneer.R'
            ]
        rospy.loginfo("sending frame.")
        pub.publish(msg)
        rate.sleep()
        if rospy.is_shutdown():
            break