from flask import Response, request  # assumed: this handler uses Flask's request/Response

def realsense():
    stream = request.get_json()
    # Iterate over each element in the JSON payload.
    f = Faces()
    faces = []
    if stream is not None:
        for i in stream:
            if i is not None and 'people' in i and stream[i] is not None:
                for j in range(len(stream[i])):
                    p = Face()
                    p.id = stream[i][j]['ID']
                    # RealSense reports millimetres; convert to metres.
                    # The point is the center of the face.
                    p.point.x = stream[i][j]['z'] / 1000.0
                    p.point.y = -stream[i][j]['x'] / 1000.0
                    p.point.z = stream[i][j]['y'] / 1000.0
                    # p.pose.x = stream[i][j]['rx']
                    # p.pose.y = stream[i][j]['ry']
                    # p.pose.z = stream[i][j]['rz']
                    # p.pose.w = stream[i][j]['rw']
                    # The confidence indicates whether the depth image is near
                    # or far away; low values indicate an unreliable reading.
                    p.attention = stream[i][j]['confidence']
                    # Now extract the emotion-related data from the elements.
                    # This makes sure an expression is always being processed
                    # in the RealSense demo application.
                    # if stream[i][j]['expression'] is not None:
                    #     for expression in stream[i][j]['expression']:
                    #         p.emotions.append(str(expression))
                    #         p.emotion_values.append(stream[i][j]['expression'][expression])
                    faces.append(p)
    if len(faces) > 0:
        # Check that the list is not empty before publishing it.
        f.faces = faces
        facePub.publish(f)
    return Response(json_encode({'success': True}), mimetype="application/json")
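# A minimal sketch of a client feeding this endpoint, inferred from the field
# lookups above. The route path, port, and exact payload schema of the
# RealSense demo application are assumptions, not confirmed by this code.
import requests

payload = {
    "people": [
        # Coordinates in millimetres (the endpoint divides by 1000.0).
        {"ID": 1, "x": 120.0, "y": -45.0, "z": 850.0, "confidence": 80},
    ],
}
requests.post("http://localhost:5000/realsense", json=payload)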
def publish_faces(self):
    faces = []
    for f in self.faces.keys():
        if self.faces[f].is_trackable():
            face = Face()
            face.id = f
            face.point = self.faces[f].get_filtered_3d_point()
            faces.append(face)
    msg = Faces()
    msg.faces = faces
    self.faces_pub.publish(msg)
def publish_faces(self):
    faces = []
    for f in self.faces.keys():
        if self.faces[f].is_trackable():
            face = Face()
            face.id = f
            face.point = self.faces[f].get_filtered_3d_point()
            face.attention = self.faces[f].attention
            faces.append(face)
    msg = Faces()
    msg.faces = faces
    self.faces_pub.publish(msg)
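# A minimal sketch of what an entry in self.faces could look like; the real
# tracker class is not shown in these snippets, so everything here except the
# is_trackable() and get_filtered_3d_point() names is an assumption.
from collections import deque

from geometry_msgs.msg import Point


class TrackedFace(object):
    """Hypothetical per-face state with a simple moving-average filter."""

    def __init__(self, window=5, min_observations=3):
        self.points = deque(maxlen=window)  # recent raw 3D observations
        self.attention = 0.0
        self.min_observations = min_observations

    def add_observation(self, point, attention):
        self.points.append(point)
        self.attention = attention

    def is_trackable(self):
        # Only trust a face once it has been seen for a few frames in a row.
        return len(self.points) >= self.min_observations

    def get_filtered_3d_point(self):
        # Average the recent observations to smooth out depth noise.
        n = float(len(self.points))
        p = Point()
        p.x = sum(pt.x for pt in self.points) / n
        p.y = sum(pt.y for pt in self.points) / n
        p.z = sum(pt.z for pt in self.points) / n
        return p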
import re


def callback(data):
    global previous_faces
    current_faces = {}
    for user in data.tf_bundles:
        user_id = int(re.sub("user_", "", user.user_id))
        location = user.head.translation
        current_faces[user_id] = location

    new_face = findDiff(list(current_faces.keys()), list(previous_faces.keys()))
    lost_face = findDiff(list(previous_faces.keys()), list(current_faces.keys()))

    # Publishers
    # If a new face is found
    if new_face:
        face_event_msg = FaceEvent()
        face_event_msg.face_event = "new_face"
        face_event_msg.face_id = new_face
        event_publisher.publish(face_event_msg)
    # If a face is lost
    if lost_face:
        face_event_msg = FaceEvent()
        face_event_msg.face_event = "lost_face"
        face_event_msg.face_id = lost_face
        event_publisher.publish(face_event_msg)

    # Face locations
    faces_msg = Faces()
    faces_msg.faces = []
    for user, location in current_faces.items():
        face_msg = Face()
        face_msg.id = user
        face_msg.point = location
        face_msg.attention = 1
        faces_msg.faces.append(face_msg)
    face_publisher.publish(faces_msg)

    previous_faces = current_faces
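# findDiff is called above but not defined in this snippet. A minimal sketch,
# assuming it returns the IDs present in the first list but missing from the
# second; whether FaceEvent.face_id expects a list or a single ID is also an
# assumption.
def findDiff(current_ids, previous_ids):
    # IDs that appear in current_ids but not in previous_ids.
    return list(set(current_ids) - set(previous_ids))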