Code Example #1
File: state-sync-simple.py Project: rdeits/pronto
def on_pose_body(channel, data):
  global atlas_state, joint_name_list
  if atlas_state.num_joints == 0:
    return

  m = pose_t.decode(data)

  o = robot_state_t()
  o.utime = m.utime
  o.num_joints = atlas_state.num_joints
  #o.joint_name = ["" for x in range(o.num_joints)]
  o.joint_name = joint_name_list
  o.joint_position = atlas_state.joint_position
  o.joint_velocity = atlas_state.joint_velocity
  o.joint_effort = atlas_state.joint_effort


  o.pose.translation.x = m.pos[0]
  o.pose.translation.y = m.pos[1]
  o.pose.translation.z = m.pos[2]
  o.pose.rotation.w = m.orientation[0]
  o.pose.rotation.x = m.orientation[1]
  o.pose.rotation.y = m.orientation[2]
  o.pose.rotation.z = m.orientation[3]

  lc.publish("EST_ROBOT_STATE", o.encode())
Code Example #2
def on_pose_body(channel, data):
    global atlas_state, joint_name_list
    if atlas_state.num_joints == 0:
        return

    m = pose_t.decode(data)

    o = robot_state_t()
    o.utime = m.utime
    o.num_joints = atlas_state.num_joints
    #o.joint_name = ["" for x in range(o.num_joints)]
    o.joint_name = joint_name_list
    o.joint_position = atlas_state.joint_position
    o.joint_velocity = atlas_state.joint_velocity
    o.joint_effort = atlas_state.joint_effort

    o.pose.translation.x = m.pos[0]
    o.pose.translation.y = m.pos[1]
    o.pose.translation.z = m.pos[2]
    o.pose.rotation.w = m.orientation[0]
    o.pose.rotation.x = m.orientation[1]
    o.pose.rotation.y = m.orientation[2]
    o.pose.rotation.z = m.orientation[3]

    lc.publish("EST_ROBOT_STATE", o.encode())
Code Example #3
def on_pose(channel, data):

    m = pose_t.decode(data)
    if mode == 0:
        print m.utime * 1E-6, " vic"
    else:
        print m.utime * 1E-6, ",0,0"
Code Example #4
def on_pb(channel, data):
    # record a queue of messages and compute the largest translational error between them, display that.
    global correctionTime, initialPoseBodyMsg, counterPosesAfterInitial
    msg = pose_t.decode(data)

    # Get time start
    global utimeStart
    if utimeStart is None:
        utimeStart = msg.utime

    # If a correction has just been received, the next measurement will
    # reflect it, so store the current pose body in order to estimate
    # the trajectory.
    if correctionTime is not None and msg.utime > correctionTime:
        if initialPoseBodyMsg is None:
            initialPoseBodyMsg = msg
            return
        # the initial pose body after the correction has been recorded
        correctionTime = None

    # Only compute and publish the difference once an initial pose body
    # has been received.
    if initialPoseBodyMsg is not None:
        # compute and publish the diff
        counterPosesAfterInitial = counterPosesAfterInitial + 1
        publish_error(initialPoseBodyMsg, msg)
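publish_error is defined elsewhere in this script; a minimal sketch of what it might look like, assuming it publishes the translational difference between the two pose_t messages as a double_array_t on the POSE_ERRORS channel used by other examples on this page (both the field layout and the channel name are assumptions):

import math

def publish_error(reference, current):
    # hypothetical helper: translational error between two pose_t messages
    err = double_array_t()
    err.utime = current.utime
    err.num_values = 1
    err.values = [math.sqrt(sum(pow(current.pos[i] - reference.pos[i], 2.0)
                                for i in range(3)))]
    lc.publish("POSE_ERRORS", err.encode())  # channel name is an assumption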
Code Example #5
def on_pose(channel, data):

    m = pose_t.decode(data)
    if mode == 0:
        print m.utime, " bdi"
    else:
        print m.utime, ",0,0"
Code Example #6
def on_pose_body(channel, data):
  if not skip_joint_angles:
    if not received_joint_state:
      return

  m = pose_t.decode(data)

  o = robot_state_t()
  o.utime = m.utime

  o.num_joints = joint_state.num_joints
  o.joint_name = joint_state.joint_name
  o.joint_position = joint_state.joint_position
  o.joint_velocity = joint_state.joint_velocity
  o.joint_effort = joint_state.joint_effort

  o.pose.translation.x = m.pos[0]
  o.pose.translation.y = m.pos[1]
  o.pose.translation.z = m.pos[2]
  o.pose.rotation.w = m.orientation[0]
  o.pose.rotation.x = m.orientation[1]
  o.pose.rotation.y = m.orientation[2]
  o.pose.rotation.z = m.orientation[3]

  lc.publish("EST_ROBOT_STATE", o.encode())
Code Example #7
File: printMessageTimes.py Project: ompugao/pronto
def on_pose(channel, data):

  m = pose_t.decode(data)
  if mode == 0:
    print m.utime, " bdi"
  else:
    print m.utime, ",0,0"
Code Example #8
File: print_poses.py Project: openhumanoids/pronto
def on_pose(channel, data):
  global utime_last_print, str_alt, str_body, str_vicon
  m = pose_t.decode(data)
  rpy = botpy.quat_to_euler(m.orientation)


  str_out = channel + ", " + str(m.utime) + ", "
  str_out = str_out + str(m.pos[0]) + ", " + str(m.pos[1]) + ", " + str(m.pos[2]) + ", "
  str_out = str_out + str(m.orientation[0]) + ", " + str(m.orientation[1]) + ", " + str(m.orientation[2]) + ", " + str(m.orientation[3]) + ", "
  str_out = str_out + str(rpy[0]*180.0/math.pi) + ", " + str(rpy[1]*180.0/math.pi) + ", " + str(rpy[2]*180.0/math.pi)

  if (channel == "POSE_BODY_ALT"):
    str_alt = str_out
  elif (channel == "POSE_BODY"):
    str_pose = str_out
  elif (channel == "POSE_VICON"):
    str_vicon = str_out

  if m.utime > utime_last_print + 1E6:
    if channel == "POSE_BODY":
      if str_alt is None or str_body is None or str_vicon is None:
        return

      utime_last_print = m.utime
      print str_body
      print str_alt
      print str_vicon
Code Example #9
def on_pose(channel, data):

  m = pose_t.decode(data)
  if mode == 0:
    print m.utime * 1E-6, " vic"
  else:
    print m.utime * 1E-6, ",0,0"
Code Example #10
def on_pose(channel, data):
    global utime_last_print, str_alt, str_body, str_vicon
    m = pose_t.decode(data)
    rpy = botpy.quat_to_euler(m.orientation)

    str_out = channel + ", " + str(m.utime) + ", "
    str_out = str_out + str(m.pos[0]) + ", " + str(m.pos[1]) + ", " + str(
        m.pos[2]) + ", "
    str_out = str_out + str(m.orientation[0]) + ", " + str(
        m.orientation[1]) + ", " + str(m.orientation[2]) + ", " + str(
            m.orientation[3]) + ", "
    str_out = str_out + str(rpy[0] * 180.0 / math.pi) + ", " + str(
        rpy[1] * 180.0 / math.pi) + ", " + str(rpy[2] * 180.0 / math.pi)

    if (channel == "POSE_BODY_ALT"):
        str_alt = str_out
    elif (channel == "POSE_BODY"):
        str_pose = str_out
    elif (channel == "POSE_VICON"):
        str_vicon = str_out

    if m.utime > utime_last_print + 1E6:
        if channel == "POSE_BODY":
            if str_alt is None or str_body is None or str_vicon is None:
                return

            utime_last_print = m.utime
            print str_body
            print str_alt
            print str_vicon
Code Example #11
def on_pose(channel, data):
    m = pose_t.decode(data)

    delta_time = (m.utime - s.prev_utime) / 1E6
    if (delta_time < 1):
        return

    rpy = quat_to_euler(m.orientation)
    xyz = m.pos
    measure_drift(delta_time, m.utime, xyz, rpy)
Code Example #12
def on_pose(channel, data):
  m = pose_t.decode(data)

  delta_time = (m.utime - s.prev_utime) / 1E6
  if (delta_time < 1):
    return

  rpy = quat_to_euler(m.orientation)
  xyz = m.pos
  measure_drift(delta_time, m.utime, xyz, rpy)
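quat_to_euler is assumed to convert a unit quaternion in (w, x, y, z) order, the convention pose_t uses in the examples above (orientation[0] is w), into [roll, pitch, yaw]. A minimal sketch of such a helper:

import math

def quat_to_euler(q):
    # q = (w, x, y, z), assumed normalized
    w, x, y, z = q
    roll = math.atan2(2 * (w * x + y * z), 1 - 2 * (x * x + y * y))
    pitch = math.asin(2 * (w * y - z * x))
    yaw = math.atan2(2 * (w * z + x * y), 1 - 2 * (y * y + z * z))
    return [roll, pitch, yaw]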
Code Example #13
def on_pose(channel, data):

    m = pose_t.decode(data)

    rpy = botpy.quat_to_euler(m.orientation)
    print rpy[2] * 180.0 / math.pi

    rpy[2] = rpy[2]  #- 20.0*math.pi/180.0
    print rpy[2] * 180.0 / math.pi
    m.orientation = botpy.euler_to_quat(rpy)

    pos = list(m.pos)
    pos[0] = pos[0] - 0.05
    print pos[0]
    m.pos = pos

    lc.publish("POSE_BODY_CORRECTED", m.encode())
    quit()
Code Example #14
def on_pose_body(channel, data):
  m = pose_t.decode(data)

  out = indexed_measurement_t()

  out.utime = m.utime
  out.state_utime = m.utime

  out.measured_dim = 4
  out.z_indices = [8, 9, 10, 11]  # yaw, x, y, z
  # in the original C++: out.z_effective = { rpy[2], msg->state[9], msg->state[10], msg->state[11] };
  out.z_effective = [0.1, 0, 0, 0]
  out.measured_cov_dim = 16
  # R_effective is a flattened 4x4 covariance matrix; indices 0, 5, 10 and 15
  # form its diagonal
  out.R_effective = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
  out.R_effective[0] = 1000   # cl_cfg_.yaw_cov (yaw)
  out.R_effective[5] = 1000   # cl_cfg_.xyz_cov (x)
  out.R_effective[10] = 1000  # cl_cfg_.xyz_cov (y)
  out.R_effective[15] = 1000  # cl_cfg_.xyz_cov (z)

  lc.publish("GPF_MEASUREMENT_QUICK_LOCK", out.encode())
Code Example #15
def on_pose_body(channel, data):
  global core_robot_state, joint_names
  if core_robot_state.num_joints == 0:
    return

  m = pose_t.decode(data)

  o = robot_state_t()
  o.utime = m.utime
  o.num_joints = core_robot_state.num_joints
  o.joint_name = core_robot_state.joint_name
  o.joint_position = core_robot_state.joint_position
  o.joint_velocity = core_robot_state.joint_velocity
  o.joint_effort = core_robot_state.joint_effort


  nrot = quaternion_t()
  nvec = vector_3d_t()
  p = position_3d_t()
  p.rotation = nrot
  p.translation = nvec
  o.pose = p
 
  t = twist_t()
  t.linear_velocity = nvec
  t.angular_velocity = nvec
  o.twist = t

  o.pose.translation.x = m.pos[0]
  o.pose.translation.y = m.pos[1]
  o.pose.translation.z = m.pos[2]
  o.pose.rotation.w = m.orientation[0]
  o.pose.rotation.x = m.orientation[1]
  o.pose.rotation.y = m.orientation[2]
  o.pose.rotation.z = m.orientation[3]

  ft = force_torque_t()
  o.force_torque = ft

  lc.publish("EST_ROBOT_STATE", o.encode())
Code Example #16
def on_pose_gt(channel, data):
  m = pose_t.decode(data)

  rpy = botpy.quat_to_euler(m.orientation)

  # pre-init:
  if (s.last_est.utime < 0):
    return


  # after the reliable time
  s.counter = s.counter + 1
  if (s.counter % 10 == 1):
    string1 = str(m.utime) + ", " + str(m.pos[0]) + ", " + str(m.pos[1]) + ", " + str(m.pos[2])
    string1 = string1 + ", " + str(m.orientation[0]) + ", " + str(m.orientation[1]) + ", " + str(m.orientation[2]) + ", " + str(m.orientation[3])

    d = s.last_est
    string1 = string1 + ", " + str(d.utime) + ", " + str(d.pos[0]) + ", " + str(d.pos[1]) + ", " + str(d.pos[2])
    string1 = string1 + ", " + str(d.orientation[0]) + ", " + str(d.orientation[1]) + ", " + str(d.orientation[2]) + ", " + str(d.orientation[3])

    print string1
Code Example #17
def on_pose_body(channel, data):
    global core_robot_state, joint_names
    if (core_robot_state.num_joints == 0):
        return

    m = pose_t.decode(data)

    o = robot_state_t()
    o.utime = m.utime
    o.num_joints = core_robot_state.num_joints
    o.joint_name = core_robot_state.joint_name
    o.joint_position = core_robot_state.joint_position
    o.joint_velocity = core_robot_state.joint_velocity
    o.joint_effort = core_robot_state.joint_effort

    nrot = quaternion_t()
    nvec = vector_3d_t()
    p = position_3d_t()
    p.rotation = nrot
    p.translation = nvec
    o.pose = p

    t = twist_t()
    t.linear_velocity = nvec
    t.angular_velocity = nvec
    o.twist = t

    o.pose.translation.x = m.pos[0]
    o.pose.translation.y = m.pos[1]
    o.pose.translation.z = m.pos[2]
    o.pose.rotation.w = m.orientation[0]
    o.pose.rotation.x = m.orientation[1]
    o.pose.rotation.y = m.orientation[2]
    o.pose.rotation.z = m.orientation[3]

    ft = force_torque_t()
    o.force_torque = ft

    lc.publish("EST_ROBOT_STATE", o.encode())
Code Example #18
def on_pose_body(channel, data):
    m = pose_t.decode(data)

    out = indexed_measurement_t()

    out.utime = m.utime
    out.state_utime = m.utime

    out.measured_dim = 4
    out.z_indices = [8, 9, 10, 11]  # yaw, x, y, z
    # in the original C++: out.z_effective = { rpy[2], msg->state[9], msg->state[10], msg->state[11] };
    out.z_effective = [0.1, 0, 0, 0]
    out.measured_cov_dim = 16
    # R_effective is a flattened 4x4 covariance matrix; indices 0, 5, 10 and 15
    # form its diagonal
    out.R_effective = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    out.R_effective[0] = 1000   # cl_cfg_.yaw_cov (yaw)
    out.R_effective[5] = 1000   # cl_cfg_.xyz_cov (x)
    out.R_effective[10] = 1000  # cl_cfg_.xyz_cov (y)
    out.R_effective[15] = 1000  # cl_cfg_.xyz_cov (z)

    lc.publish("GPF_MEASUREMENT_QUICK_LOCK", out.encode())
Code Example #19
def on_pose_body(channel, data):
    m = pose_t.decode(data)
    if not state.got_pose_body: print "Received", channel
    state.got_pose_body = True
    state.pose_body = getBotCorePose3dAsBotTrans(m)
Code Example #20
File: printMessageTimes.py Project: andybarry/pronto
def on_pose(channel, data):
  m = pose_t.decode(data)
  print m.utime, " bdi"
Code Example #21
def on_pose(channel, data):
    estimatedPose = pose_t.decode(data)

    if viconPose != 0:

        errors = double_array_t()
        errors.utime = estimatedPose.utime
        errors.num_values = 2
        errors.values = [0] * errors.num_values

        # Compute 3D translation error
        transErr = math.sqrt(
            pow(viconPose.pos[0] - estimatedPose.pos[0], 2.0) +
            pow(viconPose.pos[1] - estimatedPose.pos[1], 2.0) +
            pow(viconPose.pos[2] - estimatedPose.pos[2], 2.0))

        # Compute 3D rotation error
        viconRot = quat_to_rot(viconPose.orientation)
        estimatedRot = quat_to_rot(estimatedPose.orientation)

        try:
            viconRotInverse = np.linalg.inv(viconRot)
        except np.linalg.LinAlgError:
            # Not invertible. Skip this measurement.
            return

        deltaRot = np.dot(estimatedRot, viconRotInverse)
        traceRot = deltaRot.trace()
        rotErr = math.acos((traceRot - 1) / 2) * 180.0 / math.pi

        # Set output variable
        errors.values[0] = transErr
        errors.values[1] = rotErr

        # Get time in seconds
        global index
        if index == 0:
            global utimeStart
            utimeStart = errors.utime

        utimeCurrent = (errors.utime - utimeStart)
        timeCurrent = utimeCurrent * 1e-6

        # Set updated correction flag
        global updatedCorrection
        updatedFlag = False
        if updatedCorrection:
            updatedFlag = True
            updatedCorrection = False

        # Write to file
        global file
        # rowIndex, time [s], translErr [m], rotErr [deg], updatedCorrectionFlag
        file.write('%d \t' % index)
        file.write('%f \t' % timeCurrent)
        file.write('%f \t' % transErr)
        file.write('%f \t' % rotErr)
        file.write('%i \n' % updatedFlag)
        index = index + 1

        # Publish to LCM
        lc.publish("POSE_ERRORS", errors.encode())
Code Example #22
statusMsg = ""
startTime = 0
imageChannels = []
lastPose = pose_t()
lastPose.utime = -1

imageFnames = {}
imagePoses = {}
imageMeta = {}
imagesWritten = 0
msgCount = 0
for e in log:
    if msgCount == 0:
        startTime = e.timestamp

    if e.channel == "POSE":
        lastPose = pose_t.decode(e.data)
    elif lastPose.utime < 0:
        continue
 
    msgCount = msgCount + 1
    if (msgCount % 5000) == 0:
        statusMsg = deleteStatusMsg(statusMsg)
        statusMsg = "read % d messages, % d %% done, %d images written" % (msgCount, log.tell() / float(log.size())*100,imagesWritten)
        sys.stderr.write(statusMsg)
        sys.stderr.flush()

    #if (msgCount >= 100000):
    #    break

    isImage = channels.match(e.channel)
    if not isImage:
        continue
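The log object iterated above is LCM's log reader; a minimal sketch of the setup it assumes, where the log path and the image-channel pattern are placeholders (events carry .channel, .data and .timestamp, and tell()/size() report read progress):

import re
import lcm

log = lcm.EventLog("lcmlog.00", "r")  # log path is a placeholder
channels = re.compile("CAMERA.*")     # image-channel pattern is a placeholder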
Code Example #23
def on_pose_est(channel, data):
  m = pose_t.decode(data)  
  s.last_est = m
Code Example #24
def on_pose(channel, data):
    errorTransform = pose_t.decode(data)

    errorsToPub = double_array_t()
    errorsToPub.utime = errorTransform.utime
    errorsToPub.num_values = 8
    errorsToPub.values = [0] * errorsToPub.num_values

    # Compute 3D translation error
    translErr = math.sqrt(
        pow(errorTransform.pos[0], 2.0) + pow(errorTransform.pos[1], 2.0) +
        pow(errorTransform.pos[2], 2.0))

    # Compute 3D rotation error
    errorRotMatrix = quat_to_rot(errorTransform.orientation)

    traceRot = errorRotMatrix.trace()
    rotErr = 180.0 - (math.acos((traceRot - 1) / 2) * 180.0 / math.pi)

    # Decomposed errors
    xErr = errorTransform.pos[0]
    yErr = errorTransform.pos[1]
    zErr = errorTransform.pos[2]
    yawErr = math.atan(errorRotMatrix[1, 0] / errorRotMatrix[0, 0])
    pitchErr = math.atan(-errorRotMatrix[2, 0] / math.sqrt(
        pow(errorRotMatrix[2, 1], 2.0) + pow(errorRotMatrix[2, 2], 2.0)))
    rollErr = math.atan(errorRotMatrix[2, 1] / errorRotMatrix[2, 2])

    # Set output variable
    errorsToPub.values[0] = translErr
    errorsToPub.values[1] = rotErr
    errorsToPub.values[2] = xErr
    errorsToPub.values[3] = yErr
    errorsToPub.values[4] = zErr
    errorsToPub.values[5] = yawErr
    errorsToPub.values[6] = pitchErr
    errorsToPub.values[7] = rollErr

    # Get time in seconds
    global index
    if index == 0:
        global utimeStart
        utimeStart = errorsToPub.utime

    utimeCurrent = (errorsToPub.utime - utimeStart)
    timeCurrent = utimeCurrent * 1e-6

    # Set updated correction flag
    global updatedCorrection
    updatedFlag = False
    if updatedCorrection:
        updatedFlag = True
        updatedCorrection = False

    # Write to file
    global file
    # rowIndex, time [s], translErr [m], rotErr [deg], updatedCorrectionFlag
    file.write('%d \t' % index)
    file.write('%f \t' % timeCurrent)
    file.write('%f \t' % translErr)
    file.write('%f \t' % rotErr)
    file.write('%i \n' % updatedFlag)
    index = index + 1

    # Publish to LCM
    lc.publish("POSE_ERRORS", errorsToPub.encode())
Code Example #25
File: drift_per_distance.py Project: gtinchev/pronto
def on_pose_est(channel, data):
  m = pose_t.decode(data)
  #print '%.6f' % (m.utime*1e-6)  
  s.most_recent_est = m
Code Example #26
def on_pose(channel, data):
    m = pose_t.decode(data)
    print m.utime, " bdi"
Code Example #27
statusMsg = ""
startTime = 0
imageChannels = []
lastPose = pose_t()
lastPose.utime = -1

imageFnames = {}
imagePoses = {}
imageMeta = {}
imagesWritten = 0
msgCount = 0
for e in log:
    if msgCount == 0:
        startTime = e.timestamp

    if e.channel == "POSE":
        lastPose = pose_t.decode(e.data)
    elif lastPose.utime < 0:
        continue

    msgCount = msgCount + 1
    if (msgCount % 5000) == 0:
        statusMsg = deleteStatusMsg(statusMsg)
        statusMsg = "read % d messages, % d %% done, %d images written" % (
            msgCount, log.tell() / float(log.size()) * 100, imagesWritten)
        sys.stderr.write(statusMsg)
        sys.stderr.flush()

    #if (msgCount >= 100000):
    #    break

    isImage = channels.match(e.channel)
Code Example #28
def on_correction(channel, data):
    global correctionTime
    # record the timestamp at which we have received the message
    msg = pose_t.decode(data)
    correctionTime = msg.utime
Code Example #29
def on_pose(channel, data):
    m = pose_t.decode(data)
    #print "p %s" % (p.utime)
    dt = float(timestamp_now() - m.utime) / 1000000
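timestamp_now is assumed to return the current wall-clock time in microseconds, matching the utime convention of pose_t; a minimal sketch:

import time

def timestamp_now():
    # current time in microseconds, matching pose_t.utime
    return int(time.time() * 1000000)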
Code Example #30
def on_vicon(channel, data):
    global viconPose
    viconPose = pose_t.decode(data)
Code Example #31
def pose_handler(self, channel, data):
    msg = pose_t.decode(data)
    #gobject.idle_add(self.callback, "turn_right")
    t = threading.Thread(target=functools.partial(self.callback, "turn_right"))
    t.start()
Code Example #32
File: drift_per_distance.py Project: gtinchev/pronto
def on_pose_gt(channel, data):
  m = pose_t.decode(data)
  #print '                            %.6f' % (m.utime*1e-6)  

  rpy = botpy.quat_to_euler(m.orientation)

  # pre-init:
  if (s.most_recent_est.utime < 0):
    return
  if (s.last.utime < 0):
    s.last = m
    s.last_est = s.most_recent_est
    return

  # after the reliable time
  #if (m.utime - s.last.utime > 1e6):
  if decide_new_measurement(m):
    #print '%.6f' % (s.last.utime*1e-6)

    gt_a = poseToBotTrans(s.last)
    gt_b = poseToBotTrans(m)
    gt_ab = botpy.transform_relative(gt_a, gt_b)

    se_a = poseToBotTrans(s.last_est)
    se_b = poseToBotTrans(s.most_recent_est)
    se_ab = botpy.transform_relative(se_a, se_b)

    #print "==========="
    #print gt_ab.trans_vec
    #print se_ab.trans_vec

    distanceTravelled = np.linalg.norm(np.array(m.pos) - np.array(s.last.pos))
    drift_xyz = np.array(se_ab.trans_vec) - np.array(gt_ab.trans_vec)
    drift = np.linalg.norm(np.array(se_ab.trans_vec) - np.array(gt_ab.trans_vec))
    percentDriftPerDistanceTravelled = 100*drift/distanceTravelled

    # yaw is positive anti-clockwise, zero facing towards x
    gt_ab_yaw = botpy.quat_to_euler(gt_ab.rot_quat)[2]*180.0/np.pi
    se_ab_yaw = botpy.quat_to_euler(se_ab.rot_quat)[2]*180.0/np.pi
    yaw_error = se_ab_yaw - gt_ab_yaw

    #print "======================"
    #print "======================"
    #print "yaw gt pre: ", botpy.quat_to_euler(s.last.orientation)[2]*180.0/np.pi
    #print "yaw gt now: ", botpy.quat_to_euler(m.orientation)[2]*180.0/np.pi
    #print "yaw se pre: ", botpy.quat_to_euler(s.last_est.orientation)[2]*180.0/np.pi
    #print "yaw se now: ", botpy.quat_to_euler(s.most_recent_est.orientation)[2]*180.0/np.pi
    #print "======================"
    #print gt_ab_yaw
    #print se_ab_yaw

    print yaw_error, "deg yaw drift |", drift_xyz, "or", drift, "m drift in", distanceTravelled, "m travelled, thus", percentDriftPerDistanceTravelled, "%DDT"


    em = error_metrics_t()
    em.utime = m.utime
    em.pos_error = drift_xyz
    em.pos_error_norm = drift
    em.rpy_error = [0, 0, yaw_error]

    em.distance_travelled = distanceTravelled
    em.time_elapsed = (m.utime - s.last.utime) * 1e-6
    em.percent_ddt = percentDriftPerDistanceTravelled
    lc.publish("PRONTO_ERROR", em.encode())

    s.last = m
    s.last_est = s.most_recent_est
    return
Code Example #33
File: footstep_spoof.py Project: andybarry/pronto
def on_pose_body(channel, data):
  m = pose_t.decode(data)
  if not state.got_pose_body: print "Received", channel
  state.got_pose_body = True
  state.pose_body = getBotCorePose3dAsBotTrans(m)
Code Example #34
def on_pose(channel, data):
  m = pose_t.decode(data)
  #print "p %s" % (p.utime)
  dt = float(timestamp_now() - m.utime) / 1000000