Code example #1
File: viconpose_tf.py Project: jitete/FYP_workspace
def frame_tf(msg):
    br = tf2_ros.TransformBroadcaster()  # note: constructing a broadcaster on every callback works, but a single shared instance is preferable
    t = TransformStamped()
    t.header.stamp = rospy.Time.now()
    t.header.frame_id = "ned"
    t.child_frame_id = "vicon"
    t.transform.translation.x = 0
    t.transform.translation.y = 0
    t.transform.translation.z = 0
    q = tf.transformations.quaternion_from_euler(math.pi, 0, -53.0*math.pi/180)
    t.transform.rotation.x = q[0]
    t.transform.rotation.y = q[1]
    t.transform.rotation.z = q[2]
    t.transform.rotation.w = q[3]
    br.sendTransform(t)

    t2 = TransformStamped()
    t2.header.stamp = rospy.Time.now()
    t2.header.frame_id = "vicon"
    t2.child_frame_id = "uav"
    t2.transform.translation.x = msg.pose.position.x
    t2.transform.translation.y = msg.pose.position.y
    t2.transform.translation.z = msg.pose.position.z
    t2.transform.rotation.x = msg.pose.orientation.x
    t2.transform.rotation.y = msg.pose.orientation.y
    t2.transform.rotation.z = msg.pose.orientation.z
    t2.transform.rotation.w = msg.pose.orientation.w
    br.sendTransform(t2)
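A minimal sketch of how frame_tf would be wired up as a subscriber callback; the topic name and PoseStamped message type here are assumptions for illustration, not taken from the project:

import rospy
from geometry_msgs.msg import PoseStamped

rospy.init_node('viconpose_tf')
rospy.Subscriber('/vicon/pose', PoseStamped, frame_tf)  # hypothetical topic name
rospy.spin()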
Code example #2
File: localizer.py Project: g41903/MIT-RACECAR
    def _publish_tf(self, stamp):
        # check that we know which frames we need to publish from
        if self.map is None or self.base_frame is None:
            rospy.loginfo('Not publishing until map and odometry frames found')
            return

        # calculate the mean position
        x = np.mean([p.x for p in self.particles])
        y = np.mean([p.y for p in self.particles])
        theta = np.mean([p.theta for p in self.particles]) #TODO - wraparound

        # map to base_link
        map2base_frame = PyKDL.Frame(
            PyKDL.Rotation.Quaternion(*transformations.quaternion_from_euler(0, 0, theta)),
            PyKDL.Vector(x,y,0)
        )

        odom2base_frame = tf2_kdl.transform_to_kdl(self.tf_buffer.lookup_transform(
            target_frame=self.odom_frame,
            source_frame=self.base_frame,
            time=stamp,
            timeout=rospy.Duration(4.0)
        ))

        # derive the frame according to REP 105: the filter estimates
        # map->base_link, but only map->odom may be published, so
        # map->odom = (map->base_link) * (odom->base_link)^-1
        map2odom = map2base_frame * odom2base_frame.Inverse()

        br = tf2_ros.TransformBroadcaster()
        t = TransformStamped()

        t.header.stamp = stamp
        t.header.frame_id = self.map.frame
        t.child_frame_id = self.odom_frame
        t.transform.translation = Vector3(*map2odom.p)
        t.transform.rotation = Quaternion(*map2odom.M.GetQuaternion())
        br.sendTransform(t)


        # for Debugging
        if False:
            t.header.stamp = stamp
            t.header.frame_id = self.map.frame
            t.child_frame_id = self.base_frame+"_old"
            t.transform.translation = Vector3(*map2base_frame.p)
            t.transform.rotation = Quaternion(*map2base_frame.M.GetQuaternion())
            br.sendTransform(t)

        if self.publish_cloud:
            msg = PoseArray(
                header=Header(stamp=stamp, frame_id=self.map.frame),
                poses=[
                    Pose(
                        position=Point(p.x, p.y, 0),
                        orientation=Quaternion(*transformations.quaternion_from_euler(0, 0, p.theta))
                    )
                    for p in self.particles
                ]
            )
            self.pose_array.publish(msg)
Code example #3
def read_nav_csv(fname, origin):
    ''' Read nav.csv, convert it into a local coordinate frame, and return tf messages. '''
    msgs = []
    DEG2RAD = m.pi/180.0  
    with open(fname, 'r') as f:
        # example data 
        # 1354679075.295000000,6248548.85357,332949.839026,-41.4911136152,-0.00740605127066,-0.0505019649863,1.95254564285
        # a Nx7 matrix containing pose [time N E D R P Y] UTM cartesian coordinate system  
        for i, line in enumerate(f):
            words = line.split(',')
            time = words[0]
            [secs, nsecs]  = map(int, time.split('.'))
            x = float(words[1])
            y = float(words[2])
            z = float(words[3])
            R = float(words[4])
            P = float(words[5])
            Y = float(words[6])
            # convert to local coordinate frame, located at the origin 
            x = x - origin[0]
            y = y - origin[1]
            z = z - origin[2]
            # create TF msg 
            tf_msg = tfMessage()
            geo_msg = TransformStamped()
            geo_msg.header.stamp = Time(secs,nsecs) 
            geo_msg.header.seq = i
            geo_msg.header.frame_id = "map"
            geo_msg.child_frame_id = "body"
            geo_msg.transform.translation.x = x
            geo_msg.transform.translation.y = y
            geo_msg.transform.translation.z = z
            angles = tf.transformations.quaternion_from_euler(R,P,Y) 
            geo_msg.transform.rotation.x = angles[0]
            geo_msg.transform.rotation.y = angles[1]
            geo_msg.transform.rotation.z = angles[2]
            geo_msg.transform.rotation.w = angles[3]
            # rviz frame 
            geo_msg2 = TransformStamped()
            geo_msg2.header.stamp = Time(secs,nsecs) 
            geo_msg2.header.seq = i
            geo_msg2.header.frame_id = "map_rviz"
            geo_msg2.child_frame_id = "map"
            geo_msg2.transform.translation.x = 0
            geo_msg2.transform.translation.y = 0
            geo_msg2.transform.translation.z = 0
            angles = tf.transformations.quaternion_from_euler(DEG2RAD*180, 0, 0) # ax, ay, az 
            geo_msg2.transform.rotation.x = angles[0]
            geo_msg2.transform.rotation.y = angles[1]
            geo_msg2.transform.rotation.z = angles[2]
            geo_msg2.transform.rotation.w = angles[3]
            # push all transformations 
            tf_msg.transforms.append(geo_msg)
            tf_msg.transforms.append(geo_msg2)
            msgs.append(tf_msg)

    return msgs
Code example #4
    def model_state_cb(self, msg):
        self.names = set( rospy.get_param('/nav_experiments/people', []) )
        objects = rospy.get_param('/nav_experiments/scenario/objects', {})

        people_list = PositionMeasurementArray()
        people_list.header.stamp = rospy.Time.now()
        people_list.header.frame_id = '/map'
        for name, pose, twist in zip(msg.name, msg.pose, msg.twist):
            if len(self.names)==len(people_list.people):
                break
            if name not in self.names:
                continue

            p = PositionMeasurement()
            oname = name[:-10]
            p.name = oname
            p.object_id = oname
            p.reliability = 1.0

            properties = objects[oname]
            if 'movement' not in properties:
                p.pos.x = properties['xyz'][0]
                p.pos.y = properties['xyz'][1]
                p.pos.z = properties['xyz'][2]
            else:
                trans = TransformStamped()
                trans.header.frame_id = '/map'
                trans.child_frame_id = '/start'
                trans.transform.translation.x = properties['xyz'][0]
                trans.transform.translation.y = properties['xyz'][1]
                trans.transform.translation.z = properties['xyz'][2]
                trans.transform.rotation.x = 0
                trans.transform.rotation.y = 0
                trans.transform.rotation.z = 0
                trans.transform.rotation.w = 1
                self.tf.setTransform(trans)
                trans.header.frame_id = '/start'
                trans.child_frame_id = '/pos'
                trans.transform.translation = pose.position
                self.tf.setTransform(trans)
                nt = self.tf.lookupTransform('/map', '/pos', rospy.Time(0))
                
                p.pos.x = nt[0][0]
                p.pos.y = nt[0][1]
                p.pos.z = nt[0][2]
            p.header.stamp = people_list.header.stamp
            p.header.frame_id = '/map'
            people_list.people.append(p)
        self.pub.publish(people_list)
Code example #5
    def _toTransform(self, my_x, my_y):
        transform = TransformStamped()
        transform.header.stamp = rospy.Time.now()
        transform.header.frame_id = self.camera_frame
        transform.child_frame_id = self.blob.name

        (x, y, z) = self._projectTo3d(my_x, my_y)
        transform.transform.translation.x = x
        transform.transform.translation.y = y
        transform.transform.translation.z = z

        transform.transform.rotation.w = 1.0

        # If our parent frame is not the camera frame then an additional transformation is required.
        if self.parent_frame != self.camera_frame:
            point = PointStamped()
            point.header.frame_id = self.camera_frame
            point.header.stamp = rospy.Time(0)
            point.point.x = transform.transform.translation.x
            point.point.y = transform.transform.translation.y
            point.point.z = transform.transform.translation.z

            # Now we've gone from the regular camera frame to the correct parent_frame.
            point_transformed = self.listener.transformPoint(self.parent_frame, point)

            transform.header.frame_id = self.parent_frame
            transform.transform.translation.x = point_transformed.point.x
            transform.transform.translation.y = point_transformed.point.y
            transform.transform.translation.z = point_transformed.point.z

        return transform
Code example #6
File: visser_core.py Project: garamizo/visser_power
def pack_transrot(translation, rotation, time, child, parent):
    """
    :param translation: the translation of the transformtion as a tuple (x, y, z)
    :param rotation: the rotation of the transformation as a tuple (x, y, z, w)
    :param time: the time of the transformation, as a rospy.Time()
    :param child: child frame in tf, string
    :param parent: parent frame in tf, string

    Broadcast the transformation from tf frame child to parent on ROS topic ``"/tf"``.
    """

    t = TransformStamped()
    t.header.frame_id = parent
    t.header.stamp = time
    t.child_frame_id = child
    t.transform.translation.x = translation[0]
    t.transform.translation.y = translation[1]
    t.transform.translation.z = translation[2]

    rotation = np.array(rotation)
    rotation = rotation / np.linalg.norm(rotation, ord=2)
    t.transform.rotation.x = rotation[0]
    t.transform.rotation.y = rotation[1]
    t.transform.rotation.z = rotation[2]
    t.transform.rotation.w = rotation[3]
    
    return t
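pack_transrot only builds and returns the message, so the caller is expected to broadcast it; a minimal usage sketch with placeholder frame names and an identity rotation:

import rospy
import tf2_ros

br = tf2_ros.TransformBroadcaster()
t = pack_transrot((1.0, 0.0, 0.5), (0.0, 0.0, 0.0, 1.0),
                  rospy.Time.now(), 'gripper', 'base_link')
br.sendTransform(t)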
Code example #7
File: pose.py Project: sylvestre/morse
def post_tf(self, component_instance):
    component_name = component_instance.blender_obj.name
    frame_id = self._properties[component_name]['frame_id']
    child_frame_id = self._properties[component_name]['child_frame_id']

    euler = mathutils.Euler((component_instance.local_data['roll'],
                             component_instance.local_data['pitch'],
                             component_instance.local_data['yaw']))
    quaternion = euler.to_quaternion()

    t = TransformStamped()
    t.header.frame_id = frame_id
    t.header.stamp = rospy.Time.now()
    t.child_frame_id = child_frame_id
    t.transform.translation.x = component_instance.local_data['x']
    t.transform.translation.y = component_instance.local_data['y']
    t.transform.translation.z = component_instance.local_data['z']

    t.transform.rotation = quaternion  # mathutils.Quaternion exposes x, y, z, w, so rospy can serialize it like a geometry_msgs/Quaternion

    tfm = tfMessage([t])

    for topic in self._topics:
        # publish the message on the correct topic    
        if str(topic.name) == str("/tf"):
            topic.publish(tfm)
Code example #8
def pose_to_tf_msg(parent, child, position, orientation):
  ts = TransformStamped()
  ts.header.frame_id = parent
  ts.child_frame_id = child
  ts.transform.rotation = Quaternion(*orientation)
  ts.transform.translation = Vector3(*position)
  return ts
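pose_to_tf_msg leaves the header stamp unset, so a caller would normally stamp the result before publishing it; a minimal sketch with placeholder frames and an identity orientation:

ts = pose_to_tf_msg('map', 'robot', (1.0, 2.0, 0.0), (0.0, 0.0, 0.0, 1.0))
ts.header.stamp = rospy.Time.now()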
Code example #9
File: imu.py Project: DefaultUser/morse
def sendTransform(self, translation, rotation, time, child, parent):
        """
        :param translation: the translation of the transformation as a tuple (x, y, z)
        :param rotation: the rotation of the transformation as a tuple (x, y, z, w)
        :param time: the time of the transformation, as a rospy.Time()
        :param child: child frame in tf, string
        :param parent: parent frame in tf, string

        Broadcast the transformation from tf frame child to parent on ROS topic ``"/tf"``.
        """

        t = TransformStamped()
        t.header.frame_id = parent
        t.header.stamp = time
        t.child_frame_id = child
        t.transform.translation.x = translation[0]
        t.transform.translation.y = translation[1]
        t.transform.translation.z = translation[2]

        t.transform.rotation.x = rotation[0]
        t.transform.rotation.y = rotation[1]
        t.transform.rotation.z = rotation[2]
        t.transform.rotation.w = rotation[3]

        tfm = tfMessage([t])
        self.pub_tf.publish(tfm)
Code example #10
    def publish_state(self, t):
        state_msg = TransformStamped()
        t_ned_imu = tft.translation_matrix(self.model.get_position())
        R_ned_imu = tft.quaternion_matrix(self.model.get_orientation())
        T_ned_imu = tft.concatenate_matrices(t_ned_imu, R_ned_imu)
        #rospy.loginfo("T_ned_imu = \n" + str(T_ned_imu))
        if self.T_imu_vicon is None:
            # grab the static transform from imu to vicon frame from param server:
            frames = rospy.get_param("frames", None)
            ypr = frames['vicon_to_imu']['rotation']
            q_vicon_imu = tft.quaternion_from_euler(*[radians(x) for x in ypr], axes='rzyx') # xyzw
            R_vicon_imu = tft.quaternion_matrix(q_vicon_imu)
            t_vicon_imu = tft.translation_matrix(frames['vicon_to_imu']['translation'])
#            rospy.loginfo(str(R_vicon_imu))
#            rospy.loginfo(str(t_vicon_imu))
            self.T_vicon_imu = tft.concatenate_matrices(t_vicon_imu, R_vicon_imu)
            self.T_imu_vicon = tft.inverse_matrix(self.T_vicon_imu)
            self.T_enu_ned = tft.euler_matrix(radians(90), 0, radians(180), 'rzyx')
            rospy.loginfo(str(self.T_enu_ned))
            rospy.loginfo(str(self.T_imu_vicon))
            #rospy.loginfo(str(T_vicon_imu))
        # we have /ned -> /imu, need to output /enu -> /vicon:
        T_enu_vicon = tft.concatenate_matrices(self.T_enu_ned, T_ned_imu, self.T_imu_vicon )
        state_msg.header.stamp  = t
        state_msg.header.frame_id = "/enu"
        state_msg.child_frame_id = "/simflyer1/flyer_vicon"
        stt = state_msg.transform.translation
        stt.x, stt.y, stt.z = T_enu_vicon[:3,3]
        stro = state_msg.transform.rotation
        stro.x, stro.y, stro.z, stro.w = tft.quaternion_from_matrix(T_enu_vicon)
        
        self.pub_state.publish(state_msg)
Code example #11
    def publishLatchTransform(self, arm):
        if arm == 'left':
            self.pub_tf_left = rospy.Publisher("/tf", tfMessage, queue_size=1, latch=True)
        else:
            self.pub_tf_right = rospy.Publisher("/tf", tfMessage, queue_size=1, latch=True)
            
        f = open("/home/davinci2/catkin_ws/src/davinci_vision/launch/BC_registration/transform_" + arm + \
            ".txt", "r")
        transform = f.readline().split()
        f.close()
        print(transform)
        # Send static link transforms
        msg = TransformStamped()

        msg.header.stamp = rospy.Time.now()
        msg.transform.rotation.x =  float(transform[3])
        msg.transform.rotation.y =  float(transform[4])
        msg.transform.rotation.z =  float(transform[5])
        msg.transform.rotation.w =  float(transform[6])
        msg.child_frame_id = "left_BC"   
        msg.transform.translation.x = float(transform[0])
        msg.transform.translation.y = float(transform[1])
        msg.transform.translation.z = float(transform[2])
        if arm == 'left':
            # msg.header.frame_id = "two_psm_base_link"
            msg.header.frame_id = "two_remote_center_link"
            pub = self.pub_tf_left
        else:
            msg.header.frame_id = "one_remote_center_link"
            pub = self.pub_tf_right
        while not rospy.is_shutdown():
            msg.header.stamp = rospy.Time.now()
            pub.publish([msg])  # tfMessage is constructed from the list of transforms
            rospy.sleep(0.5)
Code example #12
 def on_result(self, boxes, classes):
     rospy.logdebug("on_result")
     msg = ObjectDetection()
     msg.header = boxes.header
     for box, label, prob in zip(boxes.boxes, classes.label_names, classes.label_proba):
         if not label:
             continue
         pose = Object6DPose()
         pose.pose = box.pose
         pose.reliability = prob
         pose.type = label
         msg.objects.append(pose)
         if self.publish_tf:
             t = TransformStamped()
             t.header = box.header
             t.child_frame_id = label
             t.transform.translation.x = box.pose.position.x
             t.transform.translation.y = box.pose.position.y
             t.transform.translation.z = box.pose.position.z
             t.transform.rotation.x = box.pose.orientation.x
             t.transform.rotation.y = box.pose.orientation.y
             t.transform.rotation.z = box.pose.orientation.z
             t.transform.rotation.w = box.pose.orientation.w
             self.tfb.sendTransform(t)
     self.pub_detect.publish(msg)
Code example #13
    def transformer(data):
        for marker in data.markers:
            if marker.subject_name:
                if marker.subject_name not in crazyflies:
                    crazyflies[marker.subject_name] = {}
                crazyflie = crazyflies[marker.subject_name]
                crazyflie[marker.marker_name] = marker.translation
                crazyflie["frame_id"] = str(data.frame_number)

        transforms = []
        for crazyflie_name in crazyflies:
            crazyflie = crazyflies[crazyflie_name]
            trans = TransformStamped()
            # trans.child_frame_id = crazyflie["frame_id"]
            trans.child_frame_id = "1"
            trans.header.frame_id = crazyflie["frame_id"]

            top = marker_to_vector(crazyflie["top"])
            bottom = marker_to_vector(crazyflie["bot"])
            left = marker_to_vector(crazyflie["left"])
            front = marker_to_vector(crazyflie["front"])

            center = get_center(top, left, bottom)
            rotation = get_rotation(center, front, top, left, bottom)

            mTrans = trans.transform.translation
            mTrans.x, mTrans.y, mTrans.z = center
            mRot = trans.transform.rotation
            mRot.x, mRot.y, mRot.z, mRot.w = rotation
            transforms.append(trans)

        # msg and pubtf come from the enclosing scope
        msg.transforms = transforms
        pubtf.publish(msg)
Code example #14
    def tag_callback(self, msg_tag):
        # Listen for the transform of the tag in the world
        avg = PoseAverage.PoseAverage()
        for tag in msg_tag.detections:
            try:
                Tt_w = self.tfbuf.lookup_transform(self.world_frame, "tag_{id}".format(id=tag.id), rospy.Time(), rospy.Duration(1))
                Mtbase_w=self.transform_to_matrix(Tt_w.transform)
                Mt_tbase = tr.concatenate_matrices(tr.translation_matrix((0,0,0.17)), tr.euler_matrix(0,0,np.pi))
                Mt_w = tr.concatenate_matrices(Mtbase_w,Mt_tbase)
                Mt_r=self.pose_to_matrix(tag.pose)
                Mr_t=np.linalg.inv(Mt_r)
                Mr_w=np.dot(Mt_w,Mr_t)
                Tr_w = self.matrix_to_transform(Mr_w)
                avg.add_pose(Tr_w)
                self.publish_sign_highlight(tag.id)
            except(tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as ex:
                rospy.logwarn("Error looking up transform for tag_%s", tag.id)
                rospy.logwarn(ex.message)

        Tr_w =  avg.get_average() # Average of the opinions

        # Broadcast the robot transform
        if Tr_w is not None:
            # Set the z translation, and x and y rotations to 0
            Tr_w.translation.z = 0
            rot = Tr_w.rotation
            rotz=tr.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))[2]
            (rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_euler(0, 0, rotz)
            T = TransformStamped()
            T.transform = Tr_w
            T.header.frame_id = self.world_frame
            T.header.stamp = rospy.Time.now()
            T.child_frame_id = self.duckiebot_frame
            self.pub_tf.publish(TFMessage([T]))
            self.lifetimer = rospy.Time.now()
Code example #15
def main():
    b = tf2_ros.Buffer()
    t = TransformStamped()
    t.transform.translation.x = 1
    t.transform.rotation.x = 1
    t.header.stamp = rospy.Time(2.0)
    t.header.frame_id = 'a'
    t.child_frame_id = 'b'
    b.set_transform(t, 'eitan_rocks')
    print b.lookup_transform('a','b', rospy.Time(2.0), rospy.Duration(2.0))

    v = PointStamped()
    v.header.stamp = rospy.Time(2)
    v.header.frame_id = 'a'
    v.point.x = 1
    v.point.y = 2
    v.point.z = 3
    print b.transform(v, 'b')

    v = Vector3Stamped()
    v.header.stamp = rospy.Time(2)
    v.header.frame_id = 'a'
    v.vector.x = 1
    v.vector.y = 2
    v.vector.z = 3
    print b.transform(v, 'b')

    v = PoseStamped()
    v.header.stamp = rospy.Time(2)
    v.header.frame_id = 'a'
    v.pose.position.x = 1
    v.pose.position.y = 2
    v.pose.position.z = 3
    v.pose.orientation.x = 1
    print b.transform(v, 'b')
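For b.transform(...) to accept PointStamped, Vector3Stamped and PoseStamped, the per-type converters must be registered, which happens as a side effect of importing tf2_geometry_msgs; a script like this one presumably sits under imports along these lines:

import rospy
import tf2_ros
import tf2_geometry_msgs  # registers transform support for the geometry_msgs types used above
from geometry_msgs.msg import PointStamped, Vector3Stamped, PoseStamped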
Code example #16
def get_tree(frame, depth, step, frame_name, bush=False, idepth=0):
  branch_factor = get_tree.branch_factor
  twig_height   = get_tree.twig_height

  tfs = []
  F = kdl.Frame()

  for i in range(branch_factor):
    F.M = kdl.Rotation.RotZ(2.0*pi*i/branch_factor)
    F.p = F.M*kdl.Vector(step, 0, twig_height) + bush*frame.p

    msg = TransformStamped()
    msg.header.stamp = rospy.Time.now()
    msg.transform.translation = Vector3(*F.p)
    msg.transform.rotation = Quaternion(*(F.M.GetQuaternion()))

    fr_name = frame_name +str(i)
    msg.child_frame_id  = fr_name
    msg.header.frame_id = fr_name[:-(1 + idepth*bush)]

    tfs.append(msg)

    #recurse
    if depth > 1:
      tfs.extend(get_tree(F, depth - 1, step / 2.0,
                          fr_name, bush, idepth + 1))

  return tfs
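Because get_tree reads branch_factor and twig_height from attributes stored on the function object itself, they must be assigned before the first call; a minimal sketch with assumed values:

import PyKDL as kdl

get_tree.branch_factor = 3   # assumed value
get_tree.twig_height = 0.1   # assumed value
transforms = get_tree(kdl.Frame(), depth=2, step=1.0, frame_name='branch')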
Code example #17
def pack_pose(time, child, parent, matrix=None, trans=None, quat=None):

    if matrix is not None and (trans is None and quat is None):
        trans = tfs.translation_from_matrix(matrix)
        quat = tfs.quaternion_from_matrix(matrix)
    elif matrix is None and trans is not None and quat is not None:
        pass  # translation and quaternion were given directly; nothing to derive
    else:
        print 'invalid use'

    t = TransformStamped()
    t.header.frame_id = parent
    t.header.stamp = time
    t.child_frame_id = child
    t.transform.translation.x = trans[0]
    t.transform.translation.y = trans[1]
    t.transform.translation.z = trans[2]

    quat = numpy.array(quat)
    quat = quat / numpy.linalg.norm(quat, ord=2)
    t.transform.rotation.x = quat[0]
    t.transform.rotation.y = quat[1]
    t.transform.rotation.z = quat[2]
    t.transform.rotation.w = quat[3]

    return t
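A short usage sketch for pack_pose in its matrix form, assuming tfs is tf.transformations as the function body implies; the frame names and values are placeholders:

import rospy
import tf.transformations as tfs

M = tfs.euler_matrix(0, 0, 1.57)   # example rotation about z
M[:3, 3] = [0.2, 0.0, 0.1]         # example translation
t = pack_pose(rospy.Time.now(), 'tool', 'base', matrix=M)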
Code example #18
File: sensors.py Project: cyy1991/carla
    def _compute_transform(self, sensor_data, cur_time):
        parent_frame_id = "base_link"
        child_frame_id = self.name

        t = TransformStamped()
        t.header.stamp = cur_time
        t.header.frame_id = parent_frame_id
        t.child_frame_id = child_frame_id

        # for camera we reorient it to look at the same axis as the opencv projection
        # in order to get easy depth cloud for RGBD camera
        t.transform = carla_transform_to_ros_transform(
            self.carla_object.get_transform())

        rotation = t.transform.rotation
        quat = [rotation.x, rotation.y, rotation.z, rotation.w]
        quat_swap = tf.transformations.quaternion_from_matrix(
            [[0, 0, 1, 0],
            [-1, 0, 0, 0],
            [0, -1, 0, 0],
            [0, 0, 0, 1]])
        quat = tf.transformations.quaternion_multiply(quat, quat_swap)

        t.transform.rotation.x = quat[0]
        t.transform.rotation.y = quat[1]
        t.transform.rotation.z = quat[2]
        t.transform.rotation.w = quat[3]

        self.process_msg_fun('tf', t)
Code example #19
def pack_transform(src_frame,dest_frame,trans,rot):
    transf = TransformStamped()    
    transf.header.frame_id = dest_frame
    transf.child_frame_id = src_frame
    transf.transform.translation = trans
    transf.transform.rotation = rot
    return transf
Code example #20
 def broadcast(self):
     f = open("/home/davinci2/catkin_ws/src/davinci_vision/launch/BC_registration/camera_frame.p", "rb")
     self.camera_transform = pickle.load(f)
     f.close()
     #SOMETIMES NEED TO INVERT X & Y AXES; just change from 1 to -1 and vice versa
     offset = tfx.transform([SKETCH_OFFSET, 0, CAP_OFFSET], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
     self.camera_transform = tfx.transform(self.camera_transform).as_transform() * offset
     transform = tfx.inverse_tf(self.camera_transform)
     pt = transform.translation
     rot = transform.rotation
     msg = TransformStamped()
     msg.header.stamp = rospy.Time.now()
     msg.transform.rotation.x = rot.x
     msg.transform.rotation.y = rot.y
     msg.transform.rotation.z = rot.z
     msg.transform.rotation.w = rot.w
     msg.child_frame_id = "left_BC"   
     msg.transform.translation.x = pt.x
     msg.transform.translation.y = pt.y
     msg.transform.translation.z = pt.z
     msg.header.frame_id = "registration_brick_right"
     print('boo')
     while not rospy.is_shutdown():
         msg.header.stamp = rospy.Time.now()
         self.pub.publish([msg])
         rospy.sleep(0.1)
Code example #21
File: pepper_bridge.py Project: code-iai/iai_robots
 def service_handler(self, req):
     #req is of type PepperCommRequest
     
     #prepare message to Pepper
     pepper_cmd = {}
     pepper_cmd['speak'] = req.speak + '\n'
     coord = [req.point_at.point.x, req.point_at.point.y, req.point_at.point.z]
     pepper_cmd['point_at'] = coord
     pepper_cmd['point_at_frame'] = req.point_at.header.frame_id
     pepper_cmd['get_pose'] = req.get_pose
     pepper_cmd['get_heard'] = req.get_heard
     
     #Encode into json
     pepper_net_string = self.enc.encode(pepper_cmd)
     
     #Send over the socket
     self.sock.sendall(pepper_net_string)
     
     timeout = 1.0  #In seconds
     
     start_time = time.time()
     
     while ((time.time() - start_time) < timeout):
         received = self.sock.recv(1024)
         
         if received != "":
             data = self.dec.decode(received)
             break
         
         else:
             time.sleep(0.05)
     
     
     rospy.loginfo('Received:')
     rospy.loginfo(data)
     
     #data['robot_pos'] should be 3 floats: pos_x, pos_y, rot_z (in m and rad) in the /map frame
     pepper_pose = data['robot_pos']
     
     pepper_ts = TransformStamped()
     pepper_ts.header.frame_id = self.map_frame
     pepper_ts.child_frame_id = self.pepper_frame
     pepper_ts.transform.translation.x = pepper_pose[0]
     pepper_ts.transform.translation.y = pepper_pose[1]
     pepper_ts.transform.translation.z = 0.0
     
     rotz = pepper_pose[2]
     quat = kdl.Rotation.RotZ(rotz).GetQuaternion()
     pepper_ts.transform.rotation.x = quat[0]
     pepper_ts.transform.rotation.y = quat[1]
     pepper_ts.transform.rotation.z = quat[2]
     pepper_ts.transform.rotation.w = quat[3]
     
     ans = PepperCommResponse()
     ans.heard = data['robot_heard']
     ans.pepper_pose = pepper_ts
     
     
     return ans
Code example #22
File: scan_scene.py Project: HLP-R/hlpr_lookat
def transform_helper(vec3, frame):
    pos = TransformStamped()
    pos.child_frame_id = frame
    pos.header = Header()
    pos.transform = Transform()
    pos.transform.translation = vec3

    return pos
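Note that transform_helper leaves the rotation at its default (0, 0, 0, 0), which tf2 rejects as an unnormalized quaternion if the transform is ever consumed; a caller would typically set an identity rotation afterwards, as in this sketch (the vector and frame name are placeholders):

from geometry_msgs.msg import Vector3

pos = transform_helper(Vector3(0.5, 0.0, 0.2), 'gaze_target')
pos.transform.rotation.w = 1.0  # make the quaternion a valid identity rotation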
Code example #23
 def timer_tf_pub(self, event):
     t = TransformStamped()
     t.header.stamp = rospy.Time.now()
     t.header.frame_id = self.world_frame
     t.child_frame_id = self.target_frame
     t.transform.translation = self.marker_pose.position
     t.transform.rotation = self.marker_pose.orientation
     self.pub_tf.sendTransform(t)
Code example #24
    def publish(self, data):
        q = Quaternion()
        q.x = 0
        q.y = 0
        q.z = data[6]
        q.w = data[7]
        odomMsg = Odometry()
        odomMsg.header.frame_id = self._odom
        odomMsg.header.stamp = rospy.get_rostime()
        odomMsg.child_frame_id = self._baseLink
        odomMsg.pose.pose.position.x = data[0]
        odomMsg.pose.pose.position.y = data[1]
        odomMsg.pose.pose.position.z = 0
        odomMsg.pose.pose.orientation = q
        if self._firstTimePub:
            self._prevOdom = odomMsg
            self._firstTimePub = False
            return

        velocity = Twist()

        deltaTime = odomMsg.header.stamp.to_sec() - self._prevOdom.header.stamp.to_sec()
        # note: the quaternion is passed w-first rather than in the (x, y, z, w)
        # order euler_from_quaternion expects; combined with the sign flip on
        # angular.z below, the yaw rate still comes out with the correct sign
        yaw, pitch, roll = euler_from_quaternion(
            [odomMsg.pose.pose.orientation.w, odomMsg.pose.pose.orientation.x, odomMsg.pose.pose.orientation.y,
             odomMsg.pose.pose.orientation.z])
        prevYaw, prevPitch, prevRoll = euler_from_quaternion(
            [self._prevOdom.pose.pose.orientation.w, self._prevOdom.pose.pose.orientation.x,
             self._prevOdom.pose.pose.orientation.y, self._prevOdom.pose.pose.orientation.z])
        if deltaTime > 0:
            velocity.linear.x = (data[8] / deltaTime)

        deltaYaw = yaw - prevYaw

        # rospy.loginfo("yaw: %f\t\tpevYaw: %f\t\tdeltaYaw: %f" % (yaw,prevYaw,deltaYaw))

        if deltaYaw > math.pi: deltaYaw -= 2 * math.pi
        elif deltaYaw < -math.pi: deltaYaw += 2 * math.pi

        if deltaTime > 0:
            velocity.angular.z = -(deltaYaw / deltaTime)

        # rospy.loginfo("deltaYaw after check: %f\t\t angular: %f" % (deltaYaw, velocity.angular.z))

        odomMsg.twist.twist = velocity

        self._prevOdom = odomMsg

        traMsg = TransformStamped()
        traMsg.header.frame_id = self._odom
        traMsg.header.stamp = rospy.get_rostime()
        traMsg.child_frame_id = self._baseLink
        traMsg.transform.translation.x = data[0]
        traMsg.transform.translation.y = data[1]
        traMsg.transform.translation.z = 0
        traMsg.transform.rotation = q

        self._pub.publish(odomMsg)
        self._broadCase.sendTransformMessage(traMsg)
Code example #25
 def test_fromTf(self):
     transformer = Transformer(True, rospy.Duration(10.0))
     m = TransformStamped()
     m.header.frame_id = 'wim'
     m.child_frame_id = 'james'
     m.transform.translation.x = 2.71828183
     m.transform.rotation.w = 1.0
     transformer.setTransform(m)
     b = pm.fromTf(transformer.lookupTransform('wim', 'james', rospy.Time(0)))
Code example #26
File: geometry.py Project: PDuckworth/soma
 def to_ros_tf(self):
     t = TransformStamped()
     t.header.stamp = rospy.Time.from_sec(self.stamp)
     t.header.frame_id = self.ros_frame_id
     t.child_frame_id = ""
     tran = t.transform.translation
     tran.x, tran.y, tran.z = self.position.as_numpy()
     rot = t.transform.rotation
     rot.x, rot.y, rot.z, rot.w = self.quaternion.as_numpy
     return t
Code example #27
File: localization.py Project: lsouchet/naoqi_bridge
 def get_navigation_tf(self, navigation_pose):
     navigation_tf = TransformStamped()
     navigation_tf.header.frame_id = "/map"
     navigation_tf.header.stamp = rospy.Time.now()
     navigation_tf.child_frame_id = "/odom"
     navigation_tf.transform.translation.x = navigation_pose.x
     navigation_tf.transform.translation.y = navigation_pose.y
     navigation_tf.transform.rotation = self.get_ros_quaternion(
                 almath.Quaternion_fromAngleAndAxisRotation(navigation_pose.theta, 0, 0, 1))
     return navigation_tf
Code example #28
    def run(self):
        marker_map = rospy.get_param('~marker_map', {})
        args = {
            'device': rospy.get_param('~device'),
            'marker_map': marker_map,
            'callback_global': self.callback_global,
            'callback_local': self.callback_local,
        }
        parameters = self.get_options()

        self.fixed_frame_id = rospy.get_param('~fixed_frame_id', 'map')
        self.robot_frame_id = rospy.get_param('~robot_frame_id', 'base_link')
        self.stargazer_frame_id = rospy.get_param('~stargazer_frame_id', 'stargazer')
        self.map_frame_prefix = rospy.get_param('~map_frame_prefix', 'stargazer/map_')
        self.marker_frame_prefix = rospy.get_param('~marker_frame_prefix', 'stargazer/marker_')

        self.covariance = rospy.get_param('~covariance', None)
        if self.covariance is None:
            raise Exception('The "covariance" parameter is required.')
        elif len(self.covariance) != 36:
            raise Exception('The "covariance" parameter must be a 36 element vector.')

        # Publish static TF frames for the Stargazer map.
        stamp_now = rospy.Time.now()
        map_tf_msg = TFMessage()

        for marker_id, Tmap_marker in marker_map.iteritems():
            marker_tf_msg = TransformStamped()
            marker_tf_msg.header.stamp = stamp_now
            marker_tf_msg.header.frame_id = self.fixed_frame_id
            marker_tf_msg.child_frame_id = self.map_frame_prefix + marker_id
            marker_tf_msg.transform = matrix_to_transform(Tmap_marker)
            map_tf_msg.transforms.append(marker_tf_msg)

        self.tf_static_pub = rospy.Publisher('tf_static', TFMessage, latch=True)
        self.tf_static_pub.publish(map_tf_msg)
        
        # Start publishing Stargazer data.
        with StarGazer(**args) as stargazer:
            # The StarGazer might be streaming data. Turn off streaming mode.
            stargazer.stop_streaming()

            # Set all parameters, possibly to their default values. This is the
            # safest option because the parameters can be corrupted when the
            # StarGazer is powered off.
            for name, value in parameters.iteritems():
                stargazer.set_parameter(name, value)

            # Start streaming. ROS messages will be published in callbacks.
            stargazer.start_streaming()
            rospy.spin()

            # Stop streaming. Try to clean up after ourselves.
            stargazer.stop_streaming()
Code example #29
    def update_tf_tree(self):
        t = TransformStamped()
        t.header.stamp = rospy.Time.now()
        t.header.frame_id = "world"
        t.child_frame_id = self.child_frame
        t.transform.translation.x = self.point.x
        t.transform.translation.y = self.point.y
        t.transform.translation.z = self.depth
        t.transform.rotation = self.quaternion

        self.br.sendTransform(t)
Code example #30
    def broadcast_transform(self):
        rospy.init_node('robot_broadcaster', anonymous=True)
        self.pub = rospy.Publisher("/tf", tfMessage, queue_size=1, latch=True)

        f = open("/home/davinci2/catkin_ws/src/davinci_vision/launch/BC_registration/robot_transform_" + self.arm + \
            "_good.p", "r")
        p = pickle.load(f)
        f.close()

        pt = p.translation
        rot = p.rotation
        print("x: " + str(pt.x))
        print("y: " + str(pt.y))
        print("z: " + str(pt.z))
        print("x: " + str(rot.x))
        print("y: " + str(rot.y))
        print("z: " + str(rot.z))
        print("w: " + str(rot.w))
        
        msg = TransformStamped()

        msg.header.stamp = rospy.Time.now()
        msg.transform.rotation.x = rot.x
        msg.transform.rotation.y = rot.y
        msg.transform.rotation.z = rot.z
        msg.transform.rotation.w = rot.w
        msg.child_frame_id = "registration_brick"   
        msg.transform.translation.x = pt.x
        msg.transform.translation.y = pt.y
        msg.transform.translation.z = pt.z
        if self.arm == 'left':
            msg.header.frame_id = "one_remote_center_link"
            msg.child_frame_id = "registration_brick"
        else:
            msg.header.frame_id = "two_remote_center_link"
            msg.child_frame_id = "registration_brick_right"
        while not rospy.is_shutdown():
                msg.header.stamp = rospy.Time.now()
                self.pub.publish([msg])
                rospy.sleep(0.5)
Code example #31
 def empty_transform():
     t = TransformStamped()
     t.transform.rotation = Quaternion(0, 0, 0, 1)
     t.header.frame_id = "tool_link"
     t.child_frame_id = "TCP"
     return t
Code example #32
import traceback

import rospy
import tf2_py as tf2  # provides the LookupException / ExtrapolationException caught below
import tf2_ros
from geometry_msgs.msg import TransformStamped

rospy.init_node('tf_lookup')

tf_buffer = tf2_ros.Buffer(cache_time=rospy.Duration(20))
tl = tf2_ros.TransformListener(tf_buffer)

br = tf2_ros.TransformBroadcaster()
ts = TransformStamped()
ts.transform.rotation.w = 1.0
ts.transform.translation.x = 1.0
ts.transform.translation.y = 1.0

offset = rospy.get_param("~offset", -1.0)
parent = rospy.get_param("~parent", "map")
ts.header.frame_id = parent
child = rospy.get_param("~child", "frame1")
ts.child_frame_id = rospy.get_param("~child_old", "frame1_old")

while not rospy.is_shutdown():
    rospy.sleep(0.01)
    lookup_time = rospy.Time.now() + rospy.Duration(offset)
    try:
        trans = tf_buffer.lookup_transform(parent, child, lookup_time)
    except tf2.LookupException as ex:
        rospy.logwarn_throttle(5.0, lookup_time.to_sec())
        rospy.logwarn_throttle(5.0, traceback.format_exc())
        continue
    except tf2.ExtrapolationException as ex:
        rospy.logwarn_throttle(5.0, lookup_time.to_sec())
        rospy.logwarn_throttle(5.0, traceback.format_exc())
        continue
    rospy.loginfo_throttle(5.0, trans.transform.translation.x)
Code example #33
	def callback_img(self, msg_in):
		if msg_in.header.stamp > self.time_finished_processing:
			# Don't bother to process image if we don't have the camera calibration
			cv_image = None
			self.model_image_square = None
			self.model_image_triangle = None
			success_square = False
			success_triangle = False
			if self.got_camera_info:
				#Convert ROS image to CV image
				try:
					if self.param_use_compressed:
						cv_image = self.bridge.compressed_imgmsg_to_cv2( msg_in, "bgr8" )
					else:
						cv_image = self.bridge.imgmsg_to_cv2( msg_in, "bgr8" )
				except CvBridgeError as e:
					rospy.loginfo(e)
					return

				# Image mask for colour filtering
				mask_image_square = self.process_image(cv_image, 1)
				mask_image_triangle = self.process_image(cv_image, 2)

			if cv_image is not None:
				# ===================
				# Do processing here!
				# ===================
				sign_square = self.sign_cascade_square.detectMultiScale(mask_image_square, 1.01, 1, minSize=(25,25))
				sign_triangle = self.sign_cascade_triangle.detectMultiScale(mask_image_triangle, 1.01, 1, minSize=(25,25))

				for (x,y,w,h) in sign_square:
					cv2.rectangle(cv_image,(x,y),(x+w,y+h),(0,165,255),2)
					self.model_image_square = np.array([
											(x, y),
											(x+w, y+h),
											(x+w, y-h),
											(x-w, y+h),
											(x-w, y-h)], dtype=np.float64)
				
				for (x,y,w,h) in sign_triangle:
					cv2.rectangle(cv_image,(x,y),(x+w,y+h),(255,0,0),2)
					self.model_image_triangle = np.array([
											(x, y),
											(x+w, y+h),
											(x+w, y-h),
											(x-w, y+h),
											(x-w, y-h)], dtype=np.float64)

				# Do the SolvePnP method
				if self.model_image_square is not None:
					(success_square, rvec_square, tvec_square) = cv2.solvePnP(self.model_object_square, self.model_image_square, self.camera_matrix, self.dist_coeffs)

				# If a result was found, send to TF2

				if success_square:
					msg_out = TransformStamped()
					msg_out.header = msg_in.header
					msg_out.child_frame_id = "Square"
					msg_out.transform.translation.x = tvec_square[0]
					msg_out.transform.translation.y = tvec_square[1]
					msg_out.transform.translation.z = tvec_square[2]
					msg_out.transform.rotation.w = 1.0	# Could use rvec, but need to convert from DCM to quaternion first
					msg_out.transform.rotation.x = 0.0
					msg_out.transform.rotation.y = 0.0
					msg_out.transform.rotation.z = 0.0

					self.tfbr.sendTransform(msg_out)
					self.square_found = True
					self.triangle_found = False

				if self.model_image_triangle is not None:
					(success_triangle, rvec_triangle, tvec_triangle) = cv2.solvePnP(self.model_object_triangle, self.model_image_triangle, self.camera_matrix, self.dist_coeffs)

				# If a result was found, send to TF2

				if success_triangle:
					msg_out = TransformStamped()
					msg_out.header = msg_in.header
					msg_out.child_frame_id = "Triangle"
					msg_out.transform.translation.x = tvec_triangle[0]
					msg_out.transform.translation.y = tvec_triangle[1]
					msg_out.transform.translation.z = tvec_triangle[2]
					msg_out.transform.rotation.w = 1.0	# Could use rvec, but need to convert from DCM to quaternion first
					msg_out.transform.rotation.x = 0.0
					msg_out.transform.rotation.y = 0.0
					msg_out.transform.rotation.z = 0.0

					self.tfbr.sendTransform(msg_out)
					self.square_found = False
					self.triangle_found = True

				self.time_finished_processing = rospy.Time.now()
				# Convert CV image to ROS image and publish
				try:
					self.pub_overlay.publish( self.bridge.cv2_to_compressed_imgmsg( cv_image ) )
					self.pub_mask.publish( self.bridge.cv2_to_compressed_imgmsg( mask_image_square ) )
					#self.pub_mask_triangle.publish( self.bridge.cv2_to_compressed_imgmsg( mask_image_triangle ) )
				except CvBridgeError as e:
					print(e)
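The rotation above is deliberately published as identity; as the inline comment notes, using rvec would first require converting it from a rotation vector/DCM to a quaternion. A sketch of that conversion (not part of the original node), reusing rvec_square from the solvePnP call:

import cv2
import numpy as np
import tf.transformations

R, _ = cv2.Rodrigues(rvec_square)   # rotation vector -> 3x3 rotation matrix (DCM)
M = np.eye(4)
M[:3, :3] = R
qx, qy, qz, qw = tf.transformations.quaternion_from_matrix(M)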
Code example #34
File: tilt_scanner.py Project: dawonn/MichiganTech
    msg_tf.header.stamp = rospy.get_rostime() + rospy.Duration.from_sec(0.1)
    msg_tf.transform.rotation.x = q[0]
    msg_tf.transform.rotation.y = q[1]
    msg_tf.transform.rotation.z = q[2]
    msg_tf.transform.rotation.w = q[3]

    pub_tf.sendTransform(msg_tf)


if __name__ == '__main__':
    # Init ROS node
    rospy.init_node('tilt_sweep')

    # TF publisher
    global pub_tf
    global msg_tf
    msg_tf = TransformStamped()
    msg_tf.header.frame_id = "tilt_mount_base"
    msg_tf.child_frame_id = "tilt_mount"
    pub_tf = tf2_ros.TransformBroadcaster()

    # Servo angle control
    global angle
    global direction
    angle = 90
    direction = True
    rospy.Timer(rospy.Duration(0.02), sweepCB)

    rospy.spin()
Code example #35
    def update(self, m_array):
        """
            Using poses v1
            """
        if self.old_msg == m_array:
            # Message old
            return

        self.old_msg = m_array
        n_markers = len(m_array.markers)
        kalman_gains = []
        odom_to_filtered_transforms = []
        for marker in m_array.markers:
            time_stamp = marker.header.stamp
            # TODO: make this general (no hardcoded Qs)
            if marker.id > 9:
                frame_detected = "sign_detected/stop"
                frame_marker = "sign/stop"
                print("Using stop sign!!!")
                Q = np.diag([0.5, 0.5, 0.5])
            else:
                frame_detected = "aruco/detected" + str(marker.id)
                frame_marker = "aruco/marker" + str(marker.id)
                Q = np.diag([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])

            base_to_measured = self.get_base_to_measured(
                frame_marker, frame_detected, time_stamp)
            if not base_to_measured: return
            # the commented-out no-broadcast variant below fails
            #base_to_measured_no_broadcast = self.get_base_to_measured_no_broadcast(frame_marker, frame_detected, time_stamp)
            #self.broadcaster.sendTransform(base_to_measured_no_broadcast) # just for visualization
            time_stamp = base_to_measured.header.stamp

            # translation to be filtered
            base_to_measured_trans_odom = self.get_trans_in_odom(
                base_to_measured)
            translation_tbf = base_to_measured_trans_odom

            # rotation to be filtered
            odom_to_base = self.get_odom_to_base(time_stamp)
            odom_to_odom_measured_rot = self.get_odom_to_odom_measured_rotation(
                odom_to_base.transform.rotation,
                base_to_measured.transform.rotation)
            rotation_tbf = odom_to_odom_measured_rot

            maha_dist = self.mahalanobis_dist(translation_tbf, rotation_tbf, Q)

            print("Mahalanobis dist (kinda): {}".format(maha_dist))
            if maha_dist > 200.7:
                # outlier

                print("Outlier")
                return

            #K = 0.5
            #K = np.eye(6)*K
            K = self.kf.kalman_gain(Q)
            #K = [True, True, True, False, False, True]*K
            kalman_gains.append(K)
            filtered_translation, filtered_rot = self.filter_trans_and_rot(
                base_to_measured_trans_odom, odom_to_odom_measured_rot,
                K.diagonal())

            odom_to_filtered = self.get_odom_to_filtered(
                odom_to_base.transform.translation, filtered_translation,
                filtered_rot, frame_marker)
            odom_to_filtered.header.stamp = time_stamp
            #self.broadcaster.sendTransform(odom_to_filtered)
            odom_to_filtered_transforms.append(odom_to_filtered)

        if odom_to_filtered_transforms:
            # filtered to cf1/odom_filtered
            odom_to_filtered = self.average_transforms(
                odom_to_filtered_transforms)
            K = sum(kalman_gains) / len(odom_to_filtered_transforms)
            #print(K.round(2))
            #K = kalman_gains[-1]
            #odom_to_filtered.transform.rotation = odom_to_filtered_transforms[-1].transform.rotation

            odom_to_filtered.header = odom_to_filtered_transforms[-1].header
            odom_to_filtered.header.frame_id = "cf1/odom"
            odom_to_filtered.child_frame_id = "cf1/base_link/filtered"
            self.broadcaster.sendTransform(odom_to_filtered)

            filtered_to_odom_filtered = TransformStamped(
            )  # use this when using all measurements
            filtered_to_odom_filtered.header.stamp = time_stamp
            filtered_to_odom_filtered.header.frame_id = "cf1/base_link/filtered"
            filtered_to_odom_filtered.child_frame_id = "cf1/odom/filtered"
            filtered_to_odom_filtered.transform.translation.x = -odom_to_base.transform.translation.x
            filtered_to_odom_filtered.transform.translation.y = -odom_to_base.transform.translation.y
            filtered_to_odom_filtered.transform.translation.z = -odom_to_base.transform.translation.z
            filtered_to_odom_filtered.transform.rotation.w = 1
            self.broadcaster.sendTransform(filtered_to_odom_filtered)

            # map to odom
            try:
                # Might need to use rospy.Time(0)
                map_to_odom_filtered = self.tf_buf.lookup_transform(
                    "map", "cf1/odom/filtered", time_stamp,
                    rospy.Duration(1.0))
            except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                    tf2_ros.ExtrapolationException) as e:
                print(e)
                print(
                    "Transform lookup between map and cf1/odom/filtered failed"
                )
                return
            else:
                map_to_odom_filtered.header.frame_id = "map"
                map_to_odom_filtered.child_frame_id = "cf1/odom"
                map_to_odom = map_to_odom_filtered
                map_to_odom.header.stamp = rospy.Time.now()
                print("Used {}/{} markers measurements".format(
                    len(odom_to_filtered_transforms), n_markers))
                #rospy.loginfo("Created new transform between map and cf1/odom")
                K = sum(kalman_gains) / len(odom_to_filtered_transforms)
                self.kf.update_with_gain(K)
                #self.last_transform = map_to_odom

                return map_to_odom
Code example #36
def main(portName):
    print "[MOTOR_TEST]:: Inicializando motores en modo de PRUEBA"

    ###Connection with ROS
    rospy.init_node("motor_test")

    #Topic subscriptions
    subSpeeds = rospy.Subscriber("/hardware/motors/speeds", Float32MultiArray, callbackSpeeds)  #Topic providing motor speed and direction

    #Topic publishers

    pubRobPos = rospy.Publisher("mobile_base/position", Float32MultiArray, queue_size=1)
    #Publishes the robot's current position

    pubOdometry = rospy.Publisher("mobile_base/odometry", Odometry, queue_size=1)
    #Publishes the encoder-derived estimate of the robot's current position

    #Setting up ROS parameters
    br = tf.TransformBroadcaster() #Adapts the data to the robot's coordinate system
    rate = rospy.Rate(1)

    #Serial communication with the Roboclaw board

    print "[MOTOR_TEST]:: Roboclaw.-> Opening connection on serial port \"" + str(portName) + "\""
    RC = Roboclaw(portName, 38400)
    #Roboclaw.Open(portName, 38400)
    RC.Open()
    address = 0x80
    print "[MOTOR_TEST]:: Roboclaw.-> Connection established on serial port \"" + portName + "\" at 38400 bps"
    print "[MOTOR_TEST]:: Roboclaw.-> Clearing previous encoder readings"
    RC.ResetEncoders(address)

    #Speed control variables
    global leftSpeed
    global rightSpeed
    global newSpeedData

    leftSpeed = 0 #range [-1, 1]
    rightSpeed = 0 #range [-1, 1]
    newSpeedData = False #no new motion data at startup
    speedCounter = 5

    #Odometry variables
    robotPos = [0, 0, 0] # [X, Y, THETA]


    #ROS loop
    print "[VEGA]:: Testing the ROTOMBOTTO motors"
    while not rospy.is_shutdown():

        if newSpeedData: #New data arrived on the /hardware/motors/speeds topic

            newSpeedData = False
            speedCounter = 5

            #Send the speed commands to the Roboclaw

            #Scale the normalized speeds to the Roboclaw command range
            leftSpeed = int(leftSpeed*127)
            rightSpeed = int(rightSpeed*127)

            #Set the right motor direction
            if rightSpeed >= 0:
                RC.ForwardM1(address, rightSpeed)
            else:
                RC.BackwardM1(address, -rightSpeed)

            #Set the left motor direction
            if leftSpeed >= 0:
                RC.ForwardM2(address, leftSpeed)
            else:
                RC.BackwardM2(address, -leftSpeed)

        else: #No new data from the topic; the motors stop

            speedCounter -= 1 

            if speedCounter == 0:
                RC.ForwardM1(address, 0)
                RC.ForwardM2(address, 0)
 
            if speedCounter < -1:
                speedCounter = -1

        #-------------------------------------------------------------------------------------------------------

        #Read the encoder data
        encoderLeft = RC.ReadEncM2(address) #May still need a factor of -1; perhaps unnecessary
        encoderRight = RC.ReadEncM1(address) #The negative reading on this encoder comes from the motor's mounting orientation
        RC.ResetEncoders(address)

        print "[MOTOR_TEST]:: Lectura Encoders: EncIzq" + str(encoderLeft) + "  EncDer" + str(encoderRight)

        ###Odometry computation
        robotPos = calculateOdometry(robotPos, encoderLeft, encoderRight)

        pubRobPos.publish(Float32MultiArray(data=robotPos)) ##Publish the computed position

        ts = TransformStamped()
        ts.header.stamp = rospy.Time.now()
        ts.header.frame_id = "odom"
        ts.child_frame_id = "base_link"

        ts.transform.translation.x = robotPos[0]
        ts.transform.translation.y = robotPos[1]
        ts.transform.translation.z = 0
        quat = tf.transformations.quaternion_from_euler(0, 0, robotPos[2])
        ts.transform.rotation = Quaternion(*quat) #store a proper geometry_msgs/Quaternion, not a numpy array

        #the old-style tf broadcaster takes translation and rotation as tuples
        br.sendTransform((robotPos[0], robotPos[1], 0), quat, rospy.Time.now(), ts.child_frame_id, ts.header.frame_id)

        msgOdom = Odometry()

        msgOdom.header.stamp = rospy.Time.now()
        msgOdom.header.frame_id = "odom"
        msgOdom.pose.pose.position.x = robotPos[0]
        msgOdom.pose.pose.position.y = robotPos[1]
        msgOdom.pose.pose.position.z = 0
        msgOdom.pose.pose.orientation.x = 0
        msgOdom.pose.pose.orientation.y = 0
        msgOdom.pose.pose.orientation.z = math.sin(robotPos[2]/2)
        msgOdom.pose.pose.orientation.w = math.cos(robotPos[2]/2)

        pubOdometry.publish(msgOdom) #Publish the odometry data

        rate.sleep()

    #End of the ROS while loop
    #------------------------------------------------------------------------------------------------------

    RC.ForwardM1(address, 0)
    RC.ForwardM2(address, 0)
    RC.Close()
Code example #37
    goal_callback)  # sees aruco -> go to aruco pose function
goal = None

rospy.sleep(1)

# t_odom_init=TransformStamped()
# t_odom_init.header.stamp = rospy.Time.now()
# t_odom_init.header.frame_id = "map"
# t_odom_init.child_frame_id = 'cf1/odom'
# t_odom_init.transform.rotation.w = 1
# br.sendTransform(t_odom_init)  # send it so that its visible in rviz, what the crazyflie sees

# wait_to_trans = True
t_final_odom_update = TransformStamped()
t_final_odom_update.header.frame_id = "map"
t_final_odom_update.child_frame_id = 'cf1/odom'
t_final_odom_update.transform.rotation.w = 1

# ADDED HERE!!!!!!!!!!!!!!!!!!!!!
counter = 0
x_average = []
y_average = []
z_average = []

roll_average = []
pitch_average = []
yaw_average = []
# ADDED ABOVE!!!!!!!!!!!!!!!!!!!!!

if __name__ == "__main__":
Code example #38
File: tf_world_NED.py Project: goromal/aerowake_sim
from math import pi

import rospy
import tf2_ros
import tf.transformations as TR
from geometry_msgs.msg import TransformStamped


def homogeneous_matrix(trans, quat):
    return TR.concatenate_matrices(TR.translation_matrix(trans),
                                   TR.quaternion_matrix(quat))


if __name__ == "__main__":
    rospy.init_node('tf_world_NED')
    broadcaster = tf2_ros.StaticTransformBroadcaster()
    static_transformStamped = TransformStamped()

    static_transformStamped.header.stamp = rospy.Time.now()
    static_transformStamped.header.frame_id = "world"
    static_transformStamped.child_frame_id = "NED"

    H_NED_world = homogeneous_matrix([0.0, 0.0, 0.0],
                                     TR.quaternion_from_euler(pi, 0.0, 0.0))
    t_world_NED = TR.translation_from_matrix(H_NED_world)
    q_world_NED = TR.quaternion_from_matrix(H_NED_world)

    static_transformStamped.transform.translation.x = t_world_NED[0]
    static_transformStamped.transform.translation.y = t_world_NED[1]
    static_transformStamped.transform.translation.z = t_world_NED[2]

    static_transformStamped.transform.rotation.x = q_world_NED[0]
    static_transformStamped.transform.rotation.y = q_world_NED[1]
    static_transformStamped.transform.rotation.z = q_world_NED[2]
    static_transformStamped.transform.rotation.w = q_world_NED[3]

    broadcaster.sendTransform(static_transformStamped)  # latch the static world->NED transform
    rospy.spin()
Code example #39
def fid_cb(msg):
    """
    callback which takes fiducial transfroms and publishes pose of
    robot base relative to map
    @param msg: A fiducial_msgs FidcucialTransformArray object
    """
    global tf_broadcaster
    global tf_listener
    global FIDUCIAL_NAMES
    global pose_pub

    # if driving, don't interrupt
    if get_state() not in [States.LOST, States.TELEOP]:  # removed docked state
        return
    # if fiducials found, take the first
    if len(msg.transforms) == 0:
        return
    transform = msg.transforms[0]

    # swap y and z axes to fit map frame of reference
    pos = transform.transform.translation
    rot = transform.transform.rotation
    pos.x, pos.y, pos.z = pos.x, pos.z, pos.y
    rot.x, rot.y, rot.z = rot.x, rot.z, rot.y
    transform.transform.translation = pos
    transform.transform.rotation = rot

    # invert the transform
    homo_mat = PoseConv().to_homo_mat(transform.transform)
    inverted_tf =  PoseConv().to_tf_msg(np.linalg.inv(homo_mat))

    # send a transform from camera to fiducial
    m = TransformStamped()
    m.transform = inverted_tf
    m.header.frame_id = FIDUCIAL_NAMES.get(str(transform.fiducial_id))
    m.header.stamp = rospy.Time.now()
    m.child_frame_id = "fiducial_camera"
    tf_broadcaster.sendTransform(m)

    # calculate transform from map to base
    try:
        latest_time = tf_listener.getLatestCommonTime("/map","/fiducial_base")
        base_to_map = tf_listener.lookupTransform("/map","/fiducial_base",latest_time)
    except tf2_ros.TransformException:
        rospy.logwarn("failed to transform, is fiducial {} mapped?".format(transform.fiducial_id))
        return

    # convert transform to PoseWithCovarianceStamped
    robot_pose = PoseConv().to_pose_msg(base_to_map)
    pose_w_cov_stamped = PoseWithCovarianceStamped()
    pose_w_cov_stamped.pose.pose = robot_pose
    pose_w_cov_stamped.header.stamp = rospy.Time.now()
    pose_w_cov_stamped.header.frame_id = "map"
    rospy.logdebug("Sending fiducial pose:\n{}".format(robot_pose))

    # publish to /initialpose
    pose_pub.publish(pose_w_cov_stamped)

    # update state
    try:
        prev_state = get_state()
        if prev_state == States.LOST:
            change_state(States.WAITING)
            rospy.loginfo("Fiducial {} seen, no longer lost".format(transform.fiducial_id))
    except rospy.ServiceException:
        rospy.logerr("Could not access state service")
Code Example #40
    def step(self, ms):
        super().step(ms)

        stamp = Time(seconds=self.robot.getTime()).to_msg()

        time_diff_s = self.robot.getTime() - self._last_odometry_sample_time
        left_wheel_ticks = self.left_wheel_sensor.getValue()
        right_wheel_ticks = self.right_wheel_sensor.getValue()

        if time_diff_s == 0.0:
            return

        # Calculate velocities
        v_left_rad = (left_wheel_ticks -
                      self._prev_left_wheel_ticks) / time_diff_s
        v_right_rad = (right_wheel_ticks -
                       self._prev_right_wheel_ticks) / time_diff_s
        v_left = v_left_rad * self._wheel_radius
        v_right = v_right_rad * self._wheel_radius
        v = (v_left + v_right) / 2
        omega = (v_right - v_left) / self._wheel_distance

        # Calculate position & angle
        # Fourth order Runge - Kutta
        # Reference: https://www.cs.cmu.edu/~16311/s07/labs/NXTLabs/Lab%203.html
        k00 = v * cos(self._prev_angle)
        k01 = v * sin(self._prev_angle)
        k02 = omega
        k10 = v * cos(self._prev_angle + time_diff_s * k02 / 2)
        k11 = v * sin(self._prev_angle + time_diff_s * k02 / 2)
        k12 = omega
        k20 = v * cos(self._prev_angle + time_diff_s * k12 / 2)
        k21 = v * sin(self._prev_angle + time_diff_s * k12 / 2)
        k22 = omega
        k30 = v * cos(self._prev_angle + time_diff_s * k22 / 2)
        k31 = v * sin(self._prev_angle + time_diff_s * k22 / 2)
        k32 = omega
        position = [
            self._prev_position[0] + (time_diff_s / 6) *
            (k00 + 2 * (k10 + k20) + k30), self._prev_position[1] +
            (time_diff_s / 6) * (k01 + 2 * (k11 + k21) + k31)
        ]
        angle = self._prev_angle + \
            (time_diff_s / 6) * (k02 + 2 * (k12 + k22) + k32)

        # Update variables
        self._prev_position = position.copy()
        self._prev_angle = angle
        self._prev_left_wheel_ticks = left_wheel_ticks
        self._prev_right_wheel_ticks = right_wheel_ticks
        self._last_odometry_sample_time = self.robot.getTime()

        # Pack & publish odometry
        msg = Odometry()
        msg.header.stamp = stamp
        msg.header.frame_id = self._odometry_frame
        msg.child_frame_id = self._robot_base_frame
        msg.twist.twist.linear.x = v
        msg.twist.twist.angular.z = omega
        msg.pose.pose.position.x = position[0]
        msg.pose.pose.position.y = position[1]
        msg.pose.pose.orientation = euler_to_quaternion(0, 0, angle)
        self._odometry_publisher.publish(msg)

        # Pack & publish transforms
        tf = TransformStamped()
        tf.header.stamp = stamp
        tf.header.frame_id = self._odometry_frame
        tf.child_frame_id = self._robot_base_frame
        tf.transform.translation.x = position[0]
        tf.transform.translation.y = position[1]
        tf.transform.translation.z = 0.0
        tf.transform.rotation = euler_to_quaternion(0, 0, angle)
        self._tf_broadcaster.sendTransform(tf)
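
The step() method above calls a euler_to_quaternion helper that the listing does not include; a minimal sketch, assuming it returns a geometry_msgs Quaternion message (the snippet is ROS 2 style, given the rclpy-style Time stamp):

from math import sin, cos
from geometry_msgs.msg import Quaternion

def euler_to_quaternion(roll, pitch, yaw):
    # Standard roll-pitch-yaw (ZYX) to quaternion conversion.
    cr, sr = cos(roll / 2), sin(roll / 2)
    cp, sp = cos(pitch / 2), sin(pitch / 2)
    cy, sy = cos(yaw / 2), sin(yaw / 2)
    return Quaternion(
        x=sr * cp * cy - cr * sp * sy,
        y=cr * sp * cy + sr * cp * sy,
        z=cr * cp * sy - sr * sp * cy,
        w=cr * cp * cy + sr * sp * sy)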
Code Example #41
    def calibrateCamera(self):
        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30,
                    0.001)

        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((6 * 8, 3), np.float32)
        objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

        # Arrays to store object points and image points from all the images.
        objpoints = []  # 3d point in real world space
        imgpoints = []  # 2d points in image plane.
        num_samples = 0
        axis = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]]).reshape(-1, 3)
        print axis

        has_checker = False
        while (1):
            img = self.img_kinect_rgb
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            self.img_calibration = self.img_kinect_rgb.copy()

            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, (8, 6), None)
            #imshow("gray", gray)

            # If found, add object points, image points (after refining them)
            if ret == True:
                #num_samples += 1
                has_checker = True

                #print "Sameple added: " + str(num_samples)
                objpoints.append(objp)

                cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
                imgpoints.append(corners)

                # Draw and display the corners
                #cv2.drawChessboardCorners(self.img_calibration, (8,6), corners,ret)
                #cv2.imshow('Checker Board',self.img_calibration)
                #waitKey(10)

                mtx = []
                dist = []
                rvecs = []
                tvecs = []

                ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
                    objpoints, imgpoints, gray.shape[::-1], None, None)
                rvecs, tvecs, inliers = cv2.solvePnPRansac(
                    objp, corners, mtx, dist)

                # camera pose from object points
                RMat = cv2.Rodrigues(rvecs)[0]
                cam_pos = -np.matrix(RMat).T * np.matrix(tvecs)
                '''
				# get Euler angles form R
				P = np.float32([[RMat[0][0], RMat[0][1], RMat[0][2], cam_pos[0]], [RMat[1][0], RMat[1][1], RMat[1][2], cam_pos[1]], [RMat[2][0], RMat[2][1], RMat[2][2], cam_pos[2]]]).reshape(3, -1)
				eulerAngles = cv2.decomposeProjectionMatrix(P)[-1]
				'''

                print(chr(27) + "[2J")
                print "-------T-------"
                print cam_pos
                print "-------R-------"
                print rvecs
                #print "-------Euler Angles-------"
                #print eulerAngles
                '''
				self.cam_marker = Marker()
				self.cam_marker.header.frame_id = "camera_link"
				self.cam_marker.header.stamp = rospy.Time()
				self.cam_marker.ns = "/viz"
				self.cam_marker.id = 0
				self.cam_marker.type = Marker.CUBE
				self.cam_marker.action = Marker.ADD
				self.cam_marker.scale.x = 1.0
				self.cam_marker.scale.y = 1.0
				self.cam_marker.scale.z = 1.0
				self.cam_marker.color.a = 1.0
				self.cam_marker.color.g = 1.0
				self.cam_marker.pose.orientation.x = eulerAngles[0]
				self.cam_marker.pose.orientation.y = eulerAngles[1]
				self.cam_marker.pose.orientation.z = eulerAngles[2]
				self.cam_marker.pose.orientation.w = 1.0
				self.cam_marker.pose.position.x = cam_pos[0]
				self.cam_marker.pose.position.y = cam_pos[1]
				self.cam_marker.pose.position.z = cam_pos[2]
				
				self.pub_viz_campose.publish(self.cam_marker)
				'''
                '''
				roll = atan2(-cam_pos[2][1], cam_pos[2][2])
				pitch = asin(cam_pos[2][0])
				yaw = atan2(-cam_pos[1][0], cam_pos[0][0])
				'''

                # visualization
                '''
				imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
				img_project = self.draw(self.img_calibration,corners,imgpts)
				cv2.imshow('Checker Board',self.img_calibration)
				waitKey(10)
				'''

                # tf publishing
                m = TransformStamped()
                m.header.frame_id = "left_wrist"
                m.child_frame_id = "kinect"
                m.transform.translation.x = cam_pos[0]
                m.transform.translation.y = cam_pos[1]
                m.transform.translation.z = cam_pos[2]

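                # NOTE: rvecs from solvePnPRansac is a Rodrigues rotation
                # vector, not a quaternion, so the rotation fields below are
                # only placeholders (w is left at its default of 0).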
                m.transform.rotation.x = (rvecs[0])[0]
                m.transform.rotation.y = (rvecs[1])[0]
                m.transform.rotation.z = (rvecs[2])[0]
                #m.transform.rotation.w = (rvecs[3])[0]
                self.tf.setTransform(m)

                self.camera_pos_filters[0].add(cam_pos[2] / 50.0)
                self.camera_pos_filters[0].average()

                self.camera_pos_filters[1].add(-cam_pos[1] / 50.0)
                self.camera_pos_filters[1].average()

                self.camera_pos_filters[2].add(cam_pos[0] / 50.0)
                self.camera_pos_filters[2].average()

                self.camera_rot_filters[0].add(((rvecs[2])[0]))  # about green
                self.camera_rot_filters[0].average()
                if (self.camera_rot_filters[0].size > 0):
                    print self.camera_rot_filters[0].avg

                self.camera_rot_filters[1].add(-(rvecs[1])[0])  # about blue
                self.camera_rot_filters[1].average()
                if (self.camera_rot_filters[1].size > 0):
                    print self.camera_rot_filters[1].avg

                self.camera_rot_filters[2].add(((rvecs[0])[0]))  # about red
                self.camera_rot_filters[2].average()
                if (self.camera_rot_filters[2].size > 0):
                    print self.camera_rot_filters[2].avg

                self.br.sendTransform(
                    (self.camera_pos_filters[0].avg,
                     self.camera_pos_filters[1].avg,
                     self.camera_pos_filters[2].avg),
                    (self.camera_rot_filters[0].avg,
                     self.camera_rot_filters[1].avg,
                     self.camera_rot_filters[2].avg, -math.pi / 10.0),
                    rospy.Time.now(), "kinect", "left_wrist")

                imgpoints = []
                objpoints = []
                '''
			if(has_checker and num_samples > 20):

				# calibrate the camera
				ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)

				h,  w = gray.shape[:2]
				newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))
				print newcameramtx
				print "-------------"
				
				dst = cv2.undistort(gray, mtx, dist, None, newcameramtx)
				cv2.imshow('After Calibration',self.img_calibration)
				
				cv2.waitKey(-1)
				return
				'''

        cv2.destroyAllWindows()
コード例 #42
0
    def transCB(self, t):

        for fid_trans in t.transforms:
            # Check the image error
            if fid_trans.image_error > 0.3:
                return

            # Check whether detected fiducial is in the map
            for fid_gps in self.fiducial_gps_map.fiducials:
                # Only process the detected fiducial in the map
                if fid_trans.fiducial_id != fid_gps.fiducial_id:
                    continue

                self.reference_id = fid_trans.fiducial_id
                fid_name = str(self.reference_id)

                # Keep publishing tf until it is created
                while True:
                    # Publish tf from fid to camera
                    tf_fid_cam = TransformStamped()
                    tf_fid_cam.header.frame_id = self.camera_frame
                    tf_fid_cam.child_frame_id = fid_name
                    tf_fid_cam.header.stamp = rospy.Time.now()
                    tf_fid_cam.transform = fid_trans.transform
                    tfmsg_fid_cam = tf2_msgs.msg.TFMessage([tf_fid_cam])
                    self.tf_pub.publish(tfmsg_fid_cam)

                    # Publish tf from fid to utm
                    tf_fid_utm = TransformStamped()
                    tf_fid_utm.header.frame_id = self.gps_frame
                    tf_fid_utm.child_frame_id = "fiducial" + fid_name
                    tf_fid_utm.header.stamp = rospy.Time.now()
                    tf_fid_utm.transform.translation.x, tf_fid_utm.transform.translation.y = self.projection(
                        fid_gps.x, fid_gps.y)
                    tf_fid_utm.transform.translation.z = fid_gps.z
                    quat = tf.transformations.quaternion_from_euler(
                        -math.radians(fid_gps.rx), -math.radians(fid_gps.ry),
                        -math.radians(fid_gps.rz))
                    tf_fid_utm.transform.rotation.x = quat[0]
                    tf_fid_utm.transform.rotation.y = quat[1]
                    tf_fid_utm.transform.rotation.z = quat[2]
                    tf_fid_utm.transform.rotation.w = quat[3]
                    tfmsg_fid_utm = tf2_msgs.msg.TFMessage([tf_fid_utm])
                    self.tf_pub.publish(tfmsg_fid_utm)

                    # Publish tf from robot to fid
                    try:
                        robot_fid_trans = self.tfBuffer.lookup_transform(
                            fid_name, self.robot_frame, rospy.Time())
                        tf_robot_fid = TransformStamped()
                        tf_robot_fid.header.frame_id = "fiducial" + fid_name
                        tf_robot_fid.child_frame_id = "robot_fid"
                        tf_robot_fid.header.stamp = rospy.Time.now()
                        tf_robot_fid.transform = robot_fid_trans.transform
                        tfmsg_robot_fid = tf2_msgs.msg.TFMessage(
                            [tf_robot_fid])
                        self.tf_pub.publish(tfmsg_robot_fid)
                    except (tf2_ros.LookupException,
                            tf2_ros.ConnectivityException,
                            tf2_ros.ExtrapolationException):
                        debug_info(self.debug_output,
                                   "Creating tf from robot to fid")
                        continue

                    # Publish tf from robot to utm
                    try:
                        verify_stamp = tf_fid_utm.header.stamp
                        robot_utm_trans = self.tfBuffer.lookup_transform(
                            self.gps_frame, "robot_fid", rospy.Time())
                        if verify_stamp > robot_utm_trans.header.stamp:
                            debug_info(self.debug_output,
                                       "Looking up transformations")
                            continue
                        robot_gps_pose = Odometry()
                        robot_gps_pose.header.stamp = t.header.stamp  # Important to apply offset
                        robot_gps_pose.pose.pose.position.z = robot_utm_trans.transform.translation.z
                        robot_gps_pose.pose.pose.position.x, robot_gps_pose.pose.pose.position.y = self.projection(
                            robot_utm_trans.transform.translation.x,
                            robot_utm_trans.transform.translation.y,
                            inverse=True)
                        robot_gps_pose.pose.pose.orientation.x = -robot_utm_trans.transform.rotation.x
                        robot_gps_pose.pose.pose.orientation.y = -robot_utm_trans.transform.rotation.y
                        robot_gps_pose.pose.pose.orientation.z = -robot_utm_trans.transform.rotation.z
                        robot_gps_pose.pose.pose.orientation.w = robot_utm_trans.transform.rotation.w
                        robot_gps_pose.pose.pose.orientation = quat_rot(
                            robot_gps_pose.pose.pose.orientation, 0, 0, 90)
                        self.robot_gps_pub.publish(robot_gps_pose)
                        debug_info(self.debug_output,
                                   "Fiducial position updated")

                        break
                    except (tf2_ros.LookupException,
                            tf2_ros.ConnectivityException,
                            tf2_ros.ExtrapolationException):
                        debug_info(self.debug_output, "Updating the position")
                        continue
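
transCB above also relies on quat_rot and debug_info helpers that are not shown. A plausible sketch of quat_rot, assuming it composes a geometry_msgs Quaternion with an extra roll/pitch/yaw rotation given in degrees (as the quat_rot(..., 0, 0, 90) call suggests):

import math
import tf.transformations as tft
from geometry_msgs.msg import Quaternion

def quat_rot(q, roll_deg, pitch_deg, yaw_deg):
    # Compose the input quaternion with an additional rotation specified
    # as roll/pitch/yaw in degrees.
    dq = tft.quaternion_from_euler(math.radians(roll_deg),
                                   math.radians(pitch_deg),
                                   math.radians(yaw_deg))
    x, y, z, w = tft.quaternion_multiply(dq, [q.x, q.y, q.z, q.w])
    return Quaternion(x=x, y=y, z=z, w=w)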
Code Example #43
    def imuCallback(self, data):

        self.stamp = data.header.stamp
        dt = self.stamp - self.last_stamp
        dt = dt.to_sec() if dt.to_sec() < 0.5 else 0.05
        self.last_stamp = data.header.stamp
        # print("dt = ", dt)

        acceleration = np.array([
            data.linear_acceleration.x, data.linear_acceleration.y,
            data.linear_acceleration.z
        ])
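        # Gravity vector; zeroed here, so no gravity compensation is applied
        # in the velocity updates below.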
        g = np.array([0.0, 0.0, 0.0])

        accelX = acceleration[0]
        accelY = acceleration[1]
        accelZ = acceleration[2]

        angularVelocity = np.array([
            data.angular_velocity.x, data.angular_velocity.y,
            data.angular_velocity.z
        ])

        gyroX = angularVelocity[0]
        gyroY = angularVelocity[1]
        gyroZ = angularVelocity[2]

        # Reference : https://ww2.mathworks.cn/help/fusion/ref/insfiltererrorstate.html
        q0 = self.imuState[0, 0]
        q1 = self.imuState[1, 0]
        q2 = self.imuState[2, 0]
        q3 = self.imuState[3, 0]

        positionN = self.imuState[4, 0]
        positionE = self.imuState[5, 0]
        positionD = self.imuState[6, 0]
        vN = self.imuState[7, 0]
        vE = self.imuState[8, 0]
        vD = self.imuState[9, 0]
        gyrobiasX = self.imuState[10, 0]
        gyrobiasY = self.imuState[11, 0]
        gyrobiasZ = self.imuState[12, 0]
        accelbiasX = self.imuState[13, 0]
        accelbiasY = self.imuState[14, 0]
        accelbiasZ = self.imuState[15, 0]
        # scaleFactor = self.imuState[16]

        d_gyroX = gyrobiasX / 2 - gyroX / 2
        d_gyroY = gyrobiasY / 2 - gyroY / 2
        d_gyroZ = gyrobiasZ / 2 - gyroZ / 2
        d_accelX = accelbiasX - accelX
        d_accelY = accelbiasY - accelY
        d_accelZ = accelbiasZ - accelZ
        q123 = q1 * d_accelX + q2 * d_accelY + q3 * d_accelZ
        q30_1 = q3 * d_accelX + q0 * d_accelY - q1 * d_accelZ
        q_210 = -q2 * d_accelX + q1 * d_accelY + q0 * d_accelZ
        q0_32 = q0 * d_accelX - q3 * d_accelY + q2 * d_accelZ

        q0_next = q0 + \
            (+q1 * d_gyroX + q2 * d_gyroY + q3 * d_gyroZ)*dt
        q1_next = q1 + \
            (-q0 * d_gyroX + q3 * d_gyroY - q2 * d_gyroZ)*dt
        q2_next = q2 + \
            (-q3 * d_gyroX - q0 * d_gyroY + q1 * d_gyroZ)*dt
        q3_next = q3 + \
            (+q2 * d_gyroX - q1 * d_gyroY - q0 * d_gyroZ)*dt
        q_norm = math.sqrt(q0_next * q0_next + q1_next * q1_next +
                           q2_next * q2_next + q3_next * q3_next)
        positionN_next = positionN + dt * vN
        positionE_next = positionE + dt * vE
        positionD_next = positionD + dt * vD
        vN_next = vN - dt * (q0 * q0_32 + q1 * q123 + q2 * q_210 - q3 * q30_1 +
                             g[0])
        vE_next = vE - dt * (q0 * q30_1 - q1 * q_210 + q2 * q123 + q3 * q0_32 +
                             g[1])
        vD_next = vD - dt * (q0 * q_210 + q1 * q30_1 - q2 * q0_32 - q3 * q123 +
                             g[2])

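        # The integrated quaternion (q*_next / q_norm) computed above is
        # discarded by the "if False" selectors below in favour of the
        # orientation reported directly by the IMU message.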
        self.imuState[0, 0] = q0_next / q_norm if False else data.orientation.w
        self.imuState[1, 0] = q1_next / q_norm if False else data.orientation.x
        self.imuState[2, 0] = q2_next / q_norm if False else data.orientation.y
        self.imuState[3, 0] = q3_next / q_norm if False else data.orientation.z
        self.imuState[4, 0] = positionN_next
        self.imuState[5, 0] = positionE_next
        self.imuState[6, 0] = positionD_next
        self.imuState[7, 0] = vN_next
        self.imuState[8, 0] = vE_next
        self.imuState[9, 0] = vD_next

        # self.imuState[10] = gyrobiasX
        # self.imuState[11] = gyrobiasY
        # self.imuState[12] = gyrobiasZ
        # self.imuState[13] = accelbiasX
        # self.imuState[14] = accelbiasY
        # self.imuState[15] = accelbiasZ
        # self.imuState[16] = scaleFactor

        if self.flag % 200 == 0:
            print("acceleration", acceleration)
            print("d_accelX", d_accelX, d_accelY, d_accelZ)
            print("self.imuState", self.imuState[:10])

        self.flag = self.flag + 1

        data_output = Odometry()
        data_output.header = data.header
        data_output.header.frame_id = "odom"
        data_output.child_frame_id = "imu_link"
        data_output.pose.pose.orientation.w = self.imuState[0, 0]
        data_output.pose.pose.orientation.x = self.imuState[1, 0]
        data_output.pose.pose.orientation.y = self.imuState[2, 0]
        data_output.pose.pose.orientation.z = self.imuState[3, 0]
        data_output.pose.pose.position.x = self.imuState[4, 0]
        data_output.pose.pose.position.y = self.imuState[5, 0]
        data_output.pose.pose.position.z = self.imuState[6, 0]
        self.pub_imuOdom.publish(data_output)

        m = TransformStamped()
        m.header.frame_id = "odom"
        m.child_frame_id = "imu_link"
        m.transform.rotation = data_output.pose.pose.orientation
        m.transform.translation.x = data_output.pose.pose.position.x
        m.transform.translation.y = data_output.pose.pose.position.y
        m.transform.translation.z = data_output.pose.pose.position.z
        self.tf.setTransform(m)
Code Example #44
    def reset_position(self,
                       publish_basecamera=False,
                       from_world_frame=False,
                       timestamp=rospy.Time(0)):
        trans = None
        rot = None

        if from_world_frame:
            base_frame = 'world'
            target_frame = self.robot_name + '/camera_gt'
        else:
            base_frame = self.robot_name + '/odom'
            target_frame = self.robot_name + '/camera'

        while not rospy.is_shutdown():
            try:
                (trans, rot) = self.tf_listener.lookupTransform(
                    base_frame, target_frame, timestamp)
                break
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                rospy.logwarn_throttle(
                    10, "Waiting for transformation from /world to " +
                    self.robot_name + "/camera")
                rospy.sleep(0.05)

        if rospy.is_shutdown():
            exit(0)

        assert trans is not None
        assert rot is not None

        if not from_world_frame:
            euler = Transformation.get_euler_from_quaternion(rot)
            euler[0] = 0
            rot_no_yaw = Transformation.get_quaternion_from_euler(euler)
            self.pose = Transformation(trans, rot_no_yaw)
        else:
            self.pose = Transformation(trans, rot)

        # Send transform of the camera to camera footprint
        if publish_basecamera:
            br = tf2_ros.TransformBroadcaster()
            camera_footprint = TransformStamped()
            camera_footprint.header.frame_id = self.robot_name + "/odom"
            camera_footprint.child_frame_id = self.robot_name + "/base_camera"
            camera_footprint.header.stamp = timestamp

            euler = Transformation.get_euler_from_quaternion(rot)
            euler[1] = 0
            euler[2] = 0
            rot_only_yaw = Transformation.get_quaternion_from_euler(euler)
            camera_footprint.transform.translation.x = trans[0]
            camera_footprint.transform.translation.y = trans[1]
            camera_footprint.transform.translation.z = 0

            camera_footprint.transform.rotation.x = rot_only_yaw[0]
            camera_footprint.transform.rotation.y = rot_only_yaw[1]
            camera_footprint.transform.rotation.z = rot_only_yaw[2]
            camera_footprint.transform.rotation.w = rot_only_yaw[3]
            br.sendTransform(camera_footprint)
Code Example #45
            objectPose.T_knob_wrt_machine_corner[1, 3] = 0
            objectPose.T_knob_wrt_machine_corner[2, 3] = 0

            objectPose.T_knob_wrt_machine_corner[:3, :3] = np.identity(
                3, dtype=np.float32)
            objectPose.T_knob_wrt_machine_corner[3, :] = [0, 0, 0, 1]
            # Multiplication matrices
            objectPose.T_washOrigin_Depth = np.dot(
                objectPose.T_knob_wrt_machine_corner,
                objectPose.T_washOrigin_Depth)

            ##### Publishers ###########################################################
            # Structuring message for transformation matrix between wash machine origin and base link
            tr_msg.header.stamp = rospy.Time.now()
            tr_msg.header.frame_id = "base"
            tr_msg.child_frame_id = "wash_machine"
            tr_msg.transform.translation.x = objectPose.T_wrt_mobilebase[0, 3]
            tr_msg.transform.translation.y = objectPose.T_wrt_mobilebase[1, 3]
            tr_msg.transform.translation.z = objectPose.T_wrt_mobilebase[2, 3]
            q_orig = transforms3d.quaternions.mat2quat(
                objectPose.T_wrt_mobilebase[:3, :3])
            euler_orig = transforms3d.euler.quat2euler(q_orig, axes='sxyz')
            print(euler_orig)
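            # transforms3d returns quaternions in (w, x, y, z) order, hence
            # the index shuffle into the (x, y, z, w) message fields below.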
            tr_msg.transform.rotation.x = q_orig[1]
            tr_msg.transform.rotation.y = q_orig[2]
            tr_msg.transform.rotation.z = q_orig[3]
            tr_msg.transform.rotation.w = q_orig[0]

            # Structuring message for transformation matrix between wash machine origin and depth optical frame
            tr_msg_depth_opt_frame.translation.x = objectPose.T_washOrigin_Depth[
                0, 3]
Code Example #46
    def setup_cameras(self):
        """ Initializes image-related members.

            Sends camera parameter, position and rotation data to the simulator
            to properly reset them as specified in the node arguments.
            Calculates and sends static transforms for the left and
            right cameras relative to the body frame.
            Also constructs the CameraInfo messages for left and right cameras,
            to be published with every frame.
        """
        # Set camera parameters once for the entire simulation.
        # Set all cameras to have same intrinsics:
        for camera in self.cameras:
            camera_id = camera[0]
            if camera_id is not Camera.THIRD_PERSON:
                resp = None
                while resp is None:
                    print(
                        "TESSE_ROS_NODE: Setting intrinsic parameters for camera: ",
                        camera_id)
                    resp = self.env.request(
                        SetCameraParametersRequest(camera_id,
                                                   self.camera_height,
                                                   self.camera_width,
                                                   self.camera_fov,
                                                   self.near_draw_dist,
                                                   self.far_draw_dist))

        # TODO(marcus): add SetCameraOrientationRequest option.
        # TODO(Toni): this is hardcoded!! what if don't want IMU in the middle?
        # Also how is this set using x? what if it is y, z?
        left_cam_position = Point(x=-self.stereo_baseline / 2, y=0.0, z=0.0)
        right_cam_position = Point(x=self.stereo_baseline / 2, y=0.0, z=0.0)
        cameras_orientation = Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)

        resp = None
        while resp is None:
            print "TESSE_ROS_NODE: Setting position of left camera..."
            resp = self.env.request(
                SetCameraPositionRequest(
                    Camera.RGB_LEFT,
                    left_cam_position.x,
                    left_cam_position.y,
                    left_cam_position.z,
                ))

        resp = None
        while resp is None:
            print "TESSE_ROS_NODE: Setting position of right camera..."
            resp = self.env.request(
                SetCameraPositionRequest(
                    Camera.RGB_RIGHT,
                    right_cam_position.x,
                    right_cam_position.y,
                    right_cam_position.z,
                ))

        # Set position depth and segmentation cameras to align with left:
        resp = None
        while resp is None:
            print "TESSE_ROS_NODE: Setting position of depth camera..."
            resp = self.env.request(
                SetCameraPositionRequest(
                    Camera.DEPTH,
                    left_cam_position.x,
                    left_cam_position.y,
                    left_cam_position.z,
                ))
        resp = None
        while resp is None:
            print "TESSE_ROS_NODE: Setting position of segmentation camera..."
            resp = self.env.request(
                SetCameraPositionRequest(
                    Camera.SEGMENTATION,
                    left_cam_position.x,
                    left_cam_position.y,
                    left_cam_position.z,
                ))

        for camera in self.cameras:
            camera_id = camera[0]
            if camera_id is not Camera.THIRD_PERSON:
                resp = None
                while resp is None:
                    print(
                        "TESSE_ROS_NODE: Setting orientation of all cameras to identity..."
                    )
                    resp = self.env.request(
                        SetCameraOrientationRequest(
                            camera_id,
                            cameras_orientation.x,
                            cameras_orientation.y,
                            cameras_orientation.z,
                            cameras_orientation.w,
                        ))

        # Left cam static tf.
        static_tf_cam_left = TransformStamped()
        static_tf_cam_left.header.frame_id = self.body_frame_id
        static_tf_cam_left.header.stamp = rospy.Time.now()
        static_tf_cam_left.transform.translation = left_cam_position
        static_tf_cam_left.transform.rotation = cameras_orientation
        static_tf_cam_left.child_frame_id = self.left_cam_frame_id

        # Right cam static tf.
        static_tf_cam_right = TransformStamped()
        static_tf_cam_right.header.frame_id = self.body_frame_id
        static_tf_cam_right.header.stamp = rospy.Time.now()
        static_tf_cam_right.transform.translation = right_cam_position
        static_tf_cam_right.transform.rotation = cameras_orientation
        static_tf_cam_right.child_frame_id = self.right_cam_frame_id

        # Send static tfs over the ROS network
        self.static_tf_broadcaster.sendTransform(
            [static_tf_cam_right, static_tf_cam_left])

        # Camera_info publishing for VIO.
        left_cam_data = None
        while left_cam_data is None:
            print("TESSE_ROS_NODE: Acquiring left camera data...")
            left_cam_data = tesse_ros_bridge.utils.parse_cam_data(
                self.env.request(CameraInformationRequest(
                    Camera.RGB_LEFT)).metadata)
            assert (left_cam_data['id'] == 0)
            assert (left_cam_data['parameters']['height'] > 0)
            assert (left_cam_data['parameters']['width'] > 0)

        right_cam_data = None
        while right_cam_data is None:
            print("TESSE_ROS_NODE: Acquiring right camera data...")
            right_cam_data = tesse_ros_bridge.utils.parse_cam_data(
                self.env.request(CameraInformationRequest(
                    Camera.RGB_RIGHT)).metadata)
            assert (right_cam_data['id'] == 1)
            assert (right_cam_data['parameters']['height'] > 0)
            assert (right_cam_data['parameters']['width'] > 0)

        assert (left_cam_data['parameters']['height'] == self.camera_height)
        assert (left_cam_data['parameters']['width'] == self.camera_width)
        assert (right_cam_data['parameters']['height'] == self.camera_height)
        assert (right_cam_data['parameters']['width'] == self.camera_width)

        cam_info_msg_left, cam_info_msg_right = \
            tesse_ros_bridge.utils.generate_camera_info(
                left_cam_data, right_cam_data)

        # TODO(Toni) we should extend the above to get camera info for depth and segmentation!
        # for now, just copy paste from left cam...
        cam_info_msg_segmentation = cam_info_msg_left
        cam_info_msg_depth = cam_info_msg_left

        # TODO(Toni): do a check here by requesting all camera info and checking that it is
        # as the one requested!
        self.cam_info_msgs = [
            cam_info_msg_left, cam_info_msg_right, cam_info_msg_segmentation,
            cam_info_msg_depth
        ]
Code Example #47
import rospy
import tf2_ros
import tf_conversions
from geometry_msgs.msg import TransformStamped
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Quaternion

if __name__ == '__main__':
    rospy.init_node('tf2_broadcaster')

    br = tf2_ros.TransformBroadcaster()

    r = rospy.Rate(1.0)

    while not rospy.is_shutdown():
        t = TransformStamped()
        t.header.stamp = rospy.Time.now()
        t.header.frame_id = 'frame1'
        t.child_frame_id = 'frame2'

        translation = Vector3(0, 0, 1.0)
        t.transform.translation = translation

        q = tf_conversions.transformations.quaternion_from_euler(
            0, 0, 0, 'sxyz')
        rotation = Quaternion(*q)
        t.transform.rotation = rotation

        br.sendTransform(t)
        rospy.loginfo('Transform Published')
        r.sleep()
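
Since the transform in this example never changes, the same effect can be achieved with a single latched publication instead of a 1 Hz loop; a sketch using tf2_ros.StaticTransformBroadcaster (compare Code Example #38):

import rospy
import tf2_ros
import tf_conversions
from geometry_msgs.msg import TransformStamped, Vector3, Quaternion

if __name__ == '__main__':
    rospy.init_node('tf2_static_broadcaster')
    br = tf2_ros.StaticTransformBroadcaster()

    t = TransformStamped()
    t.header.stamp = rospy.Time.now()
    t.header.frame_id = 'frame1'
    t.child_frame_id = 'frame2'
    t.transform.translation = Vector3(0, 0, 1.0)
    q = tf_conversions.transformations.quaternion_from_euler(0, 0, 0, 'sxyz')
    t.transform.rotation = Quaternion(*q)

    br.sendTransform(t)  # published once on the latched /tf_static topic
    rospy.spin()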
Code Example #48
def odom_cb():
    global publish_tf
    global tf_prefix
    global odom_pub
    global x_pos
    global y_pos
    global yaw
    global old_x_pos
    global old_y_pos
    global old_yaw
    global last_odom_time

    global RESET

    if RESET:
        return

    # First run through we have no dt
    if last_odom_time == None:
        last_odom_time = rospy.Time.now()
        return

    now = rospy.Time.now()

    dt = now - last_odom_time
    dt = dt.to_sec()

    if dt < 0.001:
        return

    dx = x_pos - old_x_pos
    dy = y_pos - old_y_pos
    dyaw = yaw - old_yaw

    old_x_pos += dx
    old_y_pos += dy
    old_yaw += dyaw

    dx /= dt
    dy /= dt
    dyaw /= dt

    odom = Odometry()
    odom.header.stamp = now
    odom.header.frame_id = tf_prefix + "/odom"
    odom.child_frame_id = tf_prefix + "/base_link"

    odom.pose.pose.position.x = old_x_pos
    odom.pose.pose.position.y = old_y_pos

    q = quat_from_eul(0, 0, old_yaw)

    odom.pose.pose.orientation.x = q[0]
    odom.pose.pose.orientation.y = q[1]
    odom.pose.pose.orientation.z = q[2]
    odom.pose.pose.orientation.w = q[3]

    odom.twist.twist.linear.x = dx
    odom.twist.twist.linear.y = dy
    odom.twist.twist.angular.z = dyaw

    odom.pose.covariance[0] = 1e-4
    odom.pose.covariance[7] = 1e-4
    odom.pose.covariance[35] = 1e-3

    odom.twist.covariance[0] = 1e-3
    odom.twist.covariance[7] = 1e-3
    odom.twist.covariance[35] = 1e-2

    odom_pub.publish(odom)

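    # Note: a new TransformBroadcaster is constructed on every callback; this
    # works, but a single module-level broadcaster would normally be reused.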
    br = tf2_ros.TransformBroadcaster()
    t = TransformStamped()

    t.header = odom.header
    t.child_frame_id = odom.child_frame_id
    t.transform.translation.x = odom.pose.pose.position.x
    t.transform.translation.y = odom.pose.pose.position.y
    t.transform.translation.z = odom.pose.pose.position.z
    t.transform.rotation = odom.pose.pose.orientation

    br.sendTransform(t)
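
odom_cb above uses a quat_from_eul helper that the listing does not define; a minimal sketch, assuming it is a thin wrapper over tf.transformations (whose (x, y, z, w) ordering matches the q[0]..q[3] indexing in the callback):

from tf.transformations import quaternion_from_euler

def quat_from_eul(roll, pitch, yaw):
    # Returns a quaternion as (x, y, z, w), matching the indexing in odom_cb.
    return quaternion_from_euler(roll, pitch, yaw)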
Code Example #49
def main(portName1, portName2, simulated):
    print "MobileBase.->INITIALIZING MOBILE BASE BY MARCOSOFT..."
    #Roboclaw1 = Roboclaw
    #Roboclaw2 = Roboclaw

    ###Connection with ROS
    rospy.init_node("omni_mobile_base")
    pubOdometry = rospy.Publisher("mobile_base/odometry",
                                  Odometry,
                                  queue_size=1)
    pubBattery = rospy.Publisher("robot_state/base_battery",
                                 Float32,
                                 queue_size=1)

    subStop = rospy.Subscriber("robot_state/stop", Empty, callbackStop)
    subSpeeds = rospy.Subscriber("/hardware/mobile_base/speeds",
                                 Float32MultiArray, callbackSpeeds)
    subCmdVel = rospy.Subscriber("/hardware/mobile_base/cmd_vel", Twist,
                                 callbackCmdVel)

    br = tf.TransformBroadcaster()
    rate = rospy.Rate(30)
    ###Communication with the Roboclaw
    if not simulated:
        print "MobileBase.-> Trying to open serial port on \"" + portName1 + "\""
        Roboclaw1.Open(
            portName2, 38400
        )  #ttyACM0  --- M1: front  --- M2: rear #Port names are inverted due to the way motors are wired
        print "MobileBase.-> Trying to open serial port on \"" + portName2 + "\""
        Roboclaw2.Open(portName1, 38400)  #ttyACM1  --- M1: right  --- M2: left

        address1 = 0x80
        address2 = 0x80
        print "MobileBase.-> Serial port openned on \"" + portName1 + "\" at 38400 bps (Y)"
        print "MobileBase.-> Serial port openned on \"" + portName2 + "\" at 38400 bps (Y)"
        print "MobileBase.-> Clearing previous encoders readings"
        #print Roboclaw1.ReadEncM1(address1)
        #print Roboclaw2.ReadEncM2(address2)
        Roboclaw1.ResetEncoders(address1)
        Roboclaw2.ResetEncoders(address2)
    ###Variables for setting tire speeds
    global leftSpeed
    global rightSpeed
    global frontSpeed
    global rearSpeed
    global newSpeedData
    leftSpeed = 0
    rightSpeed = 0
    frontSpeed = 0
    rearSpeed = 0
    newSpeedData = False
    speedCounter = 5
    ###Variables for odometry
    robotPos = [0, 0, 0]

    while not rospy.is_shutdown():
        if newSpeedData:
            newSpeedData = False
            speedCounter = 5
            if not simulated:
                leftSpeed = int(leftSpeed * 16.0 / 35.0 * 110)
                rightSpeed = int(rightSpeed * 16.0 / 35.0 * 110)
                frontSpeed = int(frontSpeed * 110)
                rearSpeed = int(rearSpeed * 110)
                #print "lS: " + str(leftSpeed) + " rS: " + str(rightSpeed) + " fS: " + str(frontSpeed) + " rS: " + str(rearSpeed)
                try:
                    if leftSpeed >= 0:
                        Roboclaw2.ForwardM1(address2, leftSpeed)
                    else:
                        Roboclaw2.BackwardM1(address2, -leftSpeed)

                    if rightSpeed >= 0:
                        Roboclaw2.ForwardM2(address2, rightSpeed)
                    else:
                        Roboclaw2.BackwardM2(address2, -rightSpeed)

                    if frontSpeed >= 0:
                        Roboclaw1.BackwardM1(address1, frontSpeed)
                    else:
                        Roboclaw1.ForwardM1(address1, -frontSpeed)

                    if rearSpeed >= 0:
                        Roboclaw1.BackwardM2(address1, rearSpeed)
                    else:
                        Roboclaw1.ForwardM2(address1, -rearSpeed)
                except:
                    print "MobileBase.->Error trying to write speeds :("
                    #Roboclaw1.ForwardM1(address1, 0)
                    #Roboclaw1.ForwardM2(address1, 0)
                    #Roboclaw2.ForwardM1(address2, 0)
                    #Roboclaw2.ForwardM2(address2, 0)
        else:
            speedCounter -= 1
            if speedCounter == 0:
                if not simulated:
                    Roboclaw1.ForwardM1(address1, 0)
                    Roboclaw1.ForwardM2(address1, 0)
                    Roboclaw2.ForwardM1(address2, 0)
                    Roboclaw2.ForwardM2(address2, 0)
                else:
                    leftSpeed = 0
                    rightSpeed = 0
                    frontSpeed = 0
                    rearSpeed = 0

            if speedCounter < -1:
                speedCounter = -1
        if not simulated:
            a1, encoderLeft, a2 = Roboclaw2.ReadEncM1(address2)
            #print Roboclaw2.ReadEncM2(address2)
            #print Roboclaw2.ReadEncM1(address2)
            #print Roboclaw1.ReadEncM1(address1)
            #print Roboclaw1.ReadEncM2(address1)
            b1, encoderRight, b2 = Roboclaw2.ReadEncM2(
                address2
            )  #The negative sign is just because it is the way the encoders are wired to the roboclaw
            c1, encoderRear, c2 = Roboclaw1.ReadEncM2(address1)
            d1, encoderFront, d2 = Roboclaw1.ReadEncM1(address1)
            #print "encLeft: " + str(encoderLeft) + " encFront: " + str(encoderFront)
            Roboclaw1.ResetEncoders(address1)
            Roboclaw2.ResetEncoders(address2)
            encoderFront *= -1
            encoderRear *= -1
        else:
            encoderLeft = leftSpeed * 0.05 * 158891.2
            encoderRight = rightSpeed * 0.05 * 158891.2
            encoderFront = frontSpeed * 0.05 * 336857.5
            encoderRear = rearSpeed * 0.05 * 336857.5
        ###Odometry calculation
        robotPos = calculateOdometry(robotPos, encoderLeft, encoderRight,
                                     encoderRear, encoderFront)
        #print "Encoders: " + str(encoderLeft) + "  " + str(encoderRight)
        ##Odometry and transformations
        ts = TransformStamped()
        ts.header.stamp = rospy.Time.now()
        ts.header.frame_id = "odom"
        ts.child_frame_id = "base_link"
        ts.transform.translation.x = robotPos[0]
        ts.transform.translation.y = robotPos[1]
        ts.transform.translation.z = 0
        q = tf.transformations.quaternion_from_euler(0, 0, robotPos[2])
        ts.transform.rotation.x = q[0]
        ts.transform.rotation.y = q[1]
        ts.transform.rotation.z = q[2]
        ts.transform.rotation.w = q[3]
        br.sendTransform((robotPos[0], robotPos[1], 0), q, rospy.Time.now(),
                         ts.child_frame_id, ts.header.frame_id)
        msgOdom = Odometry()
        msgOdom.header.stamp = rospy.Time.now()
        msgOdom.pose.pose.position.x = robotPos[0]
        msgOdom.pose.pose.position.y = robotPos[1]
        msgOdom.pose.pose.position.z = 0
        msgOdom.pose.pose.orientation.x = 0
        msgOdom.pose.pose.orientation.y = 0
        msgOdom.pose.pose.orientation.z = math.sin(robotPos[2] / 2)
        msgOdom.pose.pose.orientation.w = math.cos(robotPos[2] / 2)
        pubOdometry.publish(msgOdom)
        ###Reads battery and publishes the corresponding topic
        motorBattery = 12.0
        if not simulated:
            motorBattery = Roboclaw1.ReadMainBatteryVoltage(address1)[
                1] / 10.0 + 0.2  #There is an offset in battery reading
            #print motorBattery
        msgBattery = Float32()
        msgBattery.data = motorBattery
        pubBattery.publish(msgBattery)
        rate.sleep()
    #End of while
    if not simulated:
        Roboclaw1.Close()
        Roboclaw2.Close()
Code Example #50
    def process_image(self):
        """Process the image

        This code is run for each image
        """

        # Size of original image
        width = self.image.shape[1]
        height = self.image.shape[0]

        # Make copy of image for display purposes
        display_img = self.image.copy()

        # Determine optical center
        if self.center_x is None or self.center_y is None:
            Camera.detect_optical_center(self.image)  # optional
            self.center_x = int(round(Camera.center[0]))
            self.center_y = int(round(Camera.center[1]))

        # Draw crosshair
        north = (self.center_x, height - 1)
        south = (north[0], 0)
        east = (width - 1, self.center_y)
        west = (0, east[1])
        cv2.line(display_img, south, north, (0, 255, 0))
        cv2.line(display_img, west, east, (0, 255, 0))
        cv2.circle(display_img, (self.center_x, self.center_y), 0, (0, 255, 0),
                   5)

        # Detect soccer training cones (pylons)
        hsv = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)
        color_min = np.array([0, 150, 150], np.uint8)  # min HSV color
        color_max = np.array([20, 255, 255], np.uint8)  # max HSV color
        blobs = Utils.detect_colored_blobs(hsv, color_min, color_max)
        #cv2.imshow('blobs', blobs)
        pylons = cv2.bitwise_and(self.image, self.image, mask=blobs)
        #cv2.imshow('pylons', pylons)
        contours = Utils.find_contours(blobs)
        if len(contours) > 1:  # we have seen pylons
            cnts = Utils.largest_contours(contours, number=2)

            # Calculate and draw position of both goal posts (pylons)

            # OPTION1: Center of contour
            #pylon1 = Utils.center_of_contour(cnts[0])
            #pylon2 = Utils.center_of_contour(cnts[1])

            # OPTION2: Closest point on contour
            def closest_point(point, points):
                point = np.asarray(point)
                points = np.asarray(points)
                dist_2 = np.sum((points - point)**2, axis=1)
                return np.argmin(dist_2)

            pylon1_points = cnts[0].ravel().reshape((len(cnts[0]), 2))
            pylon2_points = cnts[1].ravel().reshape((len(cnts[1]), 2))
            pylon1 = pylon1_points[closest_point(
                (self.center_x, self.center_y), pylon1_points)]
            pylon2 = pylon2_points[closest_point(
                (self.center_x, self.center_y), pylon2_points)]

            # OPTION3: Center of rotated rectangle
            pylon1_rect = cv2.minAreaRect(
                cnts[0]
            )  # rect = ( (center_x,center_y), (width,height), angle )
            pylon2_rect = cv2.minAreaRect(
                cnts[1]
            )  # rect = ( (center_x,center_y), (width,height), angle )
            pylon_radius = int(
                round((pylon1_rect[1][0] + pylon2_rect[1][0]) / 4.0))
            #pylon1_box = np.int0(cv2.boxPoints(pylon1_rect))
            #cv2.drawContours(display_img,[pylon1_box],0,(0,0,255),1)
            #pylon2_box = np.int0(cv2.boxPoints(pylon2_rect))
            #cv2.drawContours(display_img,[pylon2_box],0,(0,0,255),1)
            #pylon1 = np.int0(pylon1_rect[0])
            #pylon2 = np.int0(pylon2_rect[0])

            cv2.circle(display_img, tuple(pylon1), 0, (0, 255, 0), 5)
            cv2.circle(display_img, tuple(pylon2), 0, (0, 255, 0), 5)

            if pylon1[0] > pylon2[0]:
                pylon1, pylon2 = pylon2, pylon1

            # Draw goal-line
            cv2.line(display_img, tuple(pylon1), tuple(pylon2), (0, 255, 0), 1)

            # Calculate the center of the goal in pixel coordinates
            goal = np.round(Utils.middle_between(pylon1, pylon2)).astype("int")
            goal_x = goal[0]
            goal_y = goal[1]

            # Draw line from optical center to goal center
            cv2.circle(display_img, tuple(goal), 0, (0, 0, 255), 5)
            cv2.line(display_img, (self.center_x, self.center_y), tuple(goal),
                     (0, 0, 255), 1)

            # Calculate the goal center real world coordinates
            goal_relative_x = goal_x - self.center_x
            goal_relative_y = goal_y - self.center_y
            goal_rho, goal_phi = Utils.cart2pol(goal_relative_x,
                                                goal_relative_y)
            goal_rho += pylon_radius  # correct for radius of object
            goal_real_phi = goal_phi
            goal_real_rho = Camera.pixels2meters(goal_rho)
            goal_x_cart, goal_y_cart = Utils.pol2cart(goal_real_rho,
                                                      goal_real_phi)
            self.goal_x, self.goal_y = Camera.image2robot(
                goal_x_cart, goal_y_cart)

            # Calculate goal orientation (theta)
            goal_normal_relative = Utils.perpendicular(
                (pylon1[0] - pylon2[0], pylon1[1] - pylon2[1]))
            goal_normal = (goal[0] + goal_normal_relative[0],
                           goal[1] + goal_normal_relative[1])
            cv2.line(display_img, tuple(goal), tuple(goal_normal), (0, 255, 0),
                     1)
            x_axis = (0, -self.center_y)
            self.goal_theta = Utils.angle_between(x_axis, goal_normal_relative)
            #print("goal_theta", self.goal_theta)

        # Publish transform from camera to goal
        transform_msg = TransformStamped()
        transform_msg.header.stamp = rospy.Time.now()
        transform_msg.header.frame_id = self.frame_id
        transform_msg.child_frame_id = self.goal_frame_id
        transform_msg.transform.translation.x = self.goal_x
        transform_msg.transform.translation.y = self.goal_y
        transform_msg.transform.translation.z = 0.0
        quaternion = tf.transformations.quaternion_from_euler(
            0, 0, self.goal_theta)
        transform_msg.transform.rotation.x = quaternion[0]
        transform_msg.transform.rotation.y = quaternion[1]
        transform_msg.transform.rotation.z = quaternion[2]
        transform_msg.transform.rotation.w = quaternion[3]

        self.transform_publisher.publish(transform_msg)

        #self.transform_broadcaster.sendTransform(transform_msg)
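        # The commented call above is the tf2_ros API (takes a
        # TransformStamped); the tuple-based call below is the older tf API.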
        self.transform_broadcaster.sendTransform(
            (self.goal_x, self.goal_y, 0.0),
            (quaternion[0], quaternion[1], quaternion[2], quaternion[3]),
            rospy.Time.now(), self.goal_frame_id, self.frame_id)

        # Show augmented image
        if self.display:
            cv2.imshow("image", display_img)
            cv2.waitKey(1)
Code Example #51
def arucopose(data):
    global counter
    global x_average
    global y_average
    global z_average
    global roll_average
    global pitch_average
    global yaw_average
    global t_final_odom_update

    for elm in data.markers:
        # if len(data.markers) > 0:
        #     elm = data.markers[0]
        cam_aruco = PoseStamped()
        cam_aruco.pose = elm.pose.pose
        cam_aruco.header.frame_id = 'cf1/camera_link'
        cam_aruco.header.stamp = rospy.Time.now()

        if not tfBuffer.can_transform(cam_aruco.header.frame_id, 'cf1/odom',
                                      rospy.Time(0)):
            rospy.logwarn_throttle(
                5.0,
                'No transform from %s to cf1/odom' % cam_aruco.header.frame_id)
            return

        print('detected marker', elm.id)

        send_transform = tfBuffer.transform(cam_aruco, 'cf1/odom',
                                            rospy.Duration(0.5))

        t = TransformStamped()
        t.header.stamp = rospy.Time.now()
        t.header.frame_id = 'cf1/odom'
        t.child_frame_id = 'arucopostion' + str(elm.id)

        t.transform.translation = send_transform.pose.position
        t.transform.rotation = send_transform.pose.orientation

        # t gives the position of the aruco in the odometry frame
        br.sendTransform(
            t)  # send it so that its visible in rviz, what the crazyflie sees

        # Fix coordinates to arucos based on ID from map here
        # -------------------------------

        x_map = aruco_positions[elm.id][0]
        y_map = aruco_positions[elm.id][1]
        z_map = aruco_positions[elm.id][2]

        roll_map = math.radians(
            aruco_positions[elm.id]
            [3])  # convert to radians; the map JSON gives degrees
        pitch_map = math.radians(aruco_positions[elm.id][4])
        yaw_map = math.radians(aruco_positions[elm.id][5])

        map_orientation = quaternion_from_euler(
            roll_map, pitch_map, yaw_map
        )  # Convert to a quaternion so the difference between the aruco's pose
        # in the map and where the crazyflie sees it can be taken. The goal is
        # to move the odometry frame so that base_link starts where it is
        # supposed to in the map.

        #-----------------------------------

        # --------- Take the difference of the orientations between map and odometry frame in order to update the orientation.----------------

        q1_inv = [
            t.transform.rotation.x, t.transform.rotation.y,
            t.transform.rotation.z, -t.transform.rotation.w
        ]  # negate last element for inverse
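        # (for a unit quaternion, negating w instead of x, y, z gives the
        # negated conjugate, which encodes the same inverse rotation)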

        qr = quaternion_multiply(map_orientation,
                                 q1_inv)  # (currentpose, previouspose)

        # New transformstamped in order to see result of transformation

        t_update_odom_rotation = TransformStamped()
        t_update_odom_rotation.header.stamp = rospy.Time.now()
        t_update_odom_rotation.header.frame_id = 'cf1/odom'
        t_update_odom_rotation.child_frame_id = "rotation_odometry"
        t_update_odom_rotation.transform.rotation.x = qr[0]
        t_update_odom_rotation.transform.rotation.y = qr[1]
        t_update_odom_rotation.transform.rotation.z = qr[2]
        t_update_odom_rotation.transform.rotation.w = qr[3]

        # sendtransform to see result in RVIZ of the rotated odometry frame
        # rospy.loginfo(t_update_odom_rotation)
        br.sendTransform(t_update_odom_rotation)
        rospy.sleep(0.1)

        #Set point position of aruco in the new rotated frame
        t_aruco_rotation = PoseStamped()
        t_aruco_rotation.header.stamp = t_update_odom_rotation.header.stamp
        t_aruco_rotation.header.frame_id = t_update_odom_rotation.child_frame_id
        t_aruco_rotation.pose.position = t.transform.translation
        t_aruco_rotation.pose.orientation.w = 1

        #Get position of aruco marker after rotation in the original odometry frame
        aruco_pos_final = tfBuffer.transform(t_aruco_rotation, 'cf1/odom',
                                             rospy.Duration(1))
        # aruco_pos_after_rotation = tfBuffer.lookup_transform('cf1/odom',  t_aruco_rotation.child_frame_id , rospy.Duration(0.5) )

        # Difference between real aruco position in map and the position of the aruco after the rotation is performed

        x_add = x_map - aruco_pos_final.pose.position.x
        y_add = y_map - aruco_pos_final.pose.position.y
        z_add = z_map - aruco_pos_final.pose.position.z

        # Set orientation to the same as in t_update_odom_rotation

        qr_0_add = qr[0]
        qr_1_add = qr[1]
        qr_2_add = qr[2]
        qr_3_add = qr[3]

        orientation_add = euler_from_quaternion(
            (qr_0_add, qr_1_add, qr_2_add, qr_3_add))
        x_average.append(x_add)
        y_average.append(y_add)
        z_average.append(z_add)

        roll_average.append(orientation_add[0])
        pitch_average.append(orientation_add[1])
        yaw_average.append(orientation_add[2])

        counter += 1
        if counter > 19:

            #Perform final update of the odometry frame

            #t_final_odom_update=TransformStamped()
            # t_final_odom_update.header.stamp = rospy.Time.now()
            # t_final_odom_update.header.frame_id = "map"
            # t_final_odom_update.child_frame_id = 'cf1/odom'

            x_final = sum(x_average) / len(x_average)
            y_final = sum(y_average) / len(y_average)
            z_final = sum(z_average) / len(z_average)

            t_final_odom_update.transform.translation.x = x_final
            t_final_odom_update.transform.translation.y = y_final
            t_final_odom_update.transform.translation.z = z_final

            roll_final = sum(roll_average) / len(roll_average)
            pitch_final = sum(pitch_average) / len(pitch_average)
            yaw_final = sum(yaw_average) / len(yaw_average)

            final_rotation = quaternion_from_euler(roll_final, pitch_final,
                                                   yaw_final)

            t_final_odom_update.transform.rotation.x = final_rotation[0]
            t_final_odom_update.transform.rotation.y = final_rotation[1]
            t_final_odom_update.transform.rotation.z = final_rotation[2]
            t_final_odom_update.transform.rotation.w = final_rotation[3]

            print('Updating odom frame')
            # br.sendTransform(t_final_odom_update)

            counter = 0

            x_average = []
            y_average = []
            z_average = []

            roll_average = []
            pitch_average = []
            yaw_average = []