예제 #1
0
    def execute(self, userdata):
        """Transform userdata.in_pos from the in_ref frame to the out_ref frame.

        Accepts either a Point or a Pose in userdata.in_pos; writes the
        transformed Point to userdata.out_pos. Returns 'done' on success,
        'fail' when in_pos has an unsupported type.
        """
        stamped = PointStamped()
        in_pos = userdata.in_pos
        if type(in_pos) is Point:
            print("this is a point")
            stamped.point = in_pos
        elif type(in_pos) is Pose:
            print("this is a pose")
            stamped.point = in_pos.position
        else:
            Logger.loginfo('ERROR in ' + str(self.name) +
                           ' : in_pos is not a Pose() nor a Point()')
            return 'fail'

        stamped.header.frame_id = self.in_ref
        # Block until the transform is available (up to 1 s) before converting.
        self.listener.waitForTransform(self.in_ref, self.out_ref,
                                       rospy.Time(0), rospy.Duration(1))
        print("Frame : " + self.in_ref)
        print(" point : " + str(stamped.point))

        stamped = self.listener.transformPoint(self.out_ref, stamped)

        print("Frame : " + self.out_ref)
        print(" point : " + str(stamped.point))
        userdata.out_pos = stamped.point
        return 'done'
예제 #2
0
def node():
    """Publish a fixed square trajectory of DeltaStates until shutdown.

    Builds four corner points of a square (side 2*width) at a constant
    depth in the robot_base_plate frame, alternating the actuator state,
    and republishes the whole array every 5 seconds.
    """
    rospy.init_node('test_trajectory')
    pub = rospy.Publisher('state_arrays', DeltaStateArray, queue_size=10)
    width = 100
    depth = -925

    def make_point(x, y, z):
        # One stamped point in the robot base-plate frame.
        ps = PointStamped()
        ps.point = Point(x, y, z)
        ps.header.stamp = rospy.Time.now()
        ps.header.frame_id = "robot_base_plate"
        return ps

    # Square corners in order, actuator toggling at each corner.
    states = DeltaStateArray([
        DeltaState(position=make_point(width, width, depth), actuator=Bool(True)),
        DeltaState(position=make_point(width, -width, depth), actuator=Bool(False)),
        DeltaState(position=make_point(-width, -width, depth), actuator=Bool(True)),
        DeltaState(position=make_point(-width, width, depth), actuator=Bool(False)),
    ])
    while not rospy.is_shutdown():
        pub.publish(states)
        rospy.sleep(5)
예제 #3
0
def get_obj_frames(blue_obj, red_obj, base_tf, base2kinect_tf):
    """Build TF frames ('blue_frame', 'red_frame') centered on two objects.

    Each object's bounding-box center (seen by the kinect) is transformed
    into the base frame, dropped to the bottom face of its bounding box,
    and given an orientation whose X/Y axes are the base frame's flipped.

    Returns (blue_frame, red_frame) as TransformStamped messages parented
    to the module-level `base_frame`.
    """
    base_pose = Pose()
    base_pose.position = base_tf.transform.translation
    base_pose.orientation = base_tf.transform.rotation
    base_kdl = tf_conversions.fromMsg(base_pose)
    # Rotation shared by both object frames: base X and Y flipped, Z kept.
    obj_rot = PyKDL.Rotation(-base_kdl.M.UnitX(), -base_kdl.M.UnitY(),
                             base_kdl.M.UnitZ())

    def make_frame(obj, child_frame_id):
        # Object center as seen by the kinect, re-expressed in the base frame.
        center_kinect = PointStamped()
        center_kinect.header = base2kinect_tf.header
        center_kinect.point = obj.bb_center
        center = tf2_geometry_msgs.do_transform_point(center_kinect,
                                                      base2kinect_tf)

        pose = Pose()
        pose.position = center.point
        # Drop from the bounding-box center to its bottom face.
        pose.position.z = pose.position.z - obj.bb_dims.z / 2
        pose.orientation = base_tf.transform.rotation
        kdl = tf_conversions.fromMsg(pose)
        pose = tf_conversions.toMsg(PyKDL.Frame(obj_rot, kdl.p))

        frame = TransformStamped()
        frame.header.frame_id = base_frame  # module-level parent frame id
        frame.header.stamp = rospy.Time.now()
        frame.child_frame_id = child_frame_id
        frame.transform.translation = pose.position
        frame.transform.rotation = pose.orientation
        return frame

    ### Frame for Blue / Red Object (previously two copy-pasted halves)
    return make_frame(blue_obj, 'blue_frame'), make_frame(red_obj, 'red_frame')
예제 #4
0
def calculate_goalpoint_distance_from_wall(laser_data, distance):
    """Return a goal point offset `distance` from the wall seen on the right.

    Samples three lidar points on the right side (-90, -80, -70 degrees),
    computes two candidate goal points offset from the wall segments
    (p1,p2) and (p2,p3), publishes both for visualization, and returns
    their average as a 2-element numpy array in the laser frame.
    """
    p1 = get_lidar_point_at_angle(laser_data, -90)
    p2 = get_lidar_point_at_angle(laser_data, -80)
    p3 = get_lidar_point_at_angle(laser_data, -70)

    def calc_point(p1, p2, d):
        # Midpoint of the wall segment, then step distance d away from it.
        mp = (p1+p2)/2
        # Inverse slope of the segment.
        # NOTE(review): divides by (p1[1]-p2[1]) -- a horizontal segment
        # (equal y) raises ZeroDivisionError; confirm inputs exclude that.
        slopeinv = (p1[0]-p2[0])/(p1[1]-p2[1])
        # Decompose offset d along the perpendicular direction.
        dx = abs(np.sqrt((d**2)/(1+(1/slopeinv**2))))
        dy = abs(np.sqrt((d**2)/(1+(slopeinv**2))))
        # NOTE(review): signs fix which side of the wall the goal lies on
        # -- confirm against the robot's frame convention.
        rp = np.array([-1*(mp[0]-dx), (mp[1]-dy)])
        # print("GP Details: P1= "+str(p1), "P2="+str(p2), "MP= "+str(mp), "Dx= "+str(dx), "Dy= "+str(dy), "RP= "+str(rp))
        return rp

    gp1 = calc_point(p1, p2, distance)
    gp2 = calc_point(p2, p3, distance)

    # Publish for visualization

    center = PointStamped()
    center.header = Header(stamp=rospy.Time.now(), frame_id="laser")
    center.point = Point(gp1[0], gp1[1], 0)
    pub_point1.publish(center)
    center.point = Point(gp2[0], gp2[1], 0)
    pub_point2.publish(center)

    return (gp1 + gp2)/2
    def GenPlane(self, point1, point2, point3):
        """Project three sensor-frame points into /map and build a plane.

        Each input point is stamped in this sensor's TF frame, transformed
        to /map, then forwarded to GenPlaneFromWorldPoints.

        Fixes over the original: the body mixed tabs and spaces (a
        TabError under Python 3), carried stray semicolons, and repeated
        the stamping code three times.
        """
        def to_world(point):
            # Stamp the raw point in this sensor's frame and map it to /map.
            stamped = PointStamped()
            stamped.header.frame_id = self.tf_name
            stamped.point = point
            return self.tf_listener.transformPoint("/map", stamped)

        point1_world = to_world(point1)
        point2_world = to_world(point2)
        point3_world = to_world(point3)

        scale2 = 1  # -2/ (point2_world.point.z - point1_world.point.z)
        # Point2 scaled along the point1->point2 direction (scale2 == 1
        # currently leaves it unchanged).
        point2_scaled = Point(
            point1_world.point.x + (point2_world.point.x - point1_world.point.x) * scale2,
            point1_world.point.y + (point2_world.point.y - point1_world.point.y) * scale2,
            point1_world.point.z + (point2_world.point.z - point1_world.point.z) * scale2)

        # NOTE(review): `marker` is not defined anywhere in this method;
        # this branch raises NameError if reached -- confirm where the
        # marker object is supposed to come from.
        if not math.isnan(point2_scaled.x):
            marker.points.append(point1_world.point)
            marker.points.append(point2_scaled)

        return self.GenPlaneFromWorldPoints(point1_world.point,
                                            point2_world.point,
                                            point3_world.point)
예제 #6
0
 def publish_pos(self, pos=None):
     """Publish a position as a PointStamped in the /map frame.

     Uses pos.position when a pose is given, otherwise falls back to
     this object's own stored position.
     """
     stamped = PointStamped()
     stamped.header.frame_id = "/map"
     source = pos if pos is not None else self.position
     stamped.point = source.position
     self.position_publisher.publish(stamped)
예제 #7
0
	def publisher(self):
		"""Publish a navigation goal at 60 Hz from one of three situations:
		wall tracking (nothing detected), a detected corner, or an
		obstacle escape point. The chosen point is republished as a
		PoseStamped in the base_link frame.

		NOTE(review): this body mixes tabs and spaces (lines below use
		both); it is a TabError under Python 3 -- needs reindenting.
		"""
		rate=rospy.Rate(60)
		# corner_time counts iterations since the last corner detection;
		# starting at 100 means "no recent corner", so publishing is live.
		corner_time=100
		while not rospy.is_shutdown():
			corner_time+=1
			if corner_time<100:
                                print "pass"
				continue 
			point =Point32()
			spoint=PointStamped()
			if not self.detected and not self.obs_detected:
				
				# Default: head toward the wall-tracking point.
				point.x=self.walldata_x
				point.y=self.walldata_y
				point.z=0.0
				spoint.point = point 
				#spoint.header.frame_id="odom"
				spoint.header.frame_id = 'base_link'
				spoint.header.stamp=self.time 
				print "false: ", spoint
				#self.pubs.publish(spoint)
			elif self.detected:
				# Corner: mute goal updates for the next 100 iterations
				# and aim at the corner position from range/bearing.
				corner_time=0
				print "CornerDetected:"
				y=self.dist*np.sin(self.angle)
				x=self.dist*np.cos(self.angle)

				point.x=abs(x)
				point.y=-y
				point.z=0.0
				spoint.point = point 
				#spoint.header.frame_id="odom"
				spoint.header.frame_id = 'base_link'
				spoint.header.stamp=self.time 
				print "True", point 

				#self.pubs.publish(spoint)
			else:
				# Obstacle: steer to the escape point, at least 4 m ahead.
				print "OBS_DETECTED"
				point.x=max(4.0,abs(self.escape.x))
				point.y=self.escape.y 
				point.z=0.0
				spoint.point=point
				spoint.header.frame_id = 'base_link'
				spoint.header.stamp=self.time 

				#self.pubs.publis
			# NOTE(review): y is negated again here (on top of the corner
			# branch's -y) -- confirm the intended sign convention.
			goal = PoseStamped()
		        goal.pose.position.x = spoint.point.x
			goal.pose.position.y = -spoint.point.y
	        	goal.pose.position.z = 0.0
		        goal.pose.orientation.w = 1.0#math.atan2(y,x)
		        goal.header.frame_id = 'base_link'
		        self.pubs.publish(goal)
			rate.sleep()
예제 #8
0
    def do_action(self, agent_action):
        """Dispatch the agent's MOVE_TO / LOOK_AT / ATTACK / TAKE_PATH actions.

        Each action dict maps entity ids to target descriptors whose first
        element provides toLongLatAlt(). The goal-building code was
        previously triplicated; it is factored into a local helper.

        NOTE(review): goals are built as Point(x=lat, y=lon, ...) -- the
        lat/lon-to-x/y mapping is consistent throughout but worth
        confirming against the consumer's frame convention.
        """
        self._actions = agent_action

        def goal_from(act, entity_id):
            # Stamped goal point from the action's long/lat/alt target.
            lon, lat, alt = act[entity_id][0].toLongLatAlt()
            goal = PointStamped()
            goal.point = Point(x=lat, y=lon, z=alt)
            return goal

        # Iterating an empty dict is a no-op, so no len() guard is needed.
        for act in self._actions['MOVE_TO']:
            for entity_id in act:
                self.move_entity_to_goal(entity_id, goal_from(act, entity_id))

        for act in self._actions['LOOK_AT']:
            for entity_id in act:
                self.look_at_goal(entity_id, goal_from(act, entity_id))

        for act in self._actions['ATTACK']:
            for entity_id in act:
                self.node.get_logger().info('Entity:' + entity_id +
                                            " attack at:" +
                                            ascii(act[entity_id][0].x) +
                                            ", " +
                                            ascii(act[entity_id][0].y) +
                                            ", " +
                                            ascii(act[entity_id][0].z))
                goal = goal_from(act, entity_id)
                # Only the UGV can attack; others just move to the spot.
                if entity_id == 'UGV':
                    self.attack_goal(entity_id, goal)
                else:
                    self.move_entity_to_goal(entity_id, goal)

        for act in self._actions['TAKE_PATH']:
            for entity_id in act:
                path = OPath()
                path.name = act[entity_id][0]
                # Here goal is a bare Point (not stamped); the target is
                # the second element of the descriptor.
                lon, lat, alt = act[entity_id][1].toLongLatAlt()
                goal_point = Point(x=lat, y=lon, z=alt)
                self.take_goal_path(entity_id, path, goal_point)
예제 #9
0
    def lookAt(self, target, tag_base='head'):
        """Point the PR2 head at `target` (a KDL frame in the torso frame).

        When target is None, sweeps the head to a fixed search pose until
        the kinect reports a mouth frame, then tracks that. Returns
        "Success".
        """
        head_frame = rospy.get_param('hrl_manipulation_task/head_audio_frame')
        headClient = actionlib.SimpleActionClient("/head_traj_controller/point_head_action", \
                                                  pr2_controllers_msgs.msg.PointHeadAction)
        headClient.wait_for_server()
        rospy.logout('Connected to head control server')

        goal_msg = pr2_controllers_msgs.msg.PointHeadGoal()
        goal_msg.pointing_frame = head_frame
        goal_msg.pointing_axis.x = 1
        goal_msg.pointing_axis.y = 0
        goal_msg.pointing_axis.z = 0
        goal_msg.min_duration = rospy.Duration(1.0)
        goal_msg.max_velocity = 1.0

        pos = Point()
        ps = PointStamped()
        ps.header.frame_id = self.torso_frame

        def point_head_at(x, y, z, settle):
            # Aim the head at (x, y, z) in the torso frame and wait.
            pos.x = x
            pos.y = y
            pos.z = z
            ps.point = pos
            goal_msg.target = ps
            headClient.send_goal(goal_msg)
            headClient.wait_for_result()
            rospy.sleep(settle)

        if target is None:
            ## tag_id = rospy.get_param('hrl_manipulation_task/'+tag_base+'/artag_id')

            # Keep looking at a fixed search pose until the mouth appears.
            while not rospy.is_shutdown() and self.mouth_frame_kinect is None:
                rospy.loginfo("Search " + tag_base + " tag")
                point_head_at(0.8, 0.4, 0.0, 2.0)

            self.mouth_frame = copy.deepcopy(self.mouth_frame_kinect)
            target = self.mouth_frame

        point_head_at(target.p.x(), target.p.y(), target.p.z(), 1.0)

        return "Success"
예제 #10
0
    def request_exploration(self):
        """Ask the explore_server action to run (unbounded) exploration.

        Sends an empty boundary polygon and a map-frame origin as the
        exploration center, then blocks until the action finishes.
        """
        # exploration client
        client = actionlib.SimpleActionClient('explore_server',
                                              ExploreTaskAction)
        client.wait_for_server()

        # empty polygon: no boundary means unbounded exploration
        polygonStamped = PolygonStamped()
        polygonStamped.header.frame_id = 'map'
        polygonStamped.header.stamp = rospy.Time.now()
        # starting point from turtlebot position
        initialGoal = PointStamped()
        initialGoal.header.frame_id = 'map'
        # BUG FIX: PointStamped.point is a geometry_msgs/Point, not a
        # Point32 -- assigning a Point32 breaks message serialization.
        # The default-constructed point is already (0, 0, 0); set fields
        # explicitly for clarity.
        initialGoal.point.x = 0
        initialGoal.point.y = 0
        initialGoal.point.z = 0

        # unbounded exploration, uncomment for bounded
        # for x, y in zip(self.points_x, self.points_y):
        # polygonStamped.polygon.points.append(Point32(x=x, y=y, z=0))

        # setting exploration goal
        exploration_goal = ExploreTaskGoal()
        exploration_goal.explore_boundary = polygonStamped
        exploration_goal.explore_center = initialGoal

        # starting exploration
        client.send_goal(exploration_goal)

        rospy.loginfo("Exploration Started")
        client.wait_for_result()
예제 #11
0
    def runFilter(self):
        """Kalman-filter loop: iterate, publish pose/point, broadcast TF.

        Runs at self.filterRate until ROS shutdown; a pending flag_reset
        re-seeds the filter before the next iteration.
        """
        r = rospy.Rate(self.filterRate)
        while not rospy.is_shutdown():
            if self.flag_reset:
                self.kf.reset(self.getReset())
                self.flag_reset = 0

            self.kf.iteration(self.getMeasures())

            # Fetch the state once per cycle instead of calling getState()
            # a dozen times (original called it on every field access).
            state = self.kf.getState()
            self.pose_pub_.publish(state)

            # Re-stamp the filtered position and publish it separately.
            # (header is shared by reference, as in the original.)
            person_point = PointStamped()
            person_point.header = state.header
            person_point.header.stamp = rospy.Time.now()
            person_point.point = state.pose.position
            self.point_pub_.publish(person_point)

            # Broadcast the person pose (z fixed to 0) as "person_link".
            self.tf_person.sendTransform(
                (state.pose.position.x, state.pose.position.y, 0),
                (state.pose.orientation.x, state.pose.orientation.y,
                 state.pose.orientation.z, state.pose.orientation.w),
                rospy.Time.now(),
                "person_link",
                state.header.frame_id)

            r.sleep()
    def _obstacle_callback(self, msg):
        """Track detected robots in the map frame, merging nearby duplicates.

        Each obstacle is transformed into the map frame; remembered robots
        closer than robot_merge_distance to the new detection are dropped,
        then the detection itself is appended.
        """
        try:
            transform = self.tf_buffer.lookup_transform(
                self.map_frame, msg.header.frame_id, msg.header.stamp,
                rospy.Duration(1.0))
        except (tf2.ConnectivityException, tf2.LookupException,
                tf2.ExtrapolationException) as e:
            rospy.logwarn(e)
            return

        for obstacle in msg.obstacles:
            detection = PointStamped()
            detection.header = msg.header
            detection.point = obstacle.pose.pose.pose.position
            # Transform from the robot frame into the map frame.
            detection = tf2_geometry_msgs.do_transform_point(detection,
                                                             transform)
            new_pos = ros_numpy.numpify(detection.point)
            # Keep only remembered robots far enough from this detection,
            # then remember the detection (replacing any merged ones).
            self.robots = [
                known for known in self.robots
                if np.linalg.norm(new_pos - ros_numpy.numpify(known.point))
                > self.robot_merge_distance
            ]
            self.robots.append(detection)
    def _reference_update(self, pose_ref: PointStamped):
        """Accept a new reference point, mapping agent-frame refs to global.

        Mutates pose_ref in place when it arrives in the "agent" frame,
        then resets the controller's error/command state whenever the
        (possibly remapped) reference differs from the current one.
        """
        if pose_ref.header.frame_id == "agent":
            cprint(f'got pose_ref in agent frame: {pose_ref.point}', self._logger)
            # TODO:
            # Use time stamp of message to project reference point to global frame
            # corresponding to the agent's pose at that time
            # Although, as references should be said once per second the small delay probably won't matter that much.

            # transform from agent to world frame.
            # Note that agent frame is not necessarily the same as agent_horizontal, so a small error is induced here
            # as pose_estimate.orientation only incorporates yaw turn because KF does not maintain roll and pitch.
            pose_ref.point = transform(points=[pose_ref.point],
                                       orientation=self.pose_est.pose.orientation,
                                       translation=self.pose_est.pose.position)[0]
            pose_ref.header.frame_id = "global"
            cprint(f'mapped to global frame: {pose_ref.point}', self._logger)
        if pose_ref != self.pose_ref:
            # reset pose error when new reference comes in.
            self.prev_pose_error = PointStamped(header=Header(frame_id="global"))
            self.prev_vel_error = PointStamped(header=Header(frame_id="global_rotated"))
            self.last_cmd = TwistStamped(header=Header(stamp=rospy.Time().now()))
            self.pose_ref = pose_ref
            # NOTE(review): assumes euler_from_quaternion accepts the
            # quaternion message directly -- confirm the helper's signature.
            _, _, yaw = euler_from_quaternion(self.pose_est.pose.orientation)
            self.desired_yaw = yaw + calculate_relative_orientation(robot_pose=self.pose_est,
                                                                    reference_pose=self.pose_ref)
            cprint(f'set pose_ref: {self.pose_ref.point}', self._logger)
def pre_load():
    """Load pre-registered goal positions from disk and start the task list.

    Reads the saved pose dictionary, turns each entry into a map-frame
    PointStamped, appends the robot's current odometry position as the
    final point (when available), and hands the list to tasks().

    Fixes over the original: the pose file is parsed with
    ast.literal_eval instead of eval (the file is data, not trusted
    code), the file handle is closed via `with`, and a failed odometry
    fetch no longer leaves `intial_point` undefined (which previously
    raised NameError at the final append).
    """
    import ast  # local import: this file has no shared import block
    rospy.loginfo('检测到预注册的位置')
    rospy.loginfo('读取预设位置')
    count = getpass.getuser()
    with open('/home/%s/xu_slam/src/nav_staff/map/pre_regist_pose.txt' % count, 'r') as read:
        pose = read.readlines()
    poses = ast.literal_eval(pose[0])
    intial_point = None
    try:
        intial = rospy.wait_for_message("odom", Odometry)
        intial_point = PointStamped()
        intial_point.point = intial.pose.pose.position
        intial_point.header.stamp = rospy.Time.now()
        intial_point.header.frame_id = 'map'
    except Exception as e:
        # Best effort: without odometry we still run the preset points.
        rospy.logwarn(e)
    pose_list = []
    for i in range(len(poses)):
        default_point = PointStamped()
        default_point.header.frame_id = 'map'
        default_point.header.stamp = rospy.Time.now()
        default_point.point.x = poses['pose_%s' % i]['x']
        default_point.point.y = poses['pose_%s' % i]['y']
        default_point.point.z = poses['pose_%s' % i]['z']
        default_point.header.seq = i + 1
        pose_list.append(default_point)
    if intial_point is not None:
        pose_list.append(intial_point)
    tasks(len(pose_list), pose_list)
예제 #15
0
파일: node.py 프로젝트: Kapim/arcor-pr2
    def move(self, group, target="", pose=None):
        """Move the MoveIt group to a named target or a stamped pose.

        When a pose is given, also points the head at it via look_at_cb.
        Returns True on success, False after three failed attempts.
        """
        assert target != "" or pose is not None

        # group.set_workspace([minX, minY, minZ, maxX, maxY, maxZ])

        if pose is not None:
            group.set_pose_target(pose)
            # Look toward the commanded position while moving.
            pt = PointStamped()
            pt.header = pose.header
            pt.point = pose.pose.position
            self.look_at_cb(pt)
        else:
            group.set_named_target(target)

        group.allow_looking(False)
        group.allow_replanning(False)
        group.set_num_planning_attempts(1)

        cnt = 3

        while cnt > 0:

            if group.go(wait=True):
                return True

            # BUG FIX: cnt was never decremented, so a persistent failure
            # looped forever instead of giving up after three tries.
            cnt -= 1
            rospy.sleep(1)

        return False
예제 #16
0
    def pose_cb(self, pose):
        """Store a new goal from a PoseStamped and publish a cylinder marker.

        The pose's position is transformed into self.reference_frame and
        kept as (goal_x, goal_y); the marker visualizes the raw goal pose
        in its original frame.
        """
        m_pose = PointStamped()
        m_pose.header = pose.header
        m_pose.point = pose.pose.position
        m_pose = self.listener.transformPoint(self.reference_frame, m_pose)
        self.goal_x = m_pose.point.x
        self.goal_y = m_pose.point.y
        print "New goal: %.2f %.2f" % (self.goal_x, self.goal_y)

        # Cylinder marker at the goal pose, in the goal's original frame.
        marker = Marker()
        marker.header = pose.header
        marker.ns = "goal_marker"
        marker.id = 10
        marker.type = Marker.CYLINDER
        marker.action = Marker.ADD
        marker.pose = pose.pose
        marker.scale.x = 0.1
        marker.scale.y = 0.1
        marker.scale.z = 0.5
        marker.color.a = 1.0
        marker.color.r = 1.0
        marker.color.g = 1.0
        marker.color.b = 0.0
        # NOTE(review): lifetime.secs is an integer field and -1.0 is a
        # float; a negative lifetime is unusual -- confirm intent.
        marker.lifetime.secs = -1.0
        self.marker_pub.publish(marker)
예제 #17
0
    def do(self, blackboard, reevaluate=False):
        """Wait up to 5 s for a person detection; store its map position.

        On the first tick, enables the person detector and starts a timer.
        When a person is detected, converts the detection into the map
        frame, stores it as blackboard.customer_position, disables the
        detector, and pops. On timeout it just disables and pops.
        """
        if self.first_iteration:
            # First tick: remember the start time and switch detection on.
            self.begin = rospy.get_rostime()
            enable = Bool()
            enable.data = True
            blackboard.person_detection_switch_pub.publish(enable)
            self.first_iteration = False

        if blackboard.person_detected:
            disable = Bool()
            disable.data = False
            blackboard.person_detection_switch_pub.publish(disable)

            # Convert the camera-frame detection into the map frame.
            detection = PointStamped()
            detection.header.frame_id = 'xtion_optical_frame'
            detection.point = copy.deepcopy(blackboard.person_position)
            blackboard.customer_position = blackboard.tfl.transformPoint(
                'map', detection)
            blackboard.person_detected = False
            self.pop()
        elif rospy.get_rostime() - self.begin >= rospy.Duration.from_sec(5.0):
            # Timed out without a detection: switch off and give up.
            disable = Bool()
            disable.data = False
            blackboard.person_detection_switch_pub.publish(disable)
            self.pop()
예제 #18
0
    def body_pose_cb(self, msg):
        """Body-tracker callback: pan/tilt the head toward the tracked person.

        NOTE(review): the unconditional return below deliberately disables
        this handler (it only logs an error and exits). Everything after
        it is dead code and even references an undefined `person_id`.
        """
        rospy.loginfo('%s: ERROR ERROR got 3D body_pose message' % (self._action_name))
        return

        # --- dead code below: never executed, kept for reference ---
        if person_id != self.id_to_track:
            # not the right person, skip
            rospy.loginfo("%s: Body Tracker: Tracking ID is %d, skipping pose2D for ID %d", 
                self._action_name, self.id_to_track, person_id )
            return

       # position component of the target pose stored as a PointStamped() message.

        # create a PointStamped structure to transform via transformPoint
        target = PointStamped()
        target.header.frame_id = msg.header.frame_id
        target.point = msg.pose.position

        if target.point.z == 0.0:
            rospy.loginfo('%s: skipping blank message' % (self._action_name))
            return

        rospy.loginfo("%s: Body Tracker: Tracking person at %f, %f, %f", self._action_name,
          target.point.x, target.point.y, target.point.z)

        # convert from xyz to pan tilt angles
        # TODO: 1) Handle Children - currently assumes everyone is 6 foot tall!
        #       2) What happens if robot bows? 
        if target.point.x < 0.2:     # min range of most depth cameras
          #rospy.loginfo("%s: Body Tracker: Bad Distance (x) value! %f", 
          #  self._action_name, target.point.x)
          return

        # math shortcut for approx radians
        pan_angle =  target.point.y / target.point.x

        # OPTION 1: Track actual target height 
        #person_head_offset_radians = 0.52   # TB2S value - TODO Tune this 
        #tilt_angle = (target.point.z / target.point.x) + person_head_offset_radians 

        # OPTION 2: Guess height, based upon distance to person
        # FUTURE: combine the two, use "guess" when person is too close?
        tilt_angle = 0.4 / target.point.x # SHELDON, CHEST MOUNTED camera

        rospy.loginfo("%s: Body Tracker: Pan = %f (%f), Tilt = %f (%f)", self._action_name, 
          pan_angle, degrees(pan_angle), tilt_angle, degrees(tilt_angle))

        # Send servo commands
        if abs(pan_angle) > MAX_PAN:    # just over 45 degrees - TODO put in actual limits here!
            rospy.loginfo("%s: Body Tracker: Pan %f exceeds MAX", self._action_name, pan_angle)
            return
        if abs(tilt_angle) > MAX_TILT:    # Limit vertical to assure good tracking
            rospy.loginfo("%s: Body Tracker: Tilt %f exceeds MAX", self._action_name, tilt_angle)
            return

        pub_head_pan.publish(pan_angle)
        pub_head_tilt.publish(-tilt_angle)

        # SHELDON ONLY
        sidetiltAmt = 0.0
        pub_head_sidetilt.publish(sidetiltAmt)
예제 #19
0
파일: test_ji.py 프로젝트: JeffsanC/svcam
def estimation(msg):
    """Fuse a vision measurement into the global state estimate and publish it.

    The incoming position (in the "vicon" frame) is re-expressed in the
    "ned" frame to form the UWB anchor position; currently only the vision
    update is active (the UWB update is commented out).
    """
    uwbdis = msg.distance

    point = PointStamped()
    point.header = Header()
    point.header.frame_id = "vicon"
    point.header.stamp = rospy.Time.now()
    point.point = msg.position

 
    listener.waitForTransform("ned", "vicon", rospy.Time.now(), rospy.Duration(0.05))
    point_tf = listener.transformPoint("ned", point)
    uwbanchor = array([point_tf.point.x, point_tf.point.y, point_tf.point.z])

    # Image-plane measurement; xe is the persistent (global) state estimate.
    imp = array(msg.point)
    global q,a,r,xe
    
    #xe, _ = uwb.locate(xe, Q, 1.0/100, uwbdis, uwbanchor, q, a, r)
    xe, _ = vision.locate(xe, Q, 1.0/100, imp, visionanchor, q, a, r)
 
    
    # State layout (per the indexing below): position [0:3], orientation
    # quaternion [3:7], velocity [7:10], bias [10].
    x_msg = state()
    x_msg.state.header = Header()
    x_msg.state.header.frame_id = "ned"
    x_msg.state.header.stamp = rospy.Time.now()
    x_msg.state.pose.position = Point(xe[0], xe[1], xe[2])
    x_msg.state.pose.orientation = Quaternion(xe[3], xe[4], xe[5], xe[6])
    x_msg.velocity = Vector3(xe[7], xe[8], xe[9])
    x_msg.bias = xe[10]
    pub.publish(x_msg)
예제 #20
0
def pre_load():
    """Load pre-registered goal positions from disk and start the task list.

    Reads the saved pose dictionary, turns each entry into a map-frame
    PointStamped, appends the robot's current odometry position as the
    final point (when available), and hands the list to tasks().

    Fixes over the original: the pose file is parsed with
    ast.literal_eval instead of eval (the file is data, not trusted
    code), the file handle is closed via `with`, and a failed odometry
    fetch no longer leaves `intial_point` undefined (which previously
    raised NameError at the final append).
    """
    import ast  # local import: this file has no shared import block
    rospy.loginfo('检测到预注册的位置')
    rospy.loginfo('读取预设位置')
    count = getpass.getuser()
    with open('/home/%s/xu_slam/src/nav_staff/map/pre_regist_pose.txt' % count, 'r') as read:
        pose = read.readlines()
    poses = ast.literal_eval(pose[0])
    intial_point = None
    try:
        intial = rospy.wait_for_message("odom", Odometry)
        intial_point = PointStamped()
        intial_point.point = intial.pose.pose.position
        intial_point.header.stamp = rospy.Time.now()
        intial_point.header.frame_id = 'map'
    except Exception as e:
        # Best effort: without odometry we still run the preset points.
        rospy.logwarn(e)
    pose_list = []
    for i in range(len(poses)):
        default_point = PointStamped()
        default_point.header.frame_id = 'map'
        default_point.header.stamp = rospy.Time.now()
        default_point.point.x = poses['pose_%s' % i]['x']
        default_point.point.y = poses['pose_%s' % i]['y']
        default_point.point.z = poses['pose_%s' % i]['z']
        default_point.header.seq = i + 1
        pose_list.append(default_point)
    if intial_point is not None:
        pose_list.append(intial_point)
    tasks(len(pose_list), pose_list)
예제 #21
0
    def face_callback(self, msg):
        """On the first face detection, store its position and orientation.

        Records the detection frame, builds a pose with camera-to-robot
        axis rotation, and stores the resulting origin/quaternion. Sets
        found_face so later detections are ignored.

        Fix over the original: the distance `d` was computed (for the
        commented-out transform path) but never used; removed.
        """
        if self.found_face:
            return

        face = PointStamped()
        face.header = msg.people[0].header
        face.point = msg.people[0].pos
        self.face_parent_frame = msg.people[0].header.frame_id
        # self.listener.waitForTransform(face.header.frame_id, 'base_link', rospy.Time.now(), rospy.Duration(5.0))

        # Change the axes from camera-type axes
        self.quaternion = quaternion_from_euler(pi/2, pi/2, 0.0)
        pose = PoseStamped()
        pose.header = face.header
        pose.pose.position = face.point
        pose.pose.orientation.x = self.quaternion[0]
        pose.pose.orientation.y = self.quaternion[1]
        pose.pose.orientation.z = self.quaternion[2]
        pose.pose.orientation.w = self.quaternion[3]

        # Transform to base_link
        # pose = self.listener.transformPose('base_link', pose)
        face = pose.pose.position
        self.quaternion = (pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z, pose.pose.orientation.w)

        self.origin = (face.x, face.y, face.z)

        # Flip the bit
        self.found_face = True
예제 #22
0
    def _publish_detection(self, detection_data, detection_frame,
                           detection_stamp):
        """Convert a detection to UTM, then to WGS84, and publish it.

        The detection's location is stamped in its source frame,
        transformed into the UTM frame, wrapped in a PoseStamped with
        identity orientation, converted to a geographic pose tagged with
        the detection category, published, and logged.
        """
        detection_point = PointStamped()
        detection_point.header.frame_id = detection_frame
        detection_point.header.stamp = detection_stamp
        detection_point.point = detection_data.location

        # Map the detection into the UTM frame.
        transform = self._tf_buffer.lookup_transform(
            target_frame=self._utm_frame,
            source_frame=detection_frame,
            time=rospy.Time.now(),
            timeout=rospy.Duration(1.0))
        utm_point = tf2_geometry_msgs.do_transform_point(
            detection_point, transform)

        # Identity orientation: only the position carries information.
        utm_pose = PoseStamped()
        utm_pose.pose.orientation.x = 0.0
        utm_pose.pose.orientation.y = 0.0
        utm_pose.pose.orientation.z = 0.0
        utm_pose.pose.orientation.w = 1.0
        utm_pose.pose.position.x = utm_point.point.x
        utm_pose.pose.position.y = utm_point.point.y
        utm_pose.pose.position.z = utm_point.point.z

        geo_pose = utm_to_wgs84_pose(utm_pose, self._current_nav_sat_fix)
        geo_pose.header.frame_id = detection_data.category
        self._perception_pub.publish(geo_pose)
        rospy.logwarn(geo_pose)
예제 #23
0
    def transform_object_pose_to_robot_rf(self, object_pose):
        """Transform an object pose from the kinect optical frame into the
        robot's reference frame, apply hand-tuned offsets, and publish
        both the resulting pose and its position for debugging.

        Returns the transformed PoseStamped.
        """
        #kinect camera axis not the same as the robot axis so we could have
        #to perform the necessary transforms first to get both axes aligned
        #and then to transform camera rf to robot's rf

        # Fixed, hand-calibrated camera-to-robot transform.
        cam_tf = TransformStamped()
        cam_tf.child_frame_id = 'camera_rgb_optical_frame'
        cam_tf.transform.translation.x = 0.188441686871
        cam_tf.transform.translation.y = -0.0448522594062
        cam_tf.transform.translation.z = 0.80968

        cam_tf.transform.rotation.x = -0.701957989835
        cam_tf.transform.rotation.y = 0.701150861488
        cam_tf.transform.rotation.z = -0.0883868019887
        cam_tf.transform.rotation.w = 0.0884885482708

        trans_pose = tf2_geometry_msgs.do_transform_pose(object_pose, cam_tf)

        # Fine-tuning offsets plus a fixed grasp orientation.
        trans_pose.pose.position.x += 0.08
        trans_pose.pose.position.z -= 0.07
        trans_pose.pose.orientation.x = 1.0
        trans_pose.pose.orientation.y = 0
        trans_pose.pose.orientation.z = 0
        trans_pose.pose.orientation.w = 0
        self.transposearray_pub.publish(trans_pose)

        # Debug: publish just the transformed position in the "base" frame.
        debug_point = PointStamped()
        debug_point.header = object_pose.header
        debug_point.header.frame_id = "base"
        debug_point.point = trans_pose.pose.position
        self.testtranspoint_pub.publish(debug_point)
        return trans_pose
예제 #24
0
    def pose_cb(self, pose):
        m_pose = PointStamped()
        m_pose.header = pose.header
        m_pose.point = pose.pose.position
        m_pose = self.listener.transformPoint(self.reference_frame,m_pose)
        self.goal_x = m_pose.point.x
        self.goal_y = m_pose.point.y
        print "New goal: %.2f %.2f" % (self.goal_x, self.goal_y)

        marker = Marker()
        marker.header = pose.header
        marker.ns = "goal_marker"
        marker.id = 10
        marker.type = Marker.CYLINDER
        marker.action = Marker.ADD
        marker.pose = pose.pose
        marker.scale.x = 0.1
        marker.scale.y = 0.1
        marker.scale.z = 0.5;
        marker.color.a = 1.0;
        marker.color.r = 1.0;
        marker.color.g = 1.0;
        marker.color.b = 0.0;
        marker.lifetime.secs=-1.0
        self.marker_pub.publish(marker)
예제 #25
0
	def runFilter(self):
		"""Kalman-filter loop: iterate, publish state, point, and TF.

		Runs at self.filterRate until ROS shutdown; a pending flag_reset
		re-seeds the filter before the next iteration.
		"""
		r = rospy.Rate(self.filterRate) 
		while not rospy.is_shutdown():
			if self.flag_reset:
				self.kf.reset(self.getReset())
				self.flag_reset = 0

			self.kf.iteration(self.getMeasures())

			self.pose_pub_.publish(self.kf.getState())
			
			# Re-stamp the filtered position and publish it separately.
			person_point = PointStamped()
			person_point.header = self.kf.getState().header
			person_point.header.stamp = rospy.Time.now()
			person_point.point = self.kf.getState().pose.position
			self.point_pub_.publish(person_point)

			# Broadcast the person pose (z fixed to 0) as TF frame
			# "person_link", parented to the state's frame.
			self.tf_person.sendTransform((self.kf.getState().pose.position.x,self.kf.getState().pose.position.y,0),
                     		(self.kf.getState().pose.orientation.x,self.kf.getState().pose.orientation.y,self.kf.getState().pose.orientation.z,self.kf.getState().pose.orientation.w),
                     		rospy.Time.now(),
                     		"person_link",
                     		self.kf.getState().header.frame_id)

			r.sleep()
def create_point_stamped(point, frame_id='base_link'):
    """Wrap an (x, y, z) sequence in a PointStamped with the given frame."""
    stamped = PointStamped()
    stamped.header.frame_id = frame_id
    # Zero stamp: downstream tf calls will use the latest transform.
    stamped.header.stamp = rospy.Time()
    stamped.point = Point(*point)
    return stamped
예제 #27
0
    def process_image_cb(self, msg):
        """Image callback: segment the path, locate the ROI center, publish
        debug images and the ROI center as a PointStamped.

        msg: sensor_msgs/Image (converted as bgr8).
        """
        try:
            start = time.time()

            bgr_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
            path_image, mask = self.get_path_image(bgr_image)
            x, y = self.get_roi_center_naiive(mask)

            # Crop the ROI band near the bottom of the image and mark its center.
            colored_roi = path_image[-self.ROI_HEIGHT -
                                     self.ROI_OFFSET:-self.ROI_OFFSET, :]
            self.color_roi_center(colored_roi, x, y)

            # publish images to show in rviz
            self.img_pub.publish(
                self.bridge.cv2_to_imgmsg(path_image.astype(np.uint8),
                                          encoding='bgr8'))
            self.mask_pub.publish(
                self.bridge.cv2_to_imgmsg(mask.astype(np.uint8),
                                          encoding='mono8'))
            self.roi_pub.publish(
                self.bridge.cv2_to_imgmsg(colored_roi.astype(np.uint8),
                                          encoding='bgr8'))

            # Publish the ROI center in its own message rather than shadowing
            # the incoming image `msg` parameter (previous code reused `msg`).
            # NOTE(review): header.frame_id is left unset as before — confirm
            # downstream consumers do not rely on it.
            center_msg = PointStamped()
            center_msg.point = Point(x, y, 0)
            center_msg.header.stamp = rospy.get_rostime()
            self.pub.publish(center_msg)

            self.times.append(time.time() - start)
        except CvBridgeError as e:
            rospy.logerr(e)
예제 #28
0
 def new_place_position(self):
     """Return a PointStamped at the arm's current position projected to z=0.

     Builds a fresh Point instead of aliasing current_pose.pose.position, so
     zeroing z cannot mutate the pose object returned by the move group.
     """
     current_pose = utils.manipulation.get_arm_move_group().get_current_pose()
     destination = PointStamped()
     destination.header.frame_id = current_pose.header.frame_id
     destination.point = Point(current_pose.pose.position.x,
                               current_pose.pose.position.y,
                               0)
     return destination
    def transformMap(self, odometry):
        """Transform an Odometry message's position into self._target_frame.

        Returns the transformed PointStamped, or None on failure (in which
        case self._failed is set so the enclosing state can report an error).
        """
        odom_position = PointStamped()
        odom_position.header = odometry.header
        odom_position.point = odometry.pose.pose.position

        try:
            # Get transform
            self._listener.listener().waitForTransform(
                self._target_frame, odometry.header.frame_id,
                odometry.header.stamp, self._timeout)
            return self._listener.listener().transformPoint(
                self._target_frame, odom_position)

        # Expected tf failures: warn and flag, but do not crash the state.
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException) as e:
            Logger.logwarn(
                'Failed to get the transformation to target_frame\n   %s' %
                str(e))
            self._failed = True
            return None
        # Anything unexpected is handled the same way, with a distinct message.
        except Exception as e:
            Logger.logwarn(
                'Failed to get the transformation to target frame due to unknown error\n   %s'
                % str(e))
            self._failed = True
            return None
예제 #30
0
 def publish(self):
     """Publish the current result point in the map frame for visualization."""
     viz_msg = PointStamped()
     viz_msg.header.stamp = rospy.Time()
     viz_msg.header.frame_id = "map"
     viz_msg.point = self.point
     self.viz_publisher.publish(viz_msg)
예제 #31
0
    def get_head_translation(self, timestamp, subject_id):
        """Estimate the 3D nose-center position of *subject_id* at *timestamp*.

        Converts the subject's last translation vector (millimetres, axes
        negated) into a point in the RGB camera frame, transforms it into the
        ROS RGB frame and returns it in tf convention, or None when the
        transform cannot be extrapolated.
        """
        trans_reshaped = self.last_tvec[subject_id].reshape(3, 1)
        # mm -> m, with each axis negated to match the camera frame convention.
        nose_center_3d_rot = [
            -float(trans_reshaped[0] / 1000.0),
            -float(trans_reshaped[1] / 1000.0),
            -float(trans_reshaped[2] / 1000.0)
        ]

        nose_center_3d_rot_frame = self.rgb_frame_id

        try:
            nose_center_3d_rot_pt = PointStamped()
            nose_center_3d_rot_pt.header.frame_id = nose_center_3d_rot_frame
            nose_center_3d_rot_pt.header.stamp = timestamp
            nose_center_3d_rot_pt.point = Point(x=nose_center_3d_rot[0],
                                                y=nose_center_3d_rot[1],
                                                z=nose_center_3d_rot[2])
            nose_center_3d = self.tf_listener.transformPoint(
                self.rgb_frame_id_ros, nose_center_3d_rot_pt)
            # Re-stamp with the requested time after the transform.
            nose_center_3d.header.stamp = timestamp

            nose_center_3d_tf = gaze_tools.position_ros_to_tf(
                nose_center_3d.point)

            print('Translation based on landmarks', nose_center_3d_tf)
            return nose_center_3d_tf
        except ExtrapolationException as e:
            print(e)
            return None
    def convertStereo(self, u, v, disparity):
        """
        Converts two pixel coordinates u and v along with the disparity to give PointStamped
        
        This code was taken from stereo_click package
        """

        if not self.canProcess():
            return None
        
        for key in self.locks.keys():
            self.locks[key].acquire()

        # Release the locks even if projection or the tf lookup raises;
        # otherwise a single failure would deadlock every subsequent call.
        try:
            stereoModel = image_geometry.StereoCameraModel()
            stereoModel.fromCameraInfo(self.leftInfo, self.rightInfo)
            (x, y, z) = stereoModel.projectPixelTo3d((u, v), disparity)

            cameraPoint = PointStamped()
            cameraPoint.header.frame_id = self.leftInfo.header.frame_id
            cameraPoint.header.stamp = rospy.Time.now()
            cameraPoint.point = Point(x, y, z)

            self.listener.waitForTransform(self.outputFrame, cameraPoint.header.frame_id, rospy.Time.now(), rospy.Duration(4.0))
            outputPoint = self.listener.transformPoint(self.outputFrame, cameraPoint)
        finally:
            for key in self.locks.keys():
                self.locks[key].release()

        return outputPoint
def create_point_stamped(point, frame_id='base_link'):
    """Build a PointStamped at *point* (x, y, z) in *frame_id* with zero stamp."""
    msg = PointStamped()
    msg.point = Point(*point)
    msg.header.stamp = rospy.Time()
    msg.header.frame_id = frame_id
    return msg
예제 #34
0
def euroc_object_to_odom_combined(euroc_object):
    """Transform the object's centroid, cuboid pose and mpe_object poses
    into the /odom_combined frame, mutating *euroc_object* in place."""
    header = Header(0, rospy.Time(0), euroc_object.frame_id)

    # Centroid: wrap in a stamped point so it can be transformed.
    centroid_stamped = PointStamped()
    centroid_stamped.header = header
    centroid_stamped.point = euroc_object.c_centroid
    transformed = manipulation.transform_to(centroid_stamped, '/odom_combined')
    euroc_object.c_centroid = transformed.point
    euroc_object.frame_id = '/odom_combined'

    # Cuboid pose, if a cuboid was detected.
    if euroc_object.c_cuboid_success:
        cuboid_stamped = manipulation.transform_to(
            PoseStamped(header, euroc_object.object.primitive_poses[0]),
            '/odom_combined')
        euroc_object.object.primitive_poses[0] = cuboid_stamped.pose
        euroc_object.object.header.frame_id = '/odom_combined'

    # Every primitive pose of the mpe_object, if estimation succeeded.
    if euroc_object.mpe_success:
        for i, primitive_pose in enumerate(euroc_object.mpe_object.primitive_poses):
            pose_stamped = manipulation.transform_to(
                PoseStamped(header, primitive_pose), '/odom_combined')
            euroc_object.mpe_object.primitive_poses[i] = pose_stamped.pose
            euroc_object.mpe_object.header.frame_id = '/odom_combined'
    def publishPosition(self, position):
        """Publish (x, y) of *position* as a PointStamped in self.frame, z=0."""
        stamped = PointStamped()
        stamped.header.frame_id = self.frame
        stamped.header.stamp = rospy.Time.now()
        stamped.point = Point(position[0], position[1], 0)
        self.publisher.publish(stamped)
예제 #36
0
    def transform_to_odom(self, laser_point): 
    

        laser_point_stamped  = PointStamped() 

        laser_point_stamped.header.frame_id = "/base_laser"
        laser_point_stamped.header.stamp = rospy.Time() 

        laser_point_stamped.point = laser_point 
        
        try: 
            
            

            ''' Program runs successfully on changing the frame to any frame other than odom '''

            target_frame = '/odom'

            #self.listener.waitForTransform('/base_laser'  , target_frame , rospy.Time().now(), rospy.Duration(1.0))
               
            
            odom_point = self.listener.transformPoint(target_frame , laser_point_stamped)
            
            
            return odom_point
        except Exception as e: 

            print "Following exception was enconuntered while performing the transformation -> " + str(e)
예제 #37
0
        def publishWaypoints():
            """Publish a motion Cmd toward the first waypoint, draw every
            waypoint in rviz, and publish the robot's current map position."""
            command = Cmd()
            try:
                command.destination = self.waypoints[0].point
            except Exception:
                # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
                # are not swallowed. No usable waypoint: report and abort.
                print("destination publish failed")
                printWaypoints()
                exit()
            command.stop = False
            command.is_relative = False
            command.is_deca = False

            command.speed = 0.43

            command.destination_stop_distance = 0
            command.emergency_stop_distance = 0.15
            command.emergency_stop_angle = 30

            self.point_publisher.publish(command)

            # Publish the points in rviz.
            for w in self.waypoints:
                w.publish()

            # Current robot position in the map frame, drawn at z=1.
            roboPosX = getRoboMapPosition().x
            roboPosY = getRoboMapPosition().y
            roboPos = PointStamped()
            roboPos.header.stamp = rospy.Time()
            roboPos.header.frame_id = "map"
            roboPos.point = Point(roboPosX, roboPosY, 1)

            self.robot_publisher.publish(roboPos)
예제 #38
0
        def publishWaypoints():
            """Publish the first waypoint (converted to meters) as a Point and
            draw the first four waypoints plus the robot position in rviz."""
            # Waypoints are stored in map cells; convert cell -> meters using
            # the 0.05 m/cell resolution and the map origin. A bare Point is
            # published because camp_goto consumes Point, not PointStamped.
            cell = self.waypoints.get(1)
            goal = Point((0.05 * cell.x) + self.mapActual.info.origin.position.x,
                         (0.05 * cell.y) + self.mapActual.info.origin.position.y,
                         cell.z)
            self.point_publisher.publish(goal)

            # Publish the points in rviz.
            self.viz_publisher_1.publish(getPointInMeters(1))
            self.viz_publisher_2.publish(getPointInMeters(2))
            self.viz_publisher_3.publish(getPointInMeters(3))
            self.viz_publisher_4.publish(getPointInMeters(4))

            # Robot position marker in the map frame, drawn at z=1.
            robot_x = getRoboMapPosition().x
            robot_y = getRoboMapPosition().y
            robot_marker = PointStamped()
            robot_marker.header.stamp = rospy.Time()
            robot_marker.header.frame_id = "map"
            robot_marker.point = Point(robot_x, robot_y, 1)

            self.robot_publisher.publish(robot_marker)
예제 #39
0
def main():
    """Look at every detected AR marker with the robot's full body.

    Waits for at least one marker on /ar_pose_marker, then commands a
    look-at toward each detected marker in turn.
    """
    rospy.init_node("hallucinated_lookat")
    wait_for_time()

    start = PoseStamped()
    start.header.frame_id = 'base_link'
                                                                               
    reader = ArTagReader(tf.TransformListener())
    rospy.sleep(0.5)
    sub = rospy.Subscriber("/ar_pose_marker", AlvarMarkers, callback=reader.callback, queue_size=10) # Subscribe to AR tag poses, use reader.callback
    
    # Busy-wait until the subscriber has delivered at least one marker.
    while len(reader.markers) == 0: 
        rospy.sleep(0.1)

    head = robot_api.FullBodyLookAt(tf_listener=reader._tf_listener)
    sp = PointStamped()
    for marker in reader.markers:
        # TODO: get the pose to move to
        sp.header = marker.header
        sp.point = marker.pose.pose.position
        # NOTE(review): a truthy `error` is treated as success below, which
        # looks inverted — confirm look_at()'s return convention.
        error = head.look_at(sp)
        if error:
            rospy.loginfo('Moved to marker {}'.format(marker.id))
            # return
        else:
            rospy.logwarn('Failed to move to marker {}'.format(marker.id))
    # NOTE(review): logged unconditionally, even after a successful look-at.
    rospy.logerr('Failed to move to any markers!')
예제 #40
0
    def callback(self, data):
        """Project the first AR marker's position into the kinect depth image
        and cache the pixel coordinates as self.last_pose."""
        marker_pt = PointStamped()
        marker_pt.header.frame_id = data.markers[0].header.frame_id
        marker_pt.header.stamp = rospy.get_rostime()
        marker_pt.point = data.markers[0].pose.position

        # Move the point into the RGB optical frame; bail out quietly if the
        # transform is unavailable.
        try:
            self.listener.waitForTransform(self.kinect.rgb_optical_frame,
                                           marker_pt.header.frame_id,
                                           rospy.get_rostime(),
                                           rospy.Duration(4.0))
            marker_pt = self.listener.transformPoint(
                self.kinect.rgb_optical_frame, marker_pt)
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            return

        # Kinect2 projection honours distortion; the original kinect does not.
        coords = (marker_pt.point.x, marker_pt.point.y, marker_pt.point.z)
        if (self.kinect_type == "Kinect2"):
            self.last_pose = self.kinect.world_to_depth(coords)
        else:
            self.last_pose = self.kinect.world_to_depth(coords,
                                                        use_distortion=False)

        return
예제 #41
0
 def publish(self, msg):
     """Re-publish the position of *msg* (which carries .header and .pos)
     as a PointStamped on the point publisher."""
     stamped = PointStamped()
     stamped.header = msg.header
     stamped.point = msg.pos
     self._point_pub.publish(stamped)
예제 #42
0
def people_cb(meas):
    """Update the global mouth_center pixel from a face-position measurement."""
    global mouth_center
    face_pt = PointStamped()
    face_pt.point = meas.pos
    face_pt.header = meas.header
    # Stamp zero asks tf for the most recent available transform.
    face_pt.header.stamp = rospy.Time(0)
    cam_pt = tfl.transformPoint(model.tf_frame, face_pt)
    # Offset downward from the face center to approximate the mouth position.
    mouth_center = model.project3dToPixel((cam_pt.point.x,
                                           cam_pt.point.y + mouth_offset,
                                           cam_pt.point.z))
    def ConvertToWorldPoint(self, point):
        """Transform *point* from this object's TF frame into /map and return
        the bare geometry_msgs Point."""
        stamped = PointStamped()
        stamped.header.frame_id = self.tf_name
        stamped.point = point
        return self.tf_listener.transformPoint("/map", stamped).point
 def transform_pos(self, pt, hdr):
     ps = PointStamped()
     ps.point = pt
     ps.header = hdr
     #ps.header.stamp = hdr.stamp
     print "set:"+str(ps)
     self.tfl.waitForTransform(ps.header.frame_id, '/map', ps.header.stamp, rospy.Duration(3.0))
     point_in_map = self.tfl.transformPoint('/map', ps)
     return point_in_map
 def publish_lla(self):
     '''
     Publish a point message with the current LLA computed from an odom and absodom message.
     '''
     _, _, lla = self.enu(self.enu_pos)
     msg = PointStamped()
     msg.header.stamp = self.stamp
     msg.header.frame_id = 'lla'
     msg.point = numpy_to_point(lla)
     self.lla_pub.publish(msg)
예제 #46
0
	def transformPoint(self, target_frame, ps, time):
		"""Manually transform PointStamped *ps* into *target_frame* at *time*
		and return a new PointStamped (stamp preserved, frame replaced)."""
		self.tl.waitForTransform(target_frame, ps.header.frame_id, time, rospy.Duration(5))
		trans, rot = self.tl.lookupTransform(target_frame, ps.header.frame_id, time)
		# Compose a 4x4 homogeneous transform and apply it to the point.
		mat = numpy.dot(tf.transformations.translation_matrix(trans),
		                tf.transformations.quaternion_matrix(rot))
		xyz = tuple(numpy.dot(mat, numpy.array([ps.point.x, ps.point.y, ps.point.z, 1.0])))[:3]
		out = PointStamped()
		out.header.stamp = ps.header.stamp
		out.header.frame_id = target_frame
		out.point = geometry_msgs.msg.Point(*xyz)
		return out
예제 #47
0
def transformToLocal(point):
	"""Transform a geometry_msgs Point from the 'odom' frame into 'map' and
	return the bare transformed Point."""
	pointIn = PointStamped()
	pointIn.point = point
	pointIn.header.frame_id = 'odom'
	# Stamp zero: use the latest transform tf has available.
	pointIn.header.stamp = rospy.Time(0)

	# Removed a dead `pointOut = PointStamped()` assignment that was
	# immediately overwritten; transformPoint returns a fresh PointStamped.
	pointOut = transform.transformPoint('map', pointIn)

	return pointOut.point
예제 #48
0
    def move_to_point(self, pose):
        """Point the head at *pose* and command the arm group to it.

        Returns True when the motion succeeds, False otherwise.
        """
        # Mirror the target as a look-at point so the head tracks the goal.
        gaze = PointStamped()
        gaze.header = pose.header
        gaze.point = pose.pose.position
        self.look_at_pub.publish(gaze)

        self.group.set_pose_target(pose)
        self.move_to_pose_pub.publish(pose)
        return bool(self.group.go(wait=True))
def callbackPoly(data):
  """Transform every point of a stamped polygon into the r_sole frame and
  log the (x, y) pairs to polygon_log.txt (overwritten on each call)."""
  global listener
  # Context manager ensures the file is closed even if a transform raises.
  with open("polygon_log.txt", 'w') as polyfile:
    listener.waitForTransform(data.header.frame_id, "r_sole", data.header.stamp, rospy.Duration(0.5))
    for p in data.polygon.points:
      p_s = PointStamped()
      p_s.point = p
      p_s.header = data.header
      p_st = listener.transformPoint('r_sole', p_s)
      polyfile.write("%f %f\n" % (p_st.point.x, p_st.point.y))
      rospy.loginfo("Writing Poly: %f %f", p_st.point.x, p_st.point.y)
예제 #50
0
 def ar_cb(self, markers):
     """AR-marker callback: update the localisation filter with each marker.

     For every marker, look up the landmark position L in the target frame
     and the measured position Z in this robot's ground frame, then feed
     (Z, L) to the filter's AR update.
     """
     for m in markers.markers:
         self.listener.waitForTransform(self.target_frame,'/RotMarker%02d'% m.id, m.header.stamp, rospy.Duration(1.0))
         self.listener.waitForTransform("/%s/ground"%self.name,m.header.frame_id, m.header.stamp, rospy.Duration(1.0))
         # Landmark position of this marker in the target (map) frame.
         ((x,y,z),rot) = self.listener.lookupTransform(self.target_frame,'/RotMarker%02d'% m.id, m.header.stamp)
         L = vstack([x,y])
         # Measured marker position, expressed in the robot's ground frame.
         m_pose = PointStamped()
         m_pose.header = m.header
         m_pose.point = m.pose.pose.position
         m_pose = self.listener.transformPoint("/%s/ground"%self.name,m_pose)
         Z = vstack([m_pose.point.x,m_pose.point.y])
         # TODO
         self.filter.update_ar(Z,L,self.ar_precision)
def convertStereo(u, v, disparity, info):
    """
    Converts two pixel coordinates u and v along with the disparity to give PointStamped       
    """
    model = image_geometry.StereoCameraModel()
    model.fromCameraInfo(info["l"], info["r"])
    x, y, z = model.projectPixelTo3d((u, v), disparity)

    # Stamped point in the left camera's frame, timestamped now.
    stamped = PointStamped()
    stamped.header.frame_id = info["l"].header.frame_id
    stamped.header.stamp = rospy.Time.now()
    stamped.point = Point(x, y, z)
    return stamped
예제 #52
0
    def execute(self, gaze_goal):
        """Gaze action-server callback: track the goal entity's TF frame until
        the end effector is close enough (or the joints stop), then succeed.
        """
        self.enable_srv()
        # NOTE(review): speed is set twice; the second (interpolated) call
        # overrides the first — confirm the first call is intentional.
        self.speed_srv(gaze_goal.speed)
        self.speed_srv(interpolate(gaze_goal.speed, 0.0, 1.0, 0.0, 0.3))
        self.joints_stopped = False
        count = 0

        while not rospy.is_shutdown() and not self.action_server.is_preempt_requested() and self.action_server.is_active():
            entity = EntityProxy(gaze_goal.target)
            entity_tf_frame = entity.tf_frame()

            # Publish the current target position for the gaze controller.
            try:
                (target_trans, target_rot) = self.tl.lookupTransform(self.origin_frame, entity_tf_frame, rospy.Time(0))
                point_stamped = PointStamped()
                point_stamped.header.frame_id = self.origin_frame
                point_stamped.header.stamp = rospy.Time().now()
                point_stamped.point = Point(x=target_trans[0], y=target_trans[1], z=target_trans[2])
                self.gaze_target_pub.publish(point_stamped)

            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue

            # Measure the in-plane distance from the end effector to the target.
            try:
                if self.joints_stopped:
                    count += 1

                (curr_trans, curr_rot) = self.tl.lookupTransform(self.end_effector_frame, entity_tf_frame, rospy.Time(0))

                # Pick the two configured axes to form the 2D distance.
                i = self.get_axis_index(self.axes[0])
                j = self.get_axis_index(self.axes[1])

                y = curr_trans[i]
                z = curr_trans[j]
                distance_to_target = sqrt(y*y + z*z)
                rospy.loginfo('gaze_frame: {0}, entity_tf_frame: {1}, y: {2}, z: {3}, distance: {4}'.format(self.end_effector_frame, entity_tf_frame, y, z, distance_to_target))
                self.send_feedback(distance_to_target)

                print("STOPPED: " + str(self.joints_stopped))

                # Succeed when close enough, or when the joints stopped before
                # the minimum number of stopped iterations elapsed.
                if distance_to_target < self.success_distance or (self.joints_stopped and count < BlenderTargetServer.MIN_ITERATIONS_STOPPED):
                    self.action_server.set_succeeded()
                    print("SUCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCEEEEEEEEEEEEEEEEEEEEEEEDDDDDDDDDDDDDEEEEEEEEEEEEEEEEEEDDDDDDDDDDDDDDDDDDDDd")
                    break

            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue

            self.rate.sleep()

        self.disable_srv()
예제 #53
0
    def _get_pose_3D(self, corners):
        """Estimate the marker's 3D pose from image *corners*, transform it
        into /map and feed the Kalman filter.

        Returns True when the filter was updated, False when the estimate is
        rejected or the transform fails.
        """
        tvec, rvec = self.rect_model.get_pose_3D(corners, cam=self.cam, rectified=True)
        if tvec[2][0] < 0.3:  # Sanity check on position estimate
            rospy.logwarn("Marker too close, must be wrong...")
            return False
        # First column of the rotation matrix gives the marker's direction.
        rmat, _ = cv2.Rodrigues(rvec)
        vec = rmat[:, 0]

        # Convert position estimate and 2d direction vector to messages to they can be transformed
        ps = PointStamped()
        ps.header.frame_id = self.cam.tfFrame()
        ps.header.stamp = self.image_sub.last_image_time
        ps.point = Point(*tvec)
        vec3 = Vector3Stamped()
        vec3.vector.x = vec[0]
        vec3.vector.y = vec[1]
        vec3.header.frame_id = self.cam.tfFrame()
        vec3.header.stamp = self.image_sub.last_image_time
        map_vec3 = None
        map_ps = None

        # Transform pose estimate to map frame
        try:
            self.tf_listener.waitForTransform('/map', ps.header.frame_id, ps.header.stamp, rospy.Duration(0.1))
            map_ps = self.tf_listener.transformPoint('/map', ps)
            map_vec3 = self.tf_listener.transformVector3('/map', vec3)
        except tf.Exception as err:
            rospy.logwarn("Could not transform {} to /map error={}".format(self.cam.tfFrame(), err))
            return False
        # Try to ensure vector always points the same way, so kf is not thrown off at some angles
        if map_vec3.vector.y < 0.0:
            map_vec3.vector.y = -map_vec3.vector.y
            map_vec3.vector.x = -map_vec3.vector.x
        measurement = (map_ps.point.x, map_ps.point.y, map_ps.point.z, map_vec3.vector.y, map_vec3.vector.x)

        # Update filter and found state with the pose estimate from this frame
        self._update_kf(measurement)

        if self.debug_ros:
            # Draw coordinate axis onto object using pose estimate to project
            refs, _ = cv2.projectPoints(self.REFERENCE_POINTS, rvec, tvec, self.cam.intrinsicMatrix(), np.zeros((5, 1)))
            refs = np.array(refs, dtype=np.int)
            cv2.line(self.last_image, (refs[0][0][0], refs[0][0][1]),
                     (refs[1][0][0], refs[1][0][1]), (0, 0, 255))  # X axis refs
            cv2.line(self.last_image, (refs[0][0][0], refs[0][0][1]),
                     (refs[2][0][0], refs[2][0][1]), (0, 255, 0))  # Y axis ref
            cv2.line(self.last_image, (refs[0][0][0], refs[0][0][1]),
                     (refs[3][0][0], refs[3][0][1]), (255, 0, 0))  # Z axis ref
            self._send_debug_marker()
        return True
예제 #54
0
 def ar_cb(self, markers):
     """AR-marker callback: update the mapper with the robot pose X and each
     marker measurement Z, then publish the resulting map."""
     for m in markers.markers:
         self.listener.waitForTransform(self.target_frame,'/%s/ground'% self.name, m.header.stamp, rospy.Duration(1.0))
         self.listener.waitForTransform("/%s/ground"%self.name,m.header.frame_id, m.header.stamp, rospy.Duration(1.0))
         # Robot ground-frame pose in the target frame -> X = [x, y, yaw].
         ((x,y,z),rot) = self.listener.lookupTransform(self.target_frame,'/%s/ground'%self.name, m.header.stamp)
         euler = euler_from_quaternion(rot)
         X = vstack([x,y,euler[2]])
         # Marker position measured in the robot's ground frame -> Z.
         m_pose = PointStamped()
         m_pose.header = m.header
         m_pose.point = m.pose.pose.position
         m_pose = self.listener.transformPoint("/%s/ground"%self.name,m_pose)
         Z = vstack([m_pose.point.x,m_pose.point.y])
         self.mapper.update_ar(Z,X,m.id,self.ar_precision)
     self.mapper.publish(self.target_frame,markers.header.stamp)
예제 #55
0
 def to_point_stamped_msg(*args):
     """Convert any pose representation accepted by PoseConverter into a
     geometry_msgs PointStamped holding the translation part.

     NOTE(review): on failure this returns a 4-tuple of Nones while on
     success it returns a single PointStamped — callers must handle both.
     """
     header, homo_mat, quat_rot, _ = PoseConverter._make_generic(args)
     if homo_mat is None:
         rospy.logwarn("[pose_converter] Unknown pose type.")
         return None, None, None, None
     ps = PointStamped()
     if header is None:
         ps.header.stamp = rospy.Time.now()
     else:
         # header is a (seq, stamp, frame_id) triple.
         ps.header.seq = header[0]
         ps.header.stamp = header[1]
         ps.header.frame_id = header[2]
     # Translation column of the homogeneous transform matrix.
     ps.point = Point(*homo_mat[:3,3].T.A[0])
     return ps
예제 #56
0
def handle_two_points(req):
    '''
        Real_World_width =  distance_camera_to_object * Pixel_width / constant 

    Calculate real world coordinates of desired camera location in the robot frame
    8.79167 is a constant calculated from experimenting with the camera fogal length
    distance_camera_to_object = end_camera_height(in robot frame)+ 0.21658(vertical translation from course to robot)-req.point2.z(height of object)

    Returns a PointStamped in the '/robot' frame at the computed (x, y) with
    a fixed z of 0.150, stamped with the current time.
    '''
    
    #command filter: handles pid range as well as minimum commands
    #if (p < ((diffheight/2)*0.71428571428571))
        #p=

    # Vertical distance between the camera and the object.
    diffheight = (end_camera_z-req.point2.z)
    # Pixel offset -> metres along y, scaled by the empirical 8.79167 constant.
    # NOTE(review): the trailing 0.5 / 0.7 factors look like hand-tuned gains;
    # confirm before changing.
    py=((-req.point2.x + req.point1.x)*(diffheight-0.04)/8.79167)/100*0.5
    print 'py', py
    #ycomp1=((end_camera_x-0.03442)-(p))
    #print "end_camera_height", end_camera_height
    #print "h*p/c" 
    #print "p", p/100
    #xcomp1=((end_camera_x)-25*((req.point2.x + req.point1.x)/(10000000*(end_camera_height+0.18-req.point2.z))))
    #xcomp1=((end_camera_x)+(1.0/840)*(end_camera_height+0.21658+0.15-req.point2.z)*(req.point2.y + req.point1.y))
    
    ycomp1=end_camera_y+py
    d1= end_camera_z
    print 'd1', d1
    px=((req.point2.y - req.point1.y)*(diffheight-0.04)/8.79167)/100*0.7
    print 'px', px
    print 'd1', d1 
    print 'end_camera_x', end_camera_x
    xcomp1=(((end_camera_x)-px))

    #xcomp1=0.034+((end_camera_x)+((-req.point2.y + req.point1.y)*((end_camera_height-req.point2.z)/8.79167)/50*0.2))
    #ycomp1=((end_camera_y)+(1.0/840)*(end_camera_height+0.21658+0.15-req.point2.z)*(req.point2.x + req.point1.x))


    dist1 = (xcomp1, ycomp1, 0.150) 
    dist2 = Point(*dist1)           #convert from list to Point msg
    #print "dist", dist
    #print "Returning vector components [%s , %s to %s , %s = %s]"%(req.point1.x, req.point1.y, req.point2.x, req.point2.y, (dist))
    
    
    dist = PointStamped()               #make a point stamped to send to the arm ontroller
    dist.header.stamp = rospy.Time.now()
    dist.header.frame_id = '/robot'
    dist.point = dist2
    #dist = (header.frame_id, header.stamp, point)
    print dist
    return dist
예제 #57
0
 def ar_cb(self, markers):
     """Feed every visible AR marker (id <= 32) into the mapper as a
     ground-frame position measurement, then publish the map."""
     for marker in markers.markers:
         # Markers above id 32 are not part of the map.
         if marker.id > 32:
             continue
         ground_frame = "/%s/ground" % self.name
         self.listener.waitForTransform(ground_frame, marker.header.frame_id, marker.header.stamp, rospy.Duration(1.0))
         # Measured marker position, expressed in the robot's ground frame.
         measured = PointStamped()
         measured.header = marker.header
         measured.point = marker.pose.pose.position
         measured = self.listener.transformPoint(ground_frame, measured)
         Z = vstack([measured.point.x, measured.point.y])
         # All markers share id 0 when identities are ignored.
         marker_id = 0 if self.ignore_id else marker.id
         self.mapper.update_ar(Z, marker_id, self.ar_precision)
     self.mapper.publish(self.target_frame, markers.header.stamp)
예제 #58
0
 def setUp(self):
     """Publish four corner points on /clicked_point to seed the test."""
     pub = rospy.Publisher('clicked_point', PointStamped, queue_size=4)
     rospy.sleep(1)
     corners = np.array([[0, 0, 0],
                         [10, 0, 0],
                         [10, 10, 0],
                         [0, 10, 0]])
     for corner in corners:
         stamped = PointStamped()
         stamped.header.frame_id = 'a'
         stamped.point = numpy_to_point(corner)
         rospy.loginfo(stamped)
         pub.publish(stamped)
         # Pace the publishes so subscribers can keep up.
         rospy.sleep(1)
     rospy.sleep(1)