Code example #1
def main():
	rospy.init_node('display_grasps',anonymous=True)
	
	parser = argparse.ArgumentParser()
	
	parser.add_argument("name")
	parser.add_argument("selection",nargs='*',type=int)
	parser.add_argument('--frame')
	
	myargv = rospy.myargv()
	del myargv[0]
	
	args = parser.parse_args(myargv)
	print args
	
	pub = rospy.Publisher('grasp_poses',PoseArray)
	
	name = args.name
	print name
	rospy.loginfo('loading object %s', name)
	ref_pc_dir = roslib.packages.get_pkg_subdir('google_goggles','data/points/ref')
	filename = os.path.join(ref_pc_dir, name + '.bag')
	
	bag = rosbag.Bag(filename)
	
	grasps = None
	grasp_poses = None
	for topic, msg, t in bag.read_messages():
		if msg._type == 'google_goggles_msgs/ObjectReferenceData':
			rospy.loginfo('Got reference data on topic %s',topic)
			grasps = msg.grasps
		elif msg._type == 'geometry_msgs/PoseArray':
			grasp_poses = msg
	
	if grasps:
		grasp_poses = PoseArray()
		grasp_poses.poses = [grasp.grasp_pose for grasp in grasps]
		print [grasp.success_probability for grasp in grasps]
		
	
	if args.selection:
		grasp_poses.poses = [grasp_poses.poses[i] for i in args.selection]
	
	grasp_poses.header.stamp = rospy.Time.now()
	if args.frame:
		grasp_poses.header.frame_id = args.frame
	
	rospy.loginfo('Publishing %d poses in frame %s',len(grasp_poses.poses),grasp_poses.header.frame_id)
	pub.publish(grasp_poses)
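A note on this example: the publisher is created without a queue_size (newer rospy versions warn about this), and the single PoseArray is published immediately afterwards, before any subscriber has had time to connect, so the message is often dropped. Below is a minimal sketch of a one-shot publishing pattern, assuming a latched topic is acceptable here.

import rospy
from geometry_msgs.msg import PoseArray

rospy.init_node('display_grasps', anonymous=True)
# Latch the topic so subscribers that connect after publish() still get the message.
pub = rospy.Publisher('grasp_poses', PoseArray, queue_size=1, latch=True)

grasp_poses = PoseArray()
grasp_poses.header.stamp = rospy.Time.now()
pub.publish(grasp_poses)
rospy.spin()  # keep the node alive so the latched message remains available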
Code example #2
def main():
    arg_fmt = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=arg_fmt,
                                     description=main.__doc__)
    required = parser.add_argument_group('required arguments')
    required.add_argument(
        '-l', '--limb', required=True, choices=['left', 'right'],
        help='send joint trajectory to which limb'
    )

    args = parser.parse_args(rospy.myargv()[1:])
    limb = args.limb

    rospy.init_node("estimate_depth")
    
    de = DepthEstimator(limb)
    de.subscribe()
    de.publish()
    print "subscribed"
    rate = rospy.Rate(100)
    while not rospy.is_shutdown():
        if len(de.goal_poses) > 0:
            print de.goal_poses
            pose_msg = PoseArray()
            pose_msg.poses = de.goal_poses
            de.handler_pub.publish(pose_msg)
        rate.sleep()
Code example #3
    def run(self):
        r = rospy.Rate(5)
        while not rospy.is_shutdown():
            # Publish centroid for visualization
            my_point = PointStamped()
            my_point.header.frame_id = 'STAR'
            my_point.point.x = self.centroid[0]
            my_point.point.y = self.centroid[1]
            self.centroid_pub.publish(my_point)
            
            # Publish arena bounds for visualization
            my_polygon = PolygonStamped()
            my_polygon.header.frame_id = 'STAR'
            my_polygon.polygon.points = [
                Point32(x=-0.87, y=0.53, z=0),
                Point32(x=-0.87, y=-3.73, z=0),
                Point32(x=2.2, y=-3.73, z=0),
                Point32(x=2.2, y=0.53, z=0),
            ]
            self.arena_pub.publish(my_polygon)

            # Publish robot poses for visualization
            my_posearray = PoseArray()
            my_posearray.header.frame_id = 'STAR'
            my_posearray.poses = [p for p in self.bot_pos if p is not None]
            self.robot_poses_pub.publish(my_posearray)

            # If we have received a pose for all robots, start sending packets
            if not None in self.bot_pos:
                self.bot_pos_copy = deepcopy(self.bot_pos)
                for i in range(len(self.bot_pos)):
                    self.send_pkt(i)
            r.sleep()
Code example #4
 def update_obstacle_viz(self, inmsg):
     msg = PoseArray()
     msg.header = Header()
     # since the obstacles are derived from the laser, specify that frame id
     msg.header.frame_id = "laser"
     msg.poses = [Pose(o.center, None) for o in inmsg.obstacles]
     self.obstacle_poses_pub.publish(msg)
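One caveat in this example: Pose(o.center, None) leaves the orientation field as None, which rospy rejects when serializing the message. A minimal sketch of the same conversion with an explicit identity quaternion, assuming each obstacle's .center is a geometry_msgs/Point and its orientation is meaningless:

from geometry_msgs.msg import Pose, PoseArray, Quaternion
from std_msgs.msg import Header

def obstacles_to_pose_array(centers, frame_id="laser"):
    # Identity quaternion instead of None: a None field cannot be serialized.
    msg = PoseArray()
    msg.header = Header(frame_id=frame_id)
    msg.poses = [Pose(c, Quaternion(0.0, 0.0, 0.0, 1.0)) for c in centers]
    return msg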
Code example #5
def main():

    arg_fmt = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=arg_fmt,
                                     description=main.__doc__)
    required = parser.add_argument_group('required arguments')
    required.add_argument(
        '-l', '--limb', required=False, choices=['left', 'right'],
        help='send joint trajectory to which limb'
    )

    args = parser.parse_args(rospy.myargv()[1:])
    if args.limb is None:
        limb = 'right'
    else:
        limb = args.limb

    rospy.init_node("get_goal_poses")

    pose_calc = PoseCalculator(limb)
    pose_calc.subscribe()
    pose_calc.publish()
    rate = rospy.Rate(params['rate'])
    while not rospy.is_shutdown():
        if len(pose_calc.goal_poses) > 0:
            print pose_calc.goal_poses
            pose_msg = PoseArray()
            pose_msg.poses = pose_calc.goal_poses
            pose_calc.handler_pub.publish(pose_msg)
        rate.sleep()
Code example #6
File: stair_marker.py Project: YuOhara/jsk_demos
def processFeedback(feedback):
    (frame_transform_pos, frame_transform_rot) = tf_listener.lookupTransform(
        lleg_end_coords, feedback.header.frame_id, rospy.Time(0.0))
    frame_pose = concatenate_matrices(translation_matrix(frame_transform_pos),
                                      quaternion_matrix(frame_transform_rot))
    center_local_pose = poseMsgToMatrix(feedback.pose)
    left_local_offset = translation_matrix([0, foot_margin / 2.0, 0])
    left_local_pose = concatenate_matrices(center_local_pose, left_local_offset)
    right_local_offset = translation_matrix([0, - foot_margin / 2.0, 0])
    right_local_pose = concatenate_matrices(center_local_pose,
                                            right_local_offset)
    left_global_pose = concatenate_matrices(frame_pose, left_local_pose)
    right_global_pose = concatenate_matrices(frame_pose, right_local_pose)
    footsteps = PoseArray()
    footsteps.header.frame_id = lleg_end_coords
    footsteps.header.stamp = feedback.header.stamp
    footsteps.poses = [poseMatrixToMsg(right_global_pose),
                       poseMatrixToMsg(left_global_pose)]
    pub_debug_current_pose_array.publish(footsteps)
    if feedback.menu_entry_id == 0:
        return                  # do nothing
    elif feedback.menu_entry_id == 1: # reset to origin
        server.setPose(feedback.marker_name, poseMatrixToMsg(identity_matrix()))
        server.applyChanges()
        return
    elif feedback.menu_entry_id == 2: # reset orientation to origin
        position = [feedback.pose.position.x,
                    feedback.pose.position.y,
                    feedback.pose.position.z]
        server.setPose(feedback.marker_name, poseMatrixToMsg(translation_matrix(position)))
        server.applyChanges()
        return
    elif feedback.menu_entry_id == 3: # execute
        pub_goal.publish(footsteps)
Code example #7
def publish_poses(pose_array):
	pub = rospy.Publisher('adept_wrist_orientation', PoseArray, queue_size=10)
	pose_msg = PoseArray()
	pose_msg.poses = pose_array
	pose_msg.header.stamp = rospy.Time.now()
	pose_msg.header.frame_id = "/world"
	
	pub.publish(pose_msg)
Code example #8
   def publish_particles(self):
      pose_arr = PoseArray()
      pose_arr.header.stamp = rospy.Time.now()
      pose_arr.header.frame_id = 'map'
      pose_arr.poses = []
      for p in self.particle_ls:
         pose_arr.poses.append(p.pose) 

      self.particle_cloud_pub.publish(pose_arr) 
Code example #9
File: stair_marker.py Project: iory/jsk_demos
def processFeedback(feedback):
    (frame_transform_pos, frame_transform_rot) = tf_listener.lookupTransform(
        lleg_end_coords, feedback.header.frame_id, rospy.Time(0.0))
    frame_pose = concatenate_matrices(translation_matrix(frame_transform_pos),
                                      quaternion_matrix(frame_transform_rot))
    center_local_pose = poseMsgToMatrix(feedback.pose)
    center_local_pos = translation_from_matrix(center_local_pose)
    left_local_offset = translation_matrix([0, foot_margin / 2.0, 0])
    left_local_pose = concatenate_matrices(center_local_pose, left_local_offset)
    right_local_offset = translation_matrix([0, - foot_margin / 2.0, 0])
    right_local_pose = concatenate_matrices(center_local_pose,
                                            right_local_offset)
    left_global_pose = concatenate_matrices(frame_pose, left_local_pose)
    right_global_pose = concatenate_matrices(frame_pose, right_local_pose)

    left_global_pos = translation_from_matrix(left_global_pose)
    right_global_pos = translation_from_matrix(right_global_pose)
    footsteps = PoseArray()
    footsteps.header.frame_id = lleg_end_coords
    footsteps.header.stamp = feedback.header.stamp
    footsteps.poses = [poseMatrixToMsg(right_global_pose),
                       poseMatrixToMsg(left_global_pose)]
    pub_debug_current_pose_array.publish(footsteps)
    # check distance
    need_to_fix = False
    if (feedback.event_type == InteractiveMarkerFeedback.MOUSE_UP and 
        (abs(center_local_pos[0]) + 0.01> max_x or
         abs(center_local_pos[2]) + 0.01> max_z or
         center_local_pos[2] - 0.01 < 0)):
        if abs(center_local_pos[0]) > max_x:
            center_local_pos[0] = max_x * center_local_pos[0] / abs(center_local_pos[0])
        if center_local_pos[2] < 0:
            center_local_pos[2] = 0
        elif abs(center_local_pos[2]) > max_z:
            center_local_pos[2] = max_z
        rospy.logwarn("need to reset")
        new_center_pose = translation_matrix(center_local_pos)
        server.setPose(feedback.marker_name, poseMatrixToMsg(new_center_pose))
        server.applyChanges()
    elif feedback.menu_entry_id == 0:
        return                  # do nothing
    elif feedback.menu_entry_id == 1: # reset to origin
        server.setPose(feedback.marker_name, poseMatrixToMsg(identity_matrix()))
        server.applyChanges()
        return
    elif feedback.menu_entry_id == 2: # reset orientation to origin
        position = [feedback.pose.position.x,
                    feedback.pose.position.y,
                    feedback.pose.position.z]
        server.setPose(feedback.marker_name, poseMatrixToMsg(translation_matrix(position)))
        server.applyChanges()
        return
    elif feedback.menu_entry_id == 3: # execute
        pub_goal.publish(footsteps)
Code example #10
def transformed_waypoints():
    pub = rospy.Publisher("transformed_waypoints", PoseArray, queue_size=10)
    rospy.init_node("transformed_waypoints", anonymous=True)
    rate = rospy.Rate(1)  # 1 Hz
    b = Bagger()
    posarr = PoseArray()
    posarr.header.frame_id = "r_wrist_roll_link"
    posarr.header.stamp = rospy.get_rostime()
    posarr.poses = b.getTransformedWaypointsPoses()

    while not rospy.is_shutdown():
        pub.publish(posarr)
        rate.sleep()
Code example #11
File: updateParticle.py Project: JamesKlee/part3
	def smudge_amcl(self, resampledPoses):
				
		cont = True
		pArray = PoseArray()

		if len(resampledPoses):
			temp = []
			val = Pose()
			count = 0
			
			# TEST this value, rounding scalar
			scale = 0.75
	
			while cont:
				temp = []
				val = resampledPoses[0]
				count = 0
	
				for i in range(0, len(resampledPoses)):
					if (resampledPoses[i] == val):
						count = count + 1
					else:
						temp.append(resampledPoses[i])
	
				resampledPoses = temp
				if (len(resampledPoses) == 0):
					cont = False
						
				# THIS NEEDS TESTS, look at scalar above
				if (count > 4) and len(resampledPoses) >= 50: #TEST
					#count = count - 2
					count = int(count * scale)
						
				for i in range(0, count):
					if i > 0:
						newPose = Pose()
						newPose.position.x = random.gauss(val.position.x, 0.15) #TEST THIS
						newPose.position.y = random.gauss(val.position.y, 0.15) #TEST THIS
						newPose.orientation = rotateQuaternion(val.orientation, random.vonmisesvariate(0, 7)) #TEST THIS
						pArray.poses.append(newPose)
						
					else:
						pArray.poses.append(val)
		else:
			pArray.poses = []

		return pArray
Code example #12
File: vision_stuff.py Project: arii/pr2_awesomeness
    def broadcast_objects(self, image_cv):
        image_cv, results  = self.detector.find_objects(image_cv)
        poses = []
        for result in results:
            center, (width,height), rotation  = result
            pos = Point(*self.pixel_to_base_link(image_cv, center))
            pos.z += 0.04
            #q = Quaternion(x=rotation) # placeholder
            poses.append( Pose(pos, Quaternion(1, 0, 0, 0) )) # placeholder)
        stamped_poses = PoseArray()
        stamped_poses.header.frame_id = "base_link"
        stamped_poses.header.stamp  = rospy.Time(0)
        stamped_poses.poses = poses
        
        self.pub_poses.publish(stamped_poses)

        return image_cv
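Worth noting: geometry_msgs/Quaternion takes its fields positionally as (x, y, z, w), so the placeholder Quaternion(1, 0, 0, 0) above is a 180-degree roll rather than "no rotation". A minimal sketch of an identity placeholder:

from geometry_msgs.msg import Quaternion

# Identity orientation: x, y, z are zero and w is one.
identity = Quaternion(0.0, 0.0, 0.0, 1.0)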
Code example #13
    def publish_information(self):
        information = PoseArray()

        for player in self.data_base.team['blue']:
            player_pose = Pose()
            ball_pose_gl = Pose()
            ball_pose_lc = Pose()
            ball_velo = Pose()

            player_pose.position.x, player_pose.position.y, player_pose.position.z = player.get_pos()
            ball_pose_gl.position.x, ball_pose_gl.position.y, _ = self.ball.get_pos(player.color)
            ball_velo.position.x = (self.ball.pos[0] - self.ball.past_pos[0])*10.0 
            ball_velo.position.y = (self.ball.pos[1] - self.ball.past_pos[1])*10.0

            ball_lc = geometry.coord_trans_global_to_local(player.get_pos(), self.ball.get_pos(player.color))
            ball_pose_lc.position.x, ball_pose_lc.position.y, _ = ball_lc

            information.poses = [player_pose, ball_pose_gl, ball_pose_lc, ball_velo]
            if ball_velo.position.x !=0 or ball_velo.position.y !=0:
                #print("engine:", ball_velo.position.x, ball_velo.position.y)
                pass
            self.pub.publish(information)
Code example #14
    def publish_frontiers(self):
        def make_pose(frontier):
            pose = Pose()
            quat = quaternion_from_euler(0.0, 0.0,
                                         frontier[2])  # roll, pitch, yaw
            pose.orientation = Quaternion(*quat.tolist())
            pose.orientation.x = quat[0]
            pose.orientation.y = quat[1]
            pose.orientation.z = quat[2]
            pose.orientation.w = quat[3]
            pose.position.x = frontier[0]
            pose.position.y = frontier[1]
            pose.position.z = 0
            return pose

        poseArray = PoseArray()
        poseArray.header.frame_id = self.robotname + "/map"
        poseArray.header.stamp = rospy.Time()
        poseArray.header.seq = self.frontierFrames
        self.frontierFrames += 1
        poseArray.poses = map(make_pose, self.frontiers)
        self.frontier_publisher.publish(poseArray)
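Two small points about this example: the orientation is assigned twice (once via Quaternion(*quat.tolist()) and again component by component), and map(make_pose, ...) returns an iterator under Python 3, which rospy cannot serialize directly. A minimal Python 3-friendly sketch of the same construction, assuming each frontier is an (x, y, yaw) triple:

from geometry_msgs.msg import Point, Pose, PoseArray, Quaternion
from tf.transformations import quaternion_from_euler

def frontiers_to_pose_array(frontiers, frame_id):
    pose_array = PoseArray()
    pose_array.header.frame_id = frame_id
    # A list (not a lazy map object) so the message serializes cleanly.
    pose_array.poses = [
        Pose(position=Point(x, y, 0.0),
             orientation=Quaternion(*quaternion_from_euler(0.0, 0.0, yaw)))
        for x, y, yaw in frontiers
    ]
    return pose_array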
Code example #15
def pubhumans():
    global pub3
    global people
    poses = PoseArray()
    poses.header.frame_id = "map"
    ps = []
    flag = False
    for person in people:
        flag = True
        if person.moving:
            p = Pose()
            p.position.x = float(person.x)
            p.position.y = float(person.y)
            p.orientation.x = 1
            p.orientation.y = 0.001
            p.orientation.z = 0
            p.orientation.w = 0
            if person.vx != 0 and person.vy != 0:
                p.orientation = rotateQuaternion(p.orientation, math.atan2(person.vy, person.vx))
            ps.append(p)
    poses.poses = ps
    pub3.publish(poses)
Code example #16
File: robot.py Project: ameliavu/robotloc
    def resample(self):

        spin = random.uniform(0, 1.0 / self.n - 0.000001)

        bound_map = []
        p_sum = 0.0
        index = 0
        for i in range(len(self.particle_array)):
            p_sum += self.particle_array[i].weight
            print "p_sum " + str(p_sum)
            bound_map.append(p_sum)
        new_particles = []
        k = 0.0
        pose_array = PoseArray()
        pose_array.header.stamp = rospy.Time.now()
        pose_array.header.frame_id = 'map'
        pose_array.poses = []
        for i in range(len(self.particle_array)):
            rand_num = i * 1.0 / self.n + spin
            # rand_num = random.uniform(0,1)
            for j in range(len(self.particle_array)):
                if bound_map[j] >= rand_num:
                    index = j
                    break
            picked = self.particle_array[index]
            new_x = picked.x + random.gauss(0, self.config['resample_sigma_x'])
            new_y = picked.y + random.gauss(0, self.config['resample_sigma_y'])
            new_theta = picked.theta + random.gauss(
                0, self.config['resample_sigma_angle'])
            new_particle = Particle(new_x, new_y, new_theta, picked.weight)
            k = k + picked.weight
            new_particles.append(new_particle)
            pose = get_pose(new_x, new_y, new_theta)
            pose_array.poses.append(pose)
        for p in new_particles:
            p.weight = float(p.weight / k)
        self.particle_array = deepcopy(new_particles)
        # publish to particlecloud
        self.pose_publisher.publish(pose_array)
Code example #17
    def generate_viewpoints(self):
        """
        Given a set of random viewpoints in a roi, filter those too close, and define a yaw for each.
            1. needs to be further away than the lower threshold
            2. must be in the same roi as WP
            3. create yaw of robot view
            N/A. as a backup, calculate the viewpoint from the waypoint
        """
        view_goals = PoseArray()
        view_goals.header.frame_id = "/map"
        poses, yaws, dists = [], [], []

        obj = self.selected_object_pose
        for cnt, p in enumerate(self.possible_nav_goals.goals.poses):
            x_dist = p.position.x - obj.position.x
            y_dist = p.position.y - obj.position.y

            # print "xDist %s, yDist %s" %(x_dist, y_dist)
            dist = abs(math.hypot(x_dist, y_dist))
            if dist > self.view_dist_thresh_low:
                if self.robot_polygon.contains(
                        Point([p.position.x, p.position.y])):
                    # yaw = math.atan2(y_dist, x_dist)
                    p = self.add_quarternion(p, x_dist, y_dist)
                    poses.append(p)
                    # yaws.append(yaw)
                    # dists.append( (x_dist, y_dist) )

        view_goals.poses = poses
        self.pub_all_views.publish(view_goals)
        self.possible_poses = poses
        # self.possible_yaws = yaws

        # add the viewpoint from the waypoint - as a back up if all others fail
        x_dist = self.robot_pose.position.x - obj.position.x
        y_dist = self.robot_pose.position.y - obj.position.y
        dist = abs(math.hypot(x_dist, y_dist))
        self.robot_pose = self.add_quarternion(self.robot_pose, x_dist, y_dist)
Code example #18
    def visualise_trajectory(self, waypoints):
        """Publish PoseArray representing the trajectory
        :waypoints: list of Waypoint obj
        :returns: None

        """
        pose_array = PoseArray()
        pose_array.header.frame_id = self.frame
        pose_array.header.stamp = rospy.Time.now()
        poses = []
        current_time = 0.0
        delta_time = 0.1
        x, y, theta = self.current_position
        last_wp_time = 0.0
        for self._vel_curve_handler.trajectory_index, wp in enumerate(
                waypoints[1:]):
            while current_time < wp.time:
                # current position has to be given (0,0,0) because the cmd_vel
                # are applied in robot's frame whereas the poses are published
                # in global frame
                x_vel, y_vel, theta_vel = self._vel_curve_handler.get_vel(
                    current_time - last_wp_time,
                    current_position=(x, y, theta))
                # current_position=(0.0, 0.0, 0.0))
                x += x_vel * math.cos(theta) * delta_time - y_vel * math.sin(
                    theta) * delta_time
                y += x_vel * math.sin(theta) * delta_time + y_vel * math.cos(
                    theta) * delta_time
                theta += theta_vel * delta_time
                pose = Utils.get_pose_from_x_y_theta(x, y, theta)
                poses.append(pose)
                current_time += delta_time
                # time.sleep(delta_time)
                # pose_array.poses = poses
                # self._trajectory_pub.publish(pose_array)
            last_wp_time = wp.time
        pose_array.poses = poses
        self._trajectory_pub.publish(pose_array)
Code example #19
def metaclustering(clusters):
    cluster_poses = []
    for cluster in clusters:
        pose = Pose()
        pose.position.x = cluster[0]
        pose.position.y = cluster[1]
        cluster_poses.append(pose)
    pose_arr = PoseArray()
    pose_arr.poses = cluster_poses
    rospy.wait_for_service('/metaclustering')
    try:
        master_pose_rec_service = rospy.ServiceProxy('/metaclustering',
                                                     ClusterLocs)
        runtime = []
        runtime.append([get_size(pose_arr) * len(pose_arr.poses), 0])
        with open(
                "/DataStorage/clustering_runtime_send" +
                rospy.get_namespace().replace("/", '') + ".csv", "wb") as f:
            writer = csv.writer(f)
            writer.writerows(runtime)
        master_pose_rec_service(pose_arr)
    except rospy.ServiceException, e:
        print "Service call failed: %s" % e
Code example #20
File: pub_teststates.py Project: rahul003/catkin_ros
def pub_teststates():
	pub = rospy.Publisher('pub_teststates', PoseArray, queue_size=10)
	rospy.init_node('pub_teststates', anonymous=True)
	rate = rospy.Rate(1) # 1 Hz

	a = Bagger(filename=constants.TEST_FILE)
	markers = a.getMarkers()
	start_m = markers[0]
	goal_m = markers[-1]
	sg_markers = [start_m, goal_m]

	b = Bagger(filename=constants.DATA_FILE)
	# tranformed_sg_markers = b.transformStartGoal(sg_markers)
	
	posarr = PoseArray()
	posarr.header.frame_id = 'r_wrist_roll_link'
	posarr.header.stamp = rospy.get_rostime()
	posarr.poses = b.transformStartGoal(sg_markers)

	while not rospy.is_shutdown():
		pub.publish(posarr)
		rate.sleep()
		print "publishing transformed_start_goal"
Code example #21
    def getMap(self, data):
        #check if we know the goal and position
        if hasattr(self, "posx") and hasattr(self, "posy") and hasattr(
                self, "goalx") and hasattr(self, "goaly"):
            m = data.data  #m is a 2d array to be passed to the A*
            print("MAP: " + str(m))
            #run the astar algo on the occupancy grid
            path = aStar.aStar((self.posy, self.posx), (self.goaly, self.goalx),
                               m,
                               free=255)

            #convert the y,x tuples to poses
            poses = []
            for i in path:
                p = Pose()
                p.position.x = i[0]
                p.position.y = i[1]
                poses.append(p)
            pa = PA()
            pa.poses = poses
            self.pub.publish(pa)
        else:
            print("Unknown start/stop")
Code example #22
    def publishParticles(self):
        """
        Function responsible for publishing the particles
        """

        # http://docs.ros.org/en/melodic/api/geometry_msgs/html/msg/PoseArray.html
        # http://docs.ros.org/en/diamondback/api/geometry_msgs/html/msg/PoseArray.html
        # https://answers.ros.org/question/60209/what-is-the-proper-way-to-create-a-header-with-python/
        # https://www.programcreek.com/python/example/86351/std_msgs.msg.Header

        particleCloud = PoseArray()

        # Create PoseArray header
        particleCloud.header = std_msgs.msg.Header()
        particleCloud.header.stamp = rospy.Time.now()
        particleCloud.header.frame_id = "map"

        particlePoses = []
        for i in range(self.numParticles):
            pose = Pose()
            pose.position.x = self.particlesPose[i, 0]
            pose.position.y = self.particlesPose[i, 1]

            # https://answers.ros.org/question/69754/quaternion-transformations-in-python/
            quaternion = tf.transformations.quaternion_from_euler(
                0, 0, self.particlesPose[i, 2])

            pose.orientation.x = quaternion[0]
            pose.orientation.y = quaternion[1]
            pose.orientation.z = quaternion[2]
            pose.orientation.w = quaternion[3]

            particlePoses.append(pose)

        particleCloud.poses = particlePoses

        self.particlePublisher.publish(particleCloud)
Code example #23
    def get_eef_poses_from_trajectory(self, trajectory):
        """
        For a RobotTrajectory, determine the end-effector pose at each 
        time step along the trajectory.
        
        TODO: test this function
        """

        # Create the output array
        poses_out = PoseArray()
        poses_out.header.seq = copy.deepcopy(
            trajectory.joint_trajectory.header.seq)
        poses_out.header.stamp = rospy.Time.now()
        poses_out.header.frame_id = trajectory.joint_trajectory.header.frame_id

        # Loop setup
        poses_tot = len(trajectory.joint_trajectory.points)
        pose_list = [None] * poses_tot
        joint_names = trajectory.joint_trajectory.joint_names

        # Build array of 'PoseStamped' waypoints
        for i in range(0, poses_tot):

            # Get the pose using FK
            joint_states = trajectory.joint_trajectory.points[i].positions
            time = trajectory.joint_trajectory.points[i].time_from_start
            resp = self.fk_solve(joint_states, joint_names)

            # Put the pose into list
            ps = resp.pose_stamped
            ps.header.seq = i
            ps.header.stamp = time
            pose_list[i] = ps

        # Put generated list into the PoseArray
        poses_out.poses = pose_list

        return poses_out
Code example #24
    def pub(self):
        particle_pose = PoseArray()
        particle_pose.header.frame_id = 'map'
        particle_pose.header.stamp = rospy.Time.now()
        particle_pose.poses = []
        estimated_pose = PoseWithCovarianceStamped()
        estimated_pose.header.frame_id = 'map'
        estimated_pose.header.stamp = rospy.Time.now()
        estimated_pose.pose.pose.position.x = np.mean(self.particles[:, 0])
        estimated_pose.pose.pose.position.y = np.mean(self.particles[:, 1])
        estimated_pose.pose.pose.position.z = 0.0
        quaternion = tf_conversions.transformations.quaternion_from_euler(
            0, 0, np.mean(self.particles[:, 2]))
        estimated_pose.pose.pose.orientation = geometry_msgs.msg.Quaternion(
            *quaternion)

        for ii in range(self.Np):
            pose = geometry_msgs.msg.Pose()
            point_P = (self.particles[ii, 0], self.particles[ii, 1], 0.0)
            pose.position = geometry_msgs.msg.Point(*point_P)

            quaternion = tf_conversions.transformations.quaternion_from_euler(
                0, 0, self.particles[ii, 2])
            pose.orientation = geometry_msgs.msg.Quaternion(*quaternion)

            particle_pose.poses.append(pose)

        self.pub_particlecloud.publish(particle_pose)
        self.pub_estimated_pos.publish(estimated_pose)
        #print(self.laser_frame)
        self.laser_tf_br.sendTransform(
            (np.mean(self.particles[:, 0]), np.mean(self.particles[:, 1]), 0),
            (estimated_pose.pose.pose.orientation.x,
             estimated_pose.pose.pose.orientation.y,
             estimated_pose.pose.pose.orientation.z,
             estimated_pose.pose.pose.orientation.w), rospy.Time.now(),
            self.laser_frame, "map")
Code example #25
File: localizer.py Project: AloePacci/RosPackage
    def publish(self):
        """Function to publish the particle cloud for visualization."""

        cloud_msg = PoseArray()
        cloud_msg.header.stamp = rospy.get_rostime()
        cloud_msg.header.frame_id = 'map'

        cloud_msg.poses = []
        for i in range(self.num_particles):
            p = Pose()
            p.position.x = self.particles[i].x
            p.position.y = self.particles[i].y
            p.position.z = 0.0

            quaternion = tf.transformations.quaternion_from_euler(
                0.0, 0.0, self.particles[i].theta)
            p.orientation.x = quaternion[0]
            p.orientation.y = quaternion[1]
            p.orientation.z = quaternion[2]
            p.orientation.w = quaternion[3]

            cloud_msg.poses.append(p)

        self.particles_publisher.publish(cloud_msg)
Code example #26
    def visualize(self):
        self.state_lock.acquire()
        # (1) Publish tf between map and the laser. Depends on the expected pose.
        pose = self.expected_pose()
        tf_time = self.publish_tf(pose)
        # (2) Publish most recent laser measurement
        msg = self.sensor_model.last_laser
        msg.header.frame_id = "laser"
        msg.header.stamp = tf_time

        self.pub_laser.publish(msg)
        # (3) Publish PoseStamped message indicating the expected pose of the car.
        pose_msg = PoseStamped()
        pose_msg.header.seq = self.viz_seq
        pose_msg.header.stamp = rospy.Time.now()
        pose_msg.header.frame_id = "map"
        pose_msg.pose = utils.point_to_pose(pose)
        self.pose_pub.publish(pose_msg)

        # (4) Publish a subsample of the particles
        particle_indices = np.random.choice(np.arange(self.particles.shape[0]),
                                            size=self.MAX_VIZ_PARTICLES,
                                            replace=True,
                                            p=self.weights)
        particle_sample = self.particles[particle_indices]
        particles_msg = PoseArray()
        particles_msg.header.seq = self.viz_seq
        particles_msg.header.stamp = rospy.Time.now()
        particles_msg.header.frame_id = "map"
        particles_msg.poses = [utils.point_to_pose(p) for p in particle_sample]
        self.particle_pub.publish(particles_msg)

        #rospy.logerr(particles_msg)

        self.viz_seq += 1
        self.state_lock.release()
Code example #27
 def publish_particles(self, particles):
     # publish the given particles as a PoseArray object
     pa = PoseArray()
     pa.header = Utils.make_header("map")
     pa.poses = Utils.particles_to_poses(particles)
     self.particle_pub.publish(pa)
Code example #28
File: robot.py Project: twluo/ParticleRobo
 def poseArrayTemplate(self):
     pose_array = PoseArray()
     pose_array.header.stamp = rospy.Time.now()
     pose_array.header.frame_id = 'map'
     pose_array.poses =[]
     return pose_array
Code example #29
def create_in_place_rotation_trajectory(start_pose, goal_pose, ang_rate,
                                        ref_frame):
    """
    Creates a simple trajectory in which the object is rotated at a given
    angular rate to a new orientation with no positional change. Assumes 
    no obstacles are on the path for simplicity.
    """

    timestep = 0.1

    # Get start and end rotations in euler 'xyz' angles
    start_angles = tf.transformations.euler_from_quaternion([
        start_pose.orientation.x, start_pose.orientation.y,
        start_pose.orientation.z, start_pose.orientation.w
    ])
    goal_angles = tf.transformations.euler_from_quaternion([
        goal_pose.orientation.x, goal_pose.orientation.y,
        goal_pose.orientation.z, goal_pose.orientation.w
    ])

    #Get the angular distance between the start and goal angles for each stationary axis
    angle_x = goal_angles[0] - start_angles[0]
    angle_y = goal_angles[1] - start_angles[1]
    angle_z = goal_angles[2] - start_angles[2]
    total_angle = math.sqrt(angle_x**2 + angle_y**2 + angle_z**2)

    # Calculate the number of waypoints
    angle_time = total_angle / ang_rate
    angle_wp_len = int(math.ceil(angle_time / timestep))

    # Fill out array header
    trajectory = PoseArray()
    trajectory.header.seq = 0
    trajectory.header.frame_id = str(ref_frame)

    # Setup loop
    ax_step = (goal_angles[0] - start_angles[0]) / angle_wp_len
    ay_step = (goal_angles[1] - start_angles[1]) / angle_wp_len
    az_step = (goal_angles[2] - start_angles[2]) / angle_wp_len
    pose_list = [None] * (angle_wp_len + 1)
    timestamps = []

    # Fill out poses and timesteps
    for i in range(0, angle_wp_len):

        # Get orientation
        pose_ax_i = start_angles[0] + ax_step * i
        pose_ay_i = start_angles[1] + ay_step * i
        pose_az_i = start_angles[2] + az_step * i

        # Convert representation to quaternion
        quat_i = tf.transformations.quaternion_from_euler(
            pose_ax_i, pose_ay_i, pose_az_i)

        # Put into pose, then pose list
        pose_i = Pose()
        pose_i.orientation.x = quat_i[0]
        pose_i.orientation.y = quat_i[1]
        pose_i.orientation.z = quat_i[2]
        pose_i.orientation.w = quat_i[3]
        pose_i.position = start_pose.position
        pose_list[i] = pose_i

        # Get timestamp
        sec, nsec = get_stamp(i * timestep)
        timestamps.append({"secs": sec, "nsecs": nsec})

    # Place the goal point at the end as well
    temp_pose = copy.deepcopy(goal_pose)
    pose_list[angle_wp_len] = temp_pose
    sec, nsec = get_stamp((i + 1) * timestep)
    timestamps.append({"secs": sec, "nsecs": nsec})

    # Package result
    trajectory.poses = pose_list

    return trajectory, timestamps
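One edge case in this helper: if the start and goal orientations coincide, total_angle is zero, angle_wp_len comes out as 0, and the per-axis step divisions (as well as the use of i after the loop) fail. A minimal sketch of a guard, assuming that collapsing to a single waypoint is an acceptable fallback:

import math

def waypoint_count(total_angle, ang_rate, timestep=0.1):
    # Never fewer than one interpolation step, even for a zero rotation.
    angle_time = total_angle / ang_rate
    return max(1, int(math.ceil(angle_time / timestep)))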
Code example #30
def create_pick_and_place_trajectory(start_pose, goal_pose, lift_height, speed,
                                     ref_frame):
    """
    Creates a simple pick and place trajectory composed of three smaller
    sub-trajectories, with no change in the orientation of the target
    object. It involves picking up the target object by a set height off
    the surface, moving to a point at the same set height above the goal 
    position, and then placing the object at the goal position. Assumes
    no obstacles are on the path for simplicity.
    
    TODO: REWORK TO USE THE SIMPLER TRAJECTORY FOUND ABOVE AND WITHOUT 
    POSESTAMPED.
    """

    timestep = 0.1
    trajectories = [None] * 3

    # Create intermediate waypoints
    waypoint_1 = copy.deepcopy(start_pose)
    waypoint_2 = copy.deepcopy(goal_pose)
    waypoint_1.position.z += lift_height
    waypoint_2.position.z += lift_height

    # Get distances between the waypoints 1 and 2 ('traverse')
    delta_x = waypoint_2.position.x - waypoint_1.position.x
    delta_y = waypoint_2.position.y - waypoint_1.position.y
    delta_z = waypoint_2.position.z - waypoint_1.position.z
    traverse_len = math.sqrt(delta_x**2 + delta_y**2 + delta_z**2)

    # Calculate the number of waypoints along each path (round to next-highest int)
    lift_time = lift_height / speed
    lift_wp_len = int(math.ceil(lift_time / timestep))
    traverse_time = traverse_len / speed
    traverse_wp_len = int(math.ceil(traverse_time / timestep))

    # Setup loop
    list_lens = [lift_wp_len, traverse_wp_len, lift_wp_len]
    points = [start_pose, waypoint_1, waypoint_2, goal_pose]

    # Create the trajectories for each subtrajectory
    for i in range(0, 3):

        # Fill out array header
        trajectory = PoseArray()
        trajectory.header.seq = i
        trajectory.header.frame_id = str(ref_frame)

        # Setup nested loop
        x_step = (points[i + 1].position.x -
                  points[i].position.x) / list_lens[i]
        y_step = (points[i + 1].position.y -
                  points[i].position.y) / list_lens[i]
        z_step = (points[i + 1].position.z -
                  points[i].position.z) / list_lens[i]
        pose_list = [None] * (list_lens[i] + 1)

        # Fill out poses
        for j in range(0, list_lens[i]):
            pose_j = PoseStamped()
            pose_j.header.seq = j
            pose_j.header.stamp = rospy.Time.from_seconds(j * timestep)
            pose_j.header.frame_id = ref_frame
            pose_j.pose.position.x = points[i].position.x + x_step * j
            pose_j.pose.position.y = points[i].position.y + y_step * j
            pose_j.pose.position.z = points[i].position.z + z_step * j
            pose_j.pose.orientation = start_pose.orientation
            pose_list[j] = pose_j

        # Place the goal point at the end as well
        temp_pose = PoseStamped()
        temp_pose.header.seq = j + 1
        temp_pose.header.stamp = rospy.Time.from_seconds((j + 1) * timestep)
        temp_pose.header.frame_id = ref_frame
        temp_pose.pose = copy.deepcopy(points[i + 1])
        pose_list[list_lens[i]] = temp_pose

        # Package result
        trajectory.poses = pose_list
        trajectories[i] = trajectory

    return trajectories
Code example #31
File: robot.py Project: s4byun/ROS
 def publish_pose(self,poses):
     pose_array = PoseArray()
     pose_array.header.stamp = rospy.Time.now()
     pose_array.header.frame_id = 'map'
     pose_array.poses = poses
     self.particle_pub.publish(pose_array)
Code example #32
def convert_PoseWithCovArray_to_PoseArray(waypoints):
    """Used to publish waypoints as pose array so that you can see them in rviz, etc."""
    poses = PoseArray()
    poses.header.frame_id = 'map'
    poses.poses = [pose.pose.pose for pose in waypoints]
    return poses
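A hypothetical usage sketch for this converter (the node name, topic name, and placeholder waypoint list are assumptions): stamp the converted array before publishing so RViz can place it in time as well as in the 'map' frame.

import rospy
from geometry_msgs.msg import PoseArray, PoseWithCovarianceStamped

rospy.init_node('waypoint_viz')
viz_pub = rospy.Publisher('waypoint_poses', PoseArray, queue_size=1, latch=True)

waypoints = [PoseWithCovarianceStamped()]  # placeholder input
poses = convert_PoseWithCovArray_to_PoseArray(waypoints)
poses.header.stamp = rospy.Time.now()  # the converter only sets frame_id
viz_pub.publish(poses)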
Code example #33
	def get_tags(self, msg):

		# Initialize the COG as a PoseStamped message
		tag_cog = PoseStamped()
		tag_cog_array = PoseArray()
		tag_cog_array4 = PoseArray()
		middle_pose = Pose()

		# Get the number of markers
		n = len(msg.markers)

		global avail_pair
		global avail_tags
		global counter
		global tag_marker_array
		global tagCount

		# If no markers detected, just return
		if n == 0:
			return

		#print "test point 2"

		#tag_cog_array.poses = []
		# Iterate through the tags and sum the x, y and z coordinates
		for tag in msg.markers:

			# Skip any tags that are not in our list
			if self.tag_ids is not None and not tag.id in self.tag_ids:
				continue

			#calculate_distance(tag.pose.pose.position.x, tag.pose.pose.position.y)
			if tag.id in tag_states:
				print "in it"
				if tag.id not in avail_tags:
					avail_tags.append(tag.id)
					#tagCount = tag_number_dict[String(avail_tags)]
					print "String(tag.id)"
					x = str(tag.id)
					print x
					if str(tag.id) in tag_number_dict:
						tagCount = tag_number_dict[str(tag.id)]
						print "tagCount"
						print tagCount
				print "avail_tags"
				print sorted(avail_tags)
				#tag_cog_array.poses.append(middle_pose)



				if tag.id not in avail_pair:
					avail_pair.append(tag.id)
					tag_marker_array.append(tag)
					tag_cog_array.poses.append(middle_pose)
					#print "loop"
					#print len(tag_cog_array.poses)

				#tag_state_pub = rospy.Publisher("id_state", Int64, queue_size=5)
				#tag_state_pub.publish(tag.id)

				if len(avail_pair) >= (tagCount+1):
					avail_pair = []
					tag_cog_array.poses = []
					tag_marker_array = []



				avail_pair = sorted(avail_pair)
				print "avail_pair "
				print avail_pair 
				if avail_pair in tag_pairs:
					#print "tag_marker_array"
					#print tag_marker_array

					#print len(tag_marker_array)

					"""for eachpair in tag_marker_array:
						middle_pose.position.x = eachpair.pose.pose.position.x
						middle_pose.position.y = eachpair.pose.pose.position.y
						middle_pose.position.z = eachpair.pose.pose.position.z
						middle_pose.orientation.x = eachpair.pose.pose.orientation.x
						middle_pose.orientation.y = eachpair.pose.pose.orientation.y
						middle_pose.orientation.z = eachpair.pose.pose.orientation.z
						middle_pose.orientation.w = eachpair.pose.pose.orientation.w

						#tag_cog_array.poses.append(middle_pose)
						tag_cog_array.poses = addMatrix(tag_cog_array.poses, middle_pose)
						tag_cog_array.header.stamp = rospy.Time.now()
						tag_cog_array.header.frame_id = msg.markers[0].header.frame_id
						print eachpair.id

					i = 1
					tag_cog_array.poses[i].position.x = tag_marker_array[i].pose.pose.position.x
					tag_cog_array.poses[i].position.y = tag_marker_array[i].pose.pose.position.y
					tag_cog_array.poses[i].position.z = tag_marker_array[i].pose.pose.position.z
					tag_cog_array.poses[i].orientation.x = tag_marker_array[i].pose.pose.orientation.x
					tag_cog_array.poses[i].orientation.y = tag_marker_array[i].pose.pose.orientation.y
					tag_cog_array.poses[i].orientation.z = tag_marker_array[i].pose.pose.orientation.z
					tag_cog_array.poses[i].orientation.w = tag_marker_array[i].pose.pose.orientation.w
					#tag_cog_array.poses.append(middle_pose)
					#tag_cog_array.poses[i] = middle_pose"""
					if tagCount==2:
						i = 0
						mid = Pose()
						middle_pose.position.x = tag_marker_array[i].pose.pose.position.x
						middle_pose.position.y = tag_marker_array[i].pose.pose.position.y
						middle_pose.position.z = tag_marker_array[i].pose.pose.position.z
						middle_pose.orientation.x = tag_marker_array[i].pose.pose.orientation.x
						middle_pose.orientation.y = tag_marker_array[i].pose.pose.orientation.y
						middle_pose.orientation.z = tag_marker_array[i].pose.pose.orientation.z
						middle_pose.orientation.w = tag_marker_array[i].id

						i = 1
						mid.position.x = tag_marker_array[i].pose.pose.position.x
						mid.position.y = tag_marker_array[i].pose.pose.position.y
						mid.position.z = tag_marker_array[i].pose.pose.position.z
						mid.orientation.x = tag_marker_array[i].pose.pose.orientation.x
						mid.orientation.y = tag_marker_array[i].pose.pose.orientation.y
						mid.orientation.z = tag_marker_array[i].pose.pose.orientation.z
						mid.orientation.w = tag_marker_array[i].pose.pose.orientation.w

						if len(tag_cog_array.poses) == 1:
							tag_cog_array.poses.append(mid)

						print "length"
						print len(tag_cog_array.poses)

						if len(tag_cog_array.poses) == 2:

							tag_cog_array.poses[0] = middle_pose
							tag_cog_array.poses[1] = mid

							tag_cog_array.header.stamp = rospy.Time.now()
							tag_cog_array.header.frame_id = msg.markers[0].header.frame_id
								#print eachpair.id

							A = np.array((tag_marker_array[0].pose.pose.position.x, tag_marker_array[0].pose.pose.position.y, tag_marker_array[0].pose.pose.position.z))
							B = np.array((tag_marker_array[1].pose.pose.position.x, tag_marker_array[1].pose.pose.position.y, tag_marker_array[1].pose.pose.position.z))

							A1 = np.array((tag_cog_array.poses[0].position.x, tag_cog_array.poses[0].position.y, tag_cog_array.poses[0].position.z))
							B1 = np.array((tag_cog_array.poses[1].position.x, tag_cog_array.poses[1].position.y, tag_cog_array.poses[1].position.z))

							print tag_cog_array

							dist_AB = np.linalg.norm(A-B)
							print dist_AB

							dist_A1B1 = np.linalg.norm(A1-B1)
							print dist_A1B1

							story_pub.publish(str(avail_pair))
							self.tag_pub.publish(tag_cog_array)
						print avail_pair
						avail_pair = []
						avail_tags = []
						#print tag_cog_array
						tag_cog_array.poses = []
						tag_marker_array = []
						#tag_cog_array = []

						
					elif tagCount == 4:
						for j in range(tagCount):
							if tag_marker_array[j].id == avail_pair[0]:
								
								mid0 = Pose()
								mid0.position.x = tag_marker_array[j].pose.pose.position.x
								mid0.position.y = tag_marker_array[j].pose.pose.position.y
								mid0.position.z = tag_marker_array[j].pose.pose.position.z
								mid0.orientation.x = tag_marker_array[j].pose.pose.orientation.x
								mid0.orientation.y = tag_marker_array[j].pose.pose.orientation.y
								mid0.orientation.z = tag_marker_array[j].pose.pose.orientation.z
								mid0.orientation.w = tag_marker_array[j].id
								tag_cog_array4.poses.append(mid0)
						
						for j in range(tagCount):	
							if tag_marker_array[j].id == avail_pair[1]:
								
								mid1 = Pose()
								mid1.position.x = tag_marker_array[j].pose.pose.position.x
								mid1.position.y = tag_marker_array[j].pose.pose.position.y
								mid1.position.z = tag_marker_array[j].pose.pose.position.z
								mid1.orientation.x = tag_marker_array[j].pose.pose.orientation.x
								mid1.orientation.y = tag_marker_array[j].pose.pose.orientation.y
								mid1.orientation.z = tag_marker_array[j].pose.pose.orientation.z
								mid1.orientation.w = tag_marker_array[j].id
								tag_cog_array4.poses.append(mid1)
						for j in range(tagCount):

							if tag_marker_array[j].id == avail_pair[2]:
								
								mid2 = Pose()
								mid2.position.x = tag_marker_array[j].pose.pose.position.x
								mid2.position.y = tag_marker_array[j].pose.pose.position.y
								mid2.position.z = tag_marker_array[j].pose.pose.position.z
								mid2.orientation.x = tag_marker_array[j].pose.pose.orientation.x
								mid2.orientation.y = tag_marker_array[j].pose.pose.orientation.y
								mid2.orientation.z = tag_marker_array[j].pose.pose.orientation.z
								mid2.orientation.w = tag_marker_array[j].id
								tag_cog_array4.poses.append(mid2)
						
						for j in range(tagCount):
							if tag_marker_array[j].id == avail_pair[3]:
								
								mid3 = Pose()
								mid3.position.x = tag_marker_array[j].pose.pose.position.x
								mid3.position.y = tag_marker_array[j].pose.pose.position.y
								mid3.position.z = tag_marker_array[j].pose.pose.position.z
								mid3.orientation.x = tag_marker_array[j].pose.pose.orientation.x
								mid3.orientation.y = tag_marker_array[j].pose.pose.orientation.y
								mid3.orientation.z = tag_marker_array[j].pose.pose.orientation.z
								mid3.orientation.w = tag_marker_array[j].id
								tag_cog_array4.poses.append(mid3)								
						#if len(tag_cog_array.poses) == 1:
							#tag_cog_array.poses.append(mid)

						print "length"
						print len(tag_cog_array4.poses)

						if len(tag_cog_array4.poses) == 4:

							#tag_cog_array4.poses[0] = middle_pose
							#tag_cog_array4.poses[1] = mid

							tag_cog_array4.header.stamp = rospy.Time.now()
							tag_cog_array4.header.frame_id = msg.markers[0].header.frame_id
								#print ea4chpair.id

							A = np.array((tag_marker_array[0].pose.pose.position.x, tag_marker_array[0].pose.pose.position.y, tag_marker_array[0].pose.pose.position.z))
							B = np.array((tag_marker_array[1].pose.pose.position.x, tag_marker_array[1].pose.pose.position.y, tag_marker_array[1].pose.pose.position.z))

							A1 = np.array((tag_cog_array4.poses[0].position.x, tag_cog_array4.poses[0].position.y, tag_cog_array4.poses[0].position.z))
							B1 = np.array((tag_cog_array4.poses[1].position.x, tag_cog_array4.poses[1].position.y, tag_cog_array4.poses[1].position.z))

							print tag_cog_array4

							dist_AB = np.linalg.norm(A-B)
							print dist_AB

							dist_A1B1 = np.linalg.norm(A1-B1)
							print dist_A1B1

							print "tag_cog_array"
							print tag_cog_array4

							story_pub.publish(str(avail_pair))
							self.tag_pub.publish(tag_cog_array4)
						print avail_pair
						avail_pair = []
						avail_tags = []
						#print tag_cog_array
						#tag_cog_array = []
						tag_cog_array4.poses = []
						tag_marker_array = []

				
			if tag.id in card_tag_states:
				card_id = tag.id
				print card_id
				card_pub.publish(str(card_id))

				card_pos = Pose()
				card_pos.position.x = tag.pose.pose.position.x
				card_pos.position.y = tag.pose.pose.position.y
				card_pos.position.z = tag.pose.pose.position.z
				card_pos.orientation.x = tag.pose.pose.orientation.x
				card_pos.orientation.y = tag.pose.pose.orientation.y
				card_pos.orientation.z = tag.pose.pose.orientation.z
				
				self.card_pose_pub.publish(card_pos)

					



			# Compute the COG
			tag_cog.pose.position.x /= n
			tag_cog.pose.position.y /= n
			tag_cog.pose.position.z /= n

			# Give the tag a unit orientation
			tag_cog.pose.orientation.w = 1

			# Add a time stamp and frame_id
			tag_cog.header.stamp = rospy.Time.now()
			tag_cog.header.frame_id = msg.markers[0].header.frame_id
Code example #34
 def poses_to_posearray(self):
     msg = PoseArray()
     msg.header = Utils.make_header('map')
     msg.poses = Utils.particles_to_poses(self.pose)
     return msg
Code example #35
    def image_callback(self, img_msg):
        try:
            cv_image = bridge.imgmsg_to_cv2(img_msg, "bgr8")
            # if not self.corners:
            thresh = grayscale(cv_image)
            cnt = contours(thresh, cv_image)
            crnrs = corners(cv_image, cnt)
            self.H, status = cv2.findHomography(np.array(crnrs),
                                                np.array(HOMOGRAPHY_DEST))
            # print(type(self.H))
            # print(self.H)
            self.corners = crnrs
            if self.numConverge < 10:
                homography, status = cv2.findHomography(
                    np.array(self.corners), np.array(HOMOGRAPHY_DEST))
                if np.allclose(self.H, homography, atol=.1):
                    self.numConverge += 1
                else:
                    self.numConverge = 0
                self.H = homography
        except CvBridgeError as e:
            print(e)

        # if self.times_centers_found < 10:
        # mask = segment_by_color(cv_image, "purple")
        # cnts = contours(mask, cv_image)
        # self.centers.append(np.array(midpoint(cv_image, cnts)))
        # self.homography(cv_image)
        # self.times_centers_found += 1

        # purple_center = np.dot(self.H, np.array([self.centers[0][0], self.centers[0][1], 1]))
        # print((purple_center * conversion) / 100)
        # purple_center = (purple_center * conversion) / 100
        # self.homography(cv_image)

        if self.numConverge >= 10:
            positions = PoseArray()
            positions.poses = []
            positions.header.frame_id = ' '.join(COLORS)
            for i, color in enumerate(COLORS):
                mask = segment_by_color(cv_image, color)
                cnts = contours(mask, cv_image)
                mp = midpoint(cv_image, cnts)
                cntr = np.dot(self.H, np.array([mp[0], mp[1], 1]))
                cntr = cntr / cntr[2]
                cntr_xy = (cntr * conversion) / 100
                pose = Pose()
                # print(cntr_xy)
                # print(mp)
                # print(self.ar_pos[0] - cntr_xy[0], self.ar_pos[1] - cntr_xy[1])
                pose.position.x = self.ar_pos[0] + cntr_xy[0] - (
                    6.5 * conversion) + 0.09
                pose.position.y = self.ar_pos[1] - cntr_xy[1] + (
                    6.5 / 2 * conversion) + 0.03
                pose.position.z = self.ar_pos[2] + 0.08

                print(pose)
                # print(pose)
                positions.poses.append(pose)
                # self.centers[i] = np.array([cntr_xy[0], cntr_xy[1], 1])
                # self.centers[i] = np.array([cntr_xy[0] + self.ar_pos.x, cntr_xy[1] + self.ar_pos.y, self.ar_pos.z])
            # print(self.centers)
            self.homography(cv_image)
            self.position_publisher.publish(positions)

        cv2.imshow('image', cv_image)
        cv2.waitKey(1)
Code example #36
 def link_poses_msg(self):
     poses = self.arm.get_link_poses()
     out = PoseArray()
     out.poses = poses
     return out
Code example #37
    ret, frame = cap.read()
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    #cv.TM_CCOEFF,cv.TM_CCOEFF_NORMED,cv.TM_CCORR,cv.TM_CCORR_NORMED > using max value
    res = cv2.matchTemplate(gray_frame, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.7
    _, max_val, _, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    poseArr = PoseArray()
    if max_val >= threshold:
        cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 3)

        point1 = Pose()
        point1.position.x = top_left[0]
        point1.position.y = top_left[1]
        point2 = Pose()
        point2.position.x = bottom_right[0]
        point2.position.y = bottom_right[1]
        poseArr.poses = [point1, point2]

        pub.publish(poseArr)

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1)

    if key ==27:
        break

cap.release()
cv2.destroyAllWindows()
Code example #38
    return


def fuse_centers15(data):
    fuse_centers(15, data.header.stamp, data.poses[0])

    return


if __name__ == '__main__':
    cluster_num = 0
    updates = 16 * [False]

    trees_centers = PoseArray()
    trees_centers.header.frame_id = "velodyne"
    trees_centers.poses = []
    pose = Pose()
    pose.position.x = 0
    pose.position.y = 0
    pose.position.z = 0
    pose.orientation.x = 0
    pose.orientation.y = 0
    pose.orientation.z = 0
    pose.orientation.w = 1

    for i in range(16):
        trees_centers.poses.append(pose)

    rospy.init_node('tree_coordinates', anonymous=True)

    rospy.Subscriber('/clustering/cluster_num',
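A pitfall in this (truncated) example: the same Pose instance is appended sixteen times, so every entry of trees_centers.poses aliases one object, and mutating any slot changes all of them. A minimal sketch that builds a fresh Pose per slot, assuming the cluster callbacks are meant to update slots independently:

from geometry_msgs.msg import Pose, PoseArray

trees_centers = PoseArray()
trees_centers.header.frame_id = "velodyne"
for _ in range(16):
    p = Pose()
    p.orientation.w = 1.0  # identity orientation
    trees_centers.poses.append(p)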
Code example #39
File: gen_views.py Project: kunzel/viper
import viper.robots.scitos
robot = viper.robots.scitos.ScitosRobot()

NUM_OF_VIEWS = rospy.get_param('~num_of_views', 100)
FILENAME     = rospy.get_param('~output_file', 'views.json')

planner = ViewPlanner(robot)

rospy.loginfo('Generate views.')
views = planner.sample_views(NUM_OF_VIEWS)
rospy.loginfo('Generate views. Done.')


robot_poses  = PoseArray()
robot_poses.header.frame_id = '/map'
robot_poses.poses = []
for v in views:
    robot_poses.poses.append(v.get_robot_pose())
print len(robot_poses.poses)
robot_poses_pub.publish(robot_poses)

ptu_poses  = PoseArray()
ptu_poses.header.frame_id = '/map'
ptu_poses.poses = []
for v in views:
    ptu_poses.poses.append(v.get_ptu_pose())
print len(ptu_poses.poses)
ptu_poses_pub.publish(ptu_poses)

with open(FILENAME, "w") as outfile:
    json_data = jsonpickle.encode(views)
Code example #40
File: sm_approach.py Project: gt-ros-pkg/hrl
    # sm = sm_approach_table()

    sm = smach.StateMachine(outcomes=['succeeded','aborted','preempted'],
                            input_keys = [ 'approach_poses' ])

    p = Pose()
    p.position.x = 0.3879
    p.position.y = 0.79838
    p.position.z = 0.0

    p.orientation.z = -0.704
    p.orientation.w = 0.709

    pa = PoseArray()
    pa.header.frame_id = '/map'
    pa.poses = [ p ]

    sm.userdata.table_edge_poses = pa

    with sm:
        sm_table = sm_approach_table()

        smach.StateMachine.add(
            'APPROACH_TABLE',
            sm_table,
            remapping = {'table_edge_poses':'table_edge_poses', # input
                         'movebase_pose_global':'movebase_pose_global', # output
                         'table_edge_global':'table_edge_global'}, # output
            #transitions = {'succeeded':'MOVE_BACK'})
            transitions = {'succeeded':'succeeded'})
Code example #41
 def publish_pose_array(self, publisher, frame_id, poses):
     pose_array = PoseArray()
     pose_array.header.frame_id = frame_id
     pose_array.header.stamp = rospy.Time.now()
     pose_array.poses = poses
     publisher.publish(pose_array)
Code example #42
 def publish_particles(self, particles):
     # publish the given particles as a PoseArray object
     pa = PoseArray()
     pa.header = Utils.make_header("map")
     pa.poses = Utils.particles_to_poses(particles)
     self.particle_pub.publish(pa)
Code example #43
File: sm_approach.py Project: wklharry/hrl
    # sm = sm_approach_table()

    sm = smach.StateMachine(outcomes=['succeeded', 'aborted', 'preempted'],
                            input_keys=['approach_poses'])

    p = Pose()
    p.position.x = 0.3879
    p.position.y = 0.79838
    p.position.z = 0.0

    p.orientation.z = -0.704
    p.orientation.w = 0.709

    pa = PoseArray()
    pa.header.frame_id = '/map'
    pa.poses = [p]

    sm.userdata.table_edge_poses = pa

    with sm:
        sm_table = sm_approach_table()

        smach.StateMachine.add(
            'APPROACH_TABLE',
            sm_table,
            remapping={
                'table_edge_poses': 'table_edge_poses',  # input
                'movebase_pose_global': 'movebase_pose_global',  # output
                'table_edge_global': 'table_edge_global'
            },  # output
            #transitions = {'succeeded':'MOVE_BACK'})
Code example #44
    def _trajectory_callback(self, msg):
        """
        15 waypoints for the next 3 secs

        geometry_msgs/Pose: 
        geometry_msgs/Point position
          float64 x
          float64 y
          float64 z
        geometry_msgs/Quaternion orientation
          float64 x
          float64 y
          float64 z
          float64 w

        :param msg:
        :return:
        """
        data = []
        time_mult = 1

        position_trajectory = []
        time_trajectory = []
        yaw_trajectory = []
        MPC_position_trajectory = []

        MPC_rviz_trajectory = PoseArray()
        MPC_rviz_trajectory.header.frame_id = "world"
        MPC_rviz_trajectory.header.stamp = rospy.Time.now()
        MPC_rviz_trajectory.poses = []

        current_time = time.time()
        for i in range(self.MPC_HORIZON):
            # NED to my frame
            x = msg.poses[i].position.x
            y = msg.poses[i].position.y
            z = msg.poses[i].position.z
            position_trajectory.append([y, -x, z])
            time_trajectory.append(0.1 * i * time_mult + current_time)

            # NED to world frame
            temp_pose_msg = Pose()
            temp_pose_msg.position.x = y
            temp_pose_msg.position.y = x
            temp_pose_msg.position.z = -z
            MPC_rviz_trajectory.poses.append(temp_pose_msg)

        for i in range(0, self.MPC_HORIZON - 1):
            yaw_trajectory.append(
                np.arctan2(
                    position_trajectory[i + 1][1] - position_trajectory[i][1],
                    position_trajectory[i + 1][0] - position_trajectory[i][0]))
        yaw_trajectory.append(yaw_trajectory[-1])

        position_trajectory = np.array(position_trajectory)
        yaw_trajectory = np.array(yaw_trajectory)
        self.MPC_rviz_trajectory_publisher.publish(MPC_rviz_trajectory)

        # Update MPC target
        if (self.timestep % self.MPC_TARGET_UPDATE_RATE == 0):
            self.MPC_position_target = position_trajectory[
                self.SELECT_MPC_TARGET]
            self.MPC_attitude_target = yaw_trajectory[
                self.SELECT_MPC_TARGET]  # to avoid dramatic yaw change
Code example #45
def msgn(pqs, t, frame='base_link'):
    msg = PoseArray()
    msg.header.frame_id = frame
    msg.header.stamp = t
    msg.poses = [pose(p, q) for (p, q) in pqs]
    return msg
Code example #46
 def publish_particles(self, particles):
     pa = PoseArray()
     pa.header = Utils.make_header("map")
     pa.poses = Utils.particles_to_poses(particles)
     self.particle_pub.publish(pa)
Code example #47
    else:
        print "no offline plan found: a new plan will be computed"

        # loop over waypoints to get path plans between them
        start_pose = start_pose[0, :]
        for waypoint_pose in waypoints:

            string = str(waypoint_pose[0]) + str(waypoint_pose[1])
            rospy.sleep(5)

            pp.get_segment_plan(start_pose, waypoint_pose, rospy.get_rostime())

            print "waiting for a plan..."
            rospy.sleep(30)

            # add segment plan to the plan
            plan_msg = rospy.wait_for_message(plan_topic, PoseArray)
            plan.extend(plan_msg.poses)
            start_pose = waypoint_pose

    np.save(offline_plan_path, plan)
    print "done planning."

    rospy.sleep(5)
    poseArrayMsg = PoseArray()
    poseArrayMsg.header.frame_id = "/map"
    poseArrayMsg.poses = plan
    pp.plan_pub.publish(poseArrayMsg)

    rospy.spin()
Code example #48
File: eval_views.py Project: kunzel/viper
views = []
with open(INPUT_FILE, "r") as input_file:
    json_data = input_file.read()
    views = jsonpickle.decode(json_data)
    rospy.loginfo("Loaded %s views"  % len(views))



planner = ViewPlanner(robot)

view_values = planner.compute_view_values(views)


robot_poses  = PoseArray()
robot_poses.header.frame_id = '/map'
robot_poses.poses = []
for v in views:
    robot_poses.poses.append(v.get_ptu_pose())
robot_poses_pub.publish(robot_poses)

#view_costs = planner.compute_view_costs(views)

# triangle marker
markerArray = MarkerArray()    
idx = 0
for view in views:
    val = view_values[view.ID]
    print idx, val
    if val > 0:
        print "Create triangle marker with value", val
        vis.create_marker(markerArray, view, view.get_ptu_pose(), view_values)
Code example #49
File: particle_filter.py Project: mikemwang/obstacle
 def publish_particles(self, particles):
     pa = PoseArray()
     pa.header = Utils.make_header("map")
     pa.poses = Utils.particles_to_poses(particles)
     self.particle_pub.publish(pa)
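Several of the particle-filter examples above rely on a Utils.particles_to_poses helper that is not shown. A minimal sketch of what such a conversion typically looks like, assuming each particle is an (x, y, theta) triple in the map frame:

from geometry_msgs.msg import Point, Pose, Quaternion
from tf.transformations import quaternion_from_euler

def particles_to_poses(particles):
    # Convert (x, y, theta) particles into geometry_msgs/Pose objects.
    poses = []
    for x, y, theta in particles:
        q = quaternion_from_euler(0.0, 0.0, theta)
        poses.append(Pose(position=Point(x, y, 0.0),
                          orientation=Quaternion(*q)))
    return poses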