Code example #1
def main():
    
    rospy.init_node('tf_test')
    rospy.loginfo("Node for testing actionlib server")
 
    #from_link = '/head_color_camera_l_link'
    #to_link = '/base_link'
    
    from_link = '/base_link'
    to_link = '/map'
    
    tfl = TransformListener()
    
    rospy.sleep(5)
    
    
    t = rospy.Time(0)
    
    mpose = PoseStamped()
    
    mpose.pose.position.x = 1
    mpose.pose.position.y = 0
    mpose.pose.position.z = 0
    
    mpose.pose.orientation.x = 0
    mpose.pose.orientation.y = 0
    mpose.pose.orientation.z = 0
    mpose.pose.orientation.w = 1  # identity quaternion; an all-zero orientation is invalid
    
    mpose.header.frame_id = from_link
    mpose.header.stamp = rospy.Time.now()
    
    rospy.sleep(5)
    
    mpose_transf = None
    
    rospy.loginfo('Waiting for transform for some time...')
    
    tfl.waitForTransform(to_link,from_link,t,rospy.Duration(5))
    
    if tfl.canTransform(to_link,from_link,t):
      
      mpose_transf = tfl.transformPose(to_link,mpose)
      
      print mpose_transf
      
    else:
      
      rospy.logerr('Transformation is not possible!')
      sys.exit(1)
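For comparison, a minimal sketch of the same lookup against the newer tf2_ros API (assuming something is publishing a base_link-to-map transform; node and variable names are arbitrary, and note that tf2 frame ids take no leading slash):

import rospy
import tf2_ros
import tf2_geometry_msgs  # registers PoseStamped support for do_transform_pose
from geometry_msgs.msg import PoseStamped

rospy.init_node('tf2_test')
buf = tf2_ros.Buffer()
tf2_listener = tf2_ros.TransformListener(buf)

mpose = PoseStamped()
mpose.header.frame_id = 'base_link'  # no leading slash in tf2
mpose.pose.position.x = 1
mpose.pose.orientation.w = 1  # identity orientation

# wait up to 5 s for the latest map <- base_link transform
trans = buf.lookup_transform('map', 'base_link', rospy.Time(0), rospy.Duration(5.0))
mpose_transf = tf2_geometry_msgs.do_transform_pose(mpose, trans)
print mpose_transf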
Code example #2
def subscriber_callback(data):
	occupancyMap = transformation(data.data, data.info.width, data.info.height)
	way_points = find_goals(occupancyMap, data.info.width, data.info.height)
	result = random.choice(way_points)

	try:
		planMaker = rospy.ServiceProxy('move_base/make_plan', nav_srv.GetPlan)
		listener = TransformListener()
		listener.waitForTransform("base_link", "map", rospy.Time(), rospy.Duration(4.0))
		t = listener.getLatestCommonTime("base_link", "map")
		position, quaternion = listener.lookupTransform("base_link", "map", t)
		pos_x = position[0]
		pos_y = position[1]
		pos_z = position[2]
		goal_robot = create_goal((result[1]-2000)*data.info.resolution,(result[0]-2000)*data.info.resolution)
		#Make plan with 0.5 meter flexibility, from target pose and current pose (with same header)
		start_pose = create_message(pos_x,pos_y,pos_z,quaternion)

		plan = planMaker(
			start_pose,
			goal_robot.target_pose,
			0.5)
		action_server = actionlib.SimpleActionClient('move_base', mb_msg.MoveBaseAction)
		action_server.wait_for_server()
		send_goal(goal_robot, action_server)

	except rospy.ServiceException, e:
		print "plan service call failed: %s"%e
Code example #3
class FTPNode:
    def __init__(self, *args):
        print("init")
        self.tf = TransformListener()
        self.tt = Transformer()
        rospy.Subscriber("/trackedHumans", TrackedHumans, self.pos_callback)
        self.publisher = rospy.Publisher("directionmarker_array", MarkerArray)


    def pos_callback(self, data):
        rospy.loginfo("on updated pos, face robot towards guy...")
        print("hi")
        if (len(data.trackedHumans) > 0):
            print(data.trackedHumans[0].location.point.x)
            try:
                # wait at the point's own timestamp, since transformPoint uses it
                self.tf.waitForTransform(data.trackedHumans[0].location.header.frame_id, "/base_link", data.trackedHumans[0].location.header.stamp, rospy.Duration(2.0))
                pp = self.tf.transformPoint("/base_link", data.trackedHumans[0].location)
                print "Original:"
                print [data.trackedHumans[0].location.point]
                print "Transform:"
                print pp.point

                phi = math.atan2(pp.point.y, pp.point.x)
                sss.move_base_rel("base", [0,0,phi])
                print phi*180/math.pi
                
                markerArray = MarkerArray()
                marker = Marker()
                marker.header.stamp = rospy.Time()
                marker.ns = "my_namespace"
                marker.id = 0
                marker.header.frame_id = "/base_link"
                marker.type = marker.ARROW
                marker.action = marker.ADD
                marker.scale.x = .1
                marker.scale.y = .1
                marker.scale.z = .1
                marker.color.a = 1.0
                marker.color.r = 1.0
                marker.color.g = 1.0
                marker.color.b = 0.0
                p1 = Point()
                p1.x = 0
                p1.y = 0
                p1.z = 0
                p2 = Point()
                p2.x = pp.point.x
                p2.y = pp.point.y
                p2.z = 0
                marker.points = [p1,p2]
                #marker.pose.position.x = 1
                #marker.pose.position.y = 0
                #marker.pose.position.z = 1
                #marker.pose.orientation.w = 1
                markerArray.markers.append(marker)
                self.publisher.publish(markerArray)
                print "try ended"
            except Exception as e:
                print e
Code example #4
class ChallengeProblemLogger(object):
  _knownObstacles = {}
  _placedObstacle = False
  _lastgzlog = 0.0
  _tf_listener = None
  
  def __init__(self,name):
    self._name = name;

    self._experiment_started = False
    self._tf_listener = TransformListener()

    # Subscribe to robot pose ground truth from gazebo
    rospy.Subscriber("/gazebo/model_states", ModelStates, callback=self.gazebo_model_monitor,
                     queue_size=1)

  # Whenever we get a report from Gazebo, map the gazebo coordinate to map coordinates and
  # log this
  # Only do this every second - this should be accurate enough
  # TODO: model is assumed to be the third in the list. Need to make this based
  #       on the array to account for obstacles (maybe)
  def gazebo_model_monitor(self, data):
    if len(data.pose) <= 2:
      return
    data_time = rospy.get_rostime().to_sec()
    if self._lastgzlog == 0.0 or (data_time - self._lastgzlog >= 1):
      # Only do this every second

      # Get the turtlebot model state information (assumed to be indexed at 2)    
      tb_pose = data.pose[2]
      tb_position = tb_pose.position
      self._lastgzlog = data_time

      # Do this only if the transform exists
      if self._tf_listener.frameExists("/base_link") and self._tf_listener.frameExists("/map"):
        self._tf_listener.waitForTransform("/map", "/base_link", rospy.Time(0), rospy.Duration(1))
        (trans,rot) = self._tf_listener.lookupTransform("/map", "/base_link",rospy.Time(0))
        rospy.loginfo("BRASS | Turtlebot | {},{}".format(trans[0], trans[1]))
      
      # Log any obstacle information, but do it only once. This currently assumes one obstacle
      # TODO: test this
      if len(data.name) > 3:
        addedObstacles = {}
        removedObstacles = self._knownObstacles.copy()
        for obs in range(3, len(data.name)-1):
          if data.name[obs] not in self._knownObstacles:
            addedObstacles[data.name[obs]] = obs
          else:
            self._knownObstacles[data.name[obs]] = obs
            del removedObstacles[data.name[obs]]
        
        for key, value in removedObstacles.iteritems():
          rospy.loginfo("BRASS | Obstacle {} | removed".format(key))
          del self._knownObstacles[key]

        for key, value in addedObstacles.iteritems():
          obs_pos = data.pose[value].position
          rospy.loginfo("BRASS | Obstacle {} | {},{}".format(key, obs_pos.x, obs_pos.y))
          self._knownObstacles[key] = value
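The TODO above could be handled by looking the robot up by model name instead of assuming index 2; a minimal sketch, where the model name "mobile_base" is an assumption not taken from the original:

  def find_model_pose(self, data, model_name="mobile_base"):
    # Hypothetical helper: find the robot in /gazebo/model_states by name
    # ("mobile_base" is an assumed model name) instead of using data.pose[2]
    try:
      return data.pose[data.name.index(model_name)]
    except ValueError:
      return None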
Code example #5
File: rows_simulator.py Project: madsbahrt/FroboMind
def calculate_distance_to_rows():
    tflisten = TransformListener()
    dist = []
    for i in range(0,n_rows):
        (veh_trans,veh_rot) = tflisten.lookupTransform("odom","base_footprint",rospy.Time(0))
        
        
    pass
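The loop above never fills dist in; a hypothetical completion, assuming each row is characterized by its x coordinate in the odom frame (row_positions is an invented parameter, not in the original):

def calculate_distance_to_rows(row_positions):
    tflisten = TransformListener()
    # vehicle position in the odom frame
    (veh_trans, veh_rot) = tflisten.lookupTransform("odom", "base_footprint", rospy.Time(0))
    # distance from the vehicle to each row along x (assumed row geometry)
    return [abs(row_x - veh_trans[0]) for row_x in row_positions]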
Code example #6
class ForceFromGravity(object):
    def __init__(self):
        self.tf_l = TransformListener()
        self.last_wrench = None
        self.wrench_sub = rospy.Subscriber('/left_wrist_ft',
                                           WrenchStamped,
                                           self.wrench_cb,
                                           queue_size=1)
    def wrench_cb(self, data):
        self.last_wrench = data

    def compute_forces(self):
        qs = self.get_orientation()
        o = qs.quaternion
        r, p, y = euler_from_quaternion([o.x, o.y, o.z, o.w])
        rospy.loginfo("wrist_left_ft_link rpy vs world: " + str( (round(r, 3), round(p, 3), round(y, 3)) ))
        rospy.loginfo("in degrees: " + str( (round(degrees(r), 3), round(degrees(p), 3), round(degrees(y), 3)) ))
        hand_total_force = 10  # 10 N, roughly the weight of a 1 kg mass
        fx = hand_total_force * cos(r) * -1.0 # hack
        fy = hand_total_force * cos(p)
        fz = hand_total_force * cos(y)
        #total = abs(fx) + abs(fy) + abs(fz)
        #rospy.loginfo("fx, fy, fz, total:")
        #rospy.loginfo( str( (round(fx, 3), round(fy, 3), round(fz, 3), round(total, 3)) ) )
        return fx, fy, fz

    def get_last_forces(self):
        f = self.last_wrench.wrench.force
        return f.x, f.y, f.z

    def get_orientation(self, from_frame="wrist_left_ft_link"):
        qs = QuaternionStamped()
        qs.quaternion.w = 1.0
        qs.header.stamp = self.tf_l.getLatestCommonTime("world", from_frame)
        qs.header.frame_id = "/" + from_frame
        transform_ok = False
        while not transform_ok and not rospy.is_shutdown():
            try:
                world_q = self.tf_l.transformQuaternion("/world", qs)
                transform_ok = True
                return world_q
            except ExtrapolationException as e:
                rospy.logwarn(
                    "Exception on transforming pose... trying again \n(" + str(e) + ")")
                rospy.sleep(0.05)
                qs.header.stamp = self.tf_l.getLatestCommonTime(
                    "world", from_frame)

    def run(self):
        r = rospy.Rate(1)
        while not rospy.is_shutdown():
            cx, cy, cz = self.compute_forces()
            c_total = abs(cx) + abs(cy) + abs(cz)
            fx, fy, fz = self.get_last_forces()
            f_total = abs(fx) + abs(fy) + abs(fz)
            rospy.loginfo("Computed Forces:" + str(round(c_total, 3)) + "\n" + str( (round(cx, 3), round(cy, 3), round(cz, 3) )))
            rospy.loginfo("Real Forces    :" + str(round(f_total, 3)) + "\n" + str( (round(fx, 3), round(fy, 3), round(fz, 3) )))
            r.sleep()
Code example #7
class TfFilter:
    def __init__(self, buffer_size):
        self.tf = TransformListener(True, rospy.Duration(5))
        self.target = ''
        self.buffer = np.zeros((buffer_size, 1))
        self.buffer_ptr = 0
        self.buffer_size = buffer_size
        self.tf_sub = rospy.Subscriber('tf', tfMessage, self.tf_cb)
        self.tf_pub = rospy.Publisher('tf', tfMessage)
        self.srv = rospy.Service(SERVICE, PublishGoal, self.publish_goal_cb)

    def tf_cb(self, msg):
        for t in msg.transforms:
            if t.child_frame_id == self.target:
                time = self.tf.getLatestCommonTime(self.source, self.target)
                p, q = self.tf.lookupTransform(self.source, self.target, time)
                rm = tfs.quaternion_matrix(q)
                # assemble a new coordinate frame that has z-axis aligned with
                # the vertical direction and x-axis facing the z-axis of the
                # original frame
                z = rm[:3, 2]
                z[2] = 0
                axis_x = tfs.unit_vector(z)
                axis_z = tfs.unit_vector([0, 0, 1])
                axis_y = np.cross(axis_x, axis_z)
                rm = np.vstack((axis_x, axis_y, axis_z)).transpose()
                # shift the pose along the x-axis
                self.position = p + np.dot(rm, self.d)[:3]
                self.buffer[self.buffer_ptr] = tfs.euler_from_matrix(rm)[2]
                self.buffer_ptr = (self.buffer_ptr + 1) % self.buffer_size
                self.publish_filtered_tf(t.header)

    def publish_filtered_tf(self, header):
        yaw = np.median(self.buffer)
        q = tfs.quaternion_from_euler(0, 0, yaw - math.pi)
        ts = TransformStamped()
        ts.header = header
        ts.header.frame_id = self.source
        ts.child_frame_id = self.goal
        ts.transform.translation.x = self.position[0]
        ts.transform.translation.y = self.position[1]
        ts.transform.translation.z = 0
        ts.transform.rotation.x = q[0]
        ts.transform.rotation.y = q[1]
        ts.transform.rotation.z = q[2]
        ts.transform.rotation.w = q[3]
        msg = tfMessage()
        msg.transforms.append(ts)
        self.tf_pub.publish(msg)

    def publish_goal_cb(self, r):
        rospy.loginfo('Received [%s] request. Will start publishing %s frame' %
                      (SERVICE, r.goal_frame_id))
        self.source = r.source_frame_id
        self.target = r.target_frame_id
        self.goal = r.goal_frame_id
        self.d = [r.displacement.x, r.displacement.y, r.displacement.z]
        return []
Code example #8
File: tf_test.py Project: adegroote/morse
class TfTest(RosTestCase):

    def setUpEnv(self):
        
        robot = ATRV()

        odometry = Odometry()
        robot.append(odometry)
        odometry.add_stream('ros')

        motion = Teleport()
        robot.append(motion)
        motion.add_stream('socket')
        
        robot2 = ATRV()
        robot2.translate(0,1,0)
        odometry2 = Odometry()
        robot2.append(odometry2)
        odometry2.add_stream('ros', frame_id="map", child_frame_id="robot2")

        env = Environment('empty', fastmode = True)
        env.add_service('socket')

    def _check_pose(self, frame1, frame2, position, quaternion, precision = 0.01):

        t = self.tf.getLatestCommonTime(frame1, frame2)
        observed_position, observed_quaternion = self.tf.lookupTransform(frame1, frame2, t)

        for a,b in zip(position, observed_position):
            self.assertAlmostEqual(a, b, delta = precision)
        for a,b in zip(quaternion, observed_quaternion):
            self.assertAlmostEqual(a, b, delta = precision)



    def test_tf(self):

        rospy.init_node('morse_ros_tf_test')

        self.tf = TransformListener()

        sleep(1)
        self.assertTrue(self.tf.frameExists("/odom"))
        self.assertTrue(self.tf.frameExists("/base_footprint"))
        self.assertTrue(self.tf.frameExists("/map"))
        self.assertTrue(self.tf.frameExists("/robot2"))

        self._check_pose("odom", "base_footprint", [0,0,0], [0,0,0,1])
        self._check_pose("map", "robot2", [0,0,0], [0,0,0,1])

        with Morse() as morse:
            teleport = morse.robot.motion

            teleport.publish({'x' : 2, 'y' : 0, 'z' : 0, \
                              'yaw' : 0, 'pitch' : 0.0, 'roll' : 0.0})
            morse.sleep(0.1)

        self._check_pose("odom", "base_footprint", [2,0,-0.1], [0,0,0,1])
Code example #9
File: transform.py Project: czhao39/racecar_4
class TransformNode:
    def __init__(self, *args):
        self.tf = TransformListener()
        rospy.Subscriber("/tf", TFMessage, transform, queue_size=1)

    def transform(self, msg):
        if self.tf.frameExists("/base_link") and self.tf.frameExists("/map"):
            t = self.tf.getLatestCommonTime("/base_link", "/map")
            position, quaternion = self.tf.lookupTransform("/base_link", "/map", t)
            print position, quaternion
Code example #10
File: lookat_ik.py Project: gt-ros-pkg/hrl-assistive
class CameraPointer(object):
    def __init__(self, side, camera_ik):
        self.side = side
        self.camera_ik = camera_ik
        self.joint_names = self.joint_angles_act = self.joint_angles_des = None
        self.tfl = TransformListener()
        self.joint_state_sub = rospy.Subscriber('/{0}_arm_controller/state'.format(self.side), JointTrajectoryControllerState, self.joint_state_cb)
        self.joint_traj_pub = rospy.Publisher('/{0}_arm_controller/command'.format(self.side), JointTrajectory)
        # Wait for joint information to become available
        while self.joint_names is None and not rospy.is_shutdown():
            rospy.sleep(0.5)
            rospy.loginfo("[{0}] Waiting for joint state from arm controller.".format(rospy.get_name()))
        #Set rate limits on a per-joint basis
        self.max_vel_rot = [np.pi]*len(self.joint_names)
        self.target_sub = rospy.Subscriber('{0}/lookat_ik/goal'.format(rospy.get_name()), PointStamped, self.goal_cb)
        rospy.loginfo("[{0}] Ready.".format(rospy.get_name()))

    def joint_state_cb(self, jtcs):
        if self.joint_names is None:
            self.joint_names = jtcs.joint_names
        self.joint_angles_act = jtcs.actual.positions
        self.joint_angles_des = jtcs.desired.positions

    def goal_cb(self, pt_msg):
        rospy.loginfo("[{0}] New LookatIK goal received.".format(rospy.get_name()))
        if (pt_msg.header.frame_id != self.camera_ik.base_frame):
            try:
                now = pt_msg.header.stamp
                self.tfl.waitForTransform(pt_msg.header.frame_id,
                                          self.camera_ik.base_frame,
                                          now, rospy.Duration(1.0))
                pt_msg = self.tfl.transformPoint(self.camera_ik.base_frame, pt_msg)
            except (LookupException, ConnectivityException, ExtrapolationException):
                rospy.logwarn("[{0}] TF Error tranforming point from {1} to {2}".format(rospy.get_name(),
                                                                                        pt_msg.header.frame_id,
                                                                                        self.camera_ik.base_frame))
        target = np.array([pt_msg.point.x, pt_msg.point.y, pt_msg.point.z])
        # Get IK Solution
        current_angles = list(copy.copy(self.joint_angles_act))
        iksol = self.camera_ik.lookat_ik(target, current_angles[:len(self.camera_ik.arm_indices)])
        # Start with current angles, then replace angles in camera IK with IK solution
        # Use desired joint angles to avoid sagging over time
        jtp = JointTrajectoryPoint()
        jtp.positions = list(copy.copy(self.joint_angles_des))
        for i, joint_name in enumerate(self.camera_ik.arm_joint_names):
            jtp.positions[self.joint_names.index(joint_name)] = iksol[i]
        deltas = np.abs(np.subtract(jtp.positions, current_angles))
        duration = np.max(np.divide(deltas, self.max_vel_rot))
        jtp.time_from_start = rospy.Duration(duration)
        jt = JointTrajectory()
        jt.joint_names = self.joint_names
        jt.points.append(jtp)
        rospy.loginfo("[{0}] Sending Joint Angles.".format(rospy.get_name()))
        self.joint_traj_pub.publish(jt)
Code example #11
	def __init__(self):
		cv2.namedWindow("map")
		cv2.namedWindow("past_map")
		rospy.init_node("run_mapping")	
		#create map properties, helps to make ratio calcs
		self.origin = [-10,-10]
		self.seq = 0
		self.resolution = 0.1
		self.n = 200

		self.dyn_obs=[]
		self.counter=0

		#Giving initial hypotheses to the system
		self.p_occ = 0.5*np.ones((self.n, self.n)) #50-50 chance of being occupied
		self.odds_ratio_hit = 3.0 #this is arbitrary, can re-assign
		self.odds_ratio_miss = 0.3 #this is arbitrary, can reassign
		#TODO: Evaluate these - what do we need to change in order to make this more friendly to our version?  Potential changes:
		#Whenever there is an adjustment to self.odds_ratio_miss, update an odds ratio that implies dynamicness


		#calculates odds based upon hit to miss, equal odds to all grid squares
		self.odds_ratios = (self.p_occ)/(1-self.p_occ)*np.ones((self.n, self.n))
		#calculate initial past odds_ratio
		self.past_odds_ratios = (self.p_occ)/(1-self.p_occ)*np.ones((self.n, self.n))
		#TODO: Make sure that this is still an accurate representation of how probabilities work in our system.  Make appropriate additions/adjustments for the dynamic obstacle piece
		
		#write laser pubs and subs
		rospy.Subscriber("scan", LaserScan, self.scan_received, queue_size=1)
		self.pub = rospy.Publisher("map", OccupancyGrid)

		#note - in case robot autonomy is added back in
		self.tf_listener = TransformListener()	
Code example #12
    def __init__(self):
        rospy.init_node("learn_transform")

        self.tf_listener = TransformListener()
        self.tf_broadcaster = TransformBroadcaster()

        self.possible_base_link_poses = []
        self.baselink_averages = []

        self.rate = rospy.Rate(20)

        self.markers = { "/PATTERN_1": Pose( [-0.56297, 0.11, 0.0035],
                                             [0.0, 0.0, 0.0, 1.0] ),
                         "/PATTERN_2": Pose( [-0.45097, 0.11, 0.0035],
                                             [0.0, 0.0, 0.0, 1.0] ),
                         "/PATTERN_3": Pose( [-0.34097, 0.0, 0.0035],
                                             [0.0, 0.0, 0.0, 1.0] ),
                         "/PATTERN_4": Pose( [-0.34097, -0.11, 0.0035],
                                             [0.0, 0.0, 0.0, 1.0] ),
                         "/PATTERN_5": Pose( [-0.45097, -0.11, 0.0035],
                                             [0.0, 0.0, 0.0, 1.0] ),
                         "/PATTERN_6": Pose( [-0.56297, 0.0, 0.0035],
                                             [0.0, 0.0, 0.0, 1.0] )
                        }

        self.run()
Code example #13
File: action_server.py Project: Kapim/arcor-pr2
    def __init__(self, server_prefix, group_name):
        self.group_name = group_name
        self.group = moveit_commander.MoveGroupCommander(self.group_name)

        self._as = actionlib.SimpleActionServer(server_prefix + self.group_name + "/manipulation",
                                                ArmNavigationAction, self.action_cb, auto_start=False)

        char = self.group_name[0]
        self.jta = actionlib.SimpleActionClient('/' + char + '_arm_controller/joint_trajectory_action',
                                                JointTrajectoryAction)
        rospy.loginfo('Waiting for joint trajectory action')
        self.jta.wait_for_server()
        rospy.loginfo('Got it')
        self.joint_state_sub = rospy.Subscriber("/joint_states", JointState, self.js_cb, queue_size=1)
        self.angles = None
        self.move_to_pose_pub = rospy.Publisher("/pr2_arm_navigation/move_to_pose", PoseStamped, queue_size=10, latch=True)

        self.joint_names = [char + '_shoulder_pan_joint',
                            char + '_shoulder_lift_joint',
                            char + '_upper_arm_roll_joint',
                            char + '_elbow_flex_joint',
                            char + '_forearm_roll_joint',
                            char + '_wrist_flex_joint',
                            char + '_wrist_roll_joint']

        self.tf_listener = TransformListener()

        self.look_at_pub = rospy.Publisher("/art/robot/look_at", PointStamped, queue_size=1)
        self._as.start()
Code example #14
    def __init__(self, use_dummy_transform=False):
        rospy.init_node('star_center_positioning_node')
        if use_dummy_transform:
            self.odom_frame_name = ROBOT_NAME+"_odom_dummy"
        else:
            self.odom_frame_name = ROBOT_NAME+"_odom"

        self.marker_locators = {}
        self.add_marker_locator(MarkerLocator(0,(-6*12*2.54/100.0,0),0))
        self.add_marker_locator(MarkerLocator(1,(0.0,0.0),pi))
        self.add_marker_locator(MarkerLocator(2,(0.0,10*12*2.54/100.0),0))
        self.add_marker_locator(MarkerLocator(3,(-6*12*2.54/100.0,6*12*2.54/100.0),0))
        self.add_marker_locator(MarkerLocator(4,(0.0,6*12*2.54/100.0),pi))

        self.pose_correction = rospy.get_param('~pose_correction',0.0)
        self.phase_offset = rospy.get_param('~phase_offset',0.0)
        self.is_flipped = rospy.get_param('~is_flipped',False)


        srv = Server(STARPoseConfig, self.config_callback)

        self.marker_sub = rospy.Subscriber("ar_pose_marker",
                                           ARMarkers,
                                           self.process_markers)
        self.odom_sub = rospy.Subscriber(ROBOT_NAME+"/odom", Odometry, self.process_odom, queue_size=10)
        self.star_pose_pub = rospy.Publisher(ROBOT_NAME+"/STAR_pose",PoseStamped,queue_size=10)
        self.continuous_pose = rospy.Publisher(ROBOT_NAME+"/STAR_pose_continuous",PoseStamped,queue_size=10)
        self.tf_listener = TransformListener()
        self.tf_broadcaster = TransformBroadcaster()
Code example #15
    def __init__(self):
        rospy.init_node('right_arm_actions')
        
        self.tfl = TransformListener()
        self.aul = r_arm_utils.ArmUtils(self.tfl)

        rospy.loginfo('Waiting for Pixel_2_3d Service')
        try:
            rospy.wait_for_service('/pixel_2_3d', 7.0)
            self.p23d_proxy = rospy.ServiceProxy('/pixel_2_3d', Pixel23d, True)
            rospy.loginfo("Found pixel_2_3d service")
        except rospy.ROSException:
            rospy.logwarn("Pixel_2_3d Service Not available")

        #Low-level motion requests: pass directly to arm_utils
        rospy.Subscriber('wt_right_arm_pose_commands', Point, self.torso_frame_move)
        rospy.Subscriber('wt_right_arm_angle_commands', JointTrajectoryPoint, self.aul.send_traj_point)

        #More complex motion scripts, defined here using arm_util functions 
        rospy.Subscriber('norm_approach_right', PoseStamped, self.norm_approach)
        rospy.Subscriber('wt_grasp_right_goal', PoseStamped, self.grasp)
        rospy.Subscriber('wt_wipe_right_goals', PoseStamped, self.wipe)
        rospy.Subscriber('wt_swipe_right_goals', PoseStamped, self.swipe)
        rospy.Subscriber('wt_lin_move_right', Float32, self.hand_move)
        rospy.Subscriber('wt_adjust_elbow_right', Float32, self.aul.adjust_elbow)
        rospy.Subscriber('wt_surf_wipe_right_points', Point, self.prep_surf_wipe)
        rospy.Subscriber('wt_poke_right_point', PoseStamped, self.poke)

        self.wt_log_out = rospy.Publisher('wt_log_out', String)

        self.wipe_started = False
        self.surf_wipe_started = False
        self.wipe_ends = [PoseStamped(), PoseStamped()]
Code example #16
    def __init__(self, use_dummy_transform=False):
        rospy.init_node("star_center_positioning_node")

        self.robot_name = rospy.get_param("~robot_name", "")
        self.odom_frame_name = self.robot_name + "_odom"
        self.base_link_frame_name = self.robot_name + "_base_link"

        self.marker_locators = {}
        self.add_marker_locator(MarkerLocator(0, (-6 * 12 * 2.54 / 100.0, 0), 0))
        self.add_marker_locator(MarkerLocator(1, (0.0, 0.0), pi))
        self.add_marker_locator(MarkerLocator(2, (0.0, 10 * 12 * 2.54 / 100.0), 0))
        self.add_marker_locator(MarkerLocator(3, (-6 * 12 * 2.54 / 100.0, 6 * 12 * 2.54 / 100.0), 0))
        self.add_marker_locator(MarkerLocator(4, (0.0, 6 * 12 * 2.54 / 100), pi))
        self.add_marker_locator(MarkerLocator(5, (-4 * 12 * 2.54 / 100.0, 14 * 12 * 2.54 / 100.0), pi))

        self.pose_correction = rospy.get_param("~pose_correction", 0.0)
        self.phase_offset = rospy.get_param("~phase_offset", 0.0)
        self.is_flipped = rospy.get_param("~is_flipped", False)

        srv = Server(STARPoseConfig, self.config_callback)

        self.marker_sub = rospy.Subscriber("ar_pose_marker", ARMarkers, self.process_markers)
        self.odom_sub = rospy.Subscriber("odom", Odometry, self.process_odom, queue_size=10)
        self.star_pose_pub = rospy.Publisher("STAR_pose", PoseStamped, queue_size=10)
        self.continuous_pose = rospy.Publisher("STAR_pose_continuous", PoseStamped, queue_size=10)
        self.tf_listener = TransformListener()
        self.tf_broadcaster = TransformBroadcaster()
Code example #17
File: calibration.py Project: ipa-nhg/automatica2014
    def __init__(self, *args):
        self.tf = TransformListener()
        self.detector_service = rospy.ServiceProxy("/fiducials/get_fiducials", DetectObjects)

        self.frame_camera_mount = rospy.get_param('~frame_camera_mount')
        self.frame_marker_mount = rospy.get_param('~frame_marker_mount')
        self.frame_marker = rospy.get_param('~frame_marker', "/marker")
Code example #18
File: controller_Gon.py Project: gnunoe/Cf_ROS
 def __init__(self):
     self.pubNav = rospy.Publisher('cmd_vel', Twist, queue_size=1)
     self.listener = TransformListener()
     rospy.Subscriber("joy", Joy, self._joyChanged)
     rospy.Subscriber("cmd_vel_telop", Twist, self._cmdVelTelopChanged)
     self.cmd_vel_telop = Twist()
     #self.pidX = PID(20, 12, 0.0, -30, 30, "x")
     #self.pidY = PID(-20, -12, 0.0, -30, 30, "y")
     #self.pidZ = PID(15000, 3000.0, 1500.0, 10000, 60000, "z")
     #self.pidYaw = PID(50.0, 0.0, 0.0, -200.0, 200.0, "yaw")
     self.pidX = PID(20, 12, 0.0, -30, 30, "x")
     self.pidY = PID(-20, 12, 0.0, -15, 15, "y")
     #50000  800
     self.pidZ = PID(15000, 3000.0,  1500.0, 10000, 60000, "z")
     self.pidYaw = PID(50.0, 0.0, 0.0, -100.0, 100.0, "yaw")
     self.state = Controller.Manual
     #Target Values
     self.pubtarX = rospy.Publisher('target_x', Float32, queue_size=1)
     self.pubtarY = rospy.Publisher('target_y', Float32, queue_size=1)
     self.pubtarZ = rospy.Publisher('target_z', Float32, queue_size=1)
     self.targetZ = 0.5
     self.targetX = 0.0
     self.targetY = 0.0
     self.des_angle = 0.0
     #self.power = 50000.0
     #Actual Values
     self.pubrealX = rospy.Publisher('real_x', Float32, queue_size=1)
     self.pubrealY = rospy.Publisher('real_y', Float32, queue_size=1)
     self.pubrealZ = rospy.Publisher('real_z', Float32, queue_size=1)
     self.lastJoy = Joy()
Code example #19
  def __init__(self):
    rospy.init_node('sample_detection',anonymous=True)
    self.monocular_img_sub = rospy.Subscriber('monocular_img',Image, queue_size=1,callback=self.handle_monocular_img)
    self.left_img_sub = rospy.Subscriber('left_img',Image, queue_size=1,callback=self.handle_left_img)
    self.right_img_sub = rospy.Subscriber('right_img',Image, queue_size=1,callback=self.handle_right_img)
    self.disp_sub = rospy.Subscriber('disp',DisparityImage, queue_size=1,callback=self.handle_disp)
    self.cam_info_sub = rospy.Subscriber('cam_info',CameraInfo, self.handle_info)
    self.tf_listener = TransformListener()

    self.bridge = CvBridge()

    sample_file = rospy.get_param("/sample_detection/samples")
    stream = open(sample_file, 'r')
    self.samples = yaml.load(stream)

    self.mser = cv2.MSER()

    if rospy.get_param('~enable_debug',True):
      debug_img_topic = 'debug_img'
      self.debug_img_pub = rospy.Publisher(debug_img_topic, Image)

    self.sample_disparity ={}
    self.sample_points ={}
    self.sample_imgpoints ={}
    for s in self.samples:
      topic = s + '_pointstamped'
      self.sample_points[s] = rospy.Publisher(topic, PointStamped)
      topic = s + '_imgpoints'
      self.sample_imgpoints[s] = rospy.Publisher(topic, PointStamped)
      topic = s + '_disparity'
      self.sample_disparity[s] = rospy.Publisher(topic, DisparityImage)
    self.namedpoints = rospy.Publisher("named_points", NamedPoint)
Code example #20
File: demo.py Project: trevorlazarus/crazyflie_ros
 def __init__(self, goals):
     rospy.init_node("demo", anonymous=True)
     self.frame = rospy.get_param("~frame")
     self.pubGoal = rospy.Publisher("goal", PoseStamped, queue_size=1)
     self.listener = TransformListener()
     self.goals = goals
     self.goalIndex = 0
Code example #21
File: make_turn.py Project: madsbahrt/FroboMind
 def __init__(self,name,odom_frame,base_frame):
     """
     
     @param name: the name of the action
     @param odom_frame: the frame the robot is moving in (odom_combined)
     @param base_frame: the vehicles own frame (usually base_link)
     """
     self._action_name = name
     self.__odom_frame = odom_frame
     self.__base_frame = base_frame
     self.__server =  actionlib.SimpleActionServer(self._action_name,make_turnAction,auto_start=False)
     self.__server.register_preempt_callback(self.preempt_cb)
     self.__server.register_goal_callback(self.goal_cb)
     
     self.__cur_pos = 0
     self.__start_yaw = 0
     self.__cur_yaw = 0
     
     self.__feedback = make_turnFeedback()
     
     self.__listen = TransformListener()
     self.vel_pub = rospy.Publisher("/fmControllers/cmd_vel_auto",Twist)
     
     self.__turn_timeout = 200
     self.__start_time = rospy.Time.now()
     self.turn_vel = 0
     self.new_goal = False
     
     self.__server.start()
Code example #22
    def __init__(self, use_dummy_transform=False):
        print "init"
        rospy.init_node("star_center_positioning_node")
        if use_dummy_transform:
            self.odom_frame_name = ROBOT_NAME + "_odom_dummy"  # identify this odom as ROBOT_NAME's
        else:
            self.odom_frame_name = ROBOT_NAME + "_odom"  # identify this odom as ROBOT_NAME's

        self.marker_locators = {}

        self.add_marker_locator(MarkerLocator(0, (-6 * 12 * 2.54 / 100.0, 0), 0))
        self.add_marker_locator(MarkerLocator(1, (0.0, 0.0), pi))
        self.add_marker_locator(MarkerLocator(2, (0.0, 10 * 12 * 2.54 / 100.0), 0))
        self.add_marker_locator(MarkerLocator(3, (-6 * 12 * 2.54 / 100.0, 6 * 12 * 2.54 / 100.0), 0))

        self.pose_correction = rospy.get_param("~pose_correction", 0.0)

        self.marker_sub = rospy.Subscriber("ar_pose_marker", ARMarkers, self.process_markers)
        self.odom_sub = rospy.Subscriber(
            ROBOT_NAME + "/odom", Odometry, self.process_odom, queue_size=10
        )  # publish and subscribe to the correct robot's topics
        self.star_pose_pub = rospy.Publisher(ROBOT_NAME + "/STAR_pose", PoseStamped, queue_size=10)
        self.continuous_pose = rospy.Publisher(ROBOT_NAME + "/STAR_pose_continuous", PoseStamped, queue_size=10)
        self.tf_listener = TransformListener()
        self.tf_broadcaster = TransformBroadcaster()
Code example #23
    def __init__(self):

        self.ellipsoid = EllipsoidSpace()
        self.controller_switcher = ControllerSwitcher()
        self.ee_frame = '/l_gripper_shaver305_frame'
        self.head_pose = None
        self.tfl = TransformListener()
        self.is_forced_retreat = False
        self.pose_traj_pub = rospy.Publisher('/haptic_mpc/goal_pose_array', PoseArray)

        self.global_input_sub = rospy.Subscriber("/face_adls/global_move", String, 
                                                 async_call(self.global_input_cb))
        self.local_input_sub = rospy.Subscriber("/face_adls/local_move", String, 
                                                async_call(self.local_input_cb))
        self.clicked_input_sub = rospy.Subscriber("/face_adls/clicked_move", PoseStamped, 
                                                 async_call(self.global_input_cb))
        self.feedback_pub = rospy.Publisher('/face_adls/feedback', String)
        self.global_move_poses_pub = rospy.Publisher('/face_adls/global_move_poses',
                                                     StringArray, latch=True)
        self.controller_enabled_pub = rospy.Publisher('/face_adls/controller_enabled',
                                                      Bool, latch=True)
        self.enable_controller_srv = rospy.Service("/face_adls/enable_controller", 
                                                   EnableFaceController, self.enable_controller_cb)
        self.request_registration = rospy.ServiceProxy("/request_registration", RequestRegistration)

        self.enable_mpc_srv = rospy.ServiceProxy('/haptic_mpc/enable_mpc', EnableHapticMPC)
        self.ell_params_pub = rospy.Publisher("/ellipsoid_params", EllipsoidParams, latch=True)

        def stop_move_cb(msg):
            self.stop_moving()
        self.stop_move_sub = rospy.Subscriber("/face_adls/stop_move", Bool, stop_move_cb, queue_size=1)
Code example #24
class FaceCommander(Head):
    def __init__(self):
        Head.__init__(self)
        self._world = 'base'
        self._tf_listener = TransformListener()
        self.set_pan(0)

    def look_at(self, obj=None):
        if obj is None:
            self.set_pan(0)
            return True

        if isinstance(obj, str):
            pose = self._tf_listener.lookupTransform('head', obj, rospy.Time(0))
        else:
            rospy.logerr("FaceCommander.look_at() accepts only strings atm")
            return False

        hyp = transformations.norm(pose)
        adj = pose[0][1]
        angle = pi/2 - arccos(float(adj)/hyp)

        if isnan(angle):
            rospy.logerr('FaceCommander cannot look at {}'.format(obj))
            return False

        self.set_pan(angle)
        return True
Code example #25
 def __init__(self):
     self.tf_l = TransformListener()
     self.last_wrench = None
     self.wrench_sub = rospy.Subscriber('/left_wrist_ft',
                                        WrenchStamped,
                                        self.wrench_cb,
                                        queue_size=1)
Code example #26
	def __init__(self):
		cv2.namedWindow("map")
		cv2.namedWindow("past_map")
		rospy.init_node("run_mapping")	
		#create map properties, helps to make ratio calcs
		self.origin = [-10,-10]
		self.seq = 0
		self.resolution = 0.1
		self.n = 200

		self.pose = []
		self.dyn_obs=[]
		self.obstacle = []
		self.rapid_appear = set()
		self.counter=0

		#Giving initial hypotheses to the system
		self.p_occ = 0.5*np.ones((self.n, self.n)) #50-50 chance of being occupied
		self.odds_ratio_hit = 3.0 #this is arbitrary, can re-assign
		self.odds_ratio_miss = 0.3 #this is arbitrary, can reassign

		#calculates odds based upon hit to miss, equal odds to all grid squares
		self.odds_ratios = (self.p_occ)/(1-self.p_occ)*np.ones((self.n, self.n))
		#calculate initial past odds_ratio
		self.past_odds_ratios = (self.p_occ)/(1-self.p_occ)*np.ones((self.n, self.n))

		#write laser pubs and subs
		rospy.Subscriber("scan", LaserScan, self.scan_received, queue_size=1)
		self.pub = rospy.Publisher("map", OccupancyGrid)

		#note - in case robot autonomy is added back in
		self.tf_listener = TransformListener()	
Code example #27
	def __init__(self, use_depth_only):
		self.use_depth_only = use_depth_only
		self.depth_image_lock = Lock()
		self.image_list_lock = Lock()
		self.image_list = []
		self.image_list_max_size = 100
		self.downsample_factor = 2
		self.tf = TransformListener()
		rospy.Subscriber("/color_camera/camera_info",
						 CameraInfo,
						 self.process_camera_info,
						 queue_size=10)
		rospy.Subscriber("/point_cloud",
						 PointCloud,
						 self.process_point_cloud,
						 queue_size=10)
		rospy.Subscriber("/color_camera/image_raw/compressed",
						 CompressedImage,
						 self.process_image,
						 queue_size=10)
		self.clicked_point_pub = rospy.Publisher("/clicked_point",PointStamped,queue_size=10)
		self.camera_info = None
		self.P = None
		self.depth_image = None
		self.image = None
		self.last_image_timestamp = None
		self.click_timestamp = None
		self.depth_timestamp = None
		cv2.namedWindow("depth_feed")
		cv2.namedWindow("image_feed")
		cv2.namedWindow("combined_feed")
		cv2.setMouseCallback('image_feed',self.handle_click)
		cv2.setMouseCallback('combined_feed',self.handle_combined_click)
Code example #28
    def setUp(self):

        rospy.init_node('test_motion_execution_buffer')
        
        self.tf = TransformListener()        

        self.obj_pub = rospy.Publisher('collision_object',CollisionObject,latch=True)
        
        self.move_arm_action_client = actionlib.SimpleActionClient("move_right_arm", MoveArmAction)

        self.move_arm_action_client.wait_for_server()

        obj1 = CollisionObject()
    
        obj1.header.stamp = rospy.Time.now()
        obj1.header.frame_id = "base_link"
        obj1.id = "obj1";
        obj1.operation.operation = arm_navigation_msgs.msg.CollisionObjectOperation.ADD
        obj1.shapes = [Shape() for _ in range(1)]
        obj1.shapes[0].type = Shape.CYLINDER
        obj1.shapes[0].dimensions = [float() for _ in range(2)]
        obj1.shapes[0].dimensions[0] = .1
        obj1.shapes[0].dimensions[1] = 1.5
        obj1.poses = [Pose() for _ in range(1)]
        obj1.poses[0].position.x = .6
        obj1.poses[0].position.y = -.6
        obj1.poses[0].position.z = .75
        obj1.poses[0].orientation.x = 0
        obj1.poses[0].orientation.y = 0
        obj1.poses[0].orientation.z = 0
        obj1.poses[0].orientation.w = 1
        
        self.obj_pub.publish(obj1)

        rospy.sleep(2.0)
Code example #29
    def __init__(self, mode=None):
        self.task = 'feeding_quick'
        self.model = 'autobed' # options are 'chair' and 'autobed'
        self.mode = mode

        self.send_task_count = 0

        if self.model == 'autobed':
            self.bed_state_z = 0.
            self.bed_state_head_theta = 0.
            self.bed_state_leg_theta = 0.
            self.autobed_sub = rospy.Subscriber('/abdout0', FloatArrayBare, self.bed_state_cb)
            self.autobed_pub = rospy.Publisher('/abdin0', FloatArrayBare, latch=True)

            self.world_B_head = None
            self.world_B_ref_model = None
            self.world_B_robot = None

        self.tfl = TransformListener()

        if self.mode == 'manual' or True:
            self.base_pose = None
            self.object_pose = None
            self.raw_head_pose = None
            self.raw_base_pose = None
            self.raw_object_pose = None
            self.raw_head_sub = rospy.Subscriber('/head_frame', PoseStamped, self.head_frame_cb)
            self.raw_base_sub = rospy.Subscriber('/robot_frame', PoseStamped, self.robot_frame_cb)
            self.raw_object_sub = rospy.Subscriber('/reference', PoseStamped, self.reference_cb)
            # self.head_pub = rospy.Publisher('/head_frame', PoseStamped, latch=True)
            # self.base_pub = rospy.Publisher('/robot_frame', PoseStamped, latch=True)
            # self.object_pub = rospy.Publisher('/reference', PoseStamped, latch=True)
            self.base_goal_pub = rospy.Publisher('/base_goal', PoseStamped, latch=True)
            # self.robot_location_pub = rospy.Publisher('/robot_location', PoseStamped, latch=True)
            # self.navigation = NavigationHelper(robot='/robot_location', target='/base_goal')
        self.goal_data_pub = rospy.Publisher("ar_servo_goal_data", ARServoGoalData)
        self.servo_goal_pub = rospy.Publisher('servo_goal_pose', PoseStamped, latch=True)
        self.reach_goal_pub = rospy.Publisher("arm_reacher/goal_pose", PoseStamped)
        # self.test_pub = rospy.Publisher("test_goal_pose", PoseStamped, latch=True)
        # self.test_head_pub = rospy.Publisher("test_head_pose", PoseStamped, latch=True)
        self.feedback_pub = rospy.Publisher('wt_log_out', String)
        self.torso_lift_pub = rospy.Publisher('torso_controller/position_joint_action/goal',
                                              SingleJointPositionActionGoal, latch=True)

        self.base_selection_client = rospy.ServiceProxy("select_base_position", BaseMove_multi)

        self.reach_service = rospy.ServiceProxy("/base_selection/arm_reach_enable", None_Bool)

        self.ui_input_sub = rospy.Subscriber("action_location_goal", String, self.ui_cb)
        self.servo_fdbk_sub = rospy.Subscriber("/pr2_ar_servo/state_feedback", Int8, self.servo_fdbk_cb)

        self.lock = Lock()
        self.head_pose = None
        self.goal_pose = None
        self.marker_topic = None
        rospy.loginfo("[%s] Ready" %rospy.get_name())

        self.base_selection_complete = False

        self.send_task_count = 0
Code example #30
    def __init__(self):

        # Get information for this particular camera
        camera_info = get_single('head_camera/depth_registered/camera_info', CameraInfo)
        print camera_info

        # Set information for the camera
        self.cam_model = PinholeCameraModel()
        self.cam_model.fromCameraInfo(camera_info)

        # Subscribe to the camera's image topic and depth image topic
        self.rgb_image_sub = rospy.Subscriber("head_camera/rgb/image_raw",
                                              Image, self.on_rgb)
        self.depth_image_sub = rospy.Subscriber("head_camera/depth_registered/image_raw",
                                                Image, self.on_depth)

        # Publish where the button is in the base link frame
        self.point_pub = rospy.Publisher('button_point', PointStamped, queue_size=10)

        # Set up connection to CvBridge
        self.bridge = CvBridge()

        # Open up viewing windows
        cv2.namedWindow('depth')
        cv2.namedWindow('rgb')

        # Set up the class variables
        self.rgb_image = None
        self.rgb_image_time = None
        self.depth_image = None
        self.center = None
        self.return_point = PointStamped()
        self.tf_listener = TransformListener()
Code example #31
File: QRCalibrate.py Project: DeepBlue14/arm_wkspc
 def __init__(self):
     self.qrHypothesis = None
     self.endEffectorHypothesis = None
     self.tf = TransformListener()
     self.errorTrans = None
     self.mut = threading.Lock()
Code example #32
def listener():
    global transform_listener
    transform_listener = TransformListener()
    rospy.Subscriber("/leg_people", People, callbackPplPaths, queue_size=1)
    rospy.spin()
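callbackPplPaths itself is not shown; a hypothetical sketch of such a callback using the global listener (the "odom" target frame is an assumption; the message layout follows people_msgs/People):

from geometry_msgs.msg import PointStamped

def callbackPplPaths(msg):
    # Hypothetical: transform each tracked person into the odom frame
    for person in msg.people:
        ps = PointStamped()
        ps.header = msg.header
        ps.point = person.position
        try:
            transform_listener.waitForTransform("odom", msg.header.frame_id,
                                                msg.header.stamp, rospy.Duration(0.5))
            p_odom = transform_listener.transformPoint("odom", ps)
            rospy.loginfo("%s at (%.2f, %.2f)", person.name, p_odom.point.x, p_odom.point.y)
        except Exception as e:
            rospy.logwarn("TF failed: %s", e)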
Code example #33
File: TFUtils.py Project: fagan2888/berkeley_utils
 def transformPose(self, target_frame, ps):
     now = rospy.Time.now()
     self.waitForTransform(target_frame, ps.header.frame_id,
                           now, rospy.Duration(5.0))
     ps.header.stamp = now
     return TransformListener.transformPose(self, target_frame, ps)
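This method presumably lives on a TransformListener subclass; a minimal usage sketch under that assumption ("SafeListener" is an invented name for illustration):

class SafeListener(TransformListener):
    def transformPose(self, target_frame, ps):
        # wait for the transform, re-stamp the pose, then delegate
        now = rospy.Time.now()
        self.waitForTransform(target_frame, ps.header.frame_id,
                              now, rospy.Duration(5.0))
        ps.header.stamp = now
        return TransformListener.transformPose(self, target_frame, ps)

# usage (pose is any PoseStamped whose frame is currently being published):
#   listener = SafeListener()
#   pose_in_map = listener.transformPose('/map', pose)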
Code example #34
class ur5_grasp_project(object):
    def __init__(self, joint_values=None):
        rospy.init_node('command_GGCNN_ur5')
        self.joint_values_home = joint_values

        self.tf = TransformListener()

        # Used to change the controller
        self.controller_switch = rospy.ServiceProxy(
            '/controller_manager/switch_controller', SwitchController)

        # actionClient used to send joint positions
        self.client = actionlib.SimpleActionClient(
            'pos_based_pos_traj_controller/follow_joint_trajectory',
            FollowJointTrajectoryAction)
        print("Waiting for server (pos_based_pos_traj_controller)...")
        self.client.wait_for_server()
        print("Connected to server (pos_based_pos_traj_controller)")

        #################
        # GGCNN Related
        #################
        self.d = None
        self.ori_ = None
        self.grasp_cartesian_pose = None
        self.posCB = []
        self.ori = []
        self.gripper_angle_grasp = 0.0
        self.final_orientation = 0.0

        # These offsets are used only in the real robot and need to be calibrated
        self.GGCNN_offset_x = -0.03  # 0.002
        self.GGCNN_offset_y = 0.02  # -0.05
        self.GGCNN_offset_z = 0.058  # 0.013

        self.robotiq_joint_name = rospy.get_param("/robotiq_joint_name")

        # Topic published from GG-CNN Node
        rospy.Subscriber('ggcnn/out/command',
                         Float32MultiArray,
                         self.ggcnn_command_callback,
                         queue_size=1)

        ####################
        # Pipeline Related #
        ####################
        self.classes = rospy.get_param("/classes")
        self.grasp_ready_flag = False
        self.detected_tags = []
        self.tags = [
            'tag_0_corrected', 'tag_1_corrected', 'tag_2_corrected',
            'tag_3_corrected', 'tag_4_corrected', 'tag_5_corrected',
            'tag_6_corrected', 'tag_7_corrected'
        ]

        # These offset for the real camera
        self.tags_position_offset = [0.062, 0.0, 0.062]

        # Subscribers - Topics related to the new grasping pipeline
        rospy.Subscriber('flags/grasp_ready',
                         Bool,
                         self.grasp_ready_callback,
                         queue_size=1)  # Grasp flag
        rospy.Subscriber('flags/reposition_robot_flag',
                         Bool,
                         self.reposition_robot_callback,
                         queue_size=1)  # Reposition flag
        rospy.Subscriber('flags/detection_ready',
                         Bool,
                         self.detection_ready_callback,
                         queue_size=1)  # Detection flag
        rospy.Subscriber('reposition_coord',
                         Float32MultiArray,
                         self.reposition_coord_callback,
                         queue_size=1)
        rospy.Subscriber('/selective_grasping/tag_detections',
                         Int16MultiArray,
                         self.tags_callback,
                         queue_size=1)

        # Publishers
        # Publish the required class so the other nodes such as gg-cnn generates the grasp to the selected part
        self.required_class = rospy.Publisher('pipeline/required_class',
                                              Int8,
                                              queue_size=1)

    def turn_velocity_controller_on(self):
        self.controller_switch(['joint_group_vel_controller'],
                               ['pos_based_pos_traj_controller'], 1)

    def turn_position_controller_on(self):
        self.controller_switch(['pos_based_pos_traj_controller'],
                               ['joint_group_vel_controller'], 1)

    def turn_gripper_velocity_controller_on(self):
        self.controller_switch(['gripper_controller_vel'],
                               ['gripper_controller_pos'], 1)

    def turn_gripper_position_controller_on(self):
        self.controller_switch(['gripper_controller_pos'],
                               ['gripper_controller_vel'], 1)

    def tags_callback(self, msg):
        self.detected_tags = msg.data

    def grasp_ready_callback(self, msg):
        self.grasp_ready_flag = msg.data

    def detection_ready_callback(self, msg):
        self.detection_ready_flag = msg.data

    def reposition_robot_callback(self, msg):
        self.reposition_robot_flag = msg.data

    def reposition_coord_callback(self, msg):
        self.reposition_coords = msg.data

    def ur5_actual_position_callback(self, joint_values_from_ur5):
        """Get UR5 joint angles
		
		The joint states published by /joint_states of the UR5 robot are in the wrong order.
		/joint_states topic normally publishes the joint in the following order:
		[elbow_joint, shoulder_lift_joint, shoulder_pan_joint, wrist_1_joint, wrist_2_joint, wrist_3_joint]
		But the correct order of the joints that must be sent to the robot is:
		['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint', 'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
		
		Arguments:
			joint_values_from_ur5 {list} -- Actual angles of the UR5 Robot
		"""
        self.th3, self.th2, self.th1, self.th4, self.th5, self.th6 = joint_values_from_ur5.position
        self.actual_position = [
            self.th1, self.th2, self.th3, self.th4, self.th5, self.th6
        ]

    def genCommand(self, char, command, pos=None):
        """
		Update the command according to the character entered by the user.
		"""
        if char == 'a':
            # command = outputMsg.Robotiq2FGripper_robot_output();
            command.rACT = 1  # Gripper activation
            command.rGTO = 1  # Go to position request
            command.rSP = 255  # Speed
            command.rFR = 150  # Force

        if char == 'r':
            command.rACT = 0

        if char == 'c':
            command.rACT = 1
            command.rGTO = 1
            command.rATR = 0
            command.rPR = 255
            command.rSP = 40
            command.rFR = 150

        # @param pos Gripper width in meters. [0, 0.087]
        if char == 'p':
            command.rACT = 1
            command.rGTO = 1
            command.rATR = 0
            command.rPR = int(
                np.clip(
                    (13. - 230.) / self.gripper_max_width * self.ori + 230., 0,
                    255))
            command.rSP = 40
            command.rFR = 150

        if char == 'o':
            command.rACT = 1
            command.rGTO = 1
            command.rATR = 0
            command.rPR = 0
            command.rSP = 40
            command.rFR = 150

        return command

    def command_gripper(self, action):
        command = outputMsg.Robotiq2FGripper_robot_output()
        command = self.genCommand(action, command)
        self.pub_gripper_command.publish(command)

    def ggcnn_command_callback(self, msg):
        """
		GGCNN Command Subscriber Callback
		"""
        self.tf.waitForTransform("base_link", "object_detected", rospy.Time(0),
                                 rospy.Duration(4.0))  # rospy.Time.now()
        object_pose, object_ori = self.tf.lookupTransform(
            "base_link", "object_detected", rospy.Time(0))
        self.d = list(msg.data)
        object_pose[0] += self.GGCNN_offset_x
        object_pose[1] += self.GGCNN_offset_y
        object_pose[2] += self.GGCNN_offset_z

        self.posCB = object_pose

        self.ori = self.d[3]
        br = TransformBroadcaster()
        br.sendTransform((object_pose[0], object_pose[1], object_pose[2]),
                         quaternion_from_euler(0.0, 0.0, self.ori),
                         rospy.Time.now(), "object_link", "base_link")

    def all_close(self, goal, tolerance=0.00005):
        """Wait until goal is reached in configuration space
		
		This method checks if the robot reached the goal position, since wait_for_result seems to be broken
		Arguments:
			goal {[list]} -- Goal in configuration space (joint values)
		
		Keyword Arguments:
			tolerance {number} -- Minimum error allowed to consider the trajectory completed (default: {0.00005})
		"""

        error = np.sum([(self.actual_position[i] - goal[i])**2
                        for i in range(6)])
        print("> Waiting for the trajectory to finish...")
        while not rospy.is_shutdown() and error > tolerance:
            error = np.sum([(self.actual_position[i] - goal[i])**2
                            for i in range(6)])
            rospy.sleep(0.01)  # yield the CPU instead of busy-waiting
        if error < tolerance:
            print(
                "> Trajectory Suceeded!\n")  # whithin the tolerance specified
        else:
            rospy.logerr("> Trajectory aborted.")

    def traj_planner(self,
                     grasp_position,
                     grasp_step='move',
                     way_points_number=10,
                     movement='slow'):
        """Quintic Trajectory Planner
	
		Publish a trajectory to UR5 using quintic splines. 

		Arguments:
			grasp_position {[float]} -- Grasp position [x, y, z]

		Keyword Arguments:
			grasp_step {str} -- Set UR5 movement type (default: {'move'})
			way_points_number {number} -- Number of points considered in trajectory (default: {10})
			movement {str} -- Movement speed (default: {'slow'})
		"""
        if grasp_step == 'pregrasp':
            # we need to take a 'screenshot' of the variables at this specific time
            self.grasp_cartesian_pose = deepcopy(self.posCB)
            self.ori_ = deepcopy(self.ori)

        grasp_cartesian_pose = self.grasp_cartesian_pose
        posCB = self.posCB
        ori = self.ori_
        actual_position = self.actual_position
        d = self.d

        status, goal, joint_pos = IK_TRAJ.traj_planner(
            grasp_position, grasp_step, way_points_number, movement,
            grasp_cartesian_pose, ori, d, actual_position)
        if status:
            self.client.send_goal(goal)
            self.all_close(joint_pos)
        else:
            print("Could not retrieve any IK solution")

    def move_on_shutdown(self):
        self.client.cancel_goal()
        self.client_gripper.cancel_goal()
        print("Shutting down node...")

    def grasp_main(self, point_init_home, depth_shot_point):
        random_classes = [i for i in range(len(self.classes))]

        bin_location = [[-0.65, -0.1, 0.2], [-0.65, 0.1, 0.2]]
        garbage_location = [-0.4, -0.30, 0.10]

        raw_input('=== Press enter to start the grasping process')
        while not rospy.is_shutdown():
            # The grasp is performed randomly and the chosen object is published to
            # the detection node
            if not len(random_classes):
                print(
                    "\n> IMPORTANT! There is no object remaining in the object's picking list. Resetting the objects list...\n"
                )
                random_classes = [i for i in range(len(self.classes))]

            random_object_id = random.sample(random_classes, 1)[0]
            random_classes.pop(random_classes.index(random_object_id))
            print('> Object to pick: ', self.classes[random_object_id])

            # Publish the required class ID so the other nodes such as gg-cnn
            # generates the grasp to the selected part
            self.required_class.publish(random_object_id)

            raw_input("==== Press enter to start the grasping process!")
            if self.detection_ready_flag:
                # Check if the robot needs to be repositioned to reach the object. This flag is published
                # By the detection node. The offset coordinates (self.reposition_coords) are also published
                # by the detection node
                if self.reposition_robot_flag:
                    print('> Repositioning robot...')
                    self.tf.waitForTransform("base_link", "grasping_link",
                                             rospy.Time.now(),
                                             rospy.Duration(4.0))
                    eef_pose, _ = self.tf.lookupTransform(
                        "base_link", "grasping_link", rospy.Time(0))

                    corrected_position = [
                        eef_pose[0] - self.reposition_coords[1],
                        eef_pose[1] + self.reposition_coords[0], eef_pose[2]
                    ]

                    self.traj_planner(corrected_position, movement='fast')
                    raw_input("==== Press enter when the trajectory finishes!")

                # Check if:
                # - The run_ggcn is ready (through grasp_ready_flag);
                # - The detection node is ready (through detection_ready_flag);
                # - The robot does not need to be repositioned (through reposition_robot_flag)
                if self.grasp_ready_flag and self.detection_ready_flag and not self.reposition_robot_flag:
                    raw_input("Move to the pre grasp position")
                    print('> Moving to the grasping position... \n')
                    self.traj_planner([], 'pregrasp', movement='fast')

                    raw_input("Move the gripper")
                    # It closes the gripper a little before approaching the object
                    # to prevent colliding with other objects
                    self.command_gripper('p')

                    raw_input("Move to the grasp position")
                    # Moving the robot to pick the object - BE CAREFUL!
                    self.traj_planner([], 'grasp', movement='slow')

                    raw_input("==== Press enter to close the gripper!")
                    print("Picking object...")
                    self.command_gripper('c')

                    # After a collision is detected, the arm will start the picking action
                    # Different locations were adopted because the camera field of view is not big enough
                    # to detect all the april tags in the environment
                    raw_input("==== Press enter to move the object to the bin")
                    if random_object_id < 5:
                        self.traj_planner(bin_location[0], movement='fast')
                    else:
                        self.traj_planner(bin_location[1], movement='fast')

                    rospy.sleep(2.0)  # waiting for the tag detections

                    selected_tag_status = self.detected_tags[random_object_id]
                    print('selected tag: ', self.tags[random_object_id])
                    # Check if the tag corresponding to the object ID was identified in order to
                    # move the object there
                    if selected_tag_status:
                        self.tf.waitForTransform(
                            "base_link", self.tags[random_object_id],
                            rospy.Time(0),
                            rospy.Duration(2.0))  # rospy.Time.now()
                        ptFinal, oriFinal = self.tf.lookupTransform(
                            "base_link", self.tags[random_object_id],
                            rospy.Time(0))
                        ptFinal = [
                            ptFinal[i] + self.tags_position_offset[i]
                            for i in range(3)
                        ]
                        raw_input("==== Move to the tag")
                        pt_inter = deepcopy(ptFinal)
                        # pt_inter adds 0.1m to the Z coordinate of ptFinal in order to
                        # approach the box before leaving the object there
                        pt_inter[-1] += 0.1
                        self.traj_planner(pt_inter, movement='fast')
                        self.traj_planner(ptFinal, movement='fast')
                    else:
                        # In this case, the camera identifies some other tag but not the tag corresponding to the object
                        print(
                            '> Could not find the box tag. Placing the object anywhere \n'
                        )
                        raw_input('=== Press enter to proceed')
                        self.traj_planner(garbage_location, movement='fast')

                    print("Placing object...")
                    # After the bin location is reached, the robot will place the object and move back
                    # to the initial position
                    self.command_gripper('o')

                    print("> Moving back to home position...\n")
                    self.traj_planner(point_init_home, movement='fast')
                    self.traj_planner(depth_shot_point, movement='slow')
                    print('> Grasping finished \n')
                else:
                    print('> The requested object could not be grasped! \n')
                    self.traj_planner(depth_shot_point, movement='fast')

            else:
                print('> The requested object was not detected! \n')
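
The quintic interpolation itself is delegated to IK_TRAJ.traj_planner, which is not shown here. For reference, the standard construction behind such planners is a quintic time-scaling polynomial with zero boundary velocity and acceleration; a minimal sketch (illustrative only, not the module's actual code):

import numpy as np

def quintic_coeffs(q0, qf, T):
    # q(t) = a0 + a1*t + ... + a5*t^5 with q(0)=q0, q(T)=qf and
    # zero velocity and acceleration at both ends.
    d = qf - q0
    return [q0, 0.0, 0.0, 10.0 * d / T**3, -15.0 * d / T**4, 6.0 * d / T**5]

def sample(coeffs, t):
    return sum(c * t**i for i, c in enumerate(coeffs))

# e.g. 10 way-points between two joint values over 2 seconds
coeffs = quintic_coeffs(0.0, 1.57, 2.0)
way_points = [sample(coeffs, t) for t in np.linspace(0.0, 2.0, 10)]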
Code example #35
0
    def setUp(self):

        self.tf = TransformListener()

        self.move_arm_action_client = actionlib.SimpleActionClient(
            "move_right_arm", MoveArmAction)

        att_pub = rospy.Publisher('attached_collision_object',
                                  AttachedCollisionObject)
        obj_pub = rospy.Publisher('collision_object', CollisionObject)

        rospy.init_node('test_motion_execution_buffer')

        #let everything settle down
        rospy.sleep(5.)

        obj1 = CollisionObject()

        obj1.header.stamp = rospy.Time.now() - rospy.Duration(.1)
        obj1.header.frame_id = "base_footprint"
        obj1.id = "obj2"
        obj1.operation.operation = mapping_msgs.msg.CollisionObjectOperation.ADD
        obj1.shapes = [Shape() for _ in range(3)]
        obj1.poses = [Pose() for _ in range(3)]

        obj1.shapes[0].type = Shape.BOX
        obj1.shapes[0].dimensions = [float() for _ in range(3)]
        obj1.shapes[0].dimensions[0] = .5
        obj1.shapes[0].dimensions[1] = 1.0
        obj1.shapes[0].dimensions[2] = .2
        obj1.poses[0].position.x = .95
        obj1.poses[0].position.y = -.25
        obj1.poses[0].position.z = .62
        obj1.poses[0].orientation.x = 0
        obj1.poses[0].orientation.y = 0
        obj1.poses[0].orientation.z = 0
        obj1.poses[0].orientation.w = 1

        obj1.shapes[2].type = Shape.BOX
        obj1.shapes[2].dimensions = [float() for _ in range(3)]
        obj1.shapes[2].dimensions[0] = .5
        obj1.shapes[2].dimensions[1] = .1
        obj1.shapes[2].dimensions[2] = 1.0
        obj1.poses[2].position.x = .95
        obj1.poses[2].position.y = -.14
        obj1.poses[2].position.z = 1.2
        obj1.poses[2].orientation.x = 0
        obj1.poses[2].orientation.y = 0
        obj1.poses[2].orientation.z = 0
        obj1.poses[2].orientation.w = 1

        obj1.shapes[1].type = Shape.BOX
        obj1.shapes[1].dimensions = [float() for _ in range(3)]
        obj1.shapes[1].dimensions[0] = .5
        obj1.shapes[1].dimensions[1] = .1
        obj1.shapes[1].dimensions[2] = 1.0
        obj1.poses[1].position.x = .95
        obj1.poses[1].position.y = .12
        obj1.poses[1].position.z = 1.2
        obj1.poses[1].orientation.x = 0
        obj1.poses[1].orientation.y = 0
        obj1.poses[1].orientation.z = 0
        obj1.poses[1].orientation.w = 1

        obj_pub.publish(obj1)

        att_object = AttachedCollisionObject()
        att_object.link_name = "r_gripper_r_finger_tip_link"
        att_object.object = CollisionObject()

        # Note: assign the CollisionObject *before* filling its fields; the
        # original set the header and operation first and then overwrote them
        # with a fresh CollisionObject(), losing the ADD operation.
        att_object.object.header.stamp = rospy.Time.now()
        att_object.object.header.frame_id = "r_gripper_r_finger_tip_link"
        att_object.object.operation.operation = mapping_msgs.msg.CollisionObjectOperation.ADD
        att_object.object.id = "pole"
        att_object.object.shapes = [Shape() for _ in range(1)]
        att_object.object.shapes[0].type = Shape.CYLINDER
        att_object.object.shapes[0].dimensions = [float() for _ in range(2)]
        att_object.object.shapes[0].dimensions[0] = .02
        att_object.object.shapes[0].dimensions[1] = .1
        att_object.object.poses = [Pose() for _ in range(1)]
        att_object.object.poses[0].position.x = -.02
        att_object.object.poses[0].position.y = .04
        att_object.object.poses[0].position.z = 0
        att_object.object.poses[0].orientation.x = 0
        att_object.object.poses[0].orientation.y = 0
        att_object.object.poses[0].orientation.z = 0
        att_object.object.poses[0].orientation.w = 1

        att_pub.publish(att_object)

        rospy.sleep(5.0)
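
The three box definitions above differ only in their dimensions and positions; a small helper (hypothetical, not part of the original test) would remove the repetition:

def make_box(dims, xyz):
    # Build one BOX shape/pose pair for a CollisionObject.
    shape = Shape()
    shape.type = Shape.BOX
    shape.dimensions = list(dims)
    pose = Pose()
    pose.position.x, pose.position.y, pose.position.z = xyz
    pose.orientation.w = 1
    return shape, pose

# e.g. the first box above:
# obj1.shapes[0], obj1.poses[0] = make_box((.5, 1.0, .2), (.95, -.25, .62))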
Code example #36
0
File: head.py  Project: patlican23/robot_eup
class Head:

        # Supported gaze actions:
        # 'LOOK_FORWARD', 'FOLLOW_RIGHT_EE', 'FOLLOW_LEFT_EE',
        # 'GLANCE_RIGHT_EE', 'GLANCE_LEFT_EE', 'NOD', 'SHAKE',
        # 'FOLLOW_FACE', 'LOOK_AT_POINT', 'LOOK_DOWN',
        # 'NOD_ONCE', 'SHAKE_ONCE'

    def __init__(self):
        self.defaultLookatPoint = Point(1,0,1.35)
        self.downLookatPoint = Point(0.5,0,0.5)
        self.targetLookatPoint = Point(1,0,1.35)
        self.currentLookatPoint = Point(1,0,1.35)

        self.currentGazeAction = 'LOOK_FORWARD'
        self.prevGazeAction = self.currentGazeAction
        # Keep this a Point, like every other lookat target; the original
        # wrapped it in array(), which breaks point2array() later on.
        self.prevTargetLookatPoint = self.defaultLookatPoint

        # Some constants
        self.doNotInterrupt = ['GLANCE_RIGHT_EE', 'GLANCE_LEFT_EE', 'NOD', 'SHAKE']
        self.nodPositions = [Point(1,0,1.05), Point(1,0,1.55)]
        self.shakePositions = [Point(1,0.2,1.35), Point(1,-0.2,1.35)]
        self.nNods = 5
        self.nShakes = 5

        self.nodCounter = 5
        self.shakeCounter = 5
        self.facePos = None

        ## Action client for sending commands to the head.
        self.headActionClient = SimpleActionClient('/head_traj_controller/point_head_action', PointHeadAction)
        self.headActionClient.wait_for_server()
        rospy.loginfo('Head action client has responded.')

        self.headGoal = PointHeadGoal()
        self.headGoal.target.header.frame_id = 'base_link'
        self.headGoal.min_duration = rospy.Duration(1.0)
        self.headGoal.target.point = Point(1,0,1)

        ## Client for receiving detected faces
        #self.faceClient = SimpleActionClient('face_detector_action', FaceDetectorAction)

        ## Service client for arm states
        self.tfListener = TransformListener()


    ## Callback function to receive ee states and face location
    def getEEPos(self, armIndex):

        fromFrame = '/base_link'
        if (armIndex == 0):
            toFrame = '/r_wrist_roll_link'
        else:
            toFrame = '/l_wrist_roll_link'

        try:
            t = self.tfListener.getLatestCommonTime(fromFrame, toFrame)
            (position, rotation) = self.tfListener.lookupTransform(fromFrame, toFrame, t)
        except:
            rospy.logerr('Could not get the end-effector pose.')
            return self.defaultLookatPoint  # avoid referencing 'position' after a failed lookup
        #objPoseStamped = PoseStamped()
        #objPoseStamped.header.stamp = t
        #objPoseStamped.header.frame_id = '/base_link'
        #objPoseStamped.pose = Pose()
        #relEEPose = self.tfListener.transformPose(toFrame, objPoseStamped)
        return Point(position[0], position[1], position[2])

    def getFaceLocation(self):
        connected = self.faceClient.wait_for_server(rospy.Duration(1.0))
        if connected:
            fgoal = FaceDetectorGoal()
            self.faceClient.send_goal(fgoal)
            self.faceClient.wait_for_result()
            f = self.faceClient.get_result()
            ## If there is one face follow that one, if there are more than one, follow the closest one
            closest = -1
            if len(f.face_positions) == 1:
                closest = 0
            elif len(f.face_positions) > 0:
                closest_dist = 1000

            for i in range(len(f.face_positions)):
                dist = f.face_positions[i].pos.x*f.face_positions[i].pos.x + f.face_positions[i].pos.y*f.face_positions[i].pos.y + f.face_positions[i].pos.z*f.face_positions[i].pos.z
                if dist < closest_dist:
                    closest = i
                    closest_dist = dist

            if closest > -1:
                self.facePos = array([f.face_positions[closest].pos.x, f.face_positions[closest].pos.y, f.face_positions[closest].pos.z])
            else:
                rospy.logwarn('No faces were detected.')
                self.facePos = self.defaultLookatPoint
        else:
            rospy.logwarn('Not connected to the face server, cannot follow faces.')
            self.facePos = self.defaultLookatPoint

    ## Callback function for receiving gaze commands
    def do_gaze_action(self, goal):
        # Take the command from the action goal; 'goal.point' is needed below
        # for LOOK_AT_POINT, so accepting only the command string would leave
        # 'goal' undefined there.
        command = goal.action
        if (self.doNotInterrupt.count(self.currentGazeAction) == 0):
            if (self.currentGazeAction != command or command == 'LOOK_AT_POINT'):
                self.isActionComplete = False
                if (command == 'LOOK_FORWARD'):
                    self.targetLookatPoint = self.defaultLookatPoint
                elif (command == 'LOOK_DOWN'):
                    self.targetLookatPoint = self.downLookatPoint
                elif (command == 'NOD'):
                    self.nNods = 5
                    self.startNod()
                elif (command == 'SHAKE'):
                    self.nShakes = 5
                    self.startShake()
                elif (command == 'NOD_ONCE'):
                    self.nNods = 1  # presumably a single nod; the original set 5
                    self.startNod()
                elif (command == 'SHAKE_ONCE'):
                    self.nShakes = 1  # presumably a single shake; the original set 5
                    self.startShake()
                elif (command == 'GLANCE_RIGHT_EE'):
                    self.startGlance(0)
                elif (command == 'GLANCE_LEFT_EE'):
                    self.startGlance(1)
                elif (command == 'LOOK_AT_POINT'):
                    self.targetLookatPoint = goal.point
                rospy.loginfo('\tSetting gaze action to: ' +
                        command)
                self.currentGazeAction = command

                while (not self.isActionComplete):
                    time.sleep(0.1)

    def isTheSame(self, current, target):
        diff = target - current
        dist = linalg.norm(diff)
        return (dist<0.0001)

    def filterLookatPosition(self, current, target):
        speed = 0.02
        diff = self.point2array(target) - self.point2array(current)
        dist = linalg.norm(diff)
        if (dist>speed):
            step = dist/speed
            return self.array2point(self.point2array(current) + diff/step)
        else:
            return target

    def startNod(self):
        self.prevTargetLookatPoint = self.targetLookatPoint
        self.prevGazeAction = str(self.currentGazeAction)
        self.nodCounter = 0
        self.targetLookatPoint = self.nodPositions[0]

    def startGlance(self, armIndex):
        self.prevTargetLookatPoint = self.targetLookatPoint
        self.prevGazeAction = str(self.currentGazeAction)
        self.glanceCounter = 0
        self.targetLookatPoint = self.getEEPos(armIndex)

    def startShake(self):
        self.prevTargetLookatPoint = self.targetLookatPoint
        self.prevGazeAction = str(self.currentGazeAction)
        self.shakeCounter = 0
        self.targetLookatPoint = self.shakePositions[0]

    def point2array(self, p):
        return array((p.x, p.y, p.z))

    def array2point(self, a):
        return Point(a[0], a[1], a[2])

    def getNextNodPoint(self, current, target):
        if (self.isTheSame(self.point2array(current), self.point2array(target))):
            self.nodCounter += 1
            if (self.nodCounter == self.nNods):
                self.currentGazeAction = self.prevGazeAction
                return self.prevTargetLookatPoint
            else:
                return self.nodPositions[self.nodCounter%2]
        else:
            return target

    def getNextGlancePoint(self, current, target):
        if (self.isTheSame(self.point2array(current), self.point2array(target))):
            self.glanceCounter = 1
            self.currentGazeAction = self.prevGazeAction
            return self.prevTargetLookatPoint
        else:
            return target

    def getNextShakePoint(self, current, target):
        if (self.isTheSame(self.point2array(current), self.point2array(target))):
            self.shakeCounter += 1
            if (self.shakeCounter == self.nShakes):
                self.currentGazeAction = self.prevGazeAction
                return self.prevTargetLookatPoint
            else:
                return self.shakePositions[self.shakeCounter%2]
        else:
            return target

    def update(self):

        isActionPossiblyComplete = True
        if (self.currentGazeAction == 'FOLLOW_RIGHT_EE'):
            self.targetLookatPoint = self.getEEPos(0)

        elif (self.currentGazeAction == 'FOLLOW_LEFT_EE'):
            self.targetLookatPoint = self.getEEPos(1)

        elif (self.currentGazeAction == 'FOLLOW_FACE'):
            self.getFaceLocation()
            self.targetLookatPoint = self.facePos

        elif (self.currentGazeAction == 'NOD'):
            self.targetLookatPoint = self.getNextNodPoint(self.currentLookatPoint, self.targetLookatPoint)
            self.headGoal.min_duration = rospy.Duration(0.5)
            isActionPossiblyComplete = False

        elif (self.currentGazeAction == 'SHAKE'):
            self.targetLookatPoint = self.getNextShakePoint(self.currentLookatPoint, self.targetLookatPoint)
            self.headGoal.min_duration = rospy.Duration(0.5)
            isActionPossiblyComplete = False

        elif (self.currentGazeAction == 'GLANCE_RIGHT_EE' or self.currentGazeAction == 'GLANCE_LEFT_EE'):
            self.targetLookatPoint = self.getNextGlancePoint(self.currentLookatPoint, self.targetLookatPoint)
            isActionPossiblyComplete = False

        self.currentLookatPoint = self.filterLookatPosition(self.currentLookatPoint, self.targetLookatPoint)
        if (self.isTheSame(self.point2array(self.headGoal.target.point), self.point2array(self.currentLookatPoint))):
            if (isActionPossiblyComplete):
                if (self.headActionClient.get_state() == GoalStatus.SUCCEEDED):
                    self.isActionComplete = True
        else:
            self.headGoal.target.point = self.currentLookatPoint
            self.headActionClient.send_goal(self.headGoal)

        time.sleep(0.02)
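
update() sends at most one head goal per call and itself sleeps 20 ms, so it is meant to be driven from an external loop. A minimal driver (an assumption; the node code around this class is not shown) could be:

rospy.init_node('head_gaze_node')
head = Head()
while not rospy.is_shutdown():
    head.update()  # update() already sleeps 0.02 s per iteration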
Code example #37
0
File: tensioner.py  Project: qingchenkanlu/IL_ROS_HSR
class Tensioner(object):

    def __init__(self):
        self.torque = Gripper_Torque()
        self.tl = TransformListener()


    def get_translation(self,direction):
        pose = self.tl.lookupTransform(direction,'hand_palm_link',rospy.Time(0))
        trans = pose[0]
        return trans


    def get_rotation(self,direction):
        pose = self.tl.lookupTransform(direction,'hand_palm_link',rospy.Time(0))
        rot = pose[1]
        return tf.transformations.quaternion_matrix(rot)


    def compute_bed_tension(self, wrench, direction):
        x = wrench.wrench.force.x
        y = wrench.wrench.force.y
        z = wrench.wrench.force.z 
        force = np.array([x,y,z,1.0])
        rot = self.get_rotation(direction)
        force_perp = np.dot(rot,force)
        print("\nInside p_pi/bed_making/tensioner.py, `compute_bed_tension()`:")
        print("CURRENT FORCES: {}".format(force))
        print("FORCE PERP: {}".format(force_perp))
        return force_perp[1]


    def force_pull(self, whole_body, direction):
        """Pull to the target `direction`, hopefully with the sheet!
        
        I think this splits it into several motions, at most `cfg.MAX_PULLS`, so
        this explains the occasional pauses with the HSR's motion. This is *per*
        grasp+pull attempt, so it's different from the max attempts per side,
        which I empirically set at 4. Actually he set his max pulls to 3, but
        because of the delta <= 1.0 (including equality) this is 4 also.

        Move the end-effector, then quickly read the torque data and look at the
        wrench. The end-effector moves according to a _fraction_ that is
        specified by `s = 1-delta`, so I _hope_ if there's too much tension
        after that tiny bit of motion, that we'll exit. But maybe the
        discretization into four smaller pulls is not fine enough?
        """
        count = 0
        is_pulling = False
        t_o = self.get_translation(direction)
        delta = 0.0
        while delta <= 1.0:
            s = 1.0 - delta
            whole_body.move_end_effector_pose(
                    geometry.pose(x=t_o[0]*s, y=t_o[1]*s, z=t_o[2]*s),
                    direction
            )
            wrench = self.torque.read_data()
            norm = np.abs(self.compute_bed_tension(wrench,direction))
            print("FORCE NORM: {}".format(norm))
            if norm > cfg.HIGH_FORCE:
                is_pulling = True
            if is_pulling and norm < cfg.LOW_FORCE:
                break
            if norm > cfg.FORCE_LIMT:
                break
            delta += 1.0/float(cfg.MAX_PULLS)
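
Usage is a single call per grasp+pull attempt, where direction names the tf frame the hand is pulled towards (an illustrative call; whole_body comes from the HSR Python interface and the frame name here is hypothetical):

tensioner = Tensioner()
tensioner.force_pull(whole_body, 'pull_target_frame')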
Code example #38
0
class TFHelper(object):
    """ TFHelper Provides functionality to convert poses between various
        forms, compare angles in a suitable way, and publish needed
        transforms to ROS """
    def __init__(self):
        self.tf_listener = TransformListener()
        self.tf_broadcaster = TransformBroadcaster()

    def convert_translation_rotation_to_pose(self, translation, rotation):
        """ Convert from representation of a pose as translation and rotation
            (Quaternion) tuples to a geometry_msgs/Pose message """
        return Pose(position=Point(x=translation[0],
                                   y=translation[1],
                                   z=translation[2]),
                    orientation=Quaternion(x=rotation[0],
                                           y=rotation[1],
                                           z=rotation[2],
                                           w=rotation[3]))

    def convert_pose_inverse_transform(self, pose):
        """ This is a helper method to invert a transform (this is built into
            the tf C++ classes, but ommitted from Python) """
        transform = t.concatenate_matrices(
            t.translation_matrix(
                [pose.position.x, pose.position.y, pose.position.z]),
            t.quaternion_matrix([
                pose.orientation.x, pose.orientation.y, pose.orientation.z,
                pose.orientation.w
            ]))
        inverse_transform_matrix = t.inverse_matrix(transform)
        return (t.translation_from_matrix(inverse_transform_matrix),
                t.quaternion_from_matrix(inverse_transform_matrix))

    def convert_pose_to_xy_and_theta(self, pose):
        """ Convert pose (geometry_msgs.Pose) to a (x,y,yaw) tuple """
        orientation_tuple = (pose.orientation.x, pose.orientation.y,
                             pose.orientation.z, pose.orientation.w)
        angles = t.euler_from_quaternion(orientation_tuple)
        return (pose.position.x, pose.position.y, angles[2])

    def covert_xy_and_theta_to_pose(self, x, y, theta):
        """ A helper function to convert a x, y, and theta values to a pose """
        orientation_tuple = t.quaternion_from_euler(0, 0, theta)
        return Pose(position=Point(x=x, y=y, z=0),
                    orientation=Quaternion(x=orientation_tuple[0],
                                           y=orientation_tuple[1],
                                           z=orientation_tuple[2],
                                           w=orientation_tuple[3]))

    def angle_normalize(self, z):
        """ convenience function to map an angle to the range [-pi,pi] """
        return math.atan2(math.sin(z), math.cos(z))

    def angle_diff(self, a, b):
        """ Calculates the difference between angle a and angle b (both should
            be in radians) the difference is always based on the closest
            rotation from angle a to angle b.
            examples:
                angle_diff(.1,.2) -> -.1
                angle_diff(.1, 2*math.pi - .1) -> .2
                angle_diff(.1, .2+2*math.pi) -> -.1
        """
        a = self.angle_normalize(a)
        b = self.angle_normalize(b)
        d1 = a - b
        d2 = 2 * math.pi - math.fabs(d1)
        if d1 > 0:
            d2 *= -1.0
        if math.fabs(d1) < math.fabs(d2):
            return d1
        else:
            return d2

    def loop_around(self, num):
        return abs(num % 360)

    def fix_map_to_odom_transform(self, robot_pose, timestamp):
        """ This method constantly updates the offset of the map and
            odometry coordinate systems based on the latest results from
            the localizer.

            robot_pose should be of type geometry_msgs/Pose and timestamp is of
            type rospy.Time and represents the time at which the robot's pose
            corresponds.
            """
        (translation, rotation) = \
            self.convert_pose_inverse_transform(robot_pose)
        p = PoseStamped(pose=self.convert_translation_rotation_to_pose(
            translation, rotation),
                        header=Header(stamp=timestamp, frame_id='base_link'))
        self.tf_listener.waitForTransform('base_link', 'odom', timestamp,
                                          rospy.Duration(1.0))
        self.odom_to_map = self.tf_listener.transformPose('odom', p)
        (self.translation, self.rotation) = \
            self.convert_pose_inverse_transform(self.odom_to_map.pose)

    def send_last_map_to_odom_transform(self):
        if not (hasattr(self, 'translation') and hasattr(self, 'rotation')):
            return
        self.tf_broadcaster.sendTransform(self.translation, self.rotation,
                                          rospy.get_rostime(), 'odom', 'map')
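
A short round trip shows how the pose helpers compose (a sketch, assuming rospy.init_node has already been called):

helper = TFHelper()
pose = helper.covert_xy_and_theta_to_pose(1.0, 2.0, 0.5)  # method name as defined above
x, y, theta = helper.convert_pose_to_xy_and_theta(pose)   # -> (1.0, 2.0, 0.5)
print(helper.angle_diff(.1, 2 * math.pi - .1))            # -> 0.2, the closest rotation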
Code example #39
0
        for obj_index in range(obj_list.size):
            center_x = obj_list.list[obj_index].position.x
            center_y = obj_list.list[obj_index].position.y
            center_z = obj_list.list[obj_index].position.z
            center  = np.array([center_x, center_y, center_z, 1])
            new_center = np.dot(transpose_matrix, center)
            obj_list.list[obj_index].position.x = new_center[0]
            obj_list.list[obj_index].position.y = new_center[1]
            obj_list.list[obj_index].position.z = new_center[2]
        obj_list.header.frame_id = "/odom"
        pub_obj.publish(obj_list)

    except (LookupException, ConnectivityException, ExtrapolationException):
        print "tf lookup failed; skipping this object list"

if __name__ == "__main__":
    # Tell ROS that we're making a new node.
    rospy.init_node("object_map",anonymous=False)
    tf_ = TransformListener()
    transformer = TransformerROS()
    sub_classify = rospy.get_param('~classify', False)
    if sub_classify:
        sub_topic = "/obj_list/classify"
    else:
        sub_topic = "/obj_list"
    print "Object map subscribe from: ", sub_topic
    rospy.Subscriber(sub_topic, ObjectPoseList, call_back, queue_size=10)
    #rospy.Subscriber("/waypointList", WaypointList, call_back, queue_size=10)
    pub_obj = rospy.Publisher("/obj_list/odom", ObjectPoseList, queue_size=1)
    #pub_rviz = rospy.Publisher("/wp_path", Marker, queue_size = 1)
    rospy.spin()
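
The snippet above starts mid-callback, so the construction of transpose_matrix is not visible. It is presumably built from a tf lookup, in the same pattern as get_tf() in code example #46 below, using the module-level tf_ listener and transformer from the __main__ block (a sketch; the source frame name is a guess):

def get_transpose_matrix():
    # Presumed origin of the 4x4 matrix applied to each object centre above.
    position, quaternion = tf_.lookupTransform("/odom", "/velodyne", rospy.Time(0))
    return transformer.fromTranslationRotation(position, quaternion)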
Code example #40
0
File: TFUtils.py  Project: fagan2888/berkeley_utils
 def transformQuaternion(self, target_frame, quat):
     now = rospy.Time.now()
     self.waitForTransform(target_frame, quat.header.frame_id,
                           now, rospy.Duration(5.0))  # wait on the same stamp we assign below
     quat.header.stamp = now
     return TransformListener.transformQuaternion(self, target_frame, quat)
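
The same re-stamping pattern extends to the other transform helpers of the subclass; a companion transformPoint override under the same assumptions would be:

 def transformPoint(self, target_frame, point):
     now = rospy.Time.now()
     self.waitForTransform(target_frame, point.header.frame_id,
                           now, rospy.Duration(5.0))
     point.header.stamp = now
     return TransformListener.transformPoint(self, target_frame, point)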
Code example #41
0
    def __init__(self, joint_values=None):
        rospy.init_node('command_GGCNN_ur5')
        self.joint_values_home = joint_values

        self.tf = TransformListener()

        # Used to change the controller
        self.controller_switch = rospy.ServiceProxy(
            '/controller_manager/switch_controller', SwitchController)

        # actionClient used to send joint positions
        self.client = actionlib.SimpleActionClient(
            'pos_based_pos_traj_controller/follow_joint_trajectory',
            FollowJointTrajectoryAction)
        print("Waiting for server (pos_based_pos_traj_controller)...")
        self.client.wait_for_server()
        print("Connected to server (pos_based_pos_traj_controller)")

        #################
        # GGCNN Related
        #################
        self.d = None
        self.ori_ = None
        self.grasp_cartesian_pose = None
        self.posCB = []
        self.ori = []
        self.gripper_angle_grasp = 0.0
        self.final_orientation = 0.0

        # These offsets are used only on the real robot and need to be calibrated
        self.GGCNN_offset_x = -0.03  # 0.002
        self.GGCNN_offset_y = 0.02  # -0.05
        self.GGCNN_offset_z = 0.058  # 0.013

        self.robotiq_joint_name = rospy.get_param("/robotiq_joint_name")

        # Topic published from GG-CNN Node
        rospy.Subscriber('ggcnn/out/command',
                         Float32MultiArray,
                         self.ggcnn_command_callback,
                         queue_size=1)

        ####################
        # Pipeline Related #
        ####################
        self.classes = rospy.get_param("/classes")
        self.grasp_ready_flag = False
        self.detected_tags = []
        self.tags = [
            'tag_0_corrected', 'tag_1_corrected', 'tag_2_corrected',
            'tag_3_corrected', 'tag_4_corrected', 'tag_5_corrected',
            'tag_6_corrected', 'tag_7_corrected'
        ]

        # These offsets are for the real camera
        self.tags_position_offset = [0.062, 0.0, 0.062]

        # Subscribers - Topics related to the new grasping pipeline
        rospy.Subscriber('flags/grasp_ready',
                         Bool,
                         self.grasp_ready_callback,
                         queue_size=1)  # Grasp flag
        rospy.Subscriber('flags/reposition_robot_flag',
                         Bool,
                         self.reposition_robot_callback,
                         queue_size=1)  # Reposition flag
        rospy.Subscriber('flags/detection_ready',
                         Bool,
                         self.detection_ready_callback,
                         queue_size=1)  # Detection flag
        rospy.Subscriber('reposition_coord',
                         Float32MultiArray,
                         self.reposition_coord_callback,
                         queue_size=1)
        rospy.Subscriber('/selective_grasping/tag_detections',
                         Int16MultiArray,
                         self.tags_callback,
                         queue_size=1)

        # Publishers
        # Publish the required class so the other nodes such as gg-cnn generates the grasp to the selected part
        self.required_class = rospy.Publisher('pipeline/required_class',
                                              Int8,
                                              queue_size=1)
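
The flag callbacks wired up above are not shown in this example; minimal implementations consistent with the subscribed message types (an assumption) would simply latch the received values:

    def grasp_ready_callback(self, msg):
        self.grasp_ready_flag = msg.data          # std_msgs/Bool

    def detection_ready_callback(self, msg):
        self.detection_ready_flag = msg.data      # std_msgs/Bool

    def reposition_robot_callback(self, msg):
        self.reposition_robot_flag = msg.data     # std_msgs/Bool

    def reposition_coord_callback(self, msg):
        self.reposition_coords = list(msg.data)   # Float32MultiArray payload

    def tags_callback(self, msg):
        self.detected_tags = list(msg.data)       # Int16MultiArray payload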
Code example #42
0
class Controller():
    Manual = 0
    Automatic = 1
    TakeOff = 2

    def __init__(self):
        self.pubNav = rospy.Publisher('cmd_vel', Twist, queue_size=1)
        self.listener = TransformListener()
        rospy.Subscriber("joy", Joy, self._joyChanged)
        rospy.Subscriber("cmd_vel_telop", Twist, self._cmdVelTelopChanged)
        self.cmd_vel_telop = Twist()
        self.pidX = PID(20, 12, 0.0, -30, 30, "x")
        self.pidY = PID(-20, -12, 0.0, -30, 30, "y")
        self.pidZ = PID(15000, 3000.0, 1500.0, 10000, 60000, "z")
        self.pidYaw = PID(-50.0, 0.0, 0.0, -200.0, 200.0, "yaw")
        self.state = Controller.Manual
        self.targetZ = 0.5
        self.lastJoy = Joy()

    def _cmdVelTelopChanged(self, data):
        self.cmd_vel_telop = data
        if self.state == Controller.Manual:
            self.pubNav.publish(data)

    def pidReset(self):
        self.pidX.reset()
        self.pidY.reset()  # the original reset pidZ twice and never pidY
        self.pidZ.reset()
        self.pidYaw.reset()

    def _joyChanged(self, data):
        if len(data.buttons) == len(self.lastJoy.buttons):
            delta = np.array(data.buttons) - np.array(self.lastJoy.buttons)
            print("Buton ok")
            #Button 1
            if delta[0] == 1 and self.state != Controller.Automatic:
                print("Automatic!")
                thrust = self.cmd_vel_telop.linear.z
                print(thrust)
                self.pidReset()
                self.pidZ.integral = thrust / self.pidZ.ki
                self.targetZ = 0.5
                self.state = Controller.Automatic
            #Button 2
            if delta[1] == 1 and self.state != Controller.Manual:
                print("Manual!")
                self.pubNav.publish(self.cmd_vel_telop)
                self.state = Controller.Manual
            #Button 3
            if delta[2] == 1:
                self.state = Controller.TakeOff
                print("TakeOff!")
            #Button 5
            if delta[4] == 1:
                self.targetZ += 0.1
                print(self.targetZ)
            #Button 6
            if delta[5] == 1:
                self.targetZ -= 0.1
                print(self.targetZ)
        self.lastJoy = data

    def run(self):
        thrust = 0
        print("jello")
        while not rospy.is_shutdown():
            if self.state == Controller.TakeOff:
                t = self.listener.getLatestCommonTime("/mocap", "/Nano_Mark")
                if self.listener.canTransform("/mocap", "/Nano_Mark", t):
                    position, quaternion = self.listener.lookupTransform(
                        "/mocap", "/Nano_Mark", t)

                    if position[2] > 0.1 or thrust > 50000:
                        self.pidReset()
                        self.pidZ.integral = thrust / self.pidZ.ki
                        self.targetZ = 0.5
                        self.state = Controller.Automatic
                        thrust = 0
                    else:
                        thrust += 100
                        msg = self.cmd_vel_telop
                        msg.linear.z = thrust
                        self.pubNav.publish(msg)

            if self.state == Controller.Automatic:
                # transform target world coordinates into local coordinates
                t = self.listener.getLatestCommonTime("/Nano_Mark", "/mocap")
                print(t)
                if self.listener.canTransform("/Nano_Mark", "/mocap", t):
                    targetWorld = PoseStamped()
                    targetWorld.header.stamp = t
                    targetWorld.header.frame_id = "mocap"
                    targetWorld.pose.position.x = 0
                    targetWorld.pose.position.y = 0
                    targetWorld.pose.position.z = self.targetZ
                    quaternion = tf.transformations.quaternion_from_euler(
                        0, 0, 0)
                    targetWorld.pose.orientation.x = quaternion[0]
                    targetWorld.pose.orientation.y = quaternion[1]
                    targetWorld.pose.orientation.z = quaternion[2]
                    targetWorld.pose.orientation.w = quaternion[3]

                    targetDrone = self.listener.transformPose(
                        "/Nano_Mark", targetWorld)

                    quaternion = (targetDrone.pose.orientation.x,
                                  targetDrone.pose.orientation.y,
                                  targetDrone.pose.orientation.z,
                                  targetDrone.pose.orientation.w)
                    euler = tf.transformations.euler_from_quaternion(
                        quaternion)

                    msg = self.cmd_vel_telop  # restore: 'msg' was otherwise undefined in this branch
                    msg.linear.x = self.pidX.update(
                        0.0, targetDrone.pose.position.x)
                    msg.linear.y = self.pidY.update(
                        0.0, targetDrone.pose.position.y)
                    msg.linear.z = self.pidZ.update(
                        0.0, targetDrone.pose.position.z
                    )  #self.pidZ.update(position[2], self.targetZ)
                    msg.angular.z = self.pidYaw.update(0.0, euler[2])
                    #print(euler[2])
                    #print(msg.angular.z)
                    self.pubNav.publish(msg)

            rospy.sleep(0.01)
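
The PID class itself is not shown. Its usage above implies update(current, target), reset(), and a writable integral with gain ki; the constructor argument order (kp, kd, ki, minOutput, maxOutput, name) is a guess from the values passed in. A minimal sketch under those assumptions:

class PID(object):
    def __init__(self, kp, kd, ki, minOutput, maxOutput, name):
        self.kp, self.kd, self.ki = kp, kd, ki
        self.minOutput, self.maxOutput = minOutput, maxOutput
        self.name = name
        self.reset()

    def reset(self):
        self.integral = 0.0
        self.previousError = 0.0
        self.previousTime = rospy.get_time()

    def update(self, value, targetValue):
        now = rospy.get_time()
        dt = now - self.previousTime
        error = targetValue - value
        self.integral += error * dt
        d = self.kd * (error - self.previousError) / dt if dt > 0 else 0.0
        output = self.kp * error + d + self.ki * self.integral
        self.previousError = error
        self.previousTime = now
        return max(self.minOutput, min(self.maxOutput, output))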
Code example #43
0
File: tensioner.py  Project: qingchenkanlu/IL_ROS_HSR
 def __init__(self):
     self.torque = Gripper_Torque()
     self.tl = TransformListener()
Code example #44
0
 def __init__(self, *args):
     self.tf = TransformListener()
Code example #45
0
    yellowBox1Name = "BoxTF_yellow1"
    yellowBox2Name = "BoxTF_yellow2"
    yellowBox3Name = "BoxTF_yellow3"
    greenBox1Name = "BoxTF_green1"
    greenBox2Name = "BoxTF_green2"
    greenBox3Name = "BoxTF_green3"
    vehicleName = "base_link"
    vehicleTargetName = "youBotTarget"
    world = "map"
    gripperTip = "gripperTip"
    place1 = "DropPlaceTF_1Start"
    place2 = "DropPlaceTF_2Temp"
    place3 = "DropPlaceTF_3Goal"

    ######  Subscribe topics
    tf_sub = TransformListener()

    ###### Do Hanoi
    #wait for node and topics to come up
    time.sleep(2)

    # define some relative positions
    pickup1 = [
        0, -14.52 * math.pi / 180, -70.27 * math.pi / 180,
        -95.27 * math.pi / 180, 0 * math.pi / 180
    ]
    pickup2 = [
        0, -13.39 * math.pi / 180, -93.91 * math.pi / 180,
        -72.72 * math.pi / 180, 90 * math.pi / 180
    ]
    pickup3 = [
Code example #46
0
class mapping():
    def __init__(self):
        self.node_name = rospy.get_name()
        self.tf = TransformListener()
        self.transformer = TransformerROS()
        rospy.loginfo("[%s] Initializing " % (self.node_name))
        #rospy.Subscriber("/pcl_points", PoseArray, self.call_back, queue_size=1, buff_size = 2**24)
        sub_pcl = message_filters.Subscriber("/pcl_points", PoseArray)
        sub_odom = message_filters.Subscriber("/odometry/ground_truth",
                                              Odometry)
        ats = ApproximateTimeSynchronizer((sub_pcl, sub_odom),
                                          queue_size=1,
                                          slop=0.1)
        ats.registerCallback(self.call_back)
        rospy.Subscriber("/move_base_simple/goal",
                         PoseStamped,
                         self.cb_new_goal,
                         queue_size=1)
        self.pub_map = rospy.Publisher('/local_map',
                                       OccupancyGrid,
                                       queue_size=1)
        self.pub_rviz = rospy.Publisher("/wp_line", Marker, queue_size=1)
        self.pub_poses = rospy.Publisher("/path_points",
                                         PoseArray,
                                         queue_size=1)
        self.resolution = 0.25
        self.robot = Pose()
        self.width = 200
        self.height = 200
        self.origin = Pose()
        self.local_map = OccupancyGrid()
        self.dilating_size = 6
        self.wall_width = 3
        self.start_planning = False
        self.transpose_matrix = None
        self.goal = []
        self.astar = AStar()
        self.msg_count = 0
        self.planning_range = 20
        self.frame_id = "map"

    def init_param(self):
        self.occupancygrid = np.zeros((self.height, self.width))
        self.local_map = OccupancyGrid()
        self.local_map.info.resolution = self.resolution
        self.local_map.info.width = self.width
        self.local_map.info.height = self.height
        self.origin.position.x = -self.width * self.resolution / 2. + self.robot.position.x
        self.origin.position.y = -self.height * self.resolution / 2. + self.robot.position.y
        self.local_map.info.origin = self.origin

    def cb_rviz(self, msg):
        self.click_pt = [msg.pose.position.x, msg.pose.position.y]
        self.publish_topic()

    def call_back(self, pcl_msg, odom_msg):
        self.robot = odom_msg.pose.pose
        self.msg_count = self.msg_count + 1
        self.init_param()
        self.local_map.header = pcl_msg.header
        self.local_map.header.frame_id = self.frame_id
        self.get_tf()
        if self.transpose_matrix is None:
            return
        for i in range(len(pcl_msg.poses)):
            p = (pcl_msg.poses[i].position.x, pcl_msg.poses[i].position.y,
                 pcl_msg.poses[i].position.z, 1)
            local_p = np.array(p)
            global_p = np.dot(self.transpose_matrix, local_p)
            x, y = self.map2occupancygrid(global_p)
            width_out_of_range = (x >= self.width - self.dilating_size
                                  or x <= self.dilating_size)
            height_out_of_range = (y >= self.height - self.dilating_size
                                   or y <= self.dilating_size)
            if width_out_of_range or height_out_of_range:
                continue  # skip points whose dilation would fall outside the occupancy grid
            self.occupancygrid[y][x] = 100

        # map dilating
        for i in range(self.height):
            for j in range(self.width):
                if self.occupancygrid[i][j] == 100:
                    for m in range(-self.dilating_size,
                                   self.dilating_size + 1):
                        for n in range(-self.dilating_size,
                                       self.dilating_size + 1):
                            if self.occupancygrid[i + m][j + n] != 100:
                                if m > self.wall_width or m < -self.wall_width or n > self.wall_width or n < -self.wall_width:
                                    if self.occupancygrid[i + m][j + n] != 90:
                                        self.occupancygrid[i + m][j + n] = 50
                                else:
                                    self.occupancygrid[i + m][j + n] = 90

        for i in range(self.height):
            for j in range(self.width):
                self.local_map.data.append(self.occupancygrid[i][j])
        self.pub_map.publish(self.local_map)

        if self.start_planning:
            self.path_planning()

    def get_tf(self):
        try:
            position, quaternion = self.tf.lookupTransform(
                "/map", "/X1/front_laser", rospy.Time(0))
            self.transpose_matrix = self.transformer.fromTranslationRotation(
                position, quaternion)
        except (LookupException, ConnectivityException,
                ExtrapolationException):
            print("Nothing Happen")

    def path_planning(self):
        if self.msg_count % 5 != 0:
            return
        self.msg_count = 0
        cost_map = np.zeros((self.height, self.width))
        border = self.planning_range / self.resolution
        h_min = int((self.height - border) / 2.)
        h_max = int(self.height - (self.height - border) / 2.)
        w_min = int((self.width - border) / 2.)  # was mistakenly computed from self.height
        w_max = int(self.width - (self.width - border) / 2.)
        for i in range(self.height):
            for j in range(self.width):
                if i > h_min and i < h_max:
                    if j > w_min and j < w_max:
                        cost_map[i][j] = self.occupancygrid[i][j]
        start_point = self.map2occupancygrid(
            (self.robot.position.x, self.robot.position.y))
        start = (start_point[1], start_point[0])
        goal = self.map2occupancygrid(self.goal)
        end = (goal[1], goal[0])
        self.astar.initial(cost_map, start, end)
        path = self.astar.planning()
        self.pub_path(path)
        self.rviz(path)

    def cb_new_goal(self, p):
        self.goal = [p.pose.position.x, p.pose.position.y]
        self.start_planning = True

    def occupancygrid2map(self, p):
        x = p[0] * self.resolution + self.origin.position.x + self.resolution / 2.
        y = p[1] * self.resolution + self.origin.position.y + self.resolution / 2.
        return [x, y]

    def map2occupancygrid(self, p):
        x = int((p[0] - self.origin.position.x) / self.resolution)
        y = int((p[1] - self.origin.position.y) / self.resolution)
        return [x, y]

    def pub_path(self, path):
        poses = PoseArray()
        for i in range(len(path)):
            p = self.occupancygrid2map([path[i][1], path[i][0]])
            pose = Pose()
            pose.position.x = p[0]
            pose.position.y = p[1]
            pose.position.z = 0
            poses.poses.append(pose)
        self.pub_poses.publish(poses)

    def rviz(self, path):
        marker = Marker()
        marker.header.frame_id = self.frame_id
        marker.type = marker.LINE_STRIP
        marker.action = marker.ADD
        marker.scale.x = 0.3
        marker.scale.y = 0.3
        marker.scale.z = 0.3
        marker.color.a = 1.0
        marker.color.r = 1.0
        marker.color.g = 0.
        marker.color.b = 0.
        marker.pose.orientation.x = 0.0
        marker.pose.orientation.y = 0.0
        marker.pose.orientation.z = 0.0
        marker.pose.orientation.w = 1.0
        marker.pose.position.x = 0.0
        marker.pose.position.y = 0.0
        marker.pose.position.z = 0.0
        marker.points = []
        for i in range(len(path)):
            p = self.occupancygrid2map([path[i][1], path[i][0]])
            point = Point()
            point.x = p[0]
            point.y = p[1]
            point.z = 0
            marker.points.append(point)
        self.pub_rviz.publish(marker)
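
The two grid conversion helpers are inverses up to cell quantization. A quick worked example with the defaults above (resolution 0.25, a 200x200 grid, robot at the origin, so origin.position = (-25, -25)):

# world (3.10, -1.40) -> cell indices
x = int((3.10 - (-25.0)) / 0.25)          # 112
y = int((-1.40 - (-25.0)) / 0.25)         # 94
# cell (112, 94) -> world, recovered at the cell centre
wx = 112 * 0.25 + (-25.0) + 0.25 / 2.     # 3.125
wy = 94 * 0.25 + (-25.0) + 0.25 / 2.      # -1.375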
Code example #47
0
File: Kinova.py  Project: rafaelpossas/kinova-ros
class KinovaUtilities(object):

    JOINT_1_MIN_MAX = (-3.14, 3.14)
    JOINT_2_MIN_MAX = (0.82, 5.46)
    JOINT_3_MIN_MAX = (0.33, 5.95)
    JOINT_4_MIN_MAX = (-3.14, 3.14)
    JOINT_5_MIN_MAX = (-3.14, 3.14)
    JOINT_6_MIN_MAX = (-3.14, 3.14)

    JOINT_4_OFFSET = 6.28325902769
    JOINT_5_OFFSET = -18.8241170548
    JOINT_6_OFFSET = 18.8293733406

    CUBE_GZ_NAME = "cube"

    X_CUBE_MIN = 0.76
    X_CUBE_MAX = 1.03

    Y_CUBE_MIN = 0.55
    Y_CUBE_MAX = 1.05

    def __init__(self):
        super(KinovaUtilities, self).__init__()
        ## First initialize `moveit_commander`_ and a `rospy`_ node:
        moveit_commander.roscpp_initialize(sys.argv)
        rospy.init_node('Kinova_Utilities', anonymous=True)

        ## Instantiate a `RobotCommander`_ object. This object is the outer-level interface to
        ## the robot:
        self.robot = moveit_commander.RobotCommander()

        group_names = self.robot.get_group_names()
        print "============ Robot Groups:", group_names

        ## Instantiate a `PlanningSceneInterface`_ object.  This object is an interface
        ## to the world surrounding the robot:
        self.scene = moveit_commander.PlanningSceneInterface()
        self.box_name = "cube"

        ## Instantiate a `MoveGroupCommander`_ object.  This object is an interface
        ## to one group of joints.  In this case the group is the joints in the Panda
        ## arm so we set ``group_name = panda_arm``. If you are using a different robot,
        ## you should change this value to the name of your robot arm planning group.
        ## This interface can be used to plan and execute motions on the Panda:

        self.arm_group_name = "Arm"
        self.arm_group = moveit_commander.MoveGroupCommander(self.arm_group_name)
        print "============ Printing arm joint values"
        #print self.arm_group.get_current_joint_values()

        self.gripper_group_name = "All"
        self.gripper_group = moveit_commander.MoveGroupCommander(self.gripper_group_name)
        print "============ Printing gripper joint values"
        #print self.gripper_group.get_current_joint_values()

        # Sometimes for debugging it is useful to print the entire state of the
        # robot:
        print "============ Printing robot state"
        print self.robot.get_current_state()
        print ""

        current_pose = self.arm_group.get_current_pose()
        print "============ Current Pose"
        print current_pose
        print ""

        # aruco_pose = rospy.wait_for_message('/aruco_single/pose', PoseStamped)
        # rospy.loginfo("Got: " + str(aruco_pose))
        #self.return_home_position()

        self.tf = TransformListener()

        rate = rospy.Rate(10.0)

        #print trans

        # rand_x = random.uniform(self.X_CUBE_MIN, self.X_CUBE_MAX)
        # rand_y = random.uniform(self.Y_CUBE_MIN, self.Y_CUBE_MAX)
        #
        # self.delete_gazebo_object(self.CUBE_GZ_NAME)
        # self.spawn_gazebo_object(x=rand_x, y=rand_y, model_name=self.CUBE_GZ_NAME)
        #
        try:
            self.tf.waitForTransform("/world", "/tag_0", rospy.Time(), rospy.Duration(4))
            now = rospy.Time(0)
            (trans, rot) = self.tf.lookupTransform("/world", "/tag_0", now)
            self.add_rviz_object(self.CUBE_GZ_NAME, trans, rot)
        except TransformException, e:
            rospy.logerr("Could not find the TAG: {0}".format(e))


        #self.add_object("cube", trans,rot)

        # tag_pos = geometry_msgs.msg.PoseStamped()
        # tag_pos.header.frame_id = "tag_0"
        # tag_pos.pose.orientation.w = 1.0
        # start_pos_manual = self.listener.transformPose("world", tag_pos)
        # #
        # rospy.loginfo("Pose via manual TF is: ")
        # rospy.loginfo(start_pos_manual)
        #
        # self.return_home_position()
        self.pre_grasp_face_4(trans)

        # self.return_home_position()
        # self.pre_grasp_face_2(trans)
        # # #
        # self.return_home_position()
        # self.pre_grasp_face_1(trans)

        # self.return_home_position()
        try:
            self.joint_planning()
        except moveit_commander.MoveItCommanderException, e:
            print(e)
Code example #48
0
class HandEyeCalibrator(object):
    def __init__(self):

        self._camera_frame = rospy.get_param('%s/camera_frame' % (NODE_NAME, ),
                                             'camera_rgb_optical_frame')
        self._marker_frame = rospy.get_param('%s/marker_frame' % (NODE_NAME, ),
                                             'marker26')
        self._robot_base_frame = rospy.get_param(
            '%s/robot_base_frame' % (NODE_NAME, ), 'world')
        self._ee_frame = rospy.get_param('%s/ee_frame' % (NODE_NAME, ),
                                         'left_arm_7_link')

        self._save_calib_file = rospy.get_param(
            '%s/save_calib_file' % (NODE_NAME, ), 'hand_eye_calib_data.npy')
        self._load_calib_file = rospy.get_param(
            '%s/load_calib_file' % (NODE_NAME, ), 'hand_eye_calib_data.npy')
        self._calib_from_file = rospy.get_param(
            '%s/calibrate_from_file' % (NODE_NAME, ), False)

        rospy.loginfo("Camera frame: %s" % (self._camera_frame, ))
        rospy.loginfo("Marker frame: %s" % (self._marker_frame, ))
        rospy.loginfo("Robot frame: %s" % (self._robot_base_frame, ))
        rospy.loginfo("End-effector frame: %s" % (self._ee_frame, ))
        rospy.loginfo("Calibration filename to save: %s" %
                      (self._save_calib_file, ))
        rospy.loginfo("Calibration filename to load: %s" %
                      (self._load_calib_file, ))
        rospy.loginfo("Calibrate from file: %s" % (self._calib_from_file, ))

        self._camera_base_p = [0.000, 0.022, 0.0]
        self._camera_base_q = [-0.500, 0.500, -0.500, 0.500]

        self._camera_base_transform = self.to_transform_matrix(
            self._camera_base_p, self._camera_base_q)

        self._calib = HandEyeCalib()

        self._br = tf.TransformBroadcaster()
        self._tf = TransformListener()

    def broadcast_frame(self, pt, rot, frame_name="marker"):
        # Extend the 3x3 rotation to a homogeneous 4x4 matrix, convert it to a
        # quaternion once, and broadcast that (the original recomputed it).
        rot = np.append(rot, [[0, 0, 0]], 0)
        rot = np.append(rot, [[0], [0], [0], [1]], 1)
        quat = tuple(tf.transformations.quaternion_from_matrix(rot))
        now = rospy.Time.now()
        self._br.sendTransform((pt[0], pt[1], pt[2]), quat,
                               now, frame_name, 'base')
        print("should have done it!")

    def get_transform(self, base_frame, source_frame):

        if base_frame == source_frame:
            return (0, 0, 0), (0, 0, 0, 1), rospy.Time.now()

        # t = (0,0,0)
        # q = (0,0,0,1)
        time = None
        while time is None:
            try:

                time = self._tf.getLatestCommonTime(source_frame, base_frame)
            except:
                rospy.loginfo(
                    "Failed to get common time between %s and %s. Trying again..."
                    % (
                        source_frame,
                        base_frame,
                    ))

        t = None
        q = None
        try:
            t, q = self._tf.lookupTransform(
                base_frame, source_frame, time
            )  #self._tf_buffer.lookup_transform(frame_name, base_frame, rospy.Time(0), rospy.Duration(5.0))

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rospy.logerr("Transform could not be queried.")

            return (0, 0, 0), (0, 0, 0, 1), rospy.Time.now()

        translation = (t[0], t[1], t[2])
        quaternion = (q[0], q[1], q[2], q[3])
        # translation = (t.transform.translation.x, t.transform.translation.y, t.transform.translation.z)
        # quaternion = (t.transform.rotation.x, t.transform.rotation.y, t.transform.rotation.z, t.transform.rotation.w)

        return translation, quaternion, time

    def to_transform_matrix(self, t, q):

        t_mat = tf.transformations.translation_matrix(t)
        r_mat = tf.transformations.quaternion_matrix(q)
        transform_mat = np.dot(t_mat, r_mat)

        return transform_mat

    def to_euler(self, transform):
        return np.asarray(tf.transformations.euler_from_matrix(transform))

    def to_translation(self, transform):

        return np.array(transform[:3, 3])

    def calibrate_from_file(self):

        calib_data = np.load(self._load_calib_file).item()

        camera_poses = calib_data['camera_poses']
        ee_poses = calib_data['ee_poses']

        for i in range(len(camera_poses)):
            self._calib.add_measurement(ee_poses[i], camera_poses[i])

        hand_eye_transform = self._calib.calibrate()
        hand_eye_transform = np.matmul(
            hand_eye_transform, np.linalg.inv(self._camera_base_transform))
        print 'Hand-eye transform: ', hand_eye_transform
        translation = self.to_translation(hand_eye_transform)
        euler = self.to_euler(hand_eye_transform)
        print "XYZ:", translation
        print "RPY: ", euler

        # calib_data = {'hand_eye_transform':  {'matrix': hand_eye_transform,
        #                                       'xyz': translation,
        #                                       'rpy': euler},
        #               'camera_poses': self._calib._camera_poses,
        #                'ee_poses': self._calib._ee_poses}
        # np.save('hand_eye_calibration_right.npy', calib_data)

    def calibrate(self):
        from getch import getch
        calibrate = False
        print(
            "Press a key: Add/Remove/Calibrate/Help? (a/r/c/h)"
        )
        while not calibrate:

            r = getch()

            if r == 'a':
                camera_t, camera_q, _ = self.get_transform(
                    self._camera_frame, self._marker_frame)
                ee_t, ee_q, _ = self.get_transform(self._robot_base_frame,
                                                   self._ee_frame)

                camera_transform = self.to_transform_matrix(camera_t, camera_q)
                ee_transform = self.to_transform_matrix(ee_t, ee_q)
                print "------------%d Poses---------------" % (len(
                    self._calib._ee_poses), )
                print "Camera transform: ", camera_t, camera_q
                print "Hand transform: ", ee_t, ee_q
                print "---------------------------------"
                self._calib.add_measurement(ee_transform, camera_transform)
                print 'Transform has been added for calibration'
            elif r == 'c':
                calibrate = True
            elif r == 'h':
                print(
                    "Record poses for calibration. Add/Remove/Calibrate/Help? (a/r/c/h)"
                )
            elif r == 'r':
                if len(self._calib._ee_poses) > 0:
                    del self._calib._ee_poses[-1]
                    del self._calib._camera_poses[-1]
                    print("Removed last pose")
                else:
                    print("Pose list already empty.")

            elif r in ['\x1b', '\x03']:
                rospy.signal_shutdown("Acquisition finished.")
                break

        if calibrate:
            hand_eye_transform = self._calib.calibrate()

            hand_eye_transform = np.matmul(
                hand_eye_transform, np.linalg.inv(self._camera_base_transform))
            print "Found Transform: ", hand_eye_transform

            translation = self.to_translation(hand_eye_transform)
            euler = self.to_euler(hand_eye_transform)
            print "XYZ:", translation
            print "RPY: ", euler

            calib_data = {
                'hand_eye_transform': {
                    'matrix': hand_eye_transform,
                    'xyz': translation,
                    'rpy': euler
                },
                'camera_poses': self._calib._camera_poses,
                'ee_poses': self._calib._ee_poses
            }

            np.save('hand_eye_calibration_right.npy', calib_data)
            np.save(self._save_calib_file, calib_data)
        else:
            print("Quiting. Bye!")
コード例 #49
0
ファイル: mover_with_pose.py プロジェクト: gcielniak/CMP9767M
class Mover:
    """
    A very simple Roamer implementation for Thorvald.
    It simply goes straight until any obstacle is within
    3 m distance and then just simply turns left.
    A purely reactive approach.
    """
    def __init__(self):
        """
        On construction of the object, create a Subscriber
        to listen to lasr scans and a Publisher to control
        the robot
        """
        self.publisher = rospy.Publisher('/thorvald_001/teleop_joy/cmd_vel',
                                         Twist,
                                         queue_size=1)
        rospy.Subscriber("/thorvald_001/front_scan", LaserScan, self.callback)
        self.pose_pub = rospy.Publisher('/nearest_obstacle',
                                        PoseStamped,
                                        queue_size=1)
        self.listener = TransformListener()

    def callback(self, data):
        """
        Callback called any time a new laser scan becomes available
        """

        # some logging on debug level, not usually displayed
        rospy.logdebug("I heard %s", data.header.seq)

        # This finds the closest of all laser readings
        min_dist = min(data.ranges)

        # let's already create an object of type Twist to
        # publish it later. All initialised to 0!
        t = Twist()

        # If anything is closer than 2 metres anywhere in the
        # scan, we turn away
        if min_dist < 2:
            t.angular.z = 1.0
        else:  # if all obstacles are far away, let's keep
            # moving forward at 0.8 m/s
            t.linear.x = 0.8
        # publish to the topic that makes the robot actually move
        self.publisher.publish(t)

        # above in min_dist we found the nearest value,
        # but to display the position of the nearest value
        # we need to find which range index corresponds to that
        # minimum value.
        # find the index of the nearest point
        # (trick from https://stackoverflow.com/a/11825864)
        # it really is a very Pythonic trick here, using a getter
        # function on the range. Can also be done with a
        # classic for loop
        index_min = min(range(len(data.ranges)), key=data.ranges.__getitem__)

        # convert the obtained index to angle, using the
        # reported angle_increment, and adding that to the
        # angle_min, i.e. the angle the index 0 corresponds to.
        # (is negative, in fact -PI/2).
        alpha = data.angle_min + (index_min * data.angle_increment)

        # Now for some trigonometry to turn the
        # polar coordinates into cartesian coordinates
        # (inspired by https://stackoverflow.com/a/20926435)
        # to create the point, with z = 0, in the frame
        # of the laser
        laser_point_2d = [cos(alpha) * min_dist, sin(alpha) * min_dist, 0.0]

        # create an empty PoseStamped to be published later.
        pose = PoseStamped()

        # keep the frame ID (the entire header here) as read from
        # the sensor. This is general ROS practice, as it
        # propagates the recording time from the sensor and
        # the coordinate frame of the sensor through to
        # other results we may be publishing based on the analysis
        # of the data of that sensor

        pose.header = data.header

        # fill in the slots from the points calculated above.
        # bit tedious to do it this way, but hey...
        pose.pose.position.x = laser_point_2d[0]
        pose.pose.position.y = laser_point_2d[1]
        pose.pose.position.z = laser_point_2d[2]

        # using my trick from https://github.com/marc-hanheide/jupyter-notebooks/blob/master/quaternion.ipynb
        # to convert to quaternion, so that the Pose always
        # points in the direction of the laser beam.
        # Not required if only the position is
        # of relevance
        # (then just set pose.pose.orientation.w = 1.0 and leave
        # the others as 0).
        pose.pose.orientation.x = 0.0
        pose.pose.orientation.y = 0.0
        pose.pose.orientation.z = sin(alpha / 2)
        pose.pose.orientation.w = cos(alpha / 2)

        # publish the pose so it can be visualised in rviz
        self.pose_pub.publish(pose)

        rospy.loginfo("The closest point in laser frame coords is at\n%s" %
                      pose.pose.position)

        # now lets actually transform this pose into a robot
        # "base_link" frame.
        transformed_pose = self.listener.transformPose(
            "thorvald_001/base_link", pose)
        rospy.loginfo("The closest point in robot coords is at\n%s" %
                      transformed_pose.pose.position)
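
The file's main block is not shown here; a minimal, hypothetical entry point for this class (node name assumed) would be:

# Hypothetical entry point (not shown in the original file).
import rospy

if __name__ == '__main__':
    rospy.init_node('mover')
    Mover()  # the laser-scan callback does all the work
    rospy.spin()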
コード例 #50
0
ファイル: Kinova.py プロジェクト: rafaelpossas/kinova-ros
    def __init__(self):
        super(KinovaUtilities, self).__init__()
        ## First initialize `moveit_commander`_ and a `rospy`_ node:
        moveit_commander.roscpp_initialize(sys.argv)
        rospy.init_node('Kinova_Utilities', anonymous=True)

        ## Instantiate a `RobotCommander`_ object. This object is the outer-level interface to
        ## the robot:
        self.robot = moveit_commander.RobotCommander()

        group_names = self.robot.get_group_names()
        print "============ Robot Groups:", self.robot.get_group_names()

        ## Instantiate a `PlanningSceneInterface`_ object.  This object is an interface
        ## to the world surrounding the robot:
        self.scene = moveit_commander.PlanningSceneInterface()
        self.box_name = "cube"

        ## Instantiate a `MoveGroupCommander`_ object.  This object is an interface
        ## to one group of joints.  In this case the group is the joints in the Panda
        ## arm so we set ``group_name = panda_arm``. If you are using a different robot,
        ## you should change this value to the name of your robot arm planning group.
        ## This interface can be used to plan and execute motions on the Panda:

        self.arm_group_name = "Arm"
        self.arm_group = moveit_commander.MoveGroupCommander(self.arm_group_name)
        print "============ Printing arm joint values"
        #print self.arm_group.get_current_joint_values()

        self.gripper_group_name = "All"
        self.gripper_group = moveit_commander.MoveGroupCommander(self.gripper_group_name)
        print "============ Printing gripper joint values"
        #print self.gripper_group.get_current_joint_values()

        # Sometimes for debugging it is useful to print the entire state of the
        # robot:
        print "============ Printing robot state"
        print self.robot.get_current_state()
        print ""

        current_pose = self.arm_group.get_current_pose()
        print "============ Current Pose"
        print current_pose
        print ""

        # aruco_pose = rospy.wait_for_message('/aruco_single/pose', PoseStamped)
        # rospy.loginfo("Got: " + str(aruco_pose))
        #self.return_home_position()

        self.tf = TransformListener()

        rate = rospy.Rate(10.0)

        #print trans

        # rand_x = random.uniform(self.X_CUBE_MIN, self.X_CUBE_MAX)
        # rand_y = random.uniform(self.Y_CUBE_MIN, self.Y_CUBE_MAX)
        #
        # self.delete_gazebo_object(self.CUBE_GZ_NAME)
        # self.spawn_gazebo_object(x=rand_x, y=rand_y, model_name=self.CUBE_GZ_NAME)
        #
        try:
            self.tf.waitForTransform("/world", "/tag_0", rospy.Time(), rospy.Duration(4))
            now = rospy.Time(0)
            (trans, rot) = self.tf.lookupTransform("/world", "/tag_0", now)
            self.add_rviz_object(self.CUBE_GZ_NAME, trans, rot)
        except TransformException, e:
            rospy.logerr("Could not find the TAG: {0}".format(e))
コード例 #51
0
class PickAndPlace(object):
    def __init__(self, limb, start_pose, hover_distance=0.15):
        self.start_pose = start_pose
        self.hover_distance = hover_distance
        self.limb = robot_limb.Limb(limb)
        self.gripper = robot_gripper.Gripper(limb)
        self.base = "base_marker"
        self.pick_obj = "obj_marker"
        self.dest = "dest_marker"
        self.destinations = []
        self.queue = Queue.Queue()
        self.tf = TransformListener()
        self.set_scene()
        self.set_limb_planner()
        self.enable_robot()
        self.set_destinations()
        print("Moving Sawyer Arm to Start Position")
        self.move_to_start()

    def set_limb_planner(self):
        self.right_arm = MoveGroupCommander("right_arm")
        self.right_arm.set_planner_id('RRTConnectkConfigDefault')
        self.right_arm.allow_replanning(True)
        self.right_arm.set_planning_time(7)

    def enable_robot(self):
        print("Getting robot state... ")
        self._rs = intera_interface.RobotEnable(intera_interface.CHECK_VERSION)
        self._init_state = self._rs.state().enabled
        print("Enabling robot... ")
        self._rs.enable()

    def set_scene(self):
        self.scene_pub = rospy.Publisher('/planning_scene',
                                         PlanningScene,
                                         queue_size=10)
        self.scene = PlanningSceneInterface()
        rospy.sleep(2)
        self.add_collision_object('turtlebot', 0.8, 0., -0.65, .5, 1.5, .33)
        self.add_collision_object('right_wall', 0., 0.65, 0., 4., .1, 4.)
        self.add_collision_object('left_wall', 0., -0.8, 0., 4., .1, 4.)
        self.add_collision_object('back_wall', -0.6, 0., 0., .1, 4., 4.)
        self.add_collision_object('destination_table', .5, -.4, -.35, 1, .5,
                                  .5)
        self.plan_scene = PlanningScene()
        self.plan_scene.is_diff = True
        self.scene_pub.publish(self.plan_scene)

    def add_collision_object(self, name, x, y, z, xs, ys, zs):
        self.scene.remove_world_object(name)
        o = PoseStamped()
        o.pose.position.x = x
        o.pose.position.y = y
        o.pose.position.z = z
        self.scene.attach_box('base', name, o, [xs, ys, zs])

    def set_destinations(self):
        while len(self.destinations) == 0:
            if self.tf.frameExists(self.base) and self.tf.frameExists(
                    self.dest):
                point, quaternion = self.tf.lookupTransform(
                    self.base, self.dest,
                    self.tf.getLatestCommonTime(self.base, self.dest))
                position = list_to_point(point)
                self.destinations.append(
                    Pose(position=position,
                         orientation=self.start_pose.orientation))

    def add_new_objects_to_queue(self, sleep=True):
        print("Checking if " + self.base + " and " + self.pick_obj +
              " both exist.")
        if self.tf.frameExists(self.base) and self.tf.frameExists(
                self.pick_obj):
            if sleep:
                rospy.sleep(10.0)
                self.add_new_objects_to_queue(sleep=False)
            else:
                print(self.base + " and " + self.pick_obj + " both exist.")
                point, quaternion = self.tf.lookupTransform(
                    self.base, self.pick_obj,
                    self.tf.getLatestCommonTime(self.base, self.pick_obj))
                position = list_to_point(point)
                print("Adding " + self.pick_obj + " to queue")
                obj_location = Pose(position=position,
                                    orientation=self.start_pose.orientation)
                print("Picking Object from:", obj_location)
                self.queue.put(obj_location)

    def complete_pick_place(self):
        if not self.queue.empty():
            start_pose = self.queue.get()
            end_pose = self.destinations[0]
            print("\nPicking...")
            self.pick(start_pose)
            print("\nPlacing...")
            self.place(end_pose)
            print("\Resetting...")
            self.move_to_start()

    def guarded_move_to_joint_position(self, pose):
        self.right_arm.set_pose_target(pose)
        self.right_arm.go()

    def servo_to_pose(self, pose):
        # servo down to release
        self.guarded_move_to_joint_position(pose)

    def approach(self, pose):
        # approach with a pose the hover-distance above the requested pose
        approach = copy.deepcopy(pose)
        approach.position.z += self.hover_distance
        print("approaching:", approach)
        self.guarded_move_to_joint_position(approach)

    def retract(self):
        # retrieve current pose from endpoint
        current_pose = self.limb.endpoint_pose()
        ik_pose = Pose()
        ik_pose.position.x = current_pose['position'].x
        ik_pose.position.y = current_pose['position'].y
        ik_pose.position.z = current_pose['position'].z + self.hover_distance
        ik_pose.orientation.x = current_pose['orientation'].x
        ik_pose.orientation.y = current_pose['orientation'].y
        ik_pose.orientation.z = current_pose['orientation'].z
        ik_pose.orientation.w = current_pose['orientation'].w
        # servo up from current pose
        self.guarded_move_to_joint_position(ik_pose)

    def move_to_start(self):
        print("Moving the right arm to start pose...")
        self.guarded_move_to_joint_position(self.start_pose)
        self.gripper_open()
        rospy.sleep(1.0)
        print("Running. Ctrl-c to quit")

    def gripper_open(self):
        self.gripper.open()
        rospy.sleep(1.0)

    def gripper_close(self):
        self.gripper.close()
        rospy.sleep(1.0)

    def pick(self, pose):
        # open the gripper
        self.gripper_open()
        # servo above pose
        self.approach(pose)
        # servo to pose
        self.servo_to_pose(pose)
        # close gripper
        self.gripper_close()
        # retract to clear object
        self.retract()

    def place(self, pose):
        # servo above pose
        self.approach(pose)
        # servo to pose
        self.servo_to_pose(pose)
        # open the gripper
        self.gripper_open()
        # retract to clear object
        self.retract()
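
The surrounding script is not shown; a hypothetical driver loop (limb name, start pose, and polling period are assumptions):

# Hypothetical driver script (limb name, start pose, and period are placeholders).
import rospy
from geometry_msgs.msg import Pose, Point, Quaternion

rospy.init_node('pick_and_place_demo')
start_pose = Pose(position=Point(0.5, 0.0, 0.3),
                  orientation=Quaternion(0.0, 1.0, 0.0, 0.0))
pnp = PickAndPlace('right', start_pose)
while not rospy.is_shutdown():
    pnp.add_new_objects_to_queue()
    pnp.complete_pick_place()
    rospy.sleep(1.0)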
コード例 #52
0
class Controller():
    ActionTakeOff = 0
    ActionHover = 1
    ActionLand = 2
    ActionAnimation = 3

    def __init__(self):
        self.lastNavdata = Navdata()
        self.lastImu = Imu()
        self.lastMag = Vector3Stamped()
        self.current_pose = PoseStamped()
        self.current_odom = Odometry()
        self.lastState = State.Unknown
        self.command = Twist()
        self.drone_msg = ARDroneData()
        self.cmd_freq = 1.0 / 200.0
        self.drone_freq = 1.0 / 200.0

        self.action = Controller.ActionTakeOff
        self.previousDebugTime = rospy.get_time()

        self.pose_error = [0, 0, 0, 0]
        self.pidX = PID(0.35, 0.15, 0.025, -1, 1, "x")
        self.pidY = PID(0.35, 0.15, 0.025, -1, 1, "y")
        self.pidZ = PID(1.25, 0.1, 0.25, -1.0, 1.0, "z")
        self.pidYaw = PID(0.75, 0.1, 0.2, -1.0, 1.0, "yaw")
        self.scale = 1.0

        self.goal = [-1, 0, 0, height, 0]  #set it to center to start
        self.goal_rate = [0, 0, 0, 0, 0]  # used to update the goal over time
        self.achieved_goal = Goal(
        )  # stores the most recently achieved goal, for time-dependent reference
        self.next_goal = Goal()  # next goal
        self.goal_done = False
        self.waypoints = None

        rospy.on_shutdown(self.on_shutdown)
        rospy.Subscriber("ardrone/navdata", Navdata, self.on_navdata)
        rospy.Subscriber("ardrone/imu", Imu, self.on_imu)
        rospy.Subscriber("ardrone/mag", Vector3Stamped, self.on_mag)
        rospy.Subscriber("arcontroller/goal", Goal, self.on_goal)
        rospy.Subscriber("arcontroller/waypoints", Waypoints,
                         self.on_waypoints)
        rospy.Subscriber("qualisys/ARDrone/pose", PoseStamped,
                         self.get_current_pose)
        rospy.Subscriber("qualisys/ARDrone/odom", Odometry,
                         self.get_current_odom)
        self.pubTakeoff = rospy.Publisher('ardrone/takeoff',
                                          Empty,
                                          queue_size=1)
        self.pubLand = rospy.Publisher('ardrone/land', Empty, queue_size=1)
        self.pubCmd = rospy.Publisher('cmd_vel', Twist, queue_size=1)
        self.pubDroneData = rospy.Publisher('droneData',
                                            ARDroneData,
                                            queue_size=1)
        self.pubGoal = rospy.Publisher('current_goal', Goal, queue_size=1)
        self.setFlightAnimation = rospy.ServiceProxy(
            'ardrone/setflightanimation', FlightAnim)

        self.commandTimer = rospy.Timer(rospy.Duration(self.cmd_freq),
                                        self.sendCommand)
        #self.droneDataTimer = rospy.Timer(rospy.Duration(self.drone_freq),self.sendDroneData)
        #self.goalTimer = rospy.Timer(rospy.Duration(self.drone_freq),self.sendCurrentGoal)

        self.listener = TransformListener()

    def get_current_pose(self, data):
        self.current_pose = data

    def get_current_odom(self, data):
        self.current_odom = data

    def on_imu(self, data):
        self.lastImu = data

    def on_mag(self, data):
        self.lastMag = data

    def on_navdata(self, data):
        self.lastNavdata = data
        if data.state != self.lastState:
            rospy.loginfo("State Changed: " + str(data.state))
            self.lastState = data.state

    def on_shutdown(self):
        rospy.loginfo("Shutdown: try to land...")
        self.command = Twist()
        for i in range(0, 1000):
            self.pubLand.publish()
            self.pubCmd.publish(self.command)
        rospy.sleep(1)

    def on_goal(self, data):
        rospy.loginfo('New goal.')
        self.goal = [data.t, data.x, data.y, data.z, data.yaw]
        self.goal_done = False

    def sendCommand(self, event=None):
        self.command.linear.x = self.scale * self.pidX.update(
            0.0, self.pose_error[0])
        self.command.linear.y = self.scale * self.pidY.update(
            0.0, self.pose_error[1])
        self.command.linear.z = self.pidZ.update(0.0, self.pose_error[2])
        self.command.angular.z = self.pidYaw.update(0.0, self.pose_error[3])

        self.drone_msg = ARDroneData()

        self.drone_msg.header.stamp = rospy.get_rostime()
        self.drone_msg.header.frame_id = 'drone_data'
        self.drone_msg.cmd = self.command
        self.drone_msg.goal.t = rospy.get_time()
        self.drone_msg.goal.x = self.goal[1]
        self.drone_msg.goal.y = self.goal[2]
        self.drone_msg.goal.z = self.goal[3]
        self.drone_msg.goal.yaw = self.goal[4]
        self.drone_msg.tm = self.lastNavdata.tm
        self.pubDroneData.publish(self.drone_msg)

        self.pubCmd.publish(self.command)

    def sendDroneData(self, event=None):
        self.drone_msg = ARDroneData()

        self.drone_msg.header.stamp = rospy.get_rostime()
        self.drone_msg.header.frame_id = 'drone_data'
        #self.drone_msg.navdata = self.lastNavdata
        #self.drone_msg.imu = self.lastImu
        #self.drone_msg.mag = self.lastMag
        #self.drone_msg.pose = self.current_pose
        #self.drone_msg.odom = self.current_odom
        self.drone_msg.cmd = self.command
        self.drone_msg.goal.t = rospy.get_time()
        self.drone_msg.goal.x = self.goal[1]
        self.drone_msg.goal.y = self.goal[2]
        self.drone_msg.goal.z = self.goal[3]
        self.drone_msg.goal.yaw = self.goal[4]
        self.drone_msg.tm = self.lastNavdata.tm
        self.pubDroneData.publish(self.drone_msg)

    def sendCurrentGoal(self, event=None):
        current_goal = Goal()
        current_goal.t = rospy.get_time()
        current_goal.x = self.goal[1]
        current_goal.y = self.goal[2]
        current_goal.z = self.goal[3]
        current_goal.yaw = self.goal[4]
        self.pubGoal.publish(current_goal)

    def on_waypoints(self, data):
        rospy.loginfo('New waypoints.')
        self.waypoints = []
        for d in range(data.len):
            self.waypoints.append([
                data.waypoints[d].t, data.waypoints[d].x, data.waypoints[d].y,
                data.waypoints[d].z, data.waypoints[d].yaw
            ])
            rospy.loginfo(self.waypoints)

    def waypoint_follower(self, points):
        current_index = 0
        previous_index = current_index - 1
        rospy.loginfo(points)
        # Get the time vector
        time_wp = [goal[0] for goal in points]

        self.goal = points[current_index]  #get the first point
        dt_two_wp = time_wp[1] - time_wp[0]
        self.achieved_goal.t = points[0][0]
        self.achieved_goal.x = points[0][1]
        self.achieved_goal.y = points[0][2]
        self.achieved_goal.z = points[0][3]
        self.achieved_goal.yaw = points[0][4]

        self.next_goal = self.achieved_goal

        self.goal_rate = [1, 0, 0, 0, 0]

        minX = .05
        minY = .05
        time0_wp = rospy.get_time()
        time_achieved_goal = time0_wp
        while True:  #for i in range(0,points.len()):
            #goal = points[i]
            # transform target world coordinates into local coordinates
            targetWorld = PoseStamped()
            t = self.listener.getLatestCommonTime("/ARDrone", "/mocap")
            if self.listener.canTransform("/ARDrone", "/mocap", t):
                # Get starting time
                time_current_goal = rospy.get_time()
                dt_current_achieve = time_current_goal - time_achieved_goal

                # # Update the continuous goal using: goal = rate*t+goal
                # current_goalX = self.goal_rate[1]*dt_current_achieve+self.achieved_goal.x
                # current_goalY = self.goal_rate[2]*dt_current_achieve+self.achieved_goal.y
                # current_goalZ = self.goal_rate[3]*dt_current_achieve+self.achieved_goal.z
                # current_goalYaw = self.goal_rate[4]*dt_current_achieve+self.achieved_goal.yaw

                # self.goal = [self.next_goal.t,current_goalX,current_goalY,current_goalZ,current_goalYaw]

                self.goal = points[current_index]
                targetWorld.header.stamp = t
                targetWorld.header.frame_id = "mocap"
                targetWorld.pose.position.x = self.goal[1]
                targetWorld.pose.position.y = self.goal[2]
                targetWorld.pose.position.z = self.goal[3]
                quaternion = tf.transformations.quaternion_from_euler(
                    0, 0, self.goal[4])

                targetWorld.pose.orientation.x = quaternion[0]
                targetWorld.pose.orientation.y = quaternion[1]
                targetWorld.pose.orientation.z = quaternion[2]
                targetWorld.pose.orientation.w = quaternion[3]

                targetDrone = self.listener.transformPose(
                    "/ARDrone", targetWorld)

                quaternion = (targetDrone.pose.orientation.x,
                              targetDrone.pose.orientation.y,
                              targetDrone.pose.orientation.z,
                              targetDrone.pose.orientation.w)
                euler = tf.transformations.euler_from_quaternion(quaternion)

                # Define the pose_error to publish the command in fixed rate
                self.pose_error = [
                    targetDrone.pose.position.x, targetDrone.pose.position.y,
                    targetDrone.pose.position.z, euler[2]
                ]
                # Run PID controller and send navigation message

                error_xy = math.sqrt(targetDrone.pose.position.x**2 +
                                     targetDrone.pose.position.y**2)
                time_current_wp = rospy.get_time()
                if self.goal[
                        0] < 0:  #-1 implying that waypoints is not time-dependent
                    # goal t, x, y, z, yaw
                    #self.goal = points[current_index]
                    if (error_xy < 0.2
                            and math.fabs(targetDrone.pose.position.z) < 0.2
                            and math.fabs(euler[2]) < math.radians(5)):

                        if (current_index < len(points) - 1):
                            current_index += 1
                            self.goal = points[current_index]
                        else:
                            return
                else:
                    # Check how long from the first point
                    diff_time_wp = time_current_wp - time0_wp
                    if diff_time_wp <= time_wp[-1] - time_wp[
                            0]:  # this time should be less than the time defined wp
                        # Check the index of current goal based on rospy time and time vector in waypoints
                        current_index = next(x for x, val in enumerate(time_wp)
                                             if val >= diff_time_wp)
                        if current_index > 0:
                            # Meaning current goal is passed, update new goal
                            previous_index = current_index - 1
                            self.achieved_goal.t = points[previous_index][0]
                            self.achieved_goal.x = points[previous_index][1]
                            self.achieved_goal.y = points[previous_index][2]
                            self.achieved_goal.z = points[previous_index][3]
                            self.achieved_goal.yaw = points[previous_index][4]

                            self.next_goal.t = points[current_index][0]
                            self.next_goal.x = points[current_index][1]
                            self.next_goal.y = points[current_index][2]
                            self.next_goal.z = points[current_index][3]
                            self.next_goal.yaw = points[current_index][4]

                            self.goal_rate = [
                                (points[current_index][i] -
                                 points[previous_index][i]) / dt_two_wp
                                for i in range(5)
                            ]
                            time_achieved_goal = time_current_wp
                        else:
                            self.achieved_goal.t = points[0][0]
                            self.achieved_goal.x = points[0][1]
                            self.achieved_goal.y = points[0][2]
                            self.achieved_goal.z = points[0][3]
                            self.achieved_goal.yaw = points[0][4]

                            self.next_goal = self.achieved_goal

                            self.goal_rate = [1, 0, 0, 0, 0]

                    else:  # time already passed compared to the last goal, keep the last goal
                        self.goal = points[-1]
                        self.goal_rate = [1, 0, 0, 0, 0]
                        return

                #time = rospy.get_time()
                diff_time_log = time_current_wp - self.previousDebugTime
                if diff_time_log > 0.5:
                    #log_msg = "Current pos:" + [targetDrone.pose.position.x, targetDrone.pose.position.y,targetDrone.pose.position.z]
                    rospy.loginfo('--------------------------------------')
                    rospy.loginfo('Control:%.2f,%.2f,%.2f,%.2f | bat: %.2f',
                                  self.command.linear.x, self.command.linear.y,
                                  self.command.linear.z,
                                  self.command.angular.z,
                                  self.lastNavdata.batteryPercent)
                    rospy.loginfo(
                        'Current position: [%.2f, %.2f, %.2f, %.2f, %.2f]',
                        diff_time_wp, self.current_pose.pose.position.x,
                        self.current_pose.pose.position.y,
                        self.current_pose.pose.position.z, euler[2])
                    rospy.loginfo('Current goal:%.2f,%.2f,%.2f,%.2f,%.2f',
                                  self.goal[0], self.goal[1], self.goal[2],
                                  self.goal[3], self.goal[4])
                    rospy.loginfo('Error: %.2f,%.2f,%.2f', error_xy,
                                  math.fabs(targetDrone.pose.position.z),
                                  math.fabs(euler[2]))
                    rospy.loginfo('--------------------------------------')
                    self.previousDebugTime = time_current_wp

    def go_to_goal(self, goal):
        #rospy.loginfo('Going to goal')
        #rospy.loginfo(goal)
        self.goal = goal
        # transform target world coordinates into local coordinates
        targetWorld = PoseStamped()

        t = self.listener.getLatestCommonTime("/ARDrone", "/mocap")
        if self.listener.canTransform("/ARDrone", "/mocap", t):
            # Get starting time
            startCmdTime = rospy.get_time()

            targetWorld.header.stamp = t
            targetWorld.header.frame_id = "mocap"
            targetWorld.pose.position.x = goal[1]
            targetWorld.pose.position.y = goal[2]
            targetWorld.pose.position.z = goal[3]
            quaternion = tf.transformations.quaternion_from_euler(
                0, 0, goal[4])
            targetWorld.pose.orientation.x = quaternion[0]
            targetWorld.pose.orientation.y = quaternion[1]
            targetWorld.pose.orientation.z = quaternion[2]
            targetWorld.pose.orientation.w = quaternion[3]

            targetDrone = self.listener.transformPose("/ARDrone", targetWorld)

            quaternion = (targetDrone.pose.orientation.x,
                          targetDrone.pose.orientation.y,
                          targetDrone.pose.orientation.z,
                          targetDrone.pose.orientation.w)
            euler = tf.transformations.euler_from_quaternion(quaternion)

            # Define pose error to publish the command in fixed rate
            self.pose_error = [
                targetDrone.pose.position.x, targetDrone.pose.position.y,
                targetDrone.pose.position.z, euler[2]
            ]

            error_xy = math.sqrt(targetDrone.pose.position.x**2 +
                                 targetDrone.pose.position.y**2)
            time = rospy.get_time()
            if time - self.previousDebugTime > 1:
                #log_msg = "Current pos:" + [targetDrone.pose.position.x, targetDrone.pose.position.y,targetDrone.pose.position.z]
                rospy.loginfo('--------------------------------------')
                rospy.loginfo('Control:%.2f,%.2f,%.2f,%.2f,bat:%.2f',
                              self.command.linear.x, self.command.linear.y,
                              self.command.linear.z, self.command.angular.z,
                              self.lastNavdata.batteryPercent)
                rospy.loginfo('Current goal:%.2f,%.2f,%.2f,%.2f', goal[1],
                              goal[2], goal[3], goal[4])
                rospy.loginfo('Current pose:%.2f,%.2f,%.2f',
                              self.current_pose.pose.position.x,
                              self.current_pose.pose.position.y,
                              self.current_pose.pose.position.z)
                rospy.loginfo('Error: %.2f,%.2f,%.2f', error_xy,
                              math.fabs(targetDrone.pose.position.z),
                              math.fabs(euler[2]))
                self.previousDebugTime = time
            if self.goal_done:
                rospy.loginfo("Goal done.")
                rospy.loginfo('-------------------------------------')

            if (error_xy < 0.2 and math.fabs(targetDrone.pose.position.z) < 0.2
                    and math.fabs(euler[2]) < math.radians(5)):
                self.goal_done = True
            else:
                self.goal_done = False

    def run(self):
        while not rospy.is_shutdown():
            if self.action == Controller.ActionTakeOff:
                if self.lastState == State.Landed:
                    pass
                    #rospy.loginfo('Taking off.')
                    #self.pubTakeoff.publish()
                elif self.lastState == State.Hovering or self.lastState == State.Flying or self.lastState == State.Flying2:
                    self.action = Controller.ActionHover
            elif self.action == Controller.ActionLand:
                msg = Twist()
                self.pubCmd.publish(msg)
                self.pubLand.publish()
            elif self.action == Controller.ActionHover:
                if self.waypoints == None:
                    #if self.goal_done == False:
                    self.go_to_goal(self.goal)
                else:
                    self.waypoint_follower(self.waypoints)
                    self.waypoints = None

            rospy.sleep(0.01)
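
The original file's entry point is not included above; a plausible, hypothetical main block (node name assumed):

# Hypothetical main block (node name assumed).
if __name__ == '__main__':
    rospy.init_node('ardrone_controller')
    controller = Controller()
    controller.run()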
コード例 #53
0
    def __init__(self, frame_out):
        self.tl = TransformListener()
        self.points = []
        self.frame_out = frame_out
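
Only the constructor of this helper survives in the excerpt; presumably it transforms the collected points into frame_out. A hypothetical sketch of that step, assuming the stored points are geometry_msgs/PointStamped:

    # Hypothetical method for the class above (names beyond the snippet are assumed).
    def transform_points(self):
        # transform every stored point into the output frame
        out = []
        for p in self.points:  # assumed to be geometry_msgs/PointStamped
            self.tl.waitForTransform(self.frame_out, p.header.frame_id,
                                     p.header.stamp, rospy.Duration(1.0))
            out.append(self.tl.transformPoint(self.frame_out, p))
        return out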
コード例 #54
0
ファイル: swarmlib.py プロジェクト: RuslanAgishev/cfcontrol
class Drone:
    def __init__(self, name, obstacles, leader=False):
        self.name = name
        self.tf = '/vicon/' + name + '/' + name
        self.leader = leader
        self.tl = TransformListener()
        self.pose = self.position()
        self.orient = np.array([0, 0, 0])
        self.sp = self.position()
        self.obstacles = obstacles
        self.path = Path()
        self.delta = np.array([0, 0])
        self.near_obstacle = np.zeros(len(obstacles))
        self.radius_impedance = Radius_Impedance()
        self.angular_impedance = Angular_Impedance()
        self.impedance_avel = Impedance_avel()
        self.theta = None
        self.d_theta = pi * np.ones(len(obstacles))
        self.dist_to_drones = np.zeros(3)
        self.dist_to_obst = np.zeros(len(obstacles))
        self.drone_time_array = np.ones(10)
        self.drone_pose_array = np.array(
            [np.ones(10), np.ones(10), np.ones(10)])
        self.rate = rospy.Rate(100)
        self.traj = np.array([0, 0, 0])
        self.feature = self.sp
        self.start = self.position()
        self.goal = self.sp

    def position(self):
        self.tl.waitForTransform("/world", self.tf, rospy.Time(0),
                                 rospy.Duration(1))
        position, quaternion = self.tl.lookupTransform("/world", self.tf,
                                                       rospy.Time(0))
        self.pose = position
        return np.array(position)

    def orientation(self):
        self.tl.waitForTransform("/world", self.tf, rospy.Time(0),
                                 rospy.Duration(1))
        position, quaternion = self.tl.lookupTransform("/world", self.tf,
                                                       rospy.Time(0))
        self.orient = get_angles(np.array(quaternion))
        return get_angles(np.array(quaternion))

    def publish_sp(self):
        publish_pose(self.sp, np.array([0, 0, 0]), self.name + "_sp")

    def publish_position(self):
        publish_pose(self.pose, self.orient, self.name + "_pose")

    def publish_path(self, limit=1000):
        publish_path(self.path, self.sp, self.orient, self.name + "_path",
                     limit)
        #for i in range( 1,len(self.path.poses) ):
        #	print self.name +": "+ str(self.path.poses[i].pose)

    def fly(self):
        # if self.leader:
        # 	limits = np.array([ 2, 2, 2.5 ])
        # 	np.putmask(self.sp, self.sp >= limits, limits)
        # 	np.putmask(self.sp, self.sp <= -limits, -limits)
        publish_goal_pos(self.sp, 0, "/" + self.name)

    def update_pose_theta(self, drone, obstacle, R):
        drone_omega = self.omegaZ(drone.sp, drone.sp - obstacle.position())
        if drone.near_obstacle[
                obstacle.
                id] == 1:  # initial impedance parameters, when the drone is just near the obstacle
            drone.angular_impedance.imp_theta, drone.angular_impedance.imp_omega, drone.angular_impedance.imp_time =\
             drone.angular_impedance.impedance_model(drone.theta, drone.theta, drone_omega, drone.angular_impedance.imp_time)
        else:
            drone.angular_impedance.imp_theta, drone.angular_impedance.imp_omega, drone.angular_impedance.imp_time =\
             drone.angular_impedance.impedance_model(drone.theta, drone.angular_impedance.imp_theta, drone.angular_impedance.imp_omega, drone.angular_impedance.imp_time)

        updated_pose = drone.angular_impedance.pose_from_theta(
            obstacle.position()[:2], R, drone.angular_impedance.imp_theta)
        return updated_pose

    def update_pose_radius(self, pose_prev, drone, dist_to_obstacle, koef):
        if (
                sum(drone.near_obstacle) <= 10
        ):  # 10 is the number of samples to compute velocity from consequent poses
            drone.radius_impedance.imp_vel = drone.velocity(drone.sp)
        drone.radius_impedance.imp_pose, \
        drone.radius_impedance.imp_vel, \
        drone.radius_impedance.imp_time = \
         drone.radius_impedance.impedance_model(drone.delta, drone.radius_impedance.imp_pose, drone.radius_impedance.imp_vel, drone.radius_impedance.imp_time)
        delta_pose = drone.radius_impedance.imp_pose[:2]
        updated_pose = pose_prev + koef * delta_pose
        ''' impedance model visualization '''
        length = np.linalg.norm(delta_pose)
        yaw = acos(delta_pose[0] / length)
        publish_arrow(np.hstack([updated_pose, drone.sp[2]]), [0, 0, yaw],
                      length, '/delta_pose')

        return updated_pose

    def update_pose_avel(self, pose_prev, average_vel):
        self.impedance_avel.imp_pose, self.impedance_avel.imp_vel, self.impedance_avel.imp_time = \
         self.impedance_avel.impedance_model(average_vel, self.impedance_avel.imp_pose, self.impedance_avel.imp_vel, self.impedance_avel.imp_time)
        updated_pose = pose_prev + 0.15 * self.impedance_avel.imp_pose
        return updated_pose

    def calculate_dist_to_drones(self, drones_poses):
        for i in range(len(drones_poses)):
            self.dist_to_drones[i] = np.linalg.norm(drones_poses[i] - self.sp)

    def dist_to_obstacles(self):
        dist = []
        for i in range(len(self.obstacles)):
            dist.append(
                np.linalg.norm(self.obstacles[i].position()[:2] - self.sp[:2]))
        self.dist_to_obst = dist
        return np.array(dist)

    def omegaZ(self, drone_pose, dist_from_obstacle):
        R = dist_from_obstacle  # 3D-vector
        drone_vel = self.velocity(drone_pose)
        # drone_vel_n = np.dot(drone_vel, R)/(np.linalg.norm(R)**2) * R
        # drone_vel_t = drone_vel - drone_vel_n
        drone_w = np.cross(R, drone_vel)  # / ( np.linalg.norm(R)**2 )

        return drone_w[2]

    def velocity(self, drone_pose):
        for i in range(len(self.drone_time_array) - 1):
            self.drone_time_array[i] = self.drone_time_array[i + 1]
        self.drone_time_array[-1] = time.time()

        for i in range(len(self.drone_pose_array[0]) - 1):
            self.drone_pose_array[0][i] = self.drone_pose_array[0][i + 1]
            self.drone_pose_array[1][i] = self.drone_pose_array[1][i + 1]
            self.drone_pose_array[2][i] = self.drone_pose_array[2][i + 1]
        self.drone_pose_array[0][-1] = drone_pose[0]
        self.drone_pose_array[1][-1] = drone_pose[1]
        self.drone_pose_array[2][-1] = drone_pose[2]

        vel_x = (self.drone_pose_array[0][-1] - self.drone_pose_array[0][0]
                 ) / (self.drone_time_array[-1] - self.drone_time_array[0])
        vel_y = (self.drone_pose_array[1][-1] - self.drone_pose_array[1][0]
                 ) / (self.drone_time_array[-1] - self.drone_time_array[0])
        vel_z = (self.drone_pose_array[2][-1] - self.drone_pose_array[2][0]
                 ) / (self.drone_time_array[-1] - self.drone_time_array[0])

        drone_vel = np.array([vel_x, vel_y, vel_z])

        return drone_vel

    def landing(self, sim=False):
        drone_landing_pose = self.position() if sim == False else self.sp
        while not rospy.is_shutdown():
            self.sp = drone_landing_pose
            drone_landing_pose[2] = drone_landing_pose[2] - 0.007
            self.publish_sp()
            self.publish_path(limit=1000)
            if sim == False:
                self.fly()
            if self.sp[2] < -1.0:
                sleep(1)
                if sim == False:
                    cf1.stop()
                print 'reached the floor, shutdown'
                # rospy.signal_shutdown('landed')
            self.rate.sleep()
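
As a hedged usage sketch only: the class expects a Vicon TF of the form /vicon/<name>/<name> and module-level helpers such as publish_goal_pos; the object name and hover offset below are placeholders.

# Hypothetical usage (Vicon object name and setpoint are placeholders).
import rospy
import numpy as np

rospy.init_node('drone_demo')
drone = Drone('crazyflie1', obstacles=[])
drone.sp = drone.position() + np.array([0.0, 0.0, 0.5])  # hover 0.5 m higher
while not rospy.is_shutdown():
    drone.fly()
    drone.publish_sp()
    drone.rate.sleep()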
コード例 #55
0
class Crazyflie:
    def __init__(self, prefix, cf_id):
        """
        Creates a Crazyflie high-level wrapper
        
        Args:
            prefix (str): ROS namespace of the drone. Ex = "/cf1"
            cf_id (int): drone id. Ex : 1
        """
        self.prefix = prefix
        self.tf = TransformListener()
        self.cf_id = cf_id

        rospy.wait_for_service(prefix + "/set_group_mask")
        self.setGroupMaskService = rospy.ServiceProxy(
            prefix + "/set_group_mask", SetGroupMask)
        rospy.wait_for_service(prefix + "/takeoff")
        self.takeoffService = rospy.ServiceProxy(prefix + "/takeoff", Takeoff)
        rospy.wait_for_service(prefix + "/land")
        self.landService = rospy.ServiceProxy(prefix + "/land", Land)
        rospy.wait_for_service(prefix + "/stop")
        self.stopService = rospy.ServiceProxy(prefix + "/stop", Stop)
        rospy.wait_for_service(prefix + "/go_to")
        self.goToService = rospy.ServiceProxy(prefix + "/go_to", GoTo)
        rospy.wait_for_service(prefix + "/upload_trajectory")
        self.uploadTrajectoryService = rospy.ServiceProxy(
            prefix + "/upload_trajectory", UploadTrajectory)
        rospy.wait_for_service(prefix + "/start_trajectory")
        self.startTrajectoryService = rospy.ServiceProxy(
            prefix + "/start_trajectory", StartTrajectory)
        rospy.wait_for_service(prefix + "/update_params")
        self.updateParamsService = rospy.ServiceProxy(
            prefix + "/update_params", UpdateParams)

    def setGroup(self, groupMask):
        self.setGroupMaskService(groupMask)

    def takeoff(self, targetHeight, duration, groupMask=0):
        self.takeoffService(groupMask, targetHeight,
                            rospy.Duration.from_sec(duration))

    def land(self, targetHeight, duration, groupMask=0):
        self.landService(groupMask, targetHeight,
                         rospy.Duration.from_sec(duration))

    def stop(self, groupMask=0):
        self.stopService(groupMask)

    def goTo(self, goal, yaw, duration, relative=False, groupMask=0):
        gp = arrayToGeometryPoint(goal)
        self.goToService(groupMask, relative, gp, yaw,
                         rospy.Duration.from_sec(duration))

    def uploadTrajectory(self, trajectoryId, pieceOffset, trajectory):
        pieces = []
        for poly in trajectory.polynomials:
            piece = TrajectoryPolynomialPiece()
            piece.duration = rospy.Duration.from_sec(poly.duration)
            piece.poly_x = poly.px.p
            piece.poly_y = poly.py.p
            piece.poly_z = poly.pz.p
            piece.poly_yaw = poly.pyaw.p
            pieces.append(piece)
        self.uploadTrajectoryService(trajectoryId, pieceOffset, pieces)

    def startTrajectory(self,
                        trajectoryId,
                        timescale=1.0,
                        reverse=False,
                        relative=True,
                        groupMask=0):
        self.startTrajectoryService(groupMask, trajectoryId, timescale,
                                    reverse, relative)

    def position(self):
        self.tf.waitForTransform("/world", "/cf" + str(self.cf_id),
                                 rospy.Time(0), rospy.Duration(10))
        position, quaternion = self.tf.lookupTransform("/world",
                                                       "/cf" + str(self.cf_id),
                                                       rospy.Time(0))
        return np.array(position)

    def getParam(self, name):
        return rospy.get_param(self.prefix + "/" + name)

    def setParam(self, name, value):
        rospy.set_param(self.prefix + "/" + name, value)
        self.updateParamsService([name])

    def setParams(self, params):
        for name, value in params.iteritems():
            rospy.set_param(self.prefix + "/" + name, value)
        self.updateParamsService(params.keys())

    def enableHighLevel(self):
        self.setParam("commander/enHighLevel", 1)
コード例 #56
0

    side = 'BOTTOM'

    cam = RGBD()
    com = COM()

    com.go_to_initial_state(whole_body)

    tt = TableTop()
    tt.find_table(robot)

    grasp_count = 0

    br = tf.TransformBroadcaster()
    tl = TransformListener()

    gp = GraspPlanner()
    gripper = Crane_Gripper(gp, cam, com.Options, robot.get('gripper'))
    suction = Suction_Gripper(gp, cam, com.Options, robot.get('suction'))
    gm = GraspManipulator(gp, gripper, suction, whole_body, omni_base, tt)
    gm.position_head()

    print "after thread"
    curr_offsets = np.array([0, 0, -0.5])
    curr_rot = np.array([0.0, 0.0, 1.57])

    while True:
        label = id_generator()
        tt.make_new_pose(curr_offsets, label, rot=curr_rot)
        whole_body.move_end_effector_pose(geometry.pose(z=-0.1), label)
コード例 #57
0
ファイル: spray.py プロジェクト: sdu-rsd-group1/FroboMind
class SprayAction():
    """
        Drives x meters either forward or backwards depending on the given distance.
        The velocity should always be positive.
    """
    def __init__(self, name, odom_frame, base_frame):
        """
        
        @param name: the name of the action
        @param odom_frame: the frame the robot is moving in (odom_combined)
        @param base_frame: the vehicles own frame (usually base_link)
        """

        self._action_name = name
        self.__odom_frame = odom_frame
        self.__base_frame = base_frame
        self.__server = actionlib.SimpleActionServer(self._action_name,
                                                     sprayAction,
                                                     auto_start=False)
        self.__server.register_preempt_callback(self.preempt_cb)
        self.__server.register_goal_callback(self.goal_cb)

        self.__cur_pos = 0
        self.__start_pos = 0
        self.__start_yaw = 0
        self.__cur_yaw = 0

        self.__feedback = sprayFeedback()

        self.__listen = TransformListener()
        #self.vel_pub = rospy.Publisher("/fmControllers/cmd_vel_auto",Twist)
        self.spray_pub = rospy.Publisher("/fmControllers/spray", UInt32)

        self.__start_time = rospy.Time.now()
        self.new_goal = False
        self.n_sprays = 0
        self.spray_msg = UInt32()

        self.spray_l = False
        self.spray_cnt = 0

        self.__server.start()

    def preempt_cb(self):
        rospy.loginfo("Preempt requested")
        self.spray_msg.data = 0
        self.spray_pub.publish(self.spray_msg)
        self.__server.set_preempted()

    def goal_cb(self):
        """
            called when we receive a new goal
            the goal contains a desired radius and a success radius in which we check if the turn succeeded or not
            the message also contains if we should turn left or right
        """
        g = self.__server.accept_new_goal()
        self.spray_dist = g.distance
        self.spraytime = g.spray_time

        self.new_goal = True

    def on_timer(self, e):
        """
            called regularly by a ros timer
            in here we simply orders the vehicle to start moving either forward 
            or backwards depending on the distance sign, while monitoring the 
            travelled distance, if the distance is equal to or greater then this
            action succeeds.
        """
        if self.__server.is_active():
            if self.new_goal:
                self.new_goal = False
                self.n_sprays = 0
                self.__get_start_position()
            else:
                if self.__get_current_position():
                    if self.__get_distance() >= self.spray_dist:
                        self.do_spray()
                        self.__get_start_position()
                else:
                    self.__server.set_aborted(None, "could not locate")
                    rospy.logerr("Could not locate vehicle")
                    self.spray_msg.data = 0
                    self.spray_pub.publish(self.spray_msg)

    def __get_distance(self):
        return math.sqrt(
            math.pow(self.__cur_pos[0][0] - self.__start_pos[0][0], 2) +
            math.pow(self.__cur_pos[0][1] - self.__start_pos[0][1], 2))

    def __get_start_position(self):
        ret = False
        try:
            self.__start_pos = self.__listen.lookupTransform(
                self.__odom_frame, self.__base_frame, rospy.Time(0))
            self.__start_yaw = tf.transformations.euler_from_quaternion(
                self.__start_pos[1])[2]
            ret = True
        except (tf.LookupException, tf.ConnectivityException), err:
            rospy.loginfo("could not locate vehicle")

        return ret
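
The on_timer callback is not wired up in the excerpt; presumably the launching code registers it with a ROS timer, sketched here with an assumed period:

# Hypothetical node setup for the action server above (period assumed).
rospy.init_node('spray_action')
action = SprayAction('sprayAction', 'odom_combined', 'base_link')
rospy.Timer(rospy.Duration(0.05), action.on_timer)
rospy.spin()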
コード例 #58
0
class Unique_persion_detection(object):
    """detect the unique persion from the fused objects"""

    def __init__(self, node_name="Unique_person"):
        self.node_name = node_name
        rospy.init_node(self.node_name, anonymous=True)
        # self.pub = rospy.Publisher("trajectory",Path,queue_size = 2)
        self.info_sub = rospy.Subscriber("/fused_detectionarray", DetectionArray, self.info_odom_callback) #   fused_pointcloud 
        # self.odom_sub = message_filters.Subscriber("/husky1_robot_pose",Odometry)
        self.info_pub = rospy.Publisher("fused_person",PointCloud, queue_size = 2)
        self.unique_pub = rospy.Publisher("Unique_person",PointStamped, queue_size = 2)
        # self.image_pub = rospy.Publisher("Abnormal_image",Image,queue_size = 2)
        #self.odom_sub = message_filters.Subscriber("/optris/thermal_image",Image)
        self.paths_pub = []
        self.listener = TransformListener()
        self.paths = []
        self.paths_size = 0
        #self.pose = PoseStamped()
        self.trans_pose = PoseStamped()
        #self.pose_index = []
        self.prob = []
        self.pre_velocity_prob = []
        self.velocity = []
        self.V_magnitude = []
        self.V_direction = []
        self.Human_list = []
        self.cv_bridge = CvBridge()
        self.Unique_l = 0
        self.fused_pointcloud = PointCloud()
        self.unique_pointstamped = PointStamped()
        rospy.loginfo("Class has been instantiated!")

    def info_odom_callback(self,info):   #,odom
        print("comesin")
        self.velocity[:] = []
        self.V_direction = []
        self.V_magnitude = []
        self.prob = []
        self.fused_pointcloud = PointCloud()
        my_awesome_pointcloud = PointCloud()
        #filling pointcloud header
        # header = std_msgs.msg.Header()
        # header.stamp = rospy.Time.now()
        # header.frame_id = 'map'
        # my_awesome_pointcloud.header = header

        if (info.size > 0):
            # anzahl_punkte = 10 # i got 10 points, for example
            # self.stelldaten_tabelle.header = self.h
            # self.stelldaten_tabelle.points = anzahl_punkte
            # self.stelldaten_tabelle.channels = 3
            # point = Point()

            # self.pointcloud.header.stamp = rospy.Time.now()
            # self.pointcloud.header.frame_id = "map"
            # self.pointcloud.points = info.size
            
            # self.pointcloud.channels = 3

            for i in range(info.size):                
                # print("info.size: ")
                # print(info.size)
                # print("i: ")
                # print(i)
                # print("info.data[i].ptStamped.point.x" )
                # print(info.data[i].ptStamped.point.x )
                
                # self.pointcloud.points[i].x = info.data[i].ptStamped.point.x 
                # self.pointcloud.points[i].y = info.data[i].ptStamped.point.y 
                # self.pointcloud.points[i].z = info.data[i].ptStamped.point.z
                
                # self.pointcloud.points.append(info.data[i].ptStamped.point) # Point32(1.0, 1.0, 0.0)
                my_awesome_pointcloud.points.append(Point32(info.data[i].ptStamped.point.x, info.data[i].ptStamped.point.y, info.data[i].ptStamped.point.z))

                print("awesome")
                # print(my_awesome_pointcloud.points[i])

                # self.pointcloud.points.append( Point32(info.data[i].ptStamped.point.x, info.data[i].ptStamped.point.y, info.data[i].ptStamped.point.z) )
            
            
            self.pointcloud = my_awesome_pointcloud
            self.pointcloud.header.stamp = rospy.Time.now()
            self.pointcloud.header.frame_id = "map"
            # self.pointcloud.points = info.size
            
            # self.pointcloud.channels = 3
            # print(self.pointcloud)


            #rospy.loginfo("%s",info.size)
            # Store the path info for each detection and publish the path while getting the new message update.
            for entry in info.data:
                if entry.num > self.paths_size:
                    for i in range(entry.num-self.paths_size):
                        self.paths.append(Path())
                        #self.dynamics.append(Dynamics())
                        self.paths_pub.append(rospy.Publisher("~trajectory"+str(self.paths_size + i + 1),Path,queue_size = 1))
                        #self.dynamics_pub.append(rospy.Publisher("~dynamics")+str(self.paths_size + i + 1),Dynamics,queue_size = 1)
                        #self.pose_index.append(0)
                    self.paths_size = entry.num

                pose = PoseStamped()
                point_stamped = entry.ptStamped
                pose.header = point_stamped.header
                pose.pose.position = point_stamped.point
                pose.pose.orientation.w = 1
                rospy.loginfo("Trajectories")
                try:
                    # make sure the frame_id of the pose (ptStamped) is correct
                    self.trans_pose = self.listener.transformPose("map", pose)
                except tf.Exception:
                    rospy.loginfo("The transformation is not available right now!")
                
                self.paths[entry.num - 1].poses.append(self.trans_pose)
                self.paths[entry.num - 1].header.frame_id = 'map'
                self.paths_pub[entry.num - 1].publish(self.paths[entry.num - 1])
                rospy.loginfo("Poses stored!")
                # Estimate the person's velocity by finite difference over the last two poses.
                if len(self.paths[entry.num - 1].poses) > 3:
                    pre_pose = self.paths[entry.num - 1].poses[-2]
                    cur_pose = self.paths[entry.num - 1].poses[-1]
                    duration = (cur_pose.header.stamp - pre_pose.header.stamp).to_sec()
                    if duration > 0:  # guard against identical stamps
                        cur_x = cur_pose.pose.position.x
                        cur_y = cur_pose.pose.position.y
                        cur_z = cur_pose.pose.position.z
                        vx = (cur_x - pre_pose.pose.position.x) / duration
                        vy = (cur_y - pre_pose.pose.position.y) / duration
                        self.velocity.append([cur_x, cur_y, cur_z, vx, vy, entry.num])
                
                
            # Placeholder for the robot's own position (no odometry source is wired in here).
            odom_x = 0.0
            odom_y = 0.0
            odom_z = 0.0
            # Append the reference point as the final entry (note: list.append returns None,
            # so its result must not be assigned to anything).
            self.velocity.append([odom_x, odom_y, odom_z])
            if len(self.velocity) > 1:
                Human_num = len(self.velocity) - 1
                for i in np.arange(Human_num):
                    self.Human_list.append([self.velocity[i][5]])
                    # Speed magnitude of person i.
                    self.V_magnitude.append(np.sqrt(np.square(self.velocity[i][3]) + np.square(self.velocity[i][4])))
                    # Heading of person i, scored as cosine similarity to the fixed reference direction [0, -1].
                    vector1 = [self.velocity[i][3], self.velocity[i][4]]
                    vector1 = vector1 / np.linalg.norm(vector1)
                    self.V_direction.append(np.dot(vector1, [0, -1]) / (np.linalg.norm(vector1) * np.linalg.norm([0, -1])))

                Unique_v = self.crf(self.V_magnitude)
                Unique_d = self.crf(self.V_direction)
                # Push the direction scores through exp(-x) and renormalize so unusual headings stand out.
                Unique_d = self.normalize(np.exp(np.dot(-1, Unique_d)))
                # Weight the two cues by their variances; fall back to equal weights when either cue is constant.
                if np.var(Unique_d) == 0 or np.var(Unique_v) == 0:
                    [w_v, w_d] = [0.5, 0.5]
                else:
                    [w_v, w_d] = self.normalize([np.var(Unique_v), np.var(Unique_d)])
                Unique = list(np.array(Unique_v) * np.array(w_v) + np.array(Unique_d) * np.array(w_d))

                # Interleave track IDs with their fused scores: [id1, p1, id2, p2, ...].
                list1 = []
                for (i, j) in zip(self.velocity[:-1], range(len(Unique))):
                    self.prob.append(i[5])
                    self.prob.append(Unique[j])

                if not self.pre_velocity_prob:
                    self.pre_velocity_prob = self.prob
                else:
                    # Temporal smoothing: blend each score with its value from the previous
                    # frame (0.6 new + 0.4 old); IDs unseen last frame are only down-weighted.
                    for i in range(len(self.prob)):
                        if i % 2 == 0:
                            if self.prob[i] in self.pre_velocity_prob:
                                self.prob[i + 1] = 0.6 * self.prob[i + 1] + 0.4 * self.pre_velocity_prob[self.pre_velocity_prob.index(self.prob[i]) + 1]
                            else:
                                self.prob[i + 1] = 0.6 * self.prob[i + 1]

                    for i in range(len(self.prob)):
                        if i % 2 == 0:
                            list1.append(self.prob[i + 1])
                    # The unique person is the ID paired with the highest smoothed score.
                    self.Unique_l = self.prob[self.prob.index(max(list1)) - 1]
                    rospy.loginfo("The unique object's label detected in the global frame is %s", self.Unique_l)
                    self.pre_velocity_prob = self.prob

            # print("hello'")
            # cv_image = self.cv_bridge.imgmsg_to_cv2(info.image,"bgr8")
            # for entry in info.data:
            #     if (entry.num == self.Unique_l):
            #         cv2.rectangle(cv_image,(int(entry.x),int(entry.y)),(int(entry.x + entry.width),int(entry.y + entry.height)),(255,0,0),2)
            #         cv2.putText(cv_image,"Person:" + str(entry.num), (int(entry.x),int(entry.y)),0,5e-3*100,(0,255,0),2)
            #     else:
            #         cv2.rectangle(cv_image,(int(entry.x),int(entry.y)),(int(entry.x + entry.width),int(entry.y + entry.height)),(255,0,0),2)
            #         cv2.putText(cv_image,"Person:" + str(entry.num), (int(entry.x),int(entry.y)),0,5e-3*100,(0,255,0),2)
            # ros_image = self.cv_bridge.cv2_to_imgmsg(cv_image)
            # self.image_pub.publish(ros_image)    
            # print("haha")
            # Publish the unique person's position in the map frame.
            for entry in info.data:
                if entry.num == self.Unique_l:
                    self.unique_pointstamped.point.x = entry.ptStamped.point.x
                    self.unique_pointstamped.point.y = entry.ptStamped.point.y
                    self.unique_pointstamped.point.z = entry.ptStamped.point.z
                    self.unique_pointstamped.header.stamp = rospy.Time.now()
                    self.unique_pointstamped.header.frame_id = "map"
                    self.unique_pub.publish(self.unique_pointstamped)

            self.info_pub.publish(self.pointcloud)

        else:
            rospy.logdebug("There is no person detected in the current frame!")

    def crf(self, a):
        # Leave-one-out scoring at the feature level (CRF-inspired): element i is
        # scored by the product of all the other elements, normalized to sum to 1.
        m = []
        for i in np.arange(np.size(a)):
            m.append(reduce(lambda x, y: x * y, a) / a[i])

        Z = reduce(lambda x, y: x + y, m)
        return np.divide(m, Z)

    def softmax(self, a):
        # Numerically stable softmax; an empty input passes through, a singleton maps to [1].
        if len(a) == 0:
            return a
        elif len(a) == 1:
            return [1]

        e_a = np.exp(a - np.max(a))
        return e_a / e_a.sum()

    def normalize(self, a):
        # Scale the entries so they sum to 1.
        if len(a) == 0:
            return a
        elif len(a) == 1:
            return [1]

        total = reduce(lambda x, y: x + y, a)
        for i in np.arange(np.shape(a)[0]):
            a[i] = a[i] / total
        return a
    
    def normalize2(self, a):
        # Min-max scaling; take the bounds once up front so that the in-place
        # updates do not skew the range used for later entries.
        mn = np.min(a)
        mx = np.max(a)
        for i in np.arange(len(a)):
            a[i] = (a[i] - mn) / (mx - mn)
        return a
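
The node above combines three ideas: leave-one-out scoring of each cue (the crf method), an exp(-x) renormalization of the direction cue, and variance-based fusion of the two cues. The following is a minimal, self-contained sketch of that scoring pipeline under made-up input values; every number and name below is illustrative, not taken from the original.

# Illustrative sketch of the uniqueness scoring used above; all values are made up.
from functools import reduce  # built in on Python 2; imported here for Python 3

import numpy as np

def leave_one_out(a):
    # Score element i by the product of all the other elements, normalized
    # to sum to 1 (mirrors the crf() method above).
    total_product = reduce(lambda x, y: x * y, a)
    m = [total_product / v for v in a]
    z = sum(m)
    return [v / z for v in m]

# Made-up cues for four tracked people.
speeds = [0.9, 1.0, 1.1, 3.0]        # m/s
headings = [0.95, 0.90, 0.92, 0.10]  # cosine similarity to a reference direction

u_v = np.array(leave_one_out(speeds))

# As above: leave-one-out on the headings, then exp(-x) and renormalize.
u_d = np.exp(-np.array(leave_one_out(headings)))
u_d = u_d / u_d.sum()

# Fuse the cues with variance-based weights (equal weights if either cue is constant).
if np.var(u_v) == 0 or np.var(u_d) == 0:
    w_v, w_d = 0.5, 0.5
else:
    w_v, w_d = np.var(u_v), np.var(u_d)
    w_v, w_d = w_v / (w_v + w_d), w_d / (w_v + w_d)

fused = w_v * u_v + w_d * u_d
print("index with the highest fused score:", int(np.argmax(fused)))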
Code Example #59
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import tf_conversions
from tf import TransformListener

print("============ Starting tutorial setup")
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('move_group_python_interface_tutorial', anonymous=True)


tf_listener = TransformListener()

robot = moveit_commander.RobotCommander()
scene = moveit_commander.PlanningSceneInterface()
arm = moveit_commander.MoveGroupCommander("arm")
gripper = moveit_commander.MoveGroupCommander("gripper")


rospy.sleep(1)

arm.set_goal_tolerance(0.003)
arm.set_max_velocity_scaling_factor(1.0)


print("current pose: ", arm.get_current_pose())
print("current rpy: ", arm.get_current_rpy())
Code Example #60
    def __init__(self):
        self.tf_listener = TransformListener()
        self.tf_broadcaster = TransformBroadcaster()
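
The snippet is cut off after the two tf handles are created. As a hedged illustration of how such a class is typically completed, the sketch below looks up a transform and re-broadcasts it under a new child frame; the class name, the republish method, and all frame names are assumptions, not part of the original.

# Hypothetical completion of a class like the one above; frame names are assumptions.
import rospy
import tf
from tf import TransformListener, TransformBroadcaster

class FrameRepublisher:
    def __init__(self):
        self.tf_listener = TransformListener()
        self.tf_broadcaster = TransformBroadcaster()

    def republish(self):
        try:
            # Latest time at which both frames are known to the listener.
            t = self.tf_listener.getLatestCommonTime("/map", "/base_link")
            position, quaternion = self.tf_listener.lookupTransform("/map", "/base_link", t)
        except tf.Exception:
            rospy.logwarn("transform not available yet")
            return
        # Re-broadcast the same pose under an illustrative child frame name.
        self.tf_broadcaster.sendTransform(position, quaternion, rospy.Time.now(),
                                          "/base_link_copy", "/map")

if __name__ == '__main__':
    rospy.init_node('frame_republisher')
    node = FrameRepublisher()
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        node.republish()
        rate.sleep()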