Example #1
class CamPixelToPointServer:
    def __init__(self):
        self.camera_model = PinholeCameraModel()
        self.bridge = CvBridge()
        self.camera_model.fromCameraInfo(SubscriberValue('camera_info', CameraInfo).value)
        self.depth_image = SubscriberValue('camera_depth_image', Image, transform=self.bridge.imgmsg_to_cv2)
        self.service = rospy.Service('cam_pixel_to_point', CamPixelToPoint, self.handle_service)
        self.tf_listener = TransformListener()
        print('Service is ready.')

    def handle_service(self, req):  # type: (CamPixelToPointRequest) -> CamPixelToPointResponse
        x, y = int(req.cam_pixel.x), int(req.cam_pixel.y)
        methods = [self.read_depth_simple,
                   # self.read_depth_average,
                   self.read_depth_as_floor_depth]
        for method in methods:
            d = method(x, y)
            if not np.isnan(d):
                break

        pos = np.array(self.camera_model.projectPixelTo3dRay((x, y))) * d

        point = PointStamped()
        point.header.frame_id = self.camera_model.tfFrame()
        point.point.x, point.point.y, point.point.z = pos[0], pos[1], pos[2]
        return CamPixelToPointResponse(point)

    def read_depth_simple(self, x, y):  # (int, int) -> float
        return self.depth_image.value[y, x]

    def read_depth_average(self, x, y):  # (int, int) -> float
        print('Fallback to average')
        s = 5
        return np.nanmean(self.depth_image.value[y-s:y+s, x-s:x+s])

    def read_depth_as_floor_depth(self, x, y):  # (int, int) -> float
        print('Fallback to floor model')
        min_distance = 10.0
        # Extend the camera ray until it passes through where the floor should be. Use its length as the depth.
        camera_origin = PointStamped()
        camera_origin.header.frame_id = self.camera_model.tfFrame()
        camera_origin.point.x, camera_origin.point.y, camera_origin.point.z = 0.0, 0.0, 0.0
        point_along_ray = PointStamped()
        point_along_ray.header.frame_id = self.camera_model.tfFrame()
        point_along_ray.point.x, point_along_ray.point.y, point_along_ray.point.z = self.camera_model.projectPixelTo3dRay((x, y))

        self.tf_listener.waitForTransform('base_footprint', self.camera_model.tfFrame(), rospy.Time(rospy.get_time()), rospy.Duration(1))
        camera_origin = self.tf_listener.transformPoint('base_footprint', camera_origin)
        point_along_ray = self.tf_listener.transformPoint('base_footprint', point_along_ray)

        camera_origin = np_from_poinstamped(camera_origin)
        point_along_ray = np_from_poinstamped(point_along_ray)
        ray_dir = point_along_ray - camera_origin
        # Assuming this transformation was orthogonal, |ray_dir| = 1, at least approximately
        d = camera_origin[1]/max(-ray_dir[1], camera_origin[1]/min_distance)
        if d <= 0.01:
            d = np.nan
        return d
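A minimal client sketch for the service above, assuming the CamPixelToPoint service type is importable from a hypothetical my_robot_srvs package and that cam_pixel is a geometry_msgs/Point; adjust the import to whatever package actually defines the service.

import rospy
from geometry_msgs.msg import Point
from my_robot_srvs.srv import CamPixelToPoint, CamPixelToPointRequest  # hypothetical package name

rospy.init_node('cam_pixel_to_point_client')
rospy.wait_for_service('cam_pixel_to_point')
cam_pixel_to_point = rospy.ServiceProxy('cam_pixel_to_point', CamPixelToPoint)
req = CamPixelToPointRequest()
req.cam_pixel = Point(x=320, y=240, z=0)  # pixel coordinates in the RGB image
resp = cam_pixel_to_point(req)
print(resp.point)  # PointStamped expressed in the camera's TF frame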
Example #2
class StartServer(object):
    def __init__(self):
        rospy.init_node('start_server')
        self._server = actionlib.SimpleActionServer(
            "StartServer",
            iair_msgs.msg.armAction,
            execute_cb=self.call_arm_server,
            auto_start=False)
        self.tf_listener = TransformListener()
        self.pp_client = actionlib.SimpleActionClient(
            "PickAndPlace", kinova_msgs.msg.PoseAndSizeAction)
        self._server.start()

    def call_arm_server(self, goal):
        arm_goal = kinova_msgs.msg.PoseAndSizeGoal()
        arm_goal.object_class = goal.target_object.object_class
        arm_goal.object_pose = self.tf_listener.transformPoint(
            "root", goal.target_object.object_point)
        rospy.logwarn("arm_goal.object_pose:\n%s", arm_goal.object_pose)
        arm_goal.object_size = goal.target_object.object_size.point
        self.pp_client.wait_for_server()
        self.pp_client.send_goal(arm_goal)
        if self.pp_client.wait_for_result():
            state = self.pp_client.get_state()
            if state == GoalStatus.SUCCEEDED:
                print("Pick and Place Action: Succeeded")
            elif state == GoalStatus.ABORTED:
                rospy.logwarn("Aborted!")
Example #3
class RangeToPointCloud2Converter(object):
    def __init__(self, frame_out):
        self.tl = TransformListener()
        self.points = []
        self.frame_out = frame_out

    def receiveRange(self, msg):
        ps = PointStamped()
        ps.header.frame_id = msg.header.frame_id
        ps.header.stamp = rospy.Time(0)
        ps.point.x = min(max(msg.range, msg.min_range), msg.max_range)
        
        try:
            p = self.tl.transformPoint(self.frame_out, ps).point
            xyz = [p.x, p.y, p.z]
            self.points.append(xyz)
        except LookupException as e:
            rospy.logerr('could not find transform from {} to {}'.format(ps.header.frame_id, self.frame_out))

    def flushPointCloud(self):
        header = Header()
        header.frame_id = self.frame_out
        header.stamp = rospy.Time.now()

        pc = create_cloud_xyz32(header, self.points) if len(self.points) else None
        self.points = []
        return pc
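A usage sketch for the converter above; the topic names, output frame, and flush rate are assumptions, since the node that hosts this class is not shown.

import rospy
from sensor_msgs.msg import Range, PointCloud2

rospy.init_node('range_to_pointcloud2')
converter = RangeToPointCloud2Converter('base_link')
rospy.Subscriber('sonar_range', Range, converter.receiveRange)
cloud_pub = rospy.Publisher('sonar_cloud', PointCloud2, queue_size=1)

rate = rospy.Rate(10)
while not rospy.is_shutdown():
    cloud = converter.flushPointCloud()  # returns None when no points were accumulated
    if cloud is not None:
        cloud_pub.publish(cloud)
    rate.sleep()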
Example #4
class PoopTransformer:
    def __init__(self):
        self.subscriber = rospy.Subscriber('/poop_perception', PointCloud,
                                           self._callback)
        self.transformer = TransformListener()
        self.publisher = rospy.Publisher('/poop_perception_odom_combined',
                                         PointCloud)

    def _callback(self, data):
        self.transformer.waitForTransform('odom_combined', 'map', Time.now(),
                                          rospy.Duration(2))
        new_data = PointCloud()
        new_data.header.stamp = Time.now()
        new_data.header.frame_id = 'odom_combined'
        new_data.points = [
            self._map_to_odom_combined(p, data.header.stamp)
            for p in data.points
        ]
        new_data.channels = data.channels
        self.publisher.publish(new_data)

    def _map_to_odom_combined(self, point_in_map, stamp):
        """Takes and returns a Point32."""
        ps = PointStamped()
        ps.header.frame_id = 'map'
        ps.header.stamp = stamp
        ps.point.x = point_in_map.x
        ps.point.y = point_in_map.y
        ps.point.z = point_in_map.z
        self.transformer.waitForTransform('odom_combined', 'map', stamp,
                                          rospy.Duration(2))
        point_in_odom = self.transformer.transformPoint('odom_combined', ps)
        z = 25 if point_in_map.z == 25 else point_in_map.z
        return Point32(point_in_odom.point.x, point_in_odom.point.y, z)
Example #5
class FTPNode:
    def __init__(self, *args):
        print("init")
        self.tf = TransformListener()
        self.tt = Transformer()
        rospy.Subscriber("/trackedHumans", TrackedHumans, self.pos_callback)
        self.publisher = rospy.Publisher("directionmarker_array", MarkerArray)


    def pos_callback(self, data):
        rospy.loginfo("on updated pos, face robot towards guy...")
        print("hi")
        if (len(data.trackedHumans) > 0):
            print(data.trackedHumans[0].location.point.x)
            try:
                self.tf.waitForTransform(data.trackedHumans[0].location.header.frame_id, "/base_link", rospy.Time.now(), rospy.Duration(2.0))
                pp = self.tf.transformPoint("/base_link", data.trackedHumans[0].location)
                print "Original:"
                print [data.trackedHumans[0].location.point]
                print "Transform:"
                print pp.point

                phi = math.atan2(pp.point.y, pp.point.x)
                sss.move_base_rel("base", [0,0,phi])
                print phi*180/math.pi
                
                markerArray = MarkerArray()
                marker = Marker()
                marker.header.stamp = rospy.Time()
                marker.ns = "my_namespace"
                marker.id = 0
                marker.header.frame_id = "/base_link"
                marker.type = marker.ARROW
                marker.action = marker.ADD
                marker.scale.x = .1
                marker.scale.y = .1
                marker.scale.z = .1
                marker.color.a = 1.0
                marker.color.r = 1.0
                marker.color.g = 1.0
                marker.color.b = 0.0
                p1 = Point()
                p1.x = 0
                p1.y = 0
                p1.z = 0
                p2 = Point()
                p2.x = pp.point.x
                p2.y = pp.point.y
                p2.z = 0
                marker.points = [p1,p2]
                #marker.pose.position.x = 1
                #marker.pose.position.y = 0
                #marker.pose.position.z = 1
                #marker.pose.orientation.w = 1
                markerArray.markers.append(marker)
                self.publisher.publish(markerArray)
                print "try ended"
            except Exception as e:
                print e
Example #6
    def run(self, motionDir):
        ###assumes it has been initialized
        #assumes ros node exists
        listener = TransformListener()
        time.sleep(2)
        PoseFiles = os.listdir(motionDir)
        prevPoseName = "path"
        currentPoseName = None
        for f in PoseFiles:
            frameTracks = 0
            frameLosses = 0
            currentPoseName = f[:f.rfind(".")]
            print(prevPoseName, currentPoseName)
            #####
            ##check each landmark if it was present in the previous frame

            for k in self.Landmarks.keys():
                if (prevPoseName in self.Landmarks[k].frameTracks):
                    ########
                    ##check if it is still visible

                    ###
                    ##transform from first sighting into current coordinate frame
                    originalPoint = PointStamped()
                    originalPoint.point.x = self.Landmarks[k].X[0, 0]
                    originalPoint.point.y = self.Landmarks[k].X[1, 0]
                    originalPoint.point.z = self.Landmarks[k].X[2, 0]
                    originalPoint.header.frame_id = self.Landmarks[
                        k].frameTracks[0]

                    PotentialPoint = listener.transformPoint(
                        currentPoseName, originalPoint)
                    ########################
                    ##project it onto the cameras
                    hX = np.ones((4, 1))
                    hX[0, 0] = PotentialPoint.point.x
                    hX[1, 0] = PotentialPoint.point.y
                    hX[2, 0] = PotentialPoint.point.z

                    L = self.kSettings["Pl"].dot(hX)
                    L /= L[2, 0]
                    R = self.kSettings["Pr"].dot(hX)
                    R /= R[2, 0]

                    if (self.checkWithinROI(L)
                            and self.checkWithinROI(R, False)):
                        self.Landmarks[k].Ml.append(L)
                        self.Landmarks[k].Mr.append(R)
                        self.Landmarks[k].frameTracks.append(currentPoseName)
                        frameTracks += 1
                    else:
                        data = self.genLandmark()
                        newVertex = simLandmark(currentPoseName, data[0],
                                                data[1], data[2])
                        self.Landmarks[str(self.count).zfill(7)] = newVertex
                        self.count += 1
                        frameLosses += 1
            print(frameTracks, frameLosses, self.count)
            prevPoseName = currentPoseName
Example #7
class VelocityTracker(object):
    def __init__(self):
        self.people = {}
        self.rate = 10
        self.last_stamp = rospy.Time.now()
        self.transform_listener = TransformListener()
        self.TIMEOUT = rospy.Duration(rospy.get_param('~timeout', 1.0))
        self.sub = rospy.Subscriber('/people_tracker_measurements',
                                    PositionMeasurementArray, self.pm_cb)
        self.mpub = rospy.Publisher('/visualization_marker',
                                    Marker,
                                    queue_size=1)
        self.ppub = rospy.Publisher('/people', People, queue_size=1)

    def pm_cb(self, msg):
        self.last_stamp = msg.header.stamp
        for pm in msg.people:
            if pm.object_id in self.people:
                self.people[pm.object_id].update(pm)
            else:
                p = PersonEstimate(pm)
                self.people[pm.object_id] = p

    def spin(self):
        rate = rospy.Rate(self.rate)
        while not rospy.is_shutdown():
            # Remove People Older Than timeout param
            #now = rospy.Time.now()
            for p in self.people.values():
                if self.last_stamp - p.age() > self.TIMEOUT:
                    del self.people[p.id()]
            self.publish()
            rate.sleep()

    def publish(self):
        gen.counter = 0
        pl = People()
        pl.header.frame_id = None
        pl.header.stamp = self.last_stamp
        for p in self.people.values():
            p.publish_markers(self.mpub)
            frame, person = p.get_person()
            pl.header.frame_id = frame
            point = PointStamped()
            point.header = pl.header
            point.point = person.position
            base_link_point = self.transform_listener.transformPoint(
                "laser", point)
            if base_link_point.point.x < 4.0 and base_link_point.point.y < 2.0 and base_link_point.point.y > -2.0:
                print frame
                print person.name
                print base_link_point.point
                print "---"
                pl.people.append(person)
        self.ppub.publish(pl)
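A sketch of how this tracker can be driven (the node name is an assumption): instantiate it after init_node and let spin() handle timeouts and publishing.

rospy.init_node('people_velocity_tracker')
tracker = VelocityTracker()
tracker.spin()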
Example #8
class CameraPointer(object):
    def __init__(self, side, camera_ik):
        self.side = side
        self.camera_ik = camera_ik
        self.joint_names = self.joint_angles_act = self.joint_angles_des = None
        self.tfl = TransformListener()
        self.joint_state_sub = rospy.Subscriber('/{0}_arm_controller/state'.format(self.side), JointTrajectoryControllerState, self.joint_state_cb)
        self.joint_traj_pub = rospy.Publisher('/{0}_arm_controller/command'.format(self.side), JointTrajectory)
        # Wait for joint information to become available
        while self.joint_names is None and not rospy.is_shutdown():
            rospy.sleep(0.5)
            rospy.loginfo("[{0}] Waiting for joint state from arm controller.".format(rospy.get_name()))
        #Set rate limits on a per-joint basis
        self.max_vel_rot = [np.pi]*len(self.joint_names)
        self.target_sub = rospy.Subscriber('{0}/lookat_ik/goal'.format(rospy.get_name()), PointStamped, self.goal_cb)
        rospy.loginfo("[{0}] Ready.".format(rospy.get_name()))

    def joint_state_cb(self, jtcs):
        if self.joint_names is None:
            self.joint_names = jtcs.joint_names
        self.joint_angles_act = jtcs.actual.positions
        self.joint_angles_des = jtcs.desired.positions

    def goal_cb(self, pt_msg):
        rospy.loginfo("[{0}] New LookatIK goal received.".format(rospy.get_name()))
        if (pt_msg.header.frame_id != self.camera_ik.base_frame):
            try:
                now = pt_msg.header.stamp
                self.tfl.waitForTransform(pt_msg.header.frame_id,
                                          self.camera_ik.base_frame,
                                          now, rospy.Duration(1.0))
                pt_msg = self.tfl.transformPoint(self.camera_ik.base_frame, pt_msg)
            except (LookupException, ConnectivityException, ExtrapolationException):
                rospy.logwarn("[{0}] TF Error tranforming point from {1} to {2}".format(rospy.get_name(),
                                                                                        pt_msg.header.frame_id,
                                                                                        self.camera_ik.base_frame))
        target = np.array([pt_msg.point.x, pt_msg.point.y, pt_msg.point.z])
        # Get IK Solution
        current_angles = list(copy.copy(self.joint_angles_act))
        iksol = self.camera_ik.lookat_ik(target, current_angles[:len(self.camera_ik.arm_indices)])
        # Start with current angles, then replace angles in camera IK with IK solution
        # Use desired joint angles to avoid sagging over time
        jtp = JointTrajectoryPoint()
        jtp.positions = list(copy.copy(self.joint_angles_des))
        for i, joint_name in enumerate(self.camera_ik.arm_joint_names):
            jtp.positions[self.joint_names.index(joint_name)] = iksol[i]
        deltas = np.abs(np.subtract(jtp.positions, current_angles))
        duration = np.max(np.divide(deltas, self.max_vel_rot))
        jtp.time_from_start = rospy.Duration(duration)
        jt = JointTrajectory()
        jt.joint_names = self.joint_names
        jt.points.append(jtp)
        rospy.loginfo("[{0}] Sending Joint Angles.".format(rospy.get_name()))
        self.joint_traj_pub.publish(jt)
Example #9
class FaceInteractionDemo(object):
    def __init__(self):
        self.move_pub = rospy.Publisher('move_base_simple/goal', PoseStamped, queue_size=10)
        self.lock = RLock()
        self.head = Head()
        self.expressions = Expressions()
        self.tf = TransformListener()
        self.lastFacePos = None
        rospy.sleep(0.5)

    # Gets an equivalent point in the reference frame "destFrame"
    def getPointStampedInFrame(self, pointStamped, destFrame):
        # Wait until we can transform the pointStamped to destFrame
        self.tf.waitForTransform(pointStamped.header.frame_id, destFrame, rospy.Time(), rospy.Duration(4.0))
        # Set point header to the last common frame time between its frame and destFrame
        pointStamped.header.stamp = self.tf.getLatestCommonTime(pointStamped.header.frame_id, destFrame)
        # Transform the point to destFrame
        return self.tf.transformPoint(destFrame, pointStamped)

    def onFacePos(self, posStamped):
        self.lastFacePos = posStamped

    def navigateTo(self):
        if (self.lastFacePos is not None and self.lock.acquire(blocking=False)):
            self.expressions.nod_head()
            pointStamped = self.getPointStampedInFrame(self.lastFacePos, "base_link")
            distance_to_base = sqrt(pointStamped.point.x ** 2 + pointStamped.point.y ** 2 + pointStamped.point.z ** 2)
            unit_vector = { "x": pointStamped.point.x / distance_to_base,
                            "y": pointStamped.point.y / distance_to_base }

            pointStamped.point.x = unit_vector["x"] * (distance_to_base - 0.5)
            pointStamped.point.y = unit_vector["y"] * (distance_to_base - 0.5)
            
            quaternion = Quaternion()
            quaternion.w = 1

            pose = Pose()
            pose.position = pointStamped.point
            pose.orientation = quaternion

            poseStamped = PoseStamped()
            poseStamped.header = pointStamped.header
            poseStamped.pose = pose

            self.move_pub.publish(poseStamped)
            self.lock.release()

    def onFaceCount(self, int8):
        if (int8.data > 0):
            self.expressions.be_happy()
        else:
            self.expressions.be_sad()
            self.expressions.be_neutral()
Example #10
class Trajectory_generator(object):
    # Generate the object's trajectory from the central point of the detected box, then project
    def __init__(self,node_name="point_reproject"):
        self.point = PointStamped()
        self.node_name = node_name
        #self.listener = tf.TransformListener()
        rospy.init_node(self.node_name)
        self.tf = TransformListener()
        self.point_sub = rospy.Subscriber("/rgbdt_fusion/resColor/full",DetectionFull,self.Point_callback)
        self.point_pub = rospy.Publisher("point_reproject",PointStamped,queue_size = 2)

    def Point_callback(self,detectionsfull):
        #self.listener.waitForTransform("/map","/velodyne",rospy.Time(),rospy.Duration(0.5))
        #try:
        #    now = rospy.Time.now()
        #    self.listener.waitForTransform("/map","/velodyne",now,rospy.Duration(0.5))
        #    (trans,rot) = listener.lookupTransform("/map","/velodyne",now)
        time = rospy.Time.now()
        rospy.logerr(time)
        if detectionsfull.detections.size >= 1:
            for i in range(detectionsfull.detections.size):
                if detectionsfull.detections.data[i].object_class == "person":

                    point_stamped = detectionsfull.detections.data[i].ptStamped
                    #self.tf.waitForTransform("base_link", "thermal_camera", rospy.Time.now(), rospy.Duration(4.0))
                    try:
                        now = rospy.Time.now()
                        self.tf.waitForTransform("map", "thermal_camera", now, rospy.Duration(4.0))

                        point_transformed = self.tf.transformPoint("map", point_stamped)
                        self.point = point_transformed
                        '''self.path.header = point_transformed.header
                        pose = PoseStamped()
                        pose.header = point_transformed.header
                        pose.pose.position.x = point_transformed.point.x
                        pose.pose.position.y = point_transformed.point.y
                        pose.pose.position.z = point_transformed.point.z
                        pose.pose.orientation.x = 0
                        pose.pose.orientation.y = 0
                        pose.pose.orientation.z = 0
                        pose.pose.orientation.w = 1
                        self.path.poses.append(pose)'''
                        self.point_pub.publish(self.point)
                        rospy.loginfo("The point has been published!")
                        #break
                    except (tf.ConnectivityException, tf.LookupException, tf.ExtrapolationException, rospy.ROSTimeMovedBackwardsException):
                        rospy.loginfo("Some exceptions happened!")
        else:
            rospy.logdebug("There is no detection of person available!")
Example #11
    def transform_point(src_frame, target_frame, point, tf_listener=None):
        if tf_listener is None:
            tf_listener = TransformListener()

        p = PointStamped()
        p.header.frame_id = src_frame
        p.header.stamp = rospy.Time(0)
        p.point = Point(*point)

        tf_listener.waitForTransform(target_frame, src_frame, rospy.Time(0),
                                     rospy.Duration(4.0))
        newp = tf_listener.transformPoint(target_frame, p)
        p = newp.point

        return numpy.array([p.x, p.y, p.z])
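A usage sketch for the helper above, assuming it is exposed as a static or module-level function; the frame names and the point are placeholders. Reusing one TransformListener avoids the fallback path that constructs a fresh listener (and an empty TF buffer) on every call.

listener = TransformListener()
rospy.sleep(1.0)  # give the listener time to buffer transforms
p_base = transform_point('camera_link', 'base_link', (0.4, 0.0, 0.2), tf_listener=listener)
print(p_base)  # numpy array [x, y, z] expressed in base_link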
Example #12
class HeadControl(object):
    def __init__(self):
        self.tf = TransformListener()
        self.client = actionlib.SimpleActionClient(ACTION_CLIENT,
                                                   PointHeadAction)
        rospy.loginfo("Waiting for point_head actionlib server...")
        self.client.wait_for_server()

    def WaitForTransform(self):
        rospy.loginfo(
            self.tf.waitForTransform('head_pan_link', 'base_link',
                                     rospy.Time(), rospy.Duration(4.0)))

    def SetPointHead(self, point):
        goal = PointHeadGoal()
        now = rospy.Time.now()  # point.header.stamp
        rospy.loginfo(
            self.tf.waitForTransform('head_pan_link', 'base_link', now,
                                     rospy.Duration(4.0)))
        corrected_point = self.tf.transformPoint('head_pan_link', point)
        corrected_point.point.z = 0.0
        rospy.loginfo(corrected_point)
        goal.target = corrected_point
        goal.min_duration = rospy.Duration(.5)
        goal.max_velocity = 1

        self.client.send_goal_and_wait(goal)

    def HeadFront(self):
        front = PointStamped()
        front.header.stamp = rospy.Time.now()
        front.header.frame_id = 'base_link'
        front.point.x = 3.0
        front.point.y = 0.0
        front.point.z = 0.0

        self.SetPointHead(front)

    def HeadLeft(self):
        left = PointStamped()
        left.header.stamp = rospy.Time.now()
        left.header.frame_id = 'base_link'
        left.point.x = 0.0
        left.point.y = 3.0
        left.point.z = 0.0

        self.SetPointHead(left)
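A driving sketch for HeadControl, assuming ACTION_CLIENT, PointHeadAction, and PointHeadGoal are imported elsewhere in the original module (they are not shown above).

rospy.init_node('head_control_demo')
head = HeadControl()
head.WaitForTransform()
head.HeadFront()  # look 3 m straight ahead of base_link
head.HeadLeft()   # then look 3 m to the left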
Example #13
    def __init__(self):

        rospy.init_node("test_changement_repere")
        
        listener = TransformListener()
        now = rospy.Time(0)
        object_point = PointStamped()
        object_point.header.frame_id = "palbator_arm_kinect_link"
        object_point.header.stamp = now
        object_point.point.x = 2.15
        object_point.point.y = 0.44
        object_point.point.z = 0.0

        rospy.loginfo("{class_name} : Object coords in palbator_arm_kinect_link : %s".format(class_name=self.__class__.__name__),str(object_point))
        listener.waitForTransform("map", "/palbator_arm_kinect_link", now, rospy.Duration(20))
        target = listener.transformPoint("/map",object_point)

        rospy.loginfo("{class_name} : Object coords in map : %s".format(class_name=self.__class__.__name__),str(target))
Example #14
class PoopPublisher:
    def __init__(self):
        self.transformer = TransformListener()
        self.publisher = rospy.Publisher('/poop_perception', PointCloud)
    
    def publish_poops_to_cost_map(self, poops):
        """Always publishes 30 poops. Keeps unused ones at map position 25, 25."""
        NUM_POOPS = 30
        poops_with_z = [(p[0], p[1], 0) for p in poops]
        unused_poops = [(25, 25, 25)] * (NUM_POOPS - len(poops))
        poop_positions = poops_with_z + unused_poops
        #print poop_positions
        
        point_cloud = PointCloud()
        # cost map doesn't like the map coordinate frame
        point_cloud.header = Header(stamp=Time.now(), frame_id='odom_combined')
        point_cloud.points = [self._map_to_odom_combined(pos)
                              for pos in poop_positions]
        #print 'new points', point_cloud.points
        point_cloud.channels = [
            ChannelFloat32(name='intensities', values=[2000] * NUM_POOPS),
            ChannelFloat32(name='index', values=[0] * NUM_POOPS), #values=range(NUM_POOPS)),
            ChannelFloat32(name='distances', values=[2] * NUM_POOPS),
            ChannelFloat32(name='stamps', values=[0.002] * NUM_POOPS),
        ]
        self.publisher.publish(point_cloud)

    def _map_to_odom_combined(self, pos):
        """Transforms (x, y, 0) to a Point32."""
        point_in_map = PointStamped()
        point_in_map.header.frame_id = 'map'
        point_in_map.header.stamp = Time.now()
        point_in_map.point.x = pos[0]
        point_in_map.point.y = pos[1]
        self.transformer.waitForTransform('odom_combined', 'map', Time.now(),
                                          rospy.Duration(2))
        point_in_odom = self.transformer.transformPoint('odom_combined',
                                                        point_in_map)
        z = 0 if pos[2] == 0 else 25
        return Point32(point_in_odom.point.x, point_in_odom.point.y, z)
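A usage sketch for the publisher above, assuming poops is a list of (x, y) map coordinates as the docstring implies.

rospy.init_node('poop_publisher')
publisher = PoopPublisher()
rospy.sleep(1.0)  # let the TransformListener buffer the map -> odom_combined transform
publisher.publish_poops_to_cost_map([(1.0, 2.0), (3.5, -0.5)])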
Example #15
class TestGetStateValidity(unittest.TestCase):
    
    def setUp(self):

        rospy.init_node('test_allowed_collision_near_start')

        self.tf = TransformListener()
        
        self.obj_pub = rospy.Publisher('collision_object',CollisionObject,latch=True)

        rospy.wait_for_service(default_prefix+'/get_robot_state')
        self.get_robot_state_server = rospy.ServiceProxy(default_prefix+'/get_robot_state', GetRobotState)
        
        rospy.wait_for_service(default_prefix+'/get_state_validity')
        self.get_state_validity_server = rospy.ServiceProxy(default_prefix+'/get_state_validity', GetStateValidity)

        rospy.wait_for_service(default_prefix+'/get_group_info')
        self.get_group_info_server = rospy.ServiceProxy(default_prefix+'/get_group_info', GetGroupInfo)
        
        rospy.sleep(3.)

        obj1 = CollisionObject()

        obj1.header.stamp = rospy.Time.now()
        obj1.header.frame_id = "r_gripper_palm_link"
        obj1.id = "test_object"
        obj1.operation.operation = mapping_msgs.msg.CollisionObjectOperation.ADD
        obj1.shapes = [Shape() for _ in range(1)]
        obj1.shapes[0].type = Shape.BOX
        obj1.shapes[0].dimensions = [float() for _ in range(3)]
        obj1.shapes[0].dimensions[0] = .03
        obj1.shapes[0].dimensions[1] = .03
        obj1.shapes[0].dimensions[2] = .03
        obj1.poses = [Pose() for _ in range(1)]
        obj1.poses[0].position.x = 0
        obj1.poses[0].position.y = 0
        obj1.poses[0].position.z = 0
        obj1.poses[0].orientation.x = 0
        obj1.poses[0].orientation.y = 0
        obj1.poses[0].orientation.z = 0
        obj1.poses[0].orientation.w = 1
    
        self.obj_pub.publish(obj1)

        rospy.sleep(2.)

        # now we put another object in collision with the base
        obj2 = CollisionObject()

        obj2.header.stamp = rospy.Time.now()
        obj2.header.frame_id = "base_link"
        obj2.id = "base_object"
        obj2.operation.operation = mapping_msgs.msg.CollisionObjectOperation.ADD
        obj2.shapes = [Shape() for _ in range(1)]
        obj2.shapes[0].type = Shape.BOX
        obj2.shapes[0].dimensions = [float() for _ in range(3)]
        obj2.shapes[0].dimensions[0] = .1
        obj2.shapes[0].dimensions[1] = .1
        obj2.shapes[0].dimensions[2] = .1
        obj2.poses = [Pose() for _ in range(1)]
        obj2.poses[0].position.x = 0
        obj2.poses[0].position.y = 0
        obj2.poses[0].position.z = 0
        obj2.poses[0].orientation.x = 0
        obj2.poses[0].orientation.y = 0
        obj2.poses[0].orientation.z = 0
        obj2.poses[0].orientation.w = 1

        self.obj_pub.publish(obj2)
        
        rospy.sleep(5.)

    def test_get_state_validity(self):

        req = GetRobotStateRequest()

        cur_state = self.get_robot_state_server.call(req)
        
        #for i in range(len(cur_state.robot_state.joint_state.name)):
        #    print cur_state.robot_state.joint_state.name[i], cur_state.robot_state.joint_state.position[i] 

        state_req = GetStateValidityRequest()
        state_req.robot_state = cur_state.robot_state

        group_req = GetGroupInfoRequest()
        group_req.group_name = 'right_arm'

        group_res = self.get_group_info_server.call(group_req)

        self.failIf(len(group_res.link_names) == 0)

        right_arm_links = group_res.link_names

        group_req.group_name = ''
        
        group_res = self.get_group_info_server.call(group_req)
        
        self.failIf(len(group_res.link_names) == 0)

        state_req.ordered_collision_operations.collision_operations = arm_navigation_msgs_utils.make_disable_allowed_collisions_with_exclusions(group_res.link_names,
                                                                                                                                                      right_arm_links)

        for i in state_req.ordered_collision_operations.collision_operations:
            print 'Disabling ', i.object1
        
        state_req.check_collisions = True

        res = self.get_state_validity_server.call(state_req)

        #should be in collision
        self.failIf(res.error_code.val == res.error_code.SUCCESS)
        self.assertEqual(res.error_code.val, res.error_code.COLLISION_CONSTRAINTS_VIOLATED) 

        #should be some contacts
        self.failIf(len(res.contacts) == 0)

        for c in res.contacts:
        
            #getting everything in common frame of base_link
            contact_point = PointStamped()
            contact_point.header = c.header
            contact_point.point = c.position
            contact_point_base = self.tf.transformPoint("base_link",contact_point)

            gripper_point = PointStamped()
            gripper_point.header.stamp = c.header.stamp
            gripper_point.header.frame_id = "r_gripper_palm_link"
            gripper_point.point.x = 0
            gripper_point.point.y = 0
            gripper_point.point.z = 0
            gripper_point_base = self.tf.transformPoint("base_link", gripper_point)

            print 'x diff:', abs(gripper_point_base.point.x-contact_point_base.point.x)
            print 'y diff:', abs(gripper_point_base.point.y-contact_point_base.point.y)
            print 'z diff:', abs(gripper_point_base.point.z-contact_point_base.point.z)

            self.failIf(abs(gripper_point_base.point.x-contact_point_base.point.x) > .031)
            self.failIf(abs(gripper_point_base.point.y-contact_point_base.point.y) > .031)
            self.failIf(abs(gripper_point_base.point.z-contact_point_base.point.z) > .031)

        #now we delete obj1
        obj2 = CollisionObject()

        obj2.header.stamp = rospy.Time.now()
        obj2.header.frame_id = "base_link"
        obj2.id = "test_object"
        obj2.operation.operation = mapping_msgs.msg.CollisionObjectOperation.REMOVE

        self.obj_pub.publish(obj2)
        
        rospy.sleep(5.)

        cur_state = self.get_robot_state_server.call(req)
        state_req.robot_state = cur_state.robot_state

        res = self.get_state_validity_server.call(state_req)

        # base shouldn't collide due to child only links
        self.failIf(res.error_code.val != res.error_code.SUCCESS)

        # now it should collide
        state_req.ordered_collision_operations.collision_operations = []

        res = self.get_state_validity_server.call(state_req)

        # base shouldn't collide due to child only links
        self.failIf(res.error_code.val == res.error_code.SUCCESS)
Example #16
class FakeFaceDetector:
    
    def __init__(self, debug=False, cam_frame_id = "/head_mount_kinect_rgb_optical_frame", robot_frame_id = "/base_link"):
        
        self.tfl = TransformListener()
        
        rospy.sleep(2)
        
        self.debug = debug
        
        self._point_pub = rospy.Publisher('nearest_face', PointStamped)
        
        self.cam_frame_id = cam_frame_id
        self.robot_frame_id = robot_frame_id
        
        self.det_duration = rospy.Duration(20)
        self.det_last = rospy.Time(0)
        
        self.pt = PointStamped()
        
        self._timer = rospy.Timer(rospy.Duration(0.1), self.timer)
        
    def timer(self,event):        
        
        now = rospy.Time.now()
        self.pt.header.stamp = now - rospy.Duration(0.5)
        
        if (self.det_last + self.det_duration < now):
        
            self.det_last = now
        
            self.pt.header.frame_id = self.robot_frame_id
            
            self.pt.point.x = random.uniform(1.5, 3.5)
            self.pt.point.y = random.uniform(-0.5,0.5)
            self.pt.point.z = random.uniform(1.3, 2.0)
            
            if self.debug:
            
                rospy.loginfo("Fake face at: [" + str(self.pt.point.x) + ", " + str(self.pt.point.y) + ", " + str(self.pt.point.z) + "] (" + self.robot_frame_id + ")")
                
                
        
        pt = self.pt
        
        pt.point.x = random.uniform(pt.point.x - 0.05, pt.point.x + 0.05)
        pt.point.y = random.uniform(pt.point.y - 0.05, pt.point.y + 0.05)
        pt.point.z = random.uniform(pt.point.z - 0.05, pt.point.z + 0.05)
        
        # TODO "simulate" movement?
        
        try:
            
            pt = self.tfl.transformPoint(self.cam_frame_id, pt)
            
        except:
            
            rospy.logerr("Transform error")
            self.det_last = rospy.Time(0)
            return
        
        self._point_pub.publish(pt)
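A minimal launch sketch for the fake detector above (the node name is an assumption); the timer started in __init__ does all the publishing.

rospy.init_node('fake_face_detector')
detector = FakeFaceDetector(debug=True)
rospy.spin()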
Example #17
class ActiveCamera(object):
    def __init__(self):
        self.cv_bridge = CvBridge()
        self.camera_info_lock = threading.RLock()
        self.ar_tag_lock = threading.RLock()

        # Setup to control camera.
        self.joint_cmd_srv = rospy.ServiceProxy(
            JOINT_COMMAND_TOPIC, JointCommand)

        # Subscribe to camera pose and instrinsic streams.
        rospy.Subscriber(
            JOINT_STATE_TOPIC,
            JointState,
            self._camera_pose_callback)
        rospy.Subscriber(
            ROSTOPIC_CAMERA_INFO_STREAM,
            CameraInfo,
            self.camera_info_callback)
        self.img = None
        rospy.Subscriber(
            ROSTOPIC_CAMERA_IMAGE_STREAM,
            Image,
            lambda x: self.img_callback(
                x,
                'img',
                'bgr8'))
        self.depth = None
        rospy.Subscriber(
            ROSTOPIC_CAMERA_DEPTH_STREAM,
            Image,
            lambda x: self.img_callback(
                x,
                'depth',
                None))
        self.depth_registered = None
        rospy.Subscriber(
            ROSTOPIC_ALIGNED_CAMERA_DEPTH_STREAM,
            Image,
            lambda x: self.img_callback(
                x,
                'depth_registered',
                None))
        rospy.Subscriber(
            ROSTOPIC_AR_POSE_MARKER,
            AlvarMarkers,
            self.alvar_callback)
        self.img = None
        self.ar_tag_pose = None

        self._transform_listener = TransformListener()
        self._update_camera_extrinsic = True
        self.camera_extrinsic_mat = None
        self.set_pan_pub = rospy.Publisher(
            ROSTOPIC_SET_PAN, Float64, queue_size=1)
        self.set_tilt_pub = rospy.Publisher(
            ROSTOPIC_SET_TILT, Float64, queue_size=1)

    def _camera_pose_callback(self, msg):
        pan_id = msg.name.index('head_pan_joint')
        tilt_id = msg.name.index('head_tilt_joint')
        self.pan = msg.position[pan_id]
        self.tilt = msg.position[tilt_id]

    def camera_info_callback(self, msg):
        self.camera_info_lock.acquire()
        self.camera_info = msg
        self.camera_P = np.array(msg.P).reshape((3, 4))
        self.camera_info_lock.release()

    def alvar_callback(self, msg):
        self.ar_tag_lock.acquire()
        self.ar_tag_pose = msg
        self.ar_tag_lock.release()

    @property
    def state(self):
        return self.get_state()

    def get_state(self):
        return [self.pan, self.tilt]

    def get_pan(self):
        return self.pan

    def get_tilt(self):
        return self.tilt

    def set_pan(self, pan, wait=True):
        pan = constrain_within_range(
            np.mod(
                pan + np.pi,
                2 * np.pi) - np.pi,
            MIN_PAN,
            MAX_PAN)
        # self.joint_cmd_srv('rad', 8, pan)
        self.set_pan_pub.publish(pan)
        if wait:
            rospy.sleep(3)

    def set_tilt(self, tilt, wait=True):
        tilt = constrain_within_range(
            np.mod(
                tilt + np.pi,
                2 * np.pi) - np.pi,
            MIN_TILT,
            MAX_TILT)
        # self.joint_cmd_srv('rad', 9, tilt)
        self.set_tilt_pub.publish(tilt)
        if wait:
            rospy.sleep(3)

    def reset(self):
        self.set_pan(RESET_PAN)
        self.set_tilt(RESET_TILT)

    def img_callback(self, msg, field, typ=None):
        if typ is None:
            setattr(self, field, self.cv_bridge.imgmsg_to_cv2(msg))
        else:
            setattr(self, field, self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding=typ))

    def get_ar_tag_pose(self):
        self.ar_tag_lock.acquire()
        ar_tag_pose = copy.deepcopy(self.ar_tag_pose)
        self.ar_tag_lock.release()
        return ar_tag_pose

    def get_image(self):
        if self.img is not None:
            return self.img * 1
        else:
            raise RuntimeError('No image found. Please check the camera')

    def get_depth(self):
        if self.depth_registered is not None:
            return self.depth_registered * 1
        elif self.depth is not None:
            return self.depth * 1
        else:
            raise RuntimeError('No depth found. Please check the camera')

    def get_intrinsics(self):
        self.camera_info_lock.acquire()
        P = np.array(self.camera_info.P).copy()
        self.camera_info_lock.release()
        return P.reshape((3, 4))

    def _process_depth(self, cur_depth=None):
        if cur_depth is None:
            cur_depth = self.get_depth()
        cur_depth = cur_depth / 1000.  # conversion from mm to m
        cur_depth[cur_depth > MAX_DEPTH] = 0.
        return cur_depth

    def _get_z_mean(self, depth, pt, bb=BB_SIZE):
        sum_z = 0.
        nps = 0
        for i in range(bb * 2):
            for j in range(bb * 2):
                new_pt = [pt[0] - bb + i, pt[1] - bb + j]
                try:
                    new_z = depth[int(new_pt[0]), int(new_pt[1])]
                    if new_z > 0.:
                        sum_z += new_z
                        nps += 1
                except:
                    pass
        if nps == 0.:
            return 0.
        else:
            return sum_z / nps

    def _get_3D_camera(self, pt, norm_z=None):
        assert len(pt) == 2
        cur_depth = self._process_depth()
        z = self._get_z_mean(cur_depth, [pt[0], pt[1]])
        if z == 0.:
            raise RuntimeError
        if norm_z is not None:
            z = z / norm_z
        u = pt[1]
        v = pt[0]
        P = copy.deepcopy(self.camera_P)
        P_n = np.zeros((3, 3))
        P_n[:, :2] = P[:, :2]
        P_n[:, 2] = P[:, 3] + P[:, 2] * z
        P_n_inv = np.linalg.inv(P_n)
        temp_p = np.dot(P_n_inv, np.array([u, v, 1]))
        temp_p = temp_p / temp_p[-1]
        temp_p[-1] = z
        return temp_p

    def _convert_frames(self, pt):
        assert len(pt) == 3
        ps = PointStamped()
        ps.header.frame_id = KINECT_FRAME
        ps.point.x, ps.point.y, ps.point.z = pt
        base_ps = self._transform_listener.transformPoint(BASE_FRAME, ps)
        base_pt = np.array([base_ps.point.x, base_ps.point.y, base_ps.point.z])
        return base_pt

    def get_3D(self, pt, z_norm=None):
        temp_p = self._get_3D_camera(pt, z_norm)
        base_pt = self._convert_frames(temp_p)
        return base_pt
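A hedged usage sketch for get_3D above: the pixel is given as (row, col) in the depth image, and constants such as KINECT_FRAME, BASE_FRAME, and the topic names come from outside the snippet.

rospy.init_node('active_camera_demo')
camera = ActiveCamera()
rospy.sleep(2.0)                        # let image, depth and TF buffers fill
xyz_base = camera.get_3D((240, 320))    # 3D point expressed in BASE_FRAME
rospy.loginfo("Point in base frame: %s", xyz_base)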
Example #18
class ros_cv_testing_node:

    def __init__(self):
        
        # Get information for this particular camera
        camera_info = get_single('head_camera/depth_registered/camera_info', CameraInfo)
        print camera_info

        # Set information for the camera
        self.cam_model = PinholeCameraModel()
        self.cam_model.fromCameraInfo(camera_info)

        # Subscribe to the camera's image topic and depth image topic
        self.rgb_image_sub = rospy.Subscriber("head_camera/rgb/image_raw",
                                              Image, self.on_rgb)
        self.depth_image_sub = rospy.Subscriber("head_camera/depth_registered/image_raw",
                                                Image, self.on_depth)

        # Publish where the button is in the base link frame
        self.point_pub = rospy.Publisher('button_point', PointStamped, queue_size=10)

        # Set up connection to CvBridge
        self.bridge = CvBridge()

        # Open up viewing windows
        cv2.namedWindow('depth')
        cv2.namedWindow('rgb')

        # Set up the class variables
        self.rgb_image = None
        self.rgb_image_time = None
        self.depth_image = None
        self.center = None
        self.return_point = PointStamped()
        self.tf_listener = TransformListener()
        
    def on_rgb(self, image):
        # Convert image to something that cv2 can work with
        try:
            self.rgb_image = self.bridge.imgmsg_to_cv2(image, 'bgr8')
        except Exception as e:
            print e
            return

        self.rgb_image_time = image.header.stamp

        # Get height and width of image
        h = self.rgb_image.shape[0]
        w = self.rgb_image.shape[1]
        
        # Create empty image
        color_dst = numpy.empty((h,w), 'uint8')
	
        # Convert picture to grayscale
        cv2.cvtColor(self.rgb_image, cv2.COLOR_RGB2GRAY, color_dst)

        # Find circles given the camera image
        dp = 1.1
        minDist = 90

        circles = cv2.HoughCircles(color_dst, cv2.cv.CV_HOUGH_GRADIENT, dp, minDist)
 
        # If no circles are detected then exit the program
        if circles is None:
            print "No circles found using these parameters"
            sys.exit()
    
        circles = numpy.uint16(numpy.around(circles))
            
        # Draw the center of the circle closest to the top
        ycoord = []
        for i in circles[0,:]:
            ycoord.append(i[1])

        # Find the top circle in the frame
        top_circ_y_coor = min(ycoord)
        x_coor = 0
        y_coor = 0

        for i in circles[0,:]:
            if i[1] == top_circ_y_coor:
                # draw the center of the circle
                #cv2.circle(self.rgb_image,(i[0],i[1]),2,(0,0,255),3)
                # draw outer circle
                #cv2.circle(self.rgb_image,(i[0],i[1]),i[2],(0,255,0),2)
                x_coor = i[0]
                y_coor = i[1]

        cv2.circle(self.rgb_image, (327, 247), 2, (0, 0, 255), 3)
        # Set the coordinates for the center of the circle
        self.center = (x_coor, y_coor)
        #self.center = (328,248)

        cv2.imshow('rgb', self.rgb_image)
        cv2.waitKey(1)
    
    def on_depth(self, image):
        
        # Use numpy so that cv2 can use image
        self.depth_image = numpy.fromstring(image.data, dtype='float32').reshape((480,640))
        nmask = numpy.isnan(self.depth_image)

        # Find the minimum and the maximum of the depth image
        Dmin = self.depth_image[~nmask].min()
        Dmax = self.depth_image[~nmask].max()

        # If a circle has been found find its depth and apply that to the ray
        if self.center!=None:
            ray = self.cam_model.projectPixelTo3dRay(self.center)
            depth = self.depth_image[self.center[1]][self.center[0]]
            # If the depth is not a number exit
            if math.isnan(depth):
                print "Incomplete information on depth at this point"
                return
            # Otherwise mulitply the depth by the unit vector of the ray
            else:
                print "depth", depth
                cam_coor = [depth*ray[0],depth*ray[1],depth*ray[2]]
                #print "camera frame coordinate:", cam_coor  
        else:
            return

        # Rescale to [0,1]
        cv2.imshow('depth', self.depth_image / 2.0)
        cv2.waitKey(1)

        # Only use depth if the RGB image is running
        if self.rgb_image_time is None:
            rospy.loginfo('not using depth cause no RGB yet')
            return

        time_since_rgb = image.header.stamp - self.rgb_image_time

        # Only use depth if it is close in time to the RGB image
        if time_since_rgb > rospy.Duration(0.5):
            rospy.loginfo('not using depth because time since RGB is ' + str(time_since_rgb))
            #return

        # Find transform from camera frame to world frame
        self.tf_listener.waitForTransform(self.cam_model.tfFrame(), "base_link",
                                          image.header.stamp, rospy.Duration(1.0))

        # Set up the point to be published
        self.return_point.header.stamp = image.header.stamp
        self.return_point.header.frame_id = self.cam_model.tfFrame()
        self.return_point.point.x = cam_coor[0]
        self.return_point.point.y = cam_coor[1]
        self.return_point.point.z = cam_coor[2]

        # Transform point to the baselink frame
        self.return_point = self.tf_listener.transformPoint("base_link", self.return_point)
        print "world frame coordinate:", self.return_point.point.x, \
            self.return_point.point.y, self.return_point.point.z, "\n"

        self.point_pub.publish(self.return_point)
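A minimal launch sketch for the node above (the class wires up all subscribers in __init__); the node name is an assumption.

rospy.init_node('ros_cv_testing')
node = ros_cv_testing_node()
rospy.spin()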
Example #19
class Fused_PointCloud(object):
    "The init function for the fused detectionArray"

    def __init__(self, node_name="fused_detectionArray"):
        self.node_name = node_name
        rospy.init_node(self.node_name)
        #self.fused_pointcloud = PointCloud()
        self.listener = TransformListener()
        self.R = 100
        self.angle = 30
        self.h = self.R * math.cos(self.angle / 180 * math.pi)
        self.r = self.R * math.sin(self.angle / 180 * math.pi)
        # self.DetectionInfo_sub1 = message_filters.Subscriber("rgbdt_fusion/resColor/full", DetectionFull)
        # self.DetectionInfo_sub2 = message_filters.Subscriber("rgbdt_fusion/resColor/full2", DetectionFull)
        self.DetectionInfo_sub1 = rospy.Subscriber(
            "rgbdt_fusion/resColor/full", DetectionFull, self.sub1_callback)
        self.DetectionInfo_sub2 = rospy.Subscriber(
            "rgbdt_fusion/resColor/full2", DetectionFull, self.sub2_callback)
        self.DetectionInfo_pub = rospy.Publisher('fused_detectionarray',
                                                 DetectionArray,
                                                 queue_size=2)
        self.sub1_new = False
        self.sub2_new = False
        self.storeDetectionInfo1 = None
        self.storeDetectionInfo2 = None

    def sub1_callback(self, DetectionInfo):
        self.storeDetectionInfo1 = DetectionInfo
        self.sub1_new = True

    def sub2_callback(self, DetectionInfo):
        self.storeDetectionInfo2 = DetectionInfo
        self.sub2_new = True

    def DetectionInfo(self):
        DetectionInfo1 = self.storeDetectionInfo1.detections
        DetectionInfo2 = self.storeDetectionInfo2.detections
        detect_data1_map = self.Transfer_global_map(DetectionInfo1, 1)
        detect_data2_map = self.Transfer_global_map(DetectionInfo2, 2)
        detect_fused_data_map = self.Fuse_by_distance(
            detect_data1_map,
            detect_data2_map,
            distance_threshold=1.0,
            color_threshold=30)  # need to modify the color threshold
        rospy.loginfo("COMES")
        self.DetectionInfo_pub.publish(detect_fused_data_map)
        self.sub1_new = False
        self.sub2_new = False

    def Transfer_global_map(
            self, Detect_data,
            index):  # The input is message with type of DetectionArray
        Detect_data_ = DetectionArray()
        # print(Detect_data_)
        Detect_data_ = Detect_data
        #print(Detect_data)
        # Try reproject the stamped point to the global map
        if Detect_data.size > 0:
            for i in range(Detect_data.size):
                print(i)
                try:
                    Detect_data_.data[
                        i].ptStamped = self.listener.transformPoint(
                            "map", Detect_data.data[i].ptStamped
                        )  # Potential problem is from the time stamp
                except tf.Exception:
                    rospy.loginfo("Some tf Exception happened!")
        rospy.loginfo("The transformation is done currently!" + str(index))

        return Detect_data_

    def Fuse_by_distance(
        self,
        detect_data1,
        detect_data2,
        distance_threshold=1.0,
        color_threshold=30
    ):  # The inputs here are both with type of DetectionArray
        detect_data = detect_data1
        # print("detect_data.size before: ")
        # print(detect_data.size)
        size = detect_data2.size
        # value = self.HSV_distance(detect_data1.data[i].color[0],detect_data1.data[i].color[1],detect_data1.data[i].color[2],detect_data2.data[j].color[0],detect_data2.data[j].color[1].detect_data2.data[j].color[2])
        # print(type(detect_data2.data))
        for i in range(detect_data1.size):
            for j in range(detect_data2.size):
                if (math.sqrt(pow(detect_data1.data[i].ptStamped.point.x - detect_data2.data[j].ptStamped.point.x,2) + pow(detect_data1.data[i].ptStamped.point.y - detect_data2.data[j].ptStamped.point.y,2) \
                + pow(detect_data1.data[i].ptStamped.point.z - detect_data2.data[j].ptStamped.point.z,2)) < distance_threshold) and (self.HSV_distance(detect_data1.data[i].color[0],detect_data1.data[i].color[1],detect_data1.data[i].color[2],detect_data2.data[j].color[0],detect_data2.data[j].color[1],detect_data2.data[j].color[2]) < color_threshold):
                    print(type(detect_data2.data))
                    detect_data2.data.pop(j)
                    print("pop out an element!")
                    size -= 1
        # print("detect_data2")
        # print(detect_data2)
        # print("detect_data2 left humans: ")
        # print(size)
        for i in range(size):
            detect_data.data.append(
                detect_data2.data[i]
            )  # There might be a problem when appending the pointstamped with different time stamped
            if detect_data.size > 1:
                detect_data.data[-1].num = detect_data.data[-2].num + 1
            else:
                pass
            detect_data.size += 1
        # print("detect_data.size after: ")
        # print(detect_data.size)
        # print("detect_data")
        # print(detect_data)

        return detect_data

    # reference link :
    def HSV_distance(self, h1, s1, v1, h2, s2, v2):
        x1 = self.r * v1 * s1 * math.cos(h1 / 180 * math.pi)
        y1 = self.r * v1 * s1 * math.sin(h1 / 180 * math.pi)
        z1 = self.h * (1 - v1)

        x2 = self.r * v2 * s2 * math.cos(h2 / 180 * math.pi)
        y2 = self.r * v2 * s2 * math.sin(h2 / 180 * math.pi)
        z2 = self.h * (1 - v2)

        dx, dy, dz = x1 - x2, y1 - y2, z1 - z2
        return math.sqrt(dx * dx + dy * dy + dz * dz)
Example #20
class LandmarkMethod(object):
    def __init__(self):
        self.subjects = dict()
        self.bridge = CvBridge()
        self.__subject_bridge = SubjectListBridge()
        self.model_size_rescale = 30.0
        self.head_pitch = 0.0
        self.margin = rospy.get_param("~margin", 42)
        self.margin_eyes_height = rospy.get_param("~margin_eyes_height", 36)
        self.margin_eyes_width = rospy.get_param("~margin_eyes_width", 60)
        self.interpupillary_distance = 0.058
        self.cropped_face_size = (rospy.get_param("~face_size_height", 224),
                                  rospy.get_param("~face_size_width", 224))
        self.gpu_id = rospy.get_param("~gpu_id", default="0")
        torch.cuda.set_device(self.gpu_id)
        rospy.loginfo("Using GPU for landmark: {}".format(self.gpu_id))

        self.rgb_frame_id = rospy.get_param("~rgb_frame_id", "/kinect2_link")
        self.rgb_frame_id_ros = rospy.get_param("~rgb_frame_id_ros",
                                                "/kinect2_nonrotated_link")

        self.model_points = None
        self.eye_image_size = (rospy.get_param("~eye_image_height", 36),
                               rospy.get_param("~eye_image_width", 60))

        self.tf_broadcaster = TransformBroadcaster()
        self.tf_listener = TransformListener()
        self.tf_prefix = rospy.get_param("~tf_prefix", default="gaze")

        self.use_previous_headpose_estimate = True
        self.last_rvec = {}
        self.last_tvec = {}
        self.pose_stabilizers = {}  # Introduce scalar stabilizers for pose.

        try:
            tqdm.write("Wait for camera message")
            cam_info = rospy.wait_for_message("/camera_info",
                                              CameraInfo,
                                              timeout=None)
            self.img_proc = PinholeCameraModel()
            # noinspection PyTypeChecker
            self.img_proc.fromCameraInfo(cam_info)
            if np.array_equal(
                    self.img_proc.intrinsicMatrix(),
                    np.matrix([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])):
                raise Exception(
                    'Camera matrix is zero-matrix. Did you calibrate '
                    'the camera and link it to the yaml file in the launch file?'
                )
            tqdm.write("Camera message received")
        except rospy.ROSException:
            raise Exception("Could not get camera info")

        # multiple person images publication
        self.subject_pub = rospy.Publisher("/subjects/images",
                                           MSG_SubjectImagesList,
                                           queue_size=1)
        # multiple person faces publication for visualisation
        self.subject_faces_pub = rospy.Publisher("/subjects/faces",
                                                 Image,
                                                 queue_size=1)

        self.model_points = self._get_full_model_points()

        self.sess_bb = None
        self.face_net = FaceDetector(device="cuda:{}".format(self.gpu_id))

        self.color_sub = rospy.Subscriber("/image",
                                          Image,
                                          self.callback,
                                          buff_size=2**24,
                                          queue_size=1)

        self.facial_landmark_nn = face_alignment.FaceAlignment(
            landmarks_type=face_alignment.LandmarksType._2D,
            device="cuda:{}".format(self.gpu_id),
            flip_input=False)

        Server(ModelSizeConfig, self._dyn_reconfig_callback)

    def _dyn_reconfig_callback(self, config, level):
        self.model_points /= (self.model_size_rescale *
                              self.interpupillary_distance)
        self.model_size_rescale = config["model_size"]
        self.interpupillary_distance = config["interpupillary_distance"]
        self.model_points *= (self.model_size_rescale *
                              self.interpupillary_distance)
        self.head_pitch = config["head_pitch"]
        return config

    def _get_full_model_points(self):
        """Get all 68 3D model points from file"""
        raw_value = []
        filename = rospkg.RosPack().get_path(
            'rt_gene') + '/model_nets/face_model_68.txt'
        with open(filename) as f:
            for line in f:
                raw_value.append(line)
        model_points = np.array(raw_value, dtype=np.float32)
        model_points = np.reshape(model_points, (3, -1)).T
        # model_points *= 4
        model_points[:, -1] *= -1

        # Scale the model according to the interpupillary distance and the rescale factor.
        model_points = model_points * (self.interpupillary_distance *
                                       self.model_size_rescale)

        return model_points

    def get_face_bb(self, image):
        faceboxes = []

        start_time = time.time()
        fraction = 4
        image = scipy.misc.imresize(image, 1.0 / fraction)
        detections = self.face_net.detect_from_image(image)
        tqdm.write("Face Detector Frequency: {:.2f}Hz".format(
            1 / (time.time() - start_time)))

        for result in detections:
            x_left_top, y_left_top, x_right_bottom, y_right_bottom, confidence = result * fraction

            if confidence > 0.8 * fraction:
                box = [x_left_top, y_left_top, x_right_bottom, y_right_bottom]
                diff_height_width = (box[3] - box[1]) - (box[2] - box[0])
                offset_y = int(abs(diff_height_width / 2))
                box_moved = self.move_box(box, [0, offset_y])

                # Make box square.
                facebox = self.get_square_box(box_moved)
                faceboxes.append(facebox)

        return faceboxes

    @staticmethod
    def move_box(box, offset):
        """Move the box to direction specified by vector offset"""
        left_x = box[0] + offset[0]
        top_y = box[1] + offset[1]
        right_x = box[2] + offset[0]
        bottom_y = box[3] + offset[1]

        return [left_x, top_y, right_x, bottom_y]

    @staticmethod
    def box_in_image(box, image):
        """Check if the box is in image"""
        rows = image.shape[0]
        cols = image.shape[1]

        return box[0] >= 0 and box[1] >= 0 and box[2] <= cols and box[3] <= rows

    @staticmethod
    def get_square_box(box):
        """Get a square box out of the given box, by expanding it."""
        left_x = box[0]
        top_y = box[1]
        right_x = box[2]
        bottom_y = box[3]

        box_width = right_x - left_x
        box_height = bottom_y - top_y

        # Check if box is already a square. If not, make it a square.
        diff = box_height - box_width
        delta = int(abs(diff) / 2)

        if diff == 0:  # Already a square.
            return box
        elif diff > 0:  # Height > width, a slim box.
            left_x -= delta
            right_x += delta
            if diff % 2 == 1:
                right_x += 1
        else:  # Width > height, a short box.
            top_y -= delta
            bottom_y += delta
            if diff % 2 == 1:
                bottom_y += 1

        return [left_x, top_y, right_x, bottom_y]

    def process_image(self, color_msg):
        tqdm.write('Time now: ' + str(rospy.Time.now().to_sec()) +
                   ' message color: ' + str(color_msg.header.stamp.to_sec()) +
                   ' diff: ' + str(rospy.Time.now().to_sec() -
                                   color_msg.header.stamp.to_sec()))

        start_time = time.time()

        color_img = gaze_tools.convert_image(color_msg, "bgr8")
        timestamp = color_msg.header.stamp

        self.detect_landmarks(color_img, timestamp)  # update self.subjects

        tqdm.write('Elapsed after detecting transformed_landmarks: ' +
                   str(time.time() - start_time))

        if not self.subjects:
            tqdm.write("No face found")
            return

        self.get_eye_image()  # update self.subjects

        final_head_pose_images = None
        for subject_id, subject in self.subjects.items():
            if subject.left_eye_color is None or subject.right_eye_color is None:
                continue
            if subject_id not in self.last_rvec:
                self.last_rvec[subject_id] = np.array([[0.01891013],
                                                       [0.08560084],
                                                       [-3.14392813]])
            if subject_id not in self.last_tvec:
                self.last_tvec[subject_id] = np.array([[-14.97821226],
                                                       [-10.62040383],
                                                       [-2053.03596872]])
            if subject_id not in self.pose_stabilizers:
                self.pose_stabilizers[subject_id] = [
                    Stabilizer(state_num=2,
                               measure_num=1,
                               cov_process=0.1,
                               cov_measure=0.1) for _ in range(6)
                ]

            success, rotation_vector, translation_vector = self.get_head_pose(
                subject.marks, subject_id)

            # Publish all the data
            translation_headpose_tf = self.get_head_translation(
                timestamp, subject_id)

            if success:
                if translation_headpose_tf is not None:
                    euler_angles_head = self.publish_pose(
                        timestamp, translation_headpose_tf, rotation_vector,
                        subject_id)

                    if euler_angles_head is not None:
                        head_pose_image = self.visualize_headpose_result(
                            subject.face_color,
                            gaze_tools.get_phi_theta_from_euler(
                                euler_angles_head))
                        head_pose_image_resized = cv2.resize(
                            head_pose_image,
                            dsize=(224, 224),
                            interpolation=cv2.INTER_CUBIC)

                        if final_head_pose_images is None:
                            final_head_pose_images = head_pose_image_resized
                        else:
                            final_head_pose_images = np.concatenate(
                                (final_head_pose_images,
                                 head_pose_image_resized),
                                axis=1)
            else:
                tqdm.write("Could not get head pose properly")

        if final_head_pose_images is not None:
            self.publish_subject_list(timestamp, self.subjects)
            headpose_image_ros = self.bridge.cv2_to_imgmsg(
                final_head_pose_images, "bgr8")
            headpose_image_ros.header.stamp = timestamp
            self.subject_faces_pub.publish(headpose_image_ros)

        tqdm.write('Elapsed total: ' + str(time.time() - start_time) + '\n\n')

    def get_head_pose(self, landmarks, subject_id):
        """
        We are given a set of 2D points in the form of landmarks. The basic idea is that we assume a generic 3D head
        model, and try to fit the 2D points to the 3D model based on the Levenberg-Marquardt optimization. We can use
        OpenCV's implementation of SolvePnP for this.
        This method is inspired by http://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib/
        We are planning to replace this with our own head pose estimator.
        :param landmarks: Landmark positions to be used to determine head pose
        :return: success - Whether the pose was successfully extracted
                 rotation_vector - rotation vector that along with translation vector brings points from the model
                 coordinate system to the camera coordinate system
                 translation_vector - see rotation_vector
        """

        image_points_headpose = landmarks

        camera_matrix = self.img_proc.intrinsicMatrix()
        dist_coeffs = self.img_proc.distortionCoeffs()

        try:
            if self.last_rvec[subject_id] is not None and self.last_tvec[
                    subject_id] is not None and self.use_previous_headpose_estimate:
                (success, rotation_vector_unstable, translation_vector_unstable) = \
                    cv2.solvePnP(self.model_points, image_points_headpose, camera_matrix, dist_coeffs,
                                 flags=cv2.SOLVEPNP_ITERATIVE, useExtrinsicGuess=True,
                                 rvec=self.last_rvec[subject_id], tvec=self.last_tvec[subject_id])
            else:
                (success, rotation_vector_unstable, translation_vector_unstable) = \
                    cv2.solvePnP(self.model_points, image_points_headpose, camera_matrix, dist_coeffs,
                                 flags=cv2.SOLVEPNP_ITERATIVE)
        except Exception:
            print('Could not estimate head pose')
            return False, None, None

        if not success:
            print('Could not estimate head pose')
            return False, None, None

        # Apply Kalman filter
        stable_pose = []
        pose_np = np.array(
            (rotation_vector_unstable, translation_vector_unstable)).flatten()
        for value, ps_stb in zip(pose_np, self.pose_stabilizers[subject_id]):
            ps_stb.update([value])
            stable_pose.append(ps_stb.state[0])

        stable_pose = np.reshape(stable_pose, (-1, 3))
        rotation_vector = stable_pose[0]
        translation_vector = stable_pose[1]

        self.last_rvec[subject_id] = rotation_vector
        self.last_tvec[subject_id] = translation_vector

        rotation_vector_swapped = [
            -rotation_vector[2], -rotation_vector[1] + np.pi + self.head_pitch,
            rotation_vector[0]
        ]
        rot_head = tf_transformations.quaternion_from_euler(
            *rotation_vector_swapped)

        return success, rot_head, translation_vector

    def publish_subject_list(self, timestamp, subjects):
        assert (subjects is not None)

        subject_list_message = self.__subject_bridge.images_to_msg(
            subjects, timestamp)

        self.subject_pub.publish(subject_list_message)

    @staticmethod
    def visualize_headpose_result(face_image, est_headpose):
        """Here, we take the original eye eye_image and overlay the estimated gaze."""
        output_image = np.copy(face_image)

        center_x = face_image.shape[1] / 2
        center_y = face_image.shape[0] / 2

        endpoint_x, endpoint_y = gaze_tools.get_endpoint(
            est_headpose[1], est_headpose[0], center_x, center_y, 100)

        cv2.line(output_image, (int(center_x), int(center_y)),
                 (int(endpoint_x), int(endpoint_y)), (0, 0, 255), 3)
        return output_image

    def get_head_translation(self, timestamp, subject_id):
        trans_reshaped = self.last_tvec[subject_id].reshape(3, 1)
        nose_center_3d_rot = [
            -float(trans_reshaped[0] / 1000.0),
            -float(trans_reshaped[1] / 1000.0),
            -float(trans_reshaped[2] / 1000.0)
        ]

        nose_center_3d_rot_frame = self.rgb_frame_id

        try:
            nose_center_3d_rot_pt = PointStamped()
            nose_center_3d_rot_pt.header.frame_id = nose_center_3d_rot_frame
            nose_center_3d_rot_pt.header.stamp = timestamp
            nose_center_3d_rot_pt.point = Point(x=nose_center_3d_rot[0],
                                                y=nose_center_3d_rot[1],
                                                z=nose_center_3d_rot[2])
            nose_center_3d = self.tf_listener.transformPoint(
                self.rgb_frame_id_ros, nose_center_3d_rot_pt)
            nose_center_3d.header.stamp = timestamp

            nose_center_3d_tf = gaze_tools.position_ros_to_tf(
                nose_center_3d.point)

            print('Translation based on landmarks', nose_center_3d_tf)
            return nose_center_3d_tf
        except ExtrapolationException as e:
            print(e)
            return None

    def publish_pose(self, timestamp, nose_center_3d_tf, rot_head, subject_id):
        self.tf_broadcaster.sendTransform(
            nose_center_3d_tf, rot_head, timestamp,
            self.tf_prefix + "/head_pose_estimated" + str(subject_id),
            self.rgb_frame_id_ros)

        return gaze_tools.get_head_pose(nose_center_3d_tf, rot_head)

    def callback(self, color_msg):
        """Simply call process_image."""
        try:
            self.process_image(color_msg)

        except CvBridgeError as e:
            print(e)
        except rospy.ROSException as e:
            if str(e) == "publish() to a closed topic":
                pass
            else:
                raise e

    def __update_subjects(self, new_faceboxes):
        """
        Assign the new faces to the existing subjects (id tracking)
        :param new_faceboxes: new faceboxes detected
        :return: update self.subjects
        """
        assert (self.subjects is not None)
        assert (new_faceboxes is not None)

        if len(new_faceboxes) == 0:
            self.subjects = dict()
            return

        if len(self.subjects) == 0:
            for j, b_new in enumerate(new_faceboxes):
                self.subjects[j] = SubjectDetected(b_new)
            return

        distance_matrix = np.ones((len(self.subjects), len(new_faceboxes)))
        for i, subject in enumerate(self.subjects.values()):
            for j, b_new in enumerate(new_faceboxes):
                distance_matrix[i][j] = np.sqrt(
                    np.mean(
                        ((np.array(subject.face_bb) - np.array(b_new))**2)))
        ids_to_assign = range(len(new_faceboxes))
        self.subjects = dict()
        for id in ids_to_assign:
            subject = np.argmin(distance_matrix[:, id])
            while subject in self.subjects:
                subject += 1
            self.subjects[subject] = SubjectDetected(new_faceboxes[id])

    def __detect_landmarks_one_box(self, facebox, color_img):
        try:
            _bb = list(map(int, facebox))
            face_img = color_img[_bb[1]:_bb[3], _bb[0]:_bb[2]]
            marks_orig = np.array(
                self.__detect_facial_landmarks(color_img, facebox)[0])

            eye_indices = np.array([36, 39, 42, 45])

            transformed_landmarks = marks_orig[eye_indices]
            transformed_landmarks[:, 0] -= facebox[0]
            transformed_landmarks[:, 1] -= facebox[1]

            return face_img, transformed_landmarks, marks_orig
        except Exception:
            return None, None, None

    def __detect_facial_landmarks(self, color_img, facebox):
        marks = self.facial_landmark_nn.get_landmarks(color_img,
                                                      detected_faces=[facebox])
        return marks

    def detect_landmarks(self, color_img, timestamp):
        faceboxes = self.get_face_bb(color_img)

        self.__update_subjects(faceboxes)

        for subject in self.subjects.values():
            res = self.__detect_landmarks_one_box(subject.face_bb, color_img)
            subject.face_color = res[0]
            subject.transformed_landmarks = res[1]
            subject.marks = res[2]

    def __get_eye_image_one(self, transformed_landmarks, face_aligned_color):
        margin_ratio = 1.0

        try:
            # Get the width of the eye, and compute how big the margin should be according to the width
            lefteye_width = transformed_landmarks[3][
                0] - transformed_landmarks[2][0]
            righteye_width = transformed_landmarks[1][
                0] - transformed_landmarks[0][0]
            lefteye_margin, righteye_margin = lefteye_width * margin_ratio, righteye_width * margin_ratio

            # lefteye_center_x = transformed_landmarks[2][0] + lefteye_width / 2
            # righteye_center_x = transformed_landmarks[0][0] + righteye_width / 2
            lefteye_center_y = (transformed_landmarks[2][1] +
                                transformed_landmarks[3][1]) / 2
            righteye_center_y = (transformed_landmarks[1][1] +
                                 transformed_landmarks[0][1]) / 2

            desired_ratio = self.eye_image_size[0] / self.eye_image_size[1] / 2

            # Now compute the bounding boxes
            # The left / right x-coordinates are computed as the landmark position plus/minus the margin
            # The bottom / top y-coordinates are computed according to the desired ratio, as the width of the image is known
            left_bb = np.zeros(4, dtype=np.int32)
            left_bb[0] = transformed_landmarks[2][0] - lefteye_margin / 2
            left_bb[1] = lefteye_center_y - (
                lefteye_width + lefteye_margin) * desired_ratio * 1.25
            left_bb[2] = transformed_landmarks[3][0] + lefteye_margin / 2
            left_bb[3] = lefteye_center_y + (
                lefteye_width + lefteye_margin) * desired_ratio * 1.25

            right_bb = np.zeros(4, dtype=np.int32)
            right_bb[0] = transformed_landmarks[0][0] - righteye_margin / 2
            right_bb[1] = righteye_center_y - (
                righteye_width + righteye_margin) * desired_ratio * 1.25
            right_bb[2] = transformed_landmarks[1][0] + righteye_margin / 2
            right_bb[3] = righteye_center_y + (
                righteye_width + righteye_margin) * desired_ratio * 1.25

            # Extract the eye images from the aligned image
            left_eye_color = face_aligned_color[left_bb[1]:left_bb[3],
                                                left_bb[0]:left_bb[2], :]
            right_eye_color = face_aligned_color[right_bb[1]:right_bb[3],
                                                 right_bb[0]:right_bb[2], :]

            # for p in transformed_landmarks:  # For debug visualization only
            #     cv2.circle(face_aligned_color, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

            # So far, we have only ensured that the ratio is correct. Now, resize it to the desired size.
            left_eye_color_resized = scipy.misc.imresize(left_eye_color,
                                                         self.eye_image_size,
                                                         interp='lanczos')
            right_eye_color_resized = scipy.misc.imresize(right_eye_color,
                                                          self.eye_image_size,
                                                          interp='lanczos')
        except (ValueError, TypeError) as e:
            print(e)
            return None, None, None, None
        return left_eye_color_resized, right_eye_color_resized, left_bb, right_bb

    # noinspection PyUnusedLocal
    def get_eye_image(self):
        """Extract the left and right eye images given the (dlib) transformed_landmarks and the source image.
        First, align the face. Then, extract the width of the eyes given the landmark positions.
        The height of the images is computed according to the desired ratio of the eye images."""

        start_time = time.time()
        for subject in self.subjects.values():
            res = self.__get_eye_image_one(subject.transformed_landmarks,
                                           subject.face_color)
            subject.left_eye_color = res[0]
            subject.right_eye_color = res[1]
            subject.left_eye_bb = res[2]
            subject.right_eye_bb = res[3]

        tqdm.write('New get_eye_image time: ' + str(time.time() - start_time))

    @staticmethod
    def get_image_points_eyes_nose(landmarks):
        landmarks_x, landmarks_y = landmarks.T[0], landmarks.T[1]

        left_eye_center_x = landmarks_x[42] + (landmarks_x[45] -
                                               landmarks_x[42]) / 2.0
        left_eye_center_y = (landmarks_y[42] + landmarks_y[45]) / 2.0
        right_eye_center_x = landmarks_x[36] + (landmarks_x[40] -
                                                landmarks_x[36]) / 2.0
        right_eye_center_y = (landmarks_y[36] + landmarks_y[40]) / 2.0
        nose_center_x, nose_center_y = (landmarks_x[33] + landmarks_x[31] + landmarks_x[35]) / 3.0, \
                                       (landmarks_y[33] + landmarks_y[31] + landmarks_y[35]) / 3.0

        return (nose_center_x, nose_center_y), \
               (left_eye_center_x, left_eye_center_y), (right_eye_center_x, right_eye_center_y)
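
A minimal, self-contained sketch of the solvePnP fit described in the get_head_pose docstring above. The 3D model points, intrinsics and pose are made-up illustrative values, not the 68-point model or calibration loaded by LandmarkMethod.

import cv2
import numpy as np

# Placeholder 3D model points (metres) and pinhole intrinsics.
model_points = np.array([[0.00, 0.00, 0.00], [0.06, 0.00, 0.00],
                         [0.00, 0.05, 0.00], [0.00, 0.00, 0.04],
                         [0.05, 0.05, 0.01], [0.02, 0.01, 0.03]])
camera_matrix = np.array([[525.0, 0.0, 319.5],
                          [0.0, 525.0, 239.5],
                          [0.0, 0.0, 1.0]])
dist_coeffs = np.zeros((4, 1))  # assume a rectified image

# Synthesise 2D observations from a known pose, then recover that pose.
rvec_true = np.array([[0.10], [0.20], [0.05]])
tvec_true = np.array([[0.00], [0.00], [0.60]])
image_points, _ = cv2.projectPoints(model_points, rvec_true, tvec_true,
                                    camera_matrix, dist_coeffs)

success, rvec, tvec = cv2.solvePnP(model_points, image_points, camera_matrix,
                                   dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
print(success, rvec.ravel(), tvec.ravel())  # close to rvec_true / tvec_true
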
Example #21
class ros_cv_testing_node:
    def __init__(self):

        # Get information for this particular camera
        camera_info = get_single('head_camera/depth_registered/camera_info',
                                 CameraInfo)
        print camera_info

        # Set information for the camera
        self.cam_model = PinholeCameraModel()
        self.cam_model.fromCameraInfo(camera_info)

        # Subscribe to the camera's image topic and depth image topic
        self.rgb_image_sub = rospy.Subscriber("head_camera/rgb/image_raw",
                                              Image, self.on_rgb)
        self.depth_image_sub = rospy.Subscriber(
            "head_camera/depth_registered/image_raw", Image, self.on_depth)

        # Publish where the button is in the base link frame
        self.point_pub = rospy.Publisher('button_point',
                                         PointStamped,
                                         queue_size=10)

        # Set up connection to CvBridge
        self.bridge = CvBridge()

        # Open up viewing windows
        cv2.namedWindow('depth')
        cv2.namedWindow('rgb')

        # Set up the class variables
        self.rgb_image = None
        self.rgb_image_time = None
        self.depth_image = None
        self.center = None
        self.return_point = PointStamped()
        self.tf_listener = TransformListener()

    def on_rgb(self, image):
        # Convert image to something that cv2 can work with
        try:
            self.rgb_image = self.bridge.imgmsg_to_cv2(image, 'bgr8')
        except Exception as e:
            print e
            return

        self.rgb_image_time = image.header.stamp

        # Get height and width of image
        h = self.rgb_image.shape[0]
        w = self.rgb_image.shape[1]

        # Create empty image
        color_dst = numpy.empty((h, w), 'uint8')

        # Convert picture to grayscale
        cv2.cvtColor(self.rgb_image, cv2.COLOR_RGB2GRAY, color_dst)

        # Find circles given the camera image
        dp = 1.1
        minDist = 90

        circles = cv2.HoughCircles(color_dst, cv2.cv.CV_HOUGH_GRADIENT, dp,
                                   minDist)

        # If no circles are detected then exit the program
        if circles is None:
            print "No circles found using these parameters"
            sys.exit()

        circles = numpy.uint16(numpy.around(circles))

        # Draw the center of the circle closest to the top
        ycoord = []
        for i in circles[0, :]:
            ycoord.append(i[1])

        # Find the top circle in the frame
        top_circ_y_coor = min(ycoord)
        x_coor = 0
        y_coor = 0

        for i in circles[0, :]:
            if i[1] == top_circ_y_coor:
                # draw the center of the circle
                #cv2.circle(self.rgb_image,(i[0],i[1]),2,(0,0,255),3)
                # draw outer circle
                #cv2.circle(self.rgb_image,(i[0],i[1]),i[2],(0,255,0),2)
                x_coor = i[0]
                y_coor = i[1]

        cv2.circle(self.rgb_image, (327, 247), 2, (0, 0, 255), 3)
        # Set the coordinates for the center of the circle
        self.center = (x_coor, y_coor)
        #self.center = (328,248)

        cv2.imshow('rgb', self.rgb_image)
        cv2.waitKey(1)

    def on_depth(self, image):

        # Use numpy so that cv2 can use image
        self.depth_image = numpy.fromstring(image.data,
                                            dtype='float32').reshape(
                                                (480, 640))
        nmask = numpy.isnan(self.depth_image)

        #Find the minimum and the maximum of the depth image
        Dmin = self.depth_image[~nmask].min()
        Dmax = self.depth_image[~nmask].max()

        # If a circle has been found find its depth and apply that to the ray
        if self.center is not None:
            ray = self.cam_model.projectPixelTo3dRay(self.center)
            depth = self.depth_image[self.center[1]][self.center[0]]
            # If the depth is not a number exit
            if math.isnan(depth):
                print "Incomplete information on depth at this point"
                return
            # Otherwise multiply the depth by the unit vector of the ray
            else:
                print "depth", depth
                cam_coor = [depth * ray[0], depth * ray[1], depth * ray[2]]
                #print "camera frame coordinate:", cam_coor
        else:
            return

        # Rescale to [0,1]
        cv2.imshow('depth', self.depth_image / 2.0)
        cv2.waitKey(1)

        # Only use depth if the RGB image is running
        if self.rgb_image_time is None:
            rospy.loginfo('not using depth cause no RGB yet')
            return

        time_since_rgb = image.header.stamp - self.rgb_image_time

        # Only use depth if it is close in time to the RGB image
        if time_since_rgb > rospy.Duration(0.5):
            rospy.loginfo('not using depth because time since RGB is ' +
                          str(time_since_rgb))
            #return

        # Find transform from camera frame to world frame
        self.tf_listener.waitForTransform(self.cam_model.tfFrame(),
                                          "base_link", image.header.stamp,
                                          rospy.Duration(1.0))

        # Set up the point to be published
        self.return_point.header.stamp = image.header.stamp
        self.return_point.header.frame_id = self.cam_model.tfFrame()
        self.return_point.point.x = cam_coor[0]
        self.return_point.point.y = cam_coor[1]
        self.return_point.point.z = cam_coor[2]

        # Transform point to the baselink frame
        self.return_point = self.tf_listener.transformPoint(
            "base_link", self.return_point)
        print "world frame coordinate:", self.return_point.point.x, \
            self.return_point.point.y,self.return_point.point.z, "\n"

        self.point_pub.publish(self.return_point)
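
The on_rgb callback above detects circles with the OpenCV 2.x constant cv2.cv.CV_HOUGH_GRADIENT and keeps the circle closest to the top of the frame. Below is a small sketch of the same step against the OpenCV 3+/4 API (where the constant is cv2.HOUGH_GRADIENT); the synthetic test image and parameters are illustrative only.

import cv2
import numpy as np

# Synthetic grayscale test image with two filled circles.
img = np.zeros((480, 640), dtype=np.uint8)
cv2.circle(img, (320, 240), 40, 255, -1)
cv2.circle(img, (500, 120), 30, 255, -1)

circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=1.1, minDist=90)
if circles is None:
    print("No circles found using these parameters")
else:
    circles = np.uint16(np.around(circles))
    # Pick the circle closest to the top of the frame, as on_rgb does.
    top = min(circles[0, :], key=lambda c: c[1])
    print("topmost circle at", (top[0], top[1]), "radius", top[2])
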
Example #22
class SRB:
    def __init__(self):

        self.choice = 0
        self.movePub = rospy.Publisher(
            '/cmd_vel', Twist, queue_size=10)  # publisher for move commands
        self.peopleSub = rospy.Subscriber(
            '/people_tracker_filter/people', People,
            self.peopleCall)  # subscribe to people tracker
        self.laserSub = rospy.Subscriber('/scan', LaserScan,
                                         self.laserCall)  # subscribe to laser
        self.distance = 0  # variable to store distance from laser
        self.tf = TransformListener()

        while True:
            print("select behaviour")
            print("1) control")
            print("2) friendly")
            print("3) cautious")
            self.choice = input("> ")
            if self.choice == 1 or self.choice == 2:
                break
            elif self.choice == 3:
                break
            print("started")

    def peopleCall(self, msg):  # what to do with people data

        xCoords = []  # arrays to hold x, y coords of people from people tracker
        yCoords = []

        for i in msg.people:
            p = PointStamped()
            p.point = i.position
            p.header = msg.header
            pos = self.tf.transformPoint('/base_link', p)
            xCoords.append(pos.point.x)
            yCoords.append(pos.point.y)
            #print(i.name)

        if self.choice == 1:  # control

            (fb, lr) = control(
                xCoords, yCoords, self.distance
            )  # get movement commands from behaviour function, forwards/backwards, left/right
            print('reacting')

        if self.choice == 2:  # friendly

            (fb, lr) = friendly(xCoords, yCoords, self.distance)
            print('reacting')

        if self.choice == 3:  # cautious

            (fb, lr) = cautious(xCoords, yCoords, self.distance)
            print('reacting')

        if not self.choice == 0:
            self.twist = Twist()  # twist message to hold commands
            self.twist.angular.z = lr
            self.twist.linear.x = fb
            #print type(self.choice), self.choice
            self.movePub.publish(self.twist)  # send commands

    def laserCall(self, msg):  # what to do with laser data

        self.distance = min(msg.ranges)  # get range directly in front of robot
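
The behaviour functions control, friendly and cautious called above are not shown in this example. The sketch below is a hypothetical stand-in that illustrates the expected interface: it takes the tracked people's x/y coordinates in base_link plus the minimum laser range and returns a (forwards/backwards, left/right) command pair.

import math

def cautious(xCoords, yCoords, distance, personal_space=1.2):
    """Hypothetical behaviour: turn towards the nearest person, back away if too close."""
    if not xCoords:
        return 0.0, 0.0  # nobody tracked: stand still
    # Nearest person in the base_link frame.
    x, y = min(zip(xCoords, yCoords), key=lambda p: math.hypot(p[0], p[1]))
    lr = 0.5 * math.atan2(y, x)  # turn towards the person
    fb = -0.3 if min(math.hypot(x, y), distance) < personal_space else 0.0
    return fb, lr
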
Example #23
def transformPoint(self, target_frame, ps):
    now = rospy.Time.now()
    self.waitForTransform(target_frame, ps.header.frame_id, now, rospy.Duration(5.0))
    ps.header.stamp = now
    return TransformListener.transformPoint(self, target_frame, ps)
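
A usage sketch for the wrapper above, assuming it is attached to a TransformListener subclass; the class name StampedTransformListener, node name and frame names are placeholders.

import rospy
from geometry_msgs.msg import PointStamped
from tf import TransformListener

class StampedTransformListener(TransformListener):  # hypothetical subclass name
    def transformPoint(self, target_frame, ps):
        now = rospy.Time.now()
        self.waitForTransform(target_frame, ps.header.frame_id, now, rospy.Duration(5.0))
        ps.header.stamp = now
        return TransformListener.transformPoint(self, target_frame, ps)

rospy.init_node('transform_point_demo')
listener = StampedTransformListener()
pt = PointStamped()
pt.header.frame_id = 'camera_link'
pt.point.x, pt.point.y, pt.point.z = 1.0, 0.0, 0.5
pt_in_base = listener.transformPoint('base_link', pt)  # blocks until the transform is available
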
Example #24
class CameraPointer(object):
    def __init__(self, side, camera_ik):
        self.side = side
        self.camera_ik = camera_ik
        self.joint_names = self.joint_angles_act = self.joint_angles_des = None
        self.tfl = TransformListener()
        self.joint_state_sub = rospy.Subscriber(
            '/{0}_arm_controller/state'.format(self.side),
            JointTrajectoryControllerState, self.joint_state_cb)
        self.joint_traj_pub = rospy.Publisher(
            '/{0}_arm_controller/command'.format(self.side), JointTrajectory)
        # Wait for joint information to become available
        while self.joint_names is None and not rospy.is_shutdown():
            rospy.sleep(0.5)
            rospy.loginfo(
                "[{0}] Waiting for joint state from arm controller.".format(
                    rospy.get_name()))
        #Set rate limits on a per-joint basis
        self.max_vel_rot = [np.pi] * len(self.joint_names)
        self.target_sub = rospy.Subscriber(
            '{0}/lookat_ik/goal'.format(rospy.get_name()), PointStamped,
            self.goal_cb)
        rospy.loginfo("[{0}] Ready.".format(rospy.get_name()))

    def joint_state_cb(self, jtcs):
        if self.joint_names is None:
            self.joint_names = jtcs.joint_names
        self.joint_angles_act = jtcs.actual.positions
        self.joint_angles_des = jtcs.desired.positions

    def goal_cb(self, pt_msg):
        rospy.loginfo("[{0}] New LookatIK goal received.".format(
            rospy.get_name()))
        if (pt_msg.header.frame_id != self.camera_ik.base_frame):
            try:
                now = pt_msg.header.stamp
                self.tfl.waitForTransform(pt_msg.header.frame_id,
                                          self.camera_ik.base_frame, now,
                                          rospy.Duration(1.0))
                pt_msg = self.tfl.transformPoint(self.camera_ik.base_frame,
                                                 pt_msg)
            except (LookupException, ConnectivityException,
                    ExtrapolationException):
                rospy.logwarn(
                    "[{0}] TF Error tranforming point from {1} to {2}".format(
                        rospy.get_name(), pt_msg.header.frame_id,
                        self.camera_ik.base_frame))
        target = np.array([pt_msg.point.x, pt_msg.point.y, pt_msg.point.z])
        # Get IK Solution
        current_angles = list(copy.copy(self.joint_angles_act))
        iksol = self.camera_ik.lookat_ik(
            target, current_angles[:len(self.camera_ik.arm_indices)])
        # Start with current angles, then replace angles in camera IK with IK solution
        # Use desired joint angles to avoid sagging over time
        jtp = JointTrajectoryPoint()
        jtp.positions = list(copy.copy(self.joint_angles_des))
        for i, joint_name in enumerate(self.camera_ik.arm_joint_names):
            jtp.positions[self.joint_names.index(joint_name)] = iksol[i]
        deltas = np.abs(np.subtract(jtp.positions, current_angles))
        duration = np.max(np.divide(deltas, self.max_vel_rot))
        jtp.time_from_start = rospy.Duration(duration)
        jt = JointTrajectory()
        jt.joint_names = self.joint_names
        jt.points.append(jtp)
        rospy.loginfo("[{0}] Sending Joint Angles.".format(rospy.get_name()))
        self.joint_traj_pub.publish(jt)
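
goal_cb above rate-limits the motion by dividing each joint's angular change by its per-joint velocity limit and taking the largest resulting time as the trajectory duration. A small worked sketch with illustrative numbers:

import numpy as np

max_vel_rot = [np.pi] * 3                    # rad/s limit per joint
current_angles = np.array([0.0, 0.5, -1.0])  # where the joints are now
target_angles = np.array([0.8, 0.5, 0.6])    # IK solution to move to

deltas = np.abs(target_angles - current_angles)
duration = np.max(np.divide(deltas, max_vel_rot))
print(duration)  # ~0.509 s: the 1.6 rad move at pi rad/s dominates
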
Example #25
class FTPNode:
    def __init__(self, *args):
        print("init")
        self.tf = TransformListener()
        self.tt = Transformer()
        rospy.Subscriber("/trackedHumans", TrackedHumans, self.pos_callback)
        self.publisher = rospy.Publisher("directionmarker_array", MarkerArray)

    def pos_callback(self, data):
        rospy.loginfo("on updated pos, face robot towards guy...")
        print("hi")
        if (len(data.trackedHumans) > 0):
            print(data.trackedHumans[0].location.point.x)
            try:
                self.tf.waitForTransform(
                    data.trackedHumans[0].location.header.frame_id,
                    "/base_link", rospy.Time.now(), rospy.Duration(2.0))
                pp = self.tf.transformPoint("/base_link",
                                            data.trackedHumans[0].location)
                print "Original:"
                print[data.trackedHumans[0].location.point]
                print "Transform:"
                print pp.point

                phi = math.atan2(pp.point.y, pp.point.x)
                sss.move_base_rel("base", [0, 0, phi])
                print phi * 180 / math.pi

                markerArray = MarkerArray()
                marker = Marker()
                marker.header.stamp = rospy.Time()
                marker.ns = "my_namespace"
                marker.id = 0
                marker.header.frame_id = "/base_link"
                marker.type = marker.ARROW
                marker.action = marker.ADD
                marker.scale.x = .1
                marker.scale.y = .1
                marker.scale.z = .1
                marker.color.a = 1.0
                marker.color.r = 1.0
                marker.color.g = 1.0
                marker.color.b = 0.0
                p1 = Point()
                p1.x = 0
                p1.y = 0
                p1.z = 0
                p2 = Point()
                p2.x = pp.point.x
                p2.y = pp.point.y
                p2.z = 0
                marker.points = [p1, p2]
                #marker.pose.position.x = 1
                #marker.pose.position.y = 0
                #marker.pose.position.z = 1
                #marker.pose.orientation.w = 1
                markerArray.markers.append(marker)
                self.publisher.publish(markerArray)
                print "try ended"
            except Exception as e:
                print e
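
A tiny worked example of the facing-angle computation in pos_callback above: once the person's position is transformed into /base_link, phi = atan2(y, x) is the in-place rotation that points the robot at them.

import math

# Person 1 m ahead and 1 m to the left, already in the /base_link frame.
x, y = 1.0, 1.0
phi = math.atan2(y, x)
print(phi, phi * 180 / math.pi)  # 0.785 rad, i.e. turn 45 degrees to the left
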
Example #26
class Collector():
    def __init__(self):
        rospy.init_node('collector', anonymous = False)                
        self.lis=TransformListener();
        self.data_out=SBC_Output();
        rospy.Subscriber("/joint_states", JointState, self.j_callback)
        rospy.Subscriber("/finger1/ContactState", KCL_ContactStateStamped, self.f_callback1)
        rospy.Subscriber("/finger2/ContactState", KCL_ContactStateStamped, self.f_callback2)
        rospy.Subscriber("/pressure", PressureControl, self.p_callback)        
        rospy.Subscriber("/prob_fail", Float64, self.prob_callback)
        self.publisher=rospy.Publisher("sbc_data", SBC_Output)
        self.point1=PointStamped()
        self.point2=PointStamped()
        self.rate=rospy.Rate(20);

    def getParams(self):     
        self.data_out.D_Gain=rospy.get_param("/bhand_pid/d_gain")     
        self.data_out.F_ref_pid=rospy.get_param("/bhand_pid/f_ref")
        self.data_out.I_Gain=rospy.get_param("/bhand_pid/i_gain")
        self.data_out.P_Gain=rospy.get_param("/bhand_pid/p_gain")
        self.data_out.freq=rospy.get_param("/pressure_reg/frequency")
        self.data_out.Beta=rospy.get_param("/bhand_sbc/beta")
        self.data_out.Delta=rospy.get_param("/bhand_sbc/delta")
        self.data_out.Eta=rospy.get_param("/bhand_sbc/eta")
        self.data_out.F_ref_sbc=rospy.get_param("/bhand_sbc/f_ref")
        
    def j_callback(self,data):
        self.joints=data;
        self.data_out.effort1=data.effort[1]
        self.data_out.effort2=data.effort[0]
        
    def f_callback1(self,data):                    
        self.data_out.Fn1=data.Fnormal;            
        ft=np.array([data.tangential_force.x,data.tangential_force.y,data.tangential_force.z])
        self.data_out.Ft1=np.sqrt(ft.dot(ft));

        self.point1=PointStamped();
        self.point1.header=data.header;
        self.point1.point=data.contact_position;
                    
        
    def f_callback2(self,data):
        self.data_out.Fn2=data.Fnormal;
        ft=np.array([data.tangential_force.x,data.tangential_force.y,data.tangential_force.z])
        self.data_out.Ft2=np.sqrt(ft.dot(ft));  
        
        self.point2=PointStamped();
        self.point2.header=data.header;
        self.point2.point=data.contact_position;

        
    def p_callback(self,data):
        self.data_out.p_demand=data.p_demand;
        self.data_out.p_measure=data.p_measure;
    def prob_callback(self,data):
        self.data_out.Pfailure=data.data;
        
    def transform_it(self,data):
        if(data.header.frame_id):
            #data.header.stamp=rospy.Time.now();
            if(self.lis.canTransform("base_link",data.header.frame_id,data.header.stamp) or True):
                #print(rospy.Time.now())     
                data.header.stamp=data.header.stamp-rospy.Duration(0.02)           
                #point=self.lis.transformPoint("base_link", data)
                try:    
                    #self.lis.waitForTransform("base_link", data.header.frame_id, data.header.stamp, rospy.Duration(1))
                  #  print(rospy.Time.now())                
                    self.point=self.lis.transformPoint("base_link", data)
                    return True
                except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                    rospy.logwarn("TF problem 2")                    
                    pass
            else:
                rospy.logwarn("Cannot Transform")
        else:
            print(data.header.frame_id)        
        return False
    
    def get_distance(self,point1,point2):  
        d=np.array([point1.x-point2.x, point1.y-point2.y, point1.z-point2.z])
        return np.sqrt(d.dot(d));
  
        
        
    def send_it(self):
        while not rospy.is_shutdown():            
            self.data_out.header.stamp=rospy.Time.now();
            self.getParams()            
            
            got_it=self.transform_it(self.point1);
            if(got_it):
                self.data_out.contact1=self.point.point
            got_it=self.transform_it(self.point2);
            if(got_it):
                self.data_out.contact2=self.point.point                   
            self.data_out.distance=self.get_distance(self.data_out.contact1,self.data_out.contact2)*100
            self.publisher.publish(self.data_out);
            self.rate.sleep();           
Example #27
class CasadiMPC:
    def __init__(self, lbw, ubw, lbg, ubg):
        #publishers
        self.pub_nav_vel = rospy.Publisher('/nav_vel', Twist, queue_size=0)
        self.pub_prediction_poses = rospy.Publisher('/prediction_poses',
                                                    PoseArray,
                                                    queue_size=0)
        self.pub_goal = rospy.Publisher('/local_goal',
                                        PoseStamped,
                                        queue_size=0)
        self.pub_mo_viz = rospy.Publisher('/mobs', MarkerArray, queue_size=0)
        self.pub_current_obs = rospy.Publisher('/current_obs',
                                               MarkerArray,
                                               queue_size=0)
        #subscribers
        self.robotpose_sub = rospy.Subscriber('/robot_pose',
                                              PoseWithCovarianceStamped,
                                              self.callback_pose)
        self.goal_sub = rospy.Subscriber('/goal_pub', PoseStamped,
                                         self.callback_goal)
        self.costmap_converter_sub = rospy.Subscriber(
            '/costmap_converter/costmap_obstacles', ObstacleArrayMsg,
            self.callback_so)
        self.mo_sub = rospy.Subscriber('/MO_Obstacles', ObstacleArrayMsg,
                                       self.callback_mo)
        #Markers
        self.MOMarkerArray = MarkerArray()
        self.SOMarkerArray = MarkerArray()
        #casadi variables
        self.lbw = np.array(lbw)
        self.ubw = np.array(ubw)
        self.lbg = np.array(lbg).T
        self.ubg = np.array(ubg).T
        self.p = np.zeros((n_states + n_states + n_MO * (N + 1) * n_MOst) +
                          n_SO * 7)
        self.u0 = np.zeros((2, N))
        self.x_st_0 = np.matlib.repmat(np.array([[0], [0], [0.0]]), 1, N + 1).T
        self.goal = None
        self.MO_obs = None
        self.MO_data = None
        self.SO_obs = None
        self.mpc_i = 0
        self.goal_tolerance = [0.2, 0.2]
        self.tf = TransformListener()
        self.cl_obs = []
        self.cl_sizes = []

    def callback_goal(self, goal_data):  # Current way to get the goal
        yaw_goal = quaternion_to_euler(goal_data.pose.orientation.x,
                                       goal_data.pose.orientation.y,
                                       goal_data.pose.orientation.z,
                                       goal_data.pose.orientation.w)
        self.goal = np.array(([goal_data.pose.position.x],
                              [goal_data.pose.position.y], [yaw_goal]))
        goal_data.header.stamp = rospy.Time.now()
        goal_data.header.frame_id = "/map"
        self.pub_goal.publish(goal_data)

    def callback_pose(
        self, pose_data
    ):  # Update the pose either using the topics robot_pose or amcl_pose.
        yaw_pose = quaternion_to_euler(pose_data.pose.pose.orientation.x,
                                       pose_data.pose.pose.orientation.y,
                                       pose_data.pose.pose.orientation.z,
                                       pose_data.pose.pose.orientation.w)
        self.pose = np.array(([pose_data.pose.pose.position.x],
                              [pose_data.pose.pose.position.y], [yaw_pose]))

    def callback_so(self, obs_data):
        obs_data.header.frame_id = "odom"
        for i in range(len(obs_data.obstacles)):
            pt = PointStamped()
            pt.header.frame_id = "odom"
            for k in range(len(obs_data.obstacles[i].polygon.points)):
                pt.point.x = obs_data.obstacles[i].polygon.points[k].x
                pt.point.y = obs_data.obstacles[i].polygon.points[k].y
                pt.point.z = obs_data.obstacles[i].polygon.points[k].z
                tfpts = self.tf.transformPoint("map", pt)
                obs_data.obstacles[i].polygon.points[k] = tfpts.point
        self.SO_obs = obs_data

    def callback_mo(self, MO_data):
        self.MO_obs = []
        for k in range(len(MO_data.obstacles[:])):
            self.MO_obs.append(MO_data.obstacles[k])

    def compute_vel_cmds(self):
        if self.goal is not None and self.MO_obs is not None and self.SO_obs is not None:
            x0 = self.pose
            x_goal = self.goal
            print('This is the GOAL: ', x_goal)
            print('This is the ROBOT POSE:', x0)

            self.p[0:6] = np.append(x0, x_goal)

            #MO constraints
            i_pos = 6
            for k in range(N + 1):
                for i in range(n_MO):
                    self.p[i_pos + 2:i_pos + 5] = np.array([
                        self.MO_obs[i].velocities.twist.linear.x,
                        self.MO_obs[i].orientation.z, self.MO_obs[i].radius
                    ])
                    t_predicted = k * Ts
                    obs_x = self.MO_obs[i].polygon.points[
                        0].x + t_predicted * self.MO_obs[
                            i].velocities.twist.linear.x * ca.cos(
                                self.MO_obs[i].orientation.z)
                    obs_y = self.MO_obs[i].polygon.points[
                        0].y + t_predicted * self.MO_obs[
                            i].velocities.twist.linear.x * ca.sin(
                                self.MO_obs[i].orientation.z)
                    self.p[i_pos:i_pos + 2] = [obs_x, obs_y]
                    i_pos += 5

            #SO constraints
            self.p[i_pos:i_pos + n_SO * 7] = find_closest_n_so(
                self.SO_obs, x0, n_SO)
            self.cl_obs = self.p[i_pos:i_pos + n_SO * 6]
            self.cl_sizes = self.p[i_pos + n_SO * 6:i_pos + n_SO * 7]

            x0k = np.append(self.x_st_0.reshape(3 * (N + 1), 1),
                            self.u0.reshape(2 * N, 1))
            x0k = x0k.reshape(x0k.shape[0], 1)

            sol = solver(x0=x0k,
                         lbx=self.lbw,
                         ubx=self.ubw,
                         lbg=self.lbg,
                         ubg=self.ubg,
                         p=self.p)

            #print('------------')
            #print('constraints g = ', sol["g"])

            u_sol = sol.get('x')[3 * (N + 1):].reshape((N, 2))

            self.u0 = np.append(u_sol[1:, :],
                                u_sol[u_sol.shape[0] - 1, :],
                                axis=0)

            self.x_st_0 = np.reshape(sol.get('x')[0:3 * (N + 1)], (N + 1, 3))
            self.x_st_0 = np.append(self.x_st_0[1:, :],
                                    self.x_st_0[-1, :].reshape((1, 3)),
                                    axis=0)
            cmd_vel = Twist()
            cmd_vel.linear.x = u_sol[0]
            cmd_vel.angular.z = u_sol[1]
            print('------------')
            print('APPLIED CMD_VEL:', cmd_vel)
            self.pub_nav_vel.publish(cmd_vel)

            self.mpc_i = self.mpc_i + 1

            # Publish to Rviz
            self.publish_mo_marker()
            self.publish_mpc_prediction()
            self.publish_current_so()

        else:
            print(
                "Waiting for pathsplit, MO_publisher or constmap_converter_publisher"
            )

    def publish_mo_marker(self):
        self.MOMarkerArray = MarkerArray()
        for i in range(n_MO):
            marker = Marker()
            marker.id = i
            marker.header.frame_id = 'map'
            marker.header.stamp = rospy.Time.now()
            marker.type = marker.CYLINDER
            marker.action = marker.ADD
            marker.scale.x = 2 * self.MO_obs[i].radius
            marker.scale.y = 2 * self.MO_obs[i].radius
            marker.scale.z = 0.3
            marker.color.r = 1.0
            marker.color.g = 1.0
            marker.color.b = 0.0
            marker.color.a = 1.0
            marker.pose.position.x = self.MO_obs[i].polygon.points[0].x
            marker.pose.position.y = self.MO_obs[i].polygon.points[0].y
            marker.pose.position.z = 0
            marker.pose.orientation.w = 1.0
            self.MOMarkerArray.markers.append(marker)
        self.pub_mo_viz.publish(self.MOMarkerArray)

    def publish_mpc_prediction(self):
        poseArray = PoseArray()
        poseArray.header.stamp = rospy.Time.now()
        poseArray.header.frame_id = "/map"
        for k in range(len(self.x_st_0)):
            x_st = Pose()
            x_st.position.x = self.x_st_0[k, 0]
            x_st.position.y = self.x_st_0[k, 1]
            [qx, qy, qz, qw] = euler_to_quaternion(0, 0, self.x_st_0[k, 2])
            x_st.orientation.x = qx
            x_st.orientation.y = qy
            x_st.orientation.z = qz
            x_st.orientation.w = qw
            poseArray.poses.append(x_st)
        self.pub_prediction_poses.publish(poseArray)

    def publish_current_so(self):
        point_step = 0
        obs_array = []
        for k in range(n_SO):
            temp_obs = self.cl_obs[point_step:point_step + 2 *
                                   (int(self.cl_sizes[k]))]
            print('This is the temp_obs: ', temp_obs)
            obs_array.append(temp_obs)
            point_step += 6

        self.SOMarkerArray = MarkerArray()
        for k in range(n_SO):
            marker = Marker()
            marker.id = k
            marker.header.frame_id = 'map'
            marker.header.stamp = rospy.Time.now()
            marker.type = marker.LINE_STRIP
            marker.action = marker.ADD
            marker.scale.x = 0.1
            marker.color.r = 1.0
            marker.color.g = 0.0
            marker.color.b = 0.0
            marker.color.a = 1.0
            marker.points = []
            point_step = 0
            for i in range(int(self.cl_sizes[k])):
                temp_point = Point32()
                temp_point.x = obs_array[k][point_step]
                temp_point.y = obs_array[k][point_step + 1]
                temp_point.z = 0
                point_step += 2
                marker.points.append(temp_point)
                if int(self.cl_sizes[k]) == 3:
                    temp_point = Point32()
                    temp_point.x = obs_array[k][point_step - 6]
                    temp_point.y = obs_array[k][point_step - 5]
                    temp_point.z = 0
                    marker.points.append(temp_point)
            self.SOMarkerArray.markers.append(marker)
        self.pub_current_obs.publish(self.SOMarkerArray)
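
compute_vel_cmds above propagates each moving obstacle with a constant-velocity model: at prediction step k the obstacle is assumed to have travelled t_predicted = k * Ts along its heading. Below is a standalone sketch of that propagation in plain NumPy (the version above builds the same expressions with CasADi); the values are illustrative.

import numpy as np

def predict_obstacle(x0, y0, v, heading, Ts, N):
    """Constant-velocity prediction of an obstacle's (x, y) over N steps of length Ts."""
    t = np.arange(N + 1) * Ts
    return np.column_stack((x0 + t * v * np.cos(heading),
                            y0 + t * v * np.sin(heading)))

print(predict_obstacle(x0=2.0, y0=1.0, v=0.5, heading=np.pi / 4, Ts=0.2, N=5))
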
Example #28
class grasper:
    def __init__(self):
        self.listener = TransformListener()
        # Subscribe to the ROS topic that contains the grasps.
        self.sub = rospy.Subscriber('/detect_grasps/clustered_grasps',
                                    GraspConfigList, self.grasp_callback)
        self.pub = rospy.Publisher('/object_pose', Pose, queue_size=1)
        self.grasp_acc = []
        self.tempPoint = PointStamped()
        #self.msg = PointStamped()
        self.transformedPoint = PointStamped()
        self.orient = Vector3Stamped()

        ## initiate a RobotCommander object. This object is an interface to the
        ## robot as a whole
        self.robot = moveit_commander.RobotCommander()

        ## Instantiate a PlanningSceneInterface object.  This object is an interface
        ## to the world surrounding the robot.
        self.scene = moveit_commander.PlanningSceneInterface()

        ## Instantiate a MoveGroupCommander object.  This object is an interface
        ## to one group of joints.
        self.group = moveit_commander.MoveGroupCommander("manipulator")

        ## We create this DisplayTrajectory publisher which is used below to publish
        ## trajectories for RVIZ to visualize.
        self.display_trajectory_publisher = rospy.Publisher(
            '/move_group/display_planned_path',
            moveit_msgs.msg.DisplayTrajectory)

    def grasp_callback(self, msg):
        #global grasps
        self.grasps_acc = msg.grasps

        print "Grasp position: ", self.grasps_acc[0].surface
        print "header", msg.header
        self.tempPoint.point = self.grasps_acc[0].surface
        self.tempPoint.header = msg.header
        self.orient.vector = self.grasps_acc[0].binormal

        print "Orientation -grasp : ", self.grasps_acc[0].binormal

        try:

            now = rospy.Time(0)
            # self.listener.waitForTransform( "/camera_rgb_optical_frame", "/camera_link", now, rospy.Time(0.1))
            print "Transformed"
            self.transformedPoint = self.listener.transformPoint(
                "/zed_current_frame", self.tempPoint)

        except (tf.LookupException, tf.ConnectivityException):
            pass

        quaternion = tf.transformations.quaternion_from_euler(0, 0, 0)
        pose_target = geometry_msgs.msg.Pose()
        pose_target.orientation.x = quaternion[0]
        pose_target.orientation.y = quaternion[1]
        pose_target.orientation.z = quaternion[2]
        pose_target.orientation.w = quaternion[3]
        pose_target.position.x = self.transformedPoint.point.z  #self.transformedPoint.point.x
        pose_target.position.y = self.transformedPoint.point.y
        pose_target.position.z = self.transformedPoint.point.x  #self.transformedPoint.point.z

        self.group.set_pose_target(pose_target)
        plan1 = self.group.plan()
        self.pub.publish(pose_target)

        print "transformed point : "
        print "============ Waiting while RVIZ displays plan1..."
        rospy.sleep(5)
        self.group.go(wait=True)
Example #29
class ORKTabletop(object):
    """ Listens to the table and object messages from ORK. Provides ActionServer
    that assembles table and object into same message. 
    NB - the table is an axis-aligned bounding box in the kinect's frame"""
    def __init__(self, name):

        self.sub = rospy.Subscriber("/recognized_object_array", RecognizedObjectArray, self.callback)
        self.pub = rospy.Publisher('/recognized_object_array_as_point_cloud', PointCloud2)
        self.pose_pub = rospy.Publisher('/recognized_object_array_as_pose_stamped', PoseStamped)

        # We listen for ORK's MarkerArray of tables on this topic
        self.table_topic = "/marker_tables"

        self.tl = TransformListener()

        # create messages that are used to publish feedback/result.
        # accessed by multiple threads
        self._result = TabletopResult()
        self.result_lock = threading.Lock()
        # used s.t. we don't return a _result message that hasn't been updated yet. 
        self.has_data = False

        self._action_name = name
        self._as = actionlib.SimpleActionServer(self._action_name, TabletopAction, 
                                                execute_cb=self.execute_cb, auto_start=False)
        self._as.start()

    # TODO: Is this really the best structure for handling the callbacks?
    # Would it be possible to have separate callbacks for table and objects, each updating a most-recently-seen variable?
    # Or maybe use the message_filters.TimeSynchronizer class if corresponding table/object data has identical timestamps?
    def callback(self, data):
        rospy.loginfo("Objects %d", data.objects.__len__())

        table_corners = []

        # obtain table_offset and table_pose
        to = rospy.wait_for_message(self.table_topic, MarkerArray);

        # obtain Table corners ...
        rospy.loginfo("Tables hull size %d", to.markers.__len__())
        if not to.markers:
            rospy.loginfo("No tables detected")
            return
        else:
            # NB - D says that ORK has this set to filter based on height.
            # IIRC, it's 0.6-0.8m above whatever frame is set as the floor
            point_array_size = 4      # for the 4 corners of the bounding box
            for i in range (0, point_array_size):
                p = Point()
                p.x = to.markers[0].points[i].x
                p.y = to.markers[0].points[i].y
                p.z = to.markers[0].points[i].z
                table_corners.append(p)
            # this is a table pose at the edge close to the robot, in the center of x axis
            table_pose = PoseStamped()
            table_pose.header = to.markers[0].header
            table_pose.pose = to.markers[0].pose
            
        # Determine table dimensions
        rospy.loginfo('calculating table pose bounding box in frame: %s' % table_pose.header.frame_id)
        min_x = sys.float_info.max
        min_y = sys.float_info.max
        max_x = -sys.float_info.max
        max_y = -sys.float_info.max

        for i in range (table_corners.__len__()):
            if table_corners[i].x > max_x:
                max_x = table_corners[i].x
            if table_corners[i].y > max_y:
                max_y = table_corners[i].y
            if table_corners[i].x < min_x:
                min_x = table_corners[i].x
            if table_corners[i].y < min_y:
                min_y = table_corners[i].y

        table_dim = Point()
        # TODO: if we don't (can't!) compute the height, should we at least give it non-zero depth? 
        # (would also require shifting the observed centroid down by table_dim.z/2)
        table_dim.z = 0.0

        table_dim.x = abs(max_x - min_x)
        table_dim.y = abs(max_y - min_y)
        print "Dimensions: ", table_dim.x, table_dim.y

        # Temporary frame used for transformations
        table_link = 'table_link'

        # centroid of a table in table_link frame
        centroid = PoseStamped()
        centroid.header.frame_id = table_link
        centroid.header.stamp = table_pose.header.stamp
        centroid.pose.position.x = (max_x + min_x)/2.
        centroid.pose.position.y = (max_y + min_y)/2.
        centroid.pose.position.z = 0.0
        centroid.pose.orientation.x = 0.0
        centroid.pose.orientation.y = 0.0
        centroid.pose.orientation.z = 0.0
        centroid.pose.orientation.w = 1.0

        # generate transform from table_pose to our newly-defined table_link
        tt = TransformStamped()
        tt.header = table_pose.header
        tt.child_frame_id = table_link
        tt.transform.translation = table_pose.pose.position
        tt.transform.rotation = table_pose.pose.orientation
        self.tl.setTransform(tt)
        self.tl.waitForTransform(table_link, table_pose.header.frame_id, table_pose.header.stamp, rospy.Duration(3.0))
        if self.tl.canTransform(table_pose.header.frame_id, table_link, table_pose.header.stamp):
            centroid_table_pose = self.tl.transformPose(table_pose.header.frame_id, centroid)
            self.pose_pub.publish(centroid_table_pose)
        else:
            rospy.logwarn("No transform between %s and %s possible",table_pose.header.frame_id, table_link)
            return

        # transform each object into desired frame; add to list of clusters
        cluster_list = []
        for i in range(len(data.objects)):
            rospy.loginfo("Point clouds %s", len(data.objects[i].point_clouds))

            pc = data.objects[i].point_clouds[0]
            arr = pointclouds.pointcloud2_to_array(pc, 1)
            arr_xyz = pointclouds.get_xyz_points(arr)

            arr_xyz_trans = []
            for j in range(len(arr_xyz)):
                ps = PointStamped()
                ps.header.frame_id = table_link
                ps.header.stamp = table_pose.header.stamp
                ps.point.x = arr_xyz[j][0]
                ps.point.y = arr_xyz[j][1]
                ps.point.z = arr_xyz[j][2]
                if self.tl.canTransform(table_pose.header.frame_id, table_link, table_pose.header.stamp):
                    ps_in_kinect_frame = self.tl.transformPoint(table_pose.header.frame_id, ps)
                else:
                    rospy.logwarn("No transform between %s and %s possible",table_pose.header.frame_id, table_link)
                    return
                arr_xyz_trans.append([ps_in_kinect_frame.point.x, ps_in_kinect_frame.point.y, ps_in_kinect_frame.point.z])

            pc_trans = pointclouds.xyz_array_to_pointcloud2(np.asarray([arr_xyz_trans]),
                                                            table_pose.header.stamp, table_pose.header.frame_id)

            self.pub.publish(pc_trans)
            cluster_list.append(pc_trans)
            rospy.loginfo("cluster size %d", cluster_list.__len__())

        # finally - save all data in the object that'll be sent in response to actionserver requests
        with self.result_lock:
            self._result.objects = cluster_list 
            self._result.table_dims = table_dim
            self._result.table_pose = centroid_table_pose
            self.has_data = True

    def execute_cb(self, goal):
        rospy.loginfo('Executing ORKTabletop action')

        # want to get the NEXT data coming in, rather than the current one. 
        with self.result_lock:
            self.has_data = False

        rr = rospy.Rate(1.0)
        while not rospy.is_shutdown() and not self._as.is_preempt_requested():
            with self.result_lock:
                if self.has_data:
                    break
            rr.sleep()

        if self._as.is_preempt_requested():
            rospy.loginfo('%s: Preempted' % self._action_name)
            self._as.set_preempted()
        elif rospy.is_shutdown():
            self._as.set_aborted()
        else:
            with self.result_lock:
                rospy.loginfo('%s: Succeeded' % self._action_name)
                self._as.set_succeeded(self._result)
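# --- Hedged sketch (not part of the original example) -------------------------
# The TODO in ORKTabletop.callback asks whether message_filters could replace the
# wait_for_message() call. A minimal sketch of that idea, assuming the object
# array carries a header; visualization_msgs/MarkerArray is headerless, so
# allow_headerless=True (available in newer message_filters releases) is needed.
# The queue_size and slop values are illustrative assumptions.
import message_filters

def _synced_cb(objects_msg, tables_msg):
    # Called only when an object array and a table MarkerArray arrive close in time.
    rospy.loginfo("synced: %d objects, %d table markers",
                  len(objects_msg.objects), len(tables_msg.markers))

_obj_sub = message_filters.Subscriber("/recognized_object_array", RecognizedObjectArray)
_table_sub = message_filters.Subscriber("/marker_tables", MarkerArray)
_sync = message_filters.ApproximateTimeSynchronizer([_obj_sub, _table_sub],
                                                    queue_size=5, slop=0.5,
                                                    allow_headerless=True)
_sync.registerCallback(_synced_cb)
# -------------------------------------------------------------------------------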
class color_name_sample_detection(object):

  def __init__(self):
    rospy.init_node('color_name_sample_detection',log_level=rospy.INFO)
    self.monocular_img_sub = rospy.Subscriber('monocular_img',Image, queue_size=1,callback=self.handle_monocular_img, buff_size=60000000)
    self.cam_info_sub = rospy.Subscriber('cam_info',CameraInfo, queue_size=1, callback=self.handle_info)
    #I think this node only uses the search camera - JL
    #self.left_img_sub = rospy.Subscriber('left_img',Image, queue_size=1,callback=self.handle_left_img)
    #self.right_img_sub = rospy.Subscriber('right_img',Image, queue_size=1,callback=self.handle_right_img)
    #self.disp_sub = rospy.Subscriber('disp',DisparityImage, queue_size=1,callback=self.handle_disp)

    self.bridge = CvBridge()
    self.tf = TransformListener()

    maxArea = rospy.get_param('~max_area',800.0)
    minArea = rospy.get_param('~min_area',100.0)
    delta = rospy.get_param('~delta',10.0)

    self.min_size = rospy.get_param('~min_size',0.08)
    self.max_size = rospy.get_param('~max_size',0.4)

    self.mser = cv2.MSER()
    self.mser.setDouble('maxArea', maxArea)
    self.mser.setDouble('minArea', minArea)
    self.mser.setDouble('delta', delta)

    if rospy.get_param('~enable_debug',True):
      debug_img_topic = 'debug_img'
      self.debug_img_pub = rospy.Publisher(debug_img_topic, Image)

    self.named_img_point_pub = rospy.Publisher('named_img_point', NamedPoint)
    self.named_point_pub = rospy.Publisher('named_point', NamedPoint)

    color_file = rospy.get_param('~color_file')
    self.color_mat = scipy.io.loadmat(color_file)
    self.color_names = ['black','blue','brown','gray','green','orange','pink','purple','red','white','yellow']
    #self.sample_names = [None,None,'wood_block','pre_cached',None,'orange_pipe','pink_ball',None,'red_puck','pre_cached','yellow_rock']
    self.sample_names = [None,'pre_cached',None,'pre_cached',None,None,None,None,None,'pre_cached','pre_cached']
    self.sample_thresh = [None,0.1,0.1,0.1,None,0.1,0.1,None,0.1,0.1,0.1]

    self.min_disp = 0.0
    self.max_disp = 128.0

    self.disp_img = None

    self.static_mask = None

    self.nthreads = 4
    self.height = None
    self.threads = []
    self.q_img = Queue.Queue()
    self.q_proj = Queue.Queue()

  def handle_left_img(self,Image):
    detections = self.find_samples(Image)
    self.debug_img_pub.publish(self.bridge.cv_to_imgmsg(cv2.cv.fromarray(self.debug_img),'bgr8'))

  def handle_right_img(self, Image):
    detections = self.find_samples(Image)

  def handle_monocular_img(self, Image):
    detections = self.find_samples_threaded(Image)
    self.debug_img_pub.publish(self.bridge.cv_to_imgmsg(cv2.cv.fromarray(self.debug_img),'bgr8'))

  def threaded_mser(self, img, header):
    for i in range(self.nthreads):
      t = threading.Thread(
          target=self.split_mser_and_color,
          args=(img[self.height*i:self.height*(i+1),:],self.static_mask[self.height*i:self.height*(i+1),:],
            i,self.q_img,self.q_proj,header))
      self.threads.append(t)
      t.start()
    for t in self.threads:
      t.join()

  def split_mser_and_color(self, img, mask, i, q_img, q_proj, header):
    regions = self.mser.detect(img, mask)
    for r in regions:
      r += np.array([0,self.height*i])
    hulls = [cv2.convexHull(r.reshape(-1,1,2)) for r in regions]
    img = cv2.cvtColor(self.img,cv2.COLOR_BGR2RGB)
    mask = np.zeros((img.shape[0],img.shape[1]))
    for h in (hulls):
      top_index, similarity = self.compute_color_name(h,img,mask)
      if self.sample_names[top_index] is not None and similarity >= self.sample_thresh[top_index]:
        moments = cv2.moments(h)
        # converts to x,y
        location = np.array([moments['m10']/moments['m00'],moments['m01']/moments['m00']])
        named_img_point = NamedPoint()
        named_img_point.header = copy.deepcopy(header)
        named_img_point.point.x = location[0]
        named_img_point.point.y = location[1]
        named_img_point.name = self.sample_names[top_index]

        named_point = self.project_xyz_point(h, top_index, header)
        #rospy.logdebug("Named_point: %s",named_point)

        size = self.real_size_check(h,header)
        if self.min_size < size < self.max_size:
          q_img.put(named_img_point)
          q_proj.put(named_point)

  def find_samples_threaded(self, Image):
    self.img = np.asarray(self.bridge.imgmsg_to_cv(Image,'bgr8'))
    self.debug_img = self.img.copy()

    if self.static_mask is None:
      self.static_mask = np.zeros((self.img.shape[0],self.img.shape[1],1),np.uint8)
      self.static_mask[600:,:,:] = 1
    if self.height is None:
      self.height = self.img.shape[0]/self.nthreads

    gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
    hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
    flip = 255 - hsv[:,:,1]
    test = gray.astype(np.float32)*flip.astype(np.float32)
    test /= np.max(test)
    test = (test*255).astype(np.uint8)
    self.threaded_mser(test, Image.header)

    while not self.q_img.empty():
      self.named_img_point_pub.publish(self.q_img.get())
    while not self.q_proj.empty():
      pt = self.q_proj.get()
      self.named_point_pub.publish(pt)

  def find_samples(self, Image):
    self.img = np.asarray(self.bridge.imgmsg_to_cv(Image,'bgr8'))
    self.debug_img = self.img.copy()

    #if self.static_mask is None:
    #  self.static_mask = np.zeros((self.img.shape[0],self.img.shape[1],1),np.uint8)
    #  self.static_mask[400:,:,:] = 1

    lab = cv2.cvtColor(self.img, cv2.COLOR_BGR2LAB)
    gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
    #a_regions = self.mser.detect(lab[:,:,1] ,None)
    #a_hulls = [cv2.convexHull(r.reshape(-1,1,2)) for r in a_regions]
    #b_regions = self.mser.detect(lab[:,:,2] ,None)
    #b_hulls = [cv2.convexHull(r.reshape(-1,1,2)) for r in b_regions]
    #g_regions = self.mser.detect(gray, self.static_mask)
    mask = np.ones((self.img.shape[0],self.img.shape[1],1),np.uint8)
    mask[:,:100] = 0
    #mask[:,-100:] = 0
    g_regions = self.mser.detect(gray, mask)
    rospy.logdebug("number of regions: %s", len(g_regions))
    g_hulls = [cv2.convexHull(r.reshape(-1,1,2)) for r in g_regions]
    rospy.logdebug("number of hulls: %s", len(g_hulls))
    img = cv2.cvtColor(self.img,cv2.COLOR_BGR2RGB)
    mask = np.zeros((img.shape[0],img.shape[1]))
    #for h in (a_hulls + b_hulls):
    for h in (g_hulls):
      top_index, similarity = self.compute_color_name(h,img,mask)
      if self.sample_names[top_index] is not None and similarity >= self.sample_thresh[top_index]:
        moments = cv2.moments(h)
        # converts to x,y
        location = np.array([moments['m10']/moments['m00'],moments['m01']/moments['m00']])
        rospy.logdebug("Publishing Named Point")
        named_img_point = NamedPoint()
        named_img_point.header = Image.header
        named_img_point.point.x = location[0]
        named_img_point.point.y = location[1]
        named_img_point.name = self.sample_names[top_index]
        self.named_img_point_pub.publish(named_img_point)
        named_point = self.project_xyz_point(h, top_index, Image.header)
        self.named_point_pub.publish(named_point)

  def project_xyz_point(self, hull, top_index, header):
    if self.disp_img is not None:
      rect = cv2.boundingRect(hull)
      local_disp = self.disp_img[rect[1]:rect[1]+rect[3],rect[0]:rect[0]+rect[2]]
      # Trim off extreme disparities
      local_disp = cv2.threshold(local_disp.astype(np.float32), self.min_disp, 0, cv2.THRESH_TOZERO)[1]
      local_disp = cv2.threshold(local_disp.astype(np.float32), self.max_disp, 0, cv2.THRESH_TOZERO_INV)[1]
      # Sort disparities, grab ends, compute mean
      count = cv2.countNonZero(local_disp)
      local_disp = local_disp.reshape((rect[2]*rect[3],1))
      local_disp = np.sort(local_disp)
      accum_disp = local_disp[:10].sum() + local_disp[count-10:count].sum()
      ave_disp = accum_disp/20.
      # Depth in meters
      ave_depth = self.f*self.T/ave_disp
      x = rect[0]+rect[2]/2
      y = rect[1]+rect[3]/2
      XY = np.dot(self.inv_K,np.array([x,y,1]))
      XYZ = XY*ave_depth
      named_point = NamedPoint()
      named_point.name = self.sample_names[top_index]
      named_point.header = copy.deepcopy(header)
      named_point.point.x = XYZ[0]
      named_point.point.y = XYZ[1]
      named_point.point.z = XYZ[2]
      #self.named_point_pub.publish(named_point)
      return named_point
    else:
      rect = cv2.boundingRect(hull)
      x = (rect[0]+rect[2]/2)#*2.
      y = (rect[1]+rect[3]/2)#*2.
      XY = np.dot(self.inv_K,np.array([x,y,1]))
      named_point = NamedPoint()
      named_point.name = self.sample_names[top_index]
      named_point.header = copy.deepcopy(header)
      named_point.point.x = XY[0]
      named_point.point.y = XY[1]
      named_point.point.z = 1.0
      return named_point
      #self.named_point_pub.publish(named_point)


  def handle_disp(self,DisparityImage):
    self.disp_img = np.asarray(self.bridge.imgmsg_to_cv(DisparityImage.image))
    self.disp_header = copy.deepcopy(DisparityImage.header)
    self.min_disparity = DisparityImage.min_disparity
    self.max_disparity = DisparityImage.max_disparity
    self.f = DisparityImage.f
    self.T = DisparityImage.T

  def handle_info(self, CameraInfo):
    # grab camera matrix and distortion model
    self.K = CameraInfo.K
    self.D = CameraInfo.D
    self.R = CameraInfo.R
    self.P = CameraInfo.P
    self.h = CameraInfo.height
    self.w = CameraInfo.width
    self.frame_id = CameraInfo.header.frame_id
    self.P = np.asarray(self.P).reshape(3,4)
    self.K = np.asarray(self.K).reshape(3,3)
    self.inv_K = scipy.linalg.inv(self.K)

  def compute_color_name(self,hull,img,mask):
    rect = cv2.boundingRect(hull)
    cv2.drawContours(mask,[hull],-1,255,-1)
    small_img = img[rect[1]:rect[1]+rect[3],rect[0]:rect[0]+rect[2]]
    small_mask = mask[rect[1]:rect[1]+rect[3],rect[0]:rect[0]+rect[2]]/255.
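    # The w2c lookup table has one row per 32x32x32 RGB bin: each channel is
    # quantized to 32 levels (value // 8) and combined as
    # r_bin + 32*g_bin + 32*32*b_bin; e.g. pure red (255, 0, 0) maps to bin 31.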
    index_img = np.floor(small_img[:,:,0]/8) + 32*np.floor(small_img[:,:,1]/8) + 32*32*np.floor(small_img[:,:,2]/8)
    out = self.color_mat['w2c'][np.int16(index_img)]
    for i in range(out.shape[-1]):
      out[:,:,i] *= small_mask
    ave_vec= np.sum(np.sum(out,axis=0),axis=0)
    top_index = np.argmax(ave_vec)
    similarity = ave_vec[top_index]/cv2.countNonZero(small_mask)
    #if similarity > 0.1 and (top_index == 3 or top_index == 9):
    cv2.putText(self.debug_img,self.color_names[top_index] + str(similarity),(rect[0],rect[1]),cv2.FONT_HERSHEY_PLAIN,2,(255,0,255))
    if top_index == 3 or top_index == 9 or top_index == 1:
      cv2.polylines(self.debug_img,[hull],1,(255,0,255),3)
      cv2.putText(self.debug_img,self.color_names[top_index] + str(similarity),(rect[0],rect[1]),cv2.FONT_HERSHEY_PLAIN,2,(255,0,255))
    return top_index,similarity

  def cast_ray(self, point_in, tf, name):
    #rospy.logdebug("Point In: %s", point_in)

    base_link_point = tf.transformPoint('/base_link', point_in)
    t = tf.getLatestCommonTime('/base_link', point_in.header.frame_id)
    pos, quat = tf.lookupTransform('/base_link', point_in.header.frame_id, t)
    height = pos[2]
    #rospy.logdebug("Pos: %s", pos)

    x_slope = np.abs((pos[0]-base_link_point.point.x)/(pos[2]-base_link_point.point.z))
    y_slope = np.abs((pos[1]-base_link_point.point.y)/(pos[2]-base_link_point.point.z))
    #rospy.logdebug("X Slope: %s", x_slope)
    #rospy.logdebug("Y Slope: %s", y_slope)

    ground_point = np.array([0.,0.,0.])
    ground_point[0] = x_slope*height
    ground_point[1] = y_slope*height

    ground_named_point = NamedPoint()
    ground_named_point.point.x = ground_point[0]
    ground_named_point.point.y = ground_point[1]
    ground_named_point.point.z = ground_point[2]
    ground_named_point.header = copy.deepcopy(point_in.header)
    ground_named_point.header.frame_id = 'base_link'
    ground_named_point.name = name

    odom_named_point = self.tf.transformPoint('/odom',ground_named_point)

    #rospy.logdebug("Ground Point: %s", ground_named_point)
    #rospy.logdebug("Odom Point: %s", odom_named_point)

    return ground_named_point, odom_named_point
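  # Geometry note for cast_ray(): the camera sits at height pos[2] above
  # base_link's ground plane, and each slope is the horizontal offset per unit
  # of vertical drop along the pixel ray. Multiplying a slope by the camera
  # height gives the ground intersection, e.g. a slope of 2.0 at a height of
  # 1.5 m lands roughly 3.0 m out.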

  def real_size_check(self,hull,header):
    rect = cv2.boundingRect(hull)
    #top_left = np.array([rect[0],rect[1]])
    bot_left = np.array([rect[0],rect[1]+rect[3]])
    bot_right = np.array([rect[0]+rect[2],rect[1]+rect[3]])

    #rospy.logdebug("Top Left: %s", top_left)
    #rospy.logdebug("Bot Right: %s", bot_right)

    bot_left_point = PointStamped()
    bot_left_point.header = copy.deepcopy(header)
    bot_left_point.point.x = bot_left[0]
    bot_left_point.point.y = bot_left[1]
    bot_left_point.point.z = 1.0
    bot_right_point = PointStamped()
    bot_right_point.header = copy.deepcopy(header)
    bot_right_point.point.x = bot_right[0]
    bot_right_point.point.y = bot_right[1]
    bot_right_point.point.z = 1.0

    #rospy.logdebug("Top Left Point: %s", top_left_point)
    #rospy.logdebug("Bot Right Point: %s", bot_right_point)

    bot_left_ground, bot_left_odom = self.cast_ray(bot_left_point,self.tf,'bot_left')
    bot_right_ground, bot_right_odom = self.cast_ray(bot_right_point,self.tf,'bot_right')

    #rospy.logdebug("Top Left Ground: %s", top_left_ground)
    #rospy.logdebug("Bot Right Ground: %s", bot_right_ground)

    width = np.array([0.,0.])
    width[0] = bot_left_ground.point.x - bot_right_ground.point.x
    width[1] = bot_left_ground.point.y - bot_right_ground.point.y
    rospy.logdebug("Width: %s", width)
    size = scipy.linalg.norm(width)
    rospy.logdebug("Size: %s", size)
    return size
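# --- Hedged sketch (not part of the original example) -------------------------
# project_xyz_point() above recovers depth from stereo disparity and then
# back-projects the pixel through the inverse camera matrix. A self-contained
# numeric illustration with made-up values for f (pixels), baseline T (m) and K:
import numpy as np

f, T = 500.0, 0.12                      # focal length [px], stereo baseline [m]
disparity = 20.0                        # averaged disparity [px]
depth = f * T / disparity               # -> 3.0 m
K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])
u, v = 400.0, 260.0                     # pixel at the centre of the detection
xyz = depth * np.dot(np.linalg.inv(K), np.array([u, v, 1.0]))
print(xyz)                              # [0.48, 0.12, 3.0] in the camera frame
# -------------------------------------------------------------------------------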
Example #31
class Person_Follow(object):
    def __init__(self):
        rospy.init_node('person_follow')

        self.twist=Twist()
        self.twist.linear.x = 0; self.twist.linear.y = 0; self.twist.linear.z = 0
        self.twist.angular.x = 0; self.twist.angular.y = 0; self.twist.angular.z = 0

        self.dist0=0

        self.target=1
        self.p_angle=.01
        self.i_angle=.0005
        self.p_d=1
        self.i_d=.005
        self.error_angle_sum=0
        self.error_d_sum=0
        
        self.centroid=Point(x=0,y=0)
        self.header_msg=Header(stamp=rospy.Time.now(), frame_id='base_link')



    def Find_Pos(self, Data):

        d={}
        for i in range(len(Data.ranges)):
            if(Data.ranges[i]!=0):
                d[i]=Data.ranges[i]

        d_sum_x=0
        d_sum_y=0
        d_count=0
        for key in d:
            if key>345 or key<15:
                
                d_sum_x+=d[key]*math.cos(math.radians(key))
                d_sum_y+=d[key]*math.sin(math.radians(key))
                d_count+=1
                


        self.dist0=Data.ranges[0]
        d_avg_x=d_sum_x/d_count
        d_avg_y=d_sum_y/d_count

        self.centroid.x = d_avg_x
        self.centroid.y = d_avg_y

#        print self.centroid.x


    def run(self):
        
        rospy.Subscriber("/scan",LaserScan,self.Find_Pos,queue_size=10)
        self.pub = rospy.Publisher('cmd_vel',Twist,queue_size=10)
      
        self.t=TransformListener()

        self.pub_centroid = rospy.Publisher('/centroid',PointStamped,queue_size=10)
        r = rospy.Rate(30)
        while not rospy.is_shutdown():
            theta=0
            if self.centroid.x!=0:
                theta = 3*math.atan2(self.centroid.y,self.centroid.x)
                print(theta)
            self.twist.angular.z = theta

            print(self.centroid.x)
            if self.dist0 == 0:
                speed = 1
            else:
                speed = .1 *(self.dist0-.5)
            
            self.twist.linear.x=speed

            self.pub.publish(self.twist)
            #print self.t.allFramesAsString()
            try:
                self.header_msg.stamp = rospy.Time.now()
                point_stamped_msg = PointStamped(header=self.header_msg,
                                                 point=Point(x=-self.centroid.x, y=-self.centroid.y))
                #print self.header_msg
                self.t.waitForTransform('odom','base_link',self.header_msg.stamp, rospy.Duration(0.5))
                point_stamped_msg_odom = self.t.transformPoint('/odom',point_stamped_msg)
                #print point_stamped_msg_odom
                self.pub_centroid.publish(point_stamped_msg_odom)
            except Exception as inst:
                print(inst)
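# --- Hedged sketch (not part of the original example) -------------------------
# Person_Follow declares PI gains (p_angle, i_angle, p_d, i_d) and error sums
# that run() never uses; the loop instead hard-codes 3*atan2(...) and
# 0.1*(dist - 0.5). One way those gains could be wired into a PI step, purely
# as an illustration (error definitions and signs are assumptions):
def pi_step(angle_error, dist_error, angle_sum, dist_sum, dt,
            p_angle=.01, i_angle=.0005, p_d=1, i_d=.005):
    angle_sum += angle_error * dt
    dist_sum += dist_error * dt
    angular_z = p_angle * angle_error + i_angle * angle_sum
    linear_x = p_d * dist_error + i_d * dist_sum
    return linear_x, angular_z, angle_sum, dist_sum
# -------------------------------------------------------------------------------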
class TestMotionExecutionBuffer(unittest.TestCase):
    def setUp(self):

        rospy.init_node('test_motion_execution_buffer')

        self.tf = TransformListener()

        self.obj_pub = rospy.Publisher('collision_object',
                                       CollisionObject,
                                       latch=True)

        self.move_arm_action_client = actionlib.SimpleActionClient(
            "move_right_arm", MoveArmAction)

        self.move_arm_action_client.wait_for_server()

        obj1 = CollisionObject()

        obj1.header.stamp = rospy.Time.now()
        obj1.header.frame_id = "base_link"
        obj1.id = "obj1"
        obj1.operation.operation = arm_navigation_msgs.msg.CollisionObjectOperation.ADD
        obj1.shapes = [Shape() for _ in range(1)]
        obj1.shapes[0].type = Shape.CYLINDER
        obj1.shapes[0].dimensions = [float() for _ in range(2)]
        obj1.shapes[0].dimensions[0] = .1
        obj1.shapes[0].dimensions[1] = 1.5
        obj1.poses = [Pose() for _ in range(1)]
        obj1.poses[0].position.x = .6
        obj1.poses[0].position.y = -.6
        obj1.poses[0].position.z = .75
        obj1.poses[0].orientation.x = 0
        obj1.poses[0].orientation.y = 0
        obj1.poses[0].orientation.z = 0
        obj1.poses[0].orientation.w = 1

        self.obj_pub.publish(obj1)

        rospy.sleep(2.0)

    def tearDown(self):
        obj1 = CollisionObject()
        obj1.header.stamp = rospy.Time.now()
        obj1.header.frame_id = "base_link"
        obj1.id = "all"
        obj1.operation.operation = arm_navigation_msgs.msg.CollisionObjectOperation.REMOVE

        self.obj_pub.publish(obj1)

        rospy.sleep(2.0)

    def testMotionExecutionBuffer(self):

        global padd_name
        global extra_buffer

        #too much trouble to read for now
        allow_padd = .05  #rospy.get_param(padd_name)

        joint_names = [
            '%s_%s' % ('r', j) for j in [
                'shoulder_pan_joint', 'shoulder_lift_joint',
                'upper_arm_roll_joint', 'elbow_flex_joint',
                'forearm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint'
            ]
        ]
        goal = MoveArmGoal()

        goal.motion_plan_request.goal_constraints.joint_constraints = [
            JointConstraint() for i in range(len(joint_names))
        ]

        goal.motion_plan_request.group_name = "right_arm"
        goal.motion_plan_request.num_planning_attempts = 1
        goal.motion_plan_request.allowed_planning_time = rospy.Duration(5.)
        goal.motion_plan_request.planner_id = ""
        goal.planner_service_name = "ompl_planning/plan_kinematic_path"

        goal.motion_plan_request.goal_constraints.joint_constraints = [
            JointConstraint() for i in range(len(joint_names))
        ]
        for i in range(len(joint_names)):
            goal.motion_plan_request.goal_constraints.joint_constraints[
                i].joint_name = joint_names[i]
            goal.motion_plan_request.goal_constraints.joint_constraints[
                i].position = 0.0
            goal.motion_plan_request.goal_constraints.joint_constraints[
                i].tolerance_above = 0.08
            goal.motion_plan_request.goal_constraints.joint_constraints[
                i].tolerance_below = 0.08

        for z in range(2):

            min_dist = 1000

            if (z % 2 == 0):
                goal.motion_plan_request.goal_constraints.joint_constraints[
                    0].position = -2.0
                goal.motion_plan_request.goal_constraints.joint_constraints[
                    3].position = -0.2
                goal.motion_plan_request.goal_constraints.joint_constraints[
                    5].position = -0.2
            else:
                goal.motion_plan_request.goal_constraints.joint_constraints[
                    0].position = 0.0
                goal.motion_plan_request.goal_constraints.joint_constraints[
                    3].position = -0.2
                goal.motion_plan_request.goal_constraints.joint_constraints[
                    5].position = -0.2

            for x in range(3):

                self.move_arm_action_client.send_goal(goal)

                r = rospy.Rate(10)

                while True:
                    cur_state = self.move_arm_action_client.get_state()
                    if (cur_state != actionlib_msgs.msg.GoalStatus.ACTIVE
                            and cur_state !=
                            actionlib_msgs.msg.GoalStatus.PENDING):
                        break

                    #getting right finger tip link in base_link frame
                    t = self.tf.getLatestCommonTime(
                        "/base_link", "/r_gripper_r_finger_tip_link")
                    finger_point = PointStamped()
                    finger_point.header.frame_id = "/r_gripper_r_finger_tip_link"
                    finger_point.header.stamp = t
                    finger_point.point.x = 0
                    finger_point.point.y = 0
                    finger_point.point.z = 0
                    finger_point_base = self.tf.transformPoint(
                        "base_link", finger_point)

                    distance = math.sqrt(
                        math.pow(finger_point_base.point.x - .6, 2) +
                        math.pow(finger_point_base.point.y + .6, 2))

                    # pole is .1 in diameter
                    distance -= .1

                    if distance < min_dist:
                        rospy.loginfo("X: %g Y: %g Dist: %g",
                                      finger_point_base.point.x,
                                      finger_point_base.point.y, distance)
                        min_dist = distance

                    r.sleep()

                end_state = self.move_arm_action_client.get_state()

                if (end_state == actionlib_msgs.msg.GoalStatus.SUCCEEDED):
                    break

            rospy.loginfo("Min dist %d is %g", z, min_dist)

            #should be a .5 buffer, allowing .1 buffer
            self.failIf(min_dist < (allow_padd - extra_buffer))

            final_state = self.move_arm_action_client.get_state()

            self.assertEqual(final_state,
                             actionlib_msgs.msg.GoalStatus.SUCCEEDED)

    def testAllowedNotAllowedInitialContact(self):

        #adding object in collision with base

        obj2 = CollisionObject()

        obj2.header.stamp = rospy.Time.now()
        obj2.header.frame_id = "base_link"
        obj2.id = "base_object"
        obj2.operation.operation = arm_navigation_msgs.msg.CollisionObjectOperation.ADD
        obj2.shapes = [Shape() for _ in range(1)]
        obj2.shapes[0].type = Shape.BOX
        obj2.shapes[0].dimensions = [float() for _ in range(3)]
        obj2.shapes[0].dimensions[0] = .1
        obj2.shapes[0].dimensions[1] = .1
        obj2.shapes[0].dimensions[2] = .1
        obj2.poses = [Pose() for _ in range(1)]
        obj2.poses[0].position.x = 0
        obj2.poses[0].position.y = 0
        obj2.poses[0].position.z = 0
        obj2.poses[0].orientation.x = 0
        obj2.poses[0].orientation.y = 0
        obj2.poses[0].orientation.z = 0
        obj2.poses[0].orientation.w = 1

        self.obj_pub.publish(obj2)

        rospy.sleep(5.)

        joint_names = [
            '%s_%s' % ('r', j) for j in [
                'shoulder_pan_joint', 'shoulder_lift_joint',
                'upper_arm_roll_joint', 'elbow_flex_joint',
                'forearm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint'
            ]
        ]
        goal = MoveArmGoal()

        goal.motion_plan_request.goal_constraints.joint_constraints = [
            JointConstraint() for i in range(len(joint_names))
        ]

        goal.motion_plan_request.group_name = "right_arm"
        goal.motion_plan_request.num_planning_attempts = 1
        goal.motion_plan_request.allowed_planning_time = rospy.Duration(5.)
        goal.motion_plan_request.planner_id = ""
        goal.planner_service_name = "ompl_planning/plan_kinematic_path"

        goal.motion_plan_request.goal_constraints.joint_constraints = [
            JointConstraint() for i in range(len(joint_names))
        ]
        for i in range(len(joint_names)):
            goal.motion_plan_request.goal_constraints.joint_constraints[
                i].joint_name = joint_names[i]
            goal.motion_plan_request.goal_constraints.joint_constraints[
                i].position = 0.0
            goal.motion_plan_request.goal_constraints.joint_constraints[
                i].tolerance_above = 0.08
            goal.motion_plan_request.goal_constraints.joint_constraints[
                i].tolerance_below = 0.08

        goal.motion_plan_request.goal_constraints.joint_constraints[
            0].position = -2.0
        goal.motion_plan_request.goal_constraints.joint_constraints[
            3].position = -0.2
        goal.motion_plan_request.goal_constraints.joint_constraints[
            5].position = -0.2

        self.move_arm_action_client.send_goal(goal)

        r = rospy.Rate(10)

        while True:
            cur_state = self.move_arm_action_client.get_state()
            if (cur_state != actionlib_msgs.msg.GoalStatus.ACTIVE
                    and cur_state != actionlib_msgs.msg.GoalStatus.PENDING):
                break
            r.sleep()

        #should still have succeeded
        final_state = self.move_arm_action_client.get_state()
        self.failIf(final_state != actionlib_msgs.msg.GoalStatus.SUCCEEDED)

        # but we can still overwrite
        coll = CollisionOperation()
        coll.object1 = "base_link"
        coll.object2 = coll.COLLISION_SET_OBJECTS
        coll.operation = coll.ENABLE

        goal.motion_plan_request.ordered_collision_operations.collision_operations.append(
            coll)

        goal.motion_plan_request.goal_constraints.joint_constraints[
            0].position = 0.0
        goal.motion_plan_request.goal_constraints.joint_constraints[
            3].position = -0.2
        goal.motion_plan_request.goal_constraints.joint_constraints[
            5].position = -0.2

        self.move_arm_action_client.send_goal(goal)

        r = rospy.Rate(10)

        while True:
            cur_state = self.move_arm_action_client.get_state()
            if (cur_state != actionlib_msgs.msg.GoalStatus.ACTIVE
                    and cur_state != actionlib_msgs.msg.GoalStatus.PENDING):
                break
            r.sleep()

        #with base_link vs. objects collision checking re-enabled, the goal should NOT have succeeded
        final_state = self.move_arm_action_client.get_state()
        self.failIf(final_state == actionlib_msgs.msg.GoalStatus.SUCCEEDED)
class GridSonarSrv():
    def __init__(self, SONAR_FRAME, MAP_FRAME, ROBOT_FRAME):
        # GOAL_LIST_DIR = rospy.get_param('~goal_list_dir')
        # self.path = MAP_DIR
        self.sonar_frame = SONAR_FRAME
        self.map_frame = MAP_FRAME
        self.robot_frame = ROBOT_FRAME
        self.mapinfo = OccupancyGrid()
        self.pose_msg = PoseStamped()
        self.sonar_msg = []

        self.tf_listener = TransformListener()
        rospy.Service('api/grid_sonar', GridSonar, self.handle_gridsonar)
        rospy.Service('api/rt_grid_sonar', GridSonar, self.handle_realtimesonar)
        rospy.Subscriber("map", OccupancyGrid, self.map_callback, queue_size=1)
        rospy.Subscriber('range_dist', Sonar, self.sonar_callback, queue_size=1)
        rospy.Subscriber('pose', PoseStamped, self.pose_callback, queue_size=1)

    def pose_callback(self, msg):
        self.pose_msg = msg

    def handle_realtimesonar(self, req):
        i = 0
        sonar = {}
        rot = self.get_rotate_matrix()[0:3]

        for t in self.sonar_frame:
            pt = {}
            sonar_pt = PointStamped()
            sonar_pt.header.frame_id = t
            # transform sonar center point from sonar to base_link
            # self.tf_listener.waitForTransform(self.map_frame, t, rospy.Time.now(),
            #                                   rospy.Duration(1.0))
            temp = self.tf_listener.transformPoint(self.robot_frame, sonar_pt)

            # calc sonar center coordinate in map
            # p = np.dot([temp.point.x, temp.point.y, 0], rot)
            # p = [p[0]+self.pose_msg.pose.position.x, p[1]+self.pose_msg.pose.position.y, 0]
            p = [temp.point.x * rot[0][0] + temp.point.y * rot[1][0] + self.pose_msg.pose.position.x,
                 temp.point.x * rot[0][1] + temp.point.y * rot[1][1] + self.pose_msg.pose.position.y]

            # calc sonar center grid in map
            start_x = (int)((p[0] - self.mapinfo.info.origin.position.x)
                            / self.mapinfo.info.resolution)
            start_y = (int)((p[1] - self.mapinfo.info.origin.position.y)
                            / self.mapinfo.info.resolution)

            # calc sonar end point from sonar to base_link
            sonar_pt.point.x = self.sonar_msg[i] / 100.0
            i += 1
            temp = self.tf_listener.transformPoint(self.robot_frame, sonar_pt)
            # calc sonar end coordinate in map
            # p = np.dot([temp.point.x, temp.point.y, 0], rot)
            # p = [p[0]+self.pose_msg.pose.position.x, p[1]+self.pose_msg.pose.position.y, 0]
            p = [temp.point.x * rot[0][0] + temp.point.y * rot[1][0] + self.pose_msg.pose.position.x,
                 temp.point.x * rot[0][1] + temp.point.y * rot[1][1] + self.pose_msg.pose.position.y]

            # calc sonar end grid in map
            end_x = (int)((p[0] - self.mapinfo.info.origin.position.x)
                          / self.mapinfo.info.resolution)
            end_y = (int)((p[1] - self.mapinfo.info.origin.position.y)
                          / self.mapinfo.info.resolution)

            pt['start'] = [start_x, start_y]
            pt['end'] = [end_x, end_y]
            sonar[t] = pt

        mapInfo = {}
        mapInfo['gridWidth'] = self.mapinfo.info.width
        mapInfo['gridHeight'] = self.mapinfo.info.height
        mapInfo['resolution'] = self.mapinfo.info.resolution

        res = GridSonarResponse()
        res.gridPoint = json.dumps(sonar)
        res.mapInfo = json.dumps(mapInfo)
        res.msg = 'success'
        return res

    def get_rotate_matrix(self):
        quat = (-self.pose_msg.pose.orientation.x, -self.pose_msg.pose.orientation.y,
                -self.pose_msg.pose.orientation.z, self.pose_msg.pose.orientation.w)
        return self.tf_listener.fromTranslationRotation(translation=(0, 0, 0),
                                                        rotation=quat)

    def handle_gridsonar(self, req):
        i = 0
        sonar = {}
        for t in self.sonar_frame:
            pt = {}
            sonar_pt = PointStamped()
            sonar_pt.header.frame_id = t
            # transform sonar center point from sonar to base_link
            self.tf_listener.waitForTransform(self.robot_frame, t, rospy.Time.now(),
                                              rospy.Duration(1.0))
            temp = self.tf_listener.transformPoint(self.robot_frame, sonar_pt)
            # calc sonar center grid in map
            # start_x = (int)((temp.point.x - self.mapinfo.info.origin.position.x)
            #                 / self.mapinfo.info.resolution)
            # start_y = (int)((temp.point.y - self.mapinfo.info.origin.position.y)
            #                 / self.mapinfo.info.resolution)
            start_x = (int)(temp.point.x / 0.01)
            start_y = (int)(temp.point.y / 0.01)

            # calc sonar end point from sonar to base_link
            sonar_pt.point.x = self.sonar_msg[i] / 100.0
            i += 1
            temp = self.tf_listener.transformPoint(self.robot_frame, sonar_pt)
            # calc sonar end grid in map
            # end_x = (int)((temp.point.x - self.mapinfo.info.origin.position.x)
            #               / self.mapinfo.info.resolution)
            # end_y = (int)((temp.point.y - self.mapinfo.info.origin.position.y)
            #               / self.mapinfo.info.resolution)
            end_x = (int)(temp.point.x / 0.01)
            end_y = (int)(temp.point.y / 0.01)

            pt['start'] = [start_x, start_y]
            pt['end'] = [end_x, end_y]
            pt['range'] = sonar_pt.point.x
            sonar[t] = pt

        mapInfo = {}
        # mapInfo['gridWidth'] = self.mapinfo.info.width
        # mapInfo['gridHeight'] = self.mapinfo.info.height
        # mapInfo['resolution'] = self.mapinfo.info.resolution

        res = GridSonarResponse()
        res.gridPoint = json.dumps(sonar)
        res.mapInfo = json.dumps(mapInfo)
        res.msg = 'success'
        return res

    def map_callback(self, msg):
        self.mapinfo.info = msg.info

    def sonar_callback(self, msg):
        self.sonar_msg = list(map(int, msg.ranges))
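# --- Hedged sketch (not part of the original example) -------------------------
# handle_realtimesonar() above converts a point in the map frame to grid indices
# with (world - map_origin) / resolution. A standalone illustration; the origin
# and resolution values are made-up assumptions:
def world_to_grid(wx, wy, origin_x=-10.0, origin_y=-10.0, resolution=0.05):
    gx = int((wx - origin_x) / resolution)
    gy = int((wy - origin_y) / resolution)
    return gx, gy

# e.g. a sonar return at (1.25, -0.40) in the map frame:
# world_to_grid(1.25, -0.40) -> (225, 192)
# -------------------------------------------------------------------------------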
Example #34
class PR2Greeter:
    
    def __init__(self,debug = False, robot_frame = "odom_combined"):
        
        self._tf = TransformListener()
        
        self.snd_handle = SoundClient()
        
        rospy.sleep(1)
        
        self.snd_handle.say('Hello world!')
        
        rospy.sleep(1)
        
        self._debug = debug
        self._robot_frame = robot_frame
        
        self._point_sub = rospy.Subscriber('nearest_face', PointStamped, self.face_cb)
        
        self._head_action_cl = actionlib.SimpleActionClient('/head_traj_controller/point_head_action', pr2_controllers_msgs.msg.PointHeadAction)
        
        self._left_arm = MoveGroupCommander("left_arm")
        self._right_arm = MoveGroupCommander("right_arm")
        
        print "r.f.: " + self._left_arm.get_pose_reference_frame()

        self.face = None
        self.face_from = rospy.Time(0)
        self.face_last_dist = 0
        
        self.l_home_pose = [0.283, 0.295, 0.537, -1.646, 0.468, -1.735]
        
        self.l_wave_1 = [-0.1, 0.6, 1.15, -1.7, -0.97, -1.6]
        self.l_wave_2 = [-0.1, 0.6, 1.15,  1.7, -0.97,  1.6]
        
        self.r_home_pose =   [0.124, -0.481, 0.439, -1.548, 0.36, -0.035]
        self.r_advert = [0.521, -0.508, 0.845, -1.548, 0.36, -0.035]
        
        self.no_face_random_delay = None
        
        self._initialized = False
        
        self._timer = rospy.Timer(rospy.Duration(1.0), self.timer)
        
        
        self._move_buff = Queue.Queue()
        
        self._head_buff = Queue.Queue()
        
        self._move_thread = threading.Thread(target=self.movements)
        self._move_thread.daemon = True
        self._move_thread.start()
        
        self._head_thread = threading.Thread(target=self.head)
        self._head_thread.daemon = True
        self._head_thread.start()
        
        self.new_face = False
        self.face_last_dist = 0.0
        
        self.face_counter = 0
        
        self.actions = [self.introduceAction, self.waveHandAction, self.lookAroundAction, self.lookAroundAction, self.lookAroundAction, self.advertAction, self.numberOfFacesAction]
        
        self.goodbye_strings = ["Thanks for stopping by.","Enjoy the event.","See you!", "Have a nice day!"]
        self.invite_strings = ["Hello. It's nice to see you.","Come here and take some flyer.", "I hope you are enjoying the event."]
        
        rospy.loginfo("Ready")
    
    def getRandomFromArray(self, arr):
        
        idx = random.randint(0,len(arr)-1)
        
        return arr[idx]
    
    def numberOfFacesAction(self):
        
        self.snd_handle.say("Today I already saw " + str(self.face_counter) + " faces.")
        rospy.sleep(1)
    
    def advertAction(self):
        
        self.snd_handle.say("Hello. Here are some posters for you.")
        rospy.sleep(1)
        
        self.go(self._right_arm, self.r_advert)
        
    def introduceAction(self):
        
        self.snd_handle.say("Hello. I'm PR2 robot. Come here to check me.")
        rospy.sleep(1)
        
        
    def waveHandAction(self):
        
        self.snd_handle.say("I'm here. Please come to see me.")
        rospy.sleep(1)
        
        rand = random.randint(1,3)
        
        for _ in range(rand):
            
            self.wave()
            
        self.go(self._left_arm, self.l_home_pose)
        
        rospy.loginfo("Waving")
        
    def lookAroundAction(self):
        
        self.snd_handle.say("I'm looking for somebody. Please come closer.")
        rospy.sleep(1)
        
        p = PointStamped()
        p.header.stamp = rospy.Time.now()
        p.header.frame_id = "/base_link"
        
        p.point.x = 2.0
        
        sign = random.choice([-1, 1])
        
        p.point.y = sign*random.uniform(1.5, 0.5)
        p.point.z = random.uniform(1.7, 0.2)
        self._head_buff.put(copy.deepcopy(p))
        
        p.point.y = -1*sign*random.uniform(1.5, 0.5)
        p.point.z = random.uniform(1.7, 0.2)
        self._head_buff.put(copy.deepcopy(p))
        
        p.point.y = sign*random.uniform(1.5, 0.5)
        p.point.z = random.uniform(1.7, 0.2)
        self._head_buff.put(copy.deepcopy(p))
        
        p.point.y = -1*sign*random.uniform(1.5, 0.5)
        p.point.z = random.uniform(1.7, 0.2)
        self._head_buff.put(copy.deepcopy(p))
        
        rospy.loginfo("Looking around")
        
    def getPointDist(self,pt):
        
        assert(self.face is not None)
        
        # first, get my position
        p = PoseStamped()
        
        p.header.frame_id = "base_link"
        p.header.stamp = rospy.Time.now() - rospy.Duration(0.5)
        
        p.pose.position.x = 0
        p.pose.position.y = 0
        p.pose.position.z = 0
        p.pose.orientation.x = 0
        p.pose.orientation.y = 0
        p.pose.orientation.z = 0
        p.pose.orientation.w = 1
        
        try:
            
            self._tf.waitForTransform(p.header.frame_id, self._robot_frame, p.header.stamp, rospy.Duration(2))
            p = self._tf.transformPose(self._robot_frame, p)
            
        except:
            
            rospy.logerr("TF error!")
            return None
        
        return sqrt(pow(p.pose.position.x - pt.point.x, 2) + pow(p.pose.position.y - pt.point.y, 2) + pow(p.pose.position.z - pt.point.z, 2))
        
        
        
    def getPoseStamped(self, group, c):
        
        assert(len(c)==6)
        
        p = PoseStamped()
        
        p.header.frame_id = "base_link"
        p.header.stamp = rospy.Time.now() - rospy.Duration(0.5)
        
        p.pose.position.x = c[0]
        p.pose.position.y = c[1]
        p.pose.position.z = c[2]
        
        quat = tf.transformations.quaternion_from_euler(c[3], c[4], c[5])
        
        p.pose.orientation.x = quat[0]
        p.pose.orientation.y = quat[1]
        p.pose.orientation.z = quat[2]
        p.pose.orientation.w = quat[3]
        
        try:
            
            self._tf.waitForTransform(p.header.frame_id, group.get_pose_reference_frame(), p.header.stamp, rospy.Duration(2))
            p = self._tf.transformPose(group.get_pose_reference_frame(), p)
            
        except:
            
            rospy.logerr("TF error!")
            return None
        
        return p
    
    def go(self,group,where):
        
        self._move_buff.put((group,where))
        
    def wave(self):
        
        self.go(self._left_arm, self.l_wave_1)
        self.go(self._left_arm, self.l_wave_2)
        self.go(self._left_arm, self.l_wave_1)
        
    def head(self):
        
        self._head_action_cl.wait_for_server()
        
        while not rospy.is_shutdown():
            
            target = self._head_buff.get()
            
            #print "head point goal"
            #print target
            
            # point PR2's head there (http://wiki.ros.org/pr2_controllers/Tutorials/Moving%20the%20Head)
            goal = pr2_controllers_msgs.msg.PointHeadGoal()
        
            goal.target = target
            goal.pointing_frame = "high_def_frame"
            goal.pointing_axis.x = 1
            goal.pointing_axis.y = 0
            goal.pointing_axis.z = 0
            
            self._head_action_cl.send_goal(goal)
            
            self._head_action_cl.wait_for_result(rospy.Duration.from_sec(5.0))
            
            self._head_buff.task_done()
        
    def movements(self):
        
        while not rospy.is_shutdown():
            
            (group, where) = self._move_buff.get()
            
            group.set_start_state_to_current_state()
            p = self.getPoseStamped(group, where)
            if p is None:
                
                self._move_buff.task_done()
                continue
                
            group.set_pose_target(p)
            
            self._move_buff.task_done()
            
            group.go(wait = True)
    
    def timer(self,event):
        
        if self._initialized is False:
            
            rospy.loginfo("Moving arms to home positions")
            
            self.init_head()
            self.go(self._left_arm, self.l_home_pose)
            self.go(self._right_arm, self.r_home_pose)
            self._move_buff.join()
            
            self.snd_handle.say("I'm ready for a great job.")
            self._initialized = True
        
        
        if self.face is None:
            
            if (self.no_face_random_delay is None):
                
                delay = random.uniform(5, 20)
                self.no_face_random_delay = rospy.Time.now() + rospy.Duration(delay)
                
                rospy.loginfo("Random delay: " + str(delay))
                
                return
                
            else:
                
                if rospy.Time.now() < self.no_face_random_delay:
                    
                    return
            
            self.init_head()
            self.go(self._left_arm, self.l_home_pose)
            self.go(self._right_arm, self.r_home_pose)
                
            rospy.loginfo("Starting selected action")
                
            action = self.getRandomFromArray(self.actions)
            
            action()
            
            delay = random.uniform(5, 30)
            self.no_face_random_delay = rospy.Time.now() + rospy.Duration(delay)
            
            return
        
        else:
            
            self.no_face_random_delay = None
        
        if self.new_face:
            
            self.face_counter = self.face_counter + 1
            
            self.new_face = False
            #cd = getPointDist(self.face)
            
            # TODO decide action based on distance ?
            self.go(self._left_arm, self.l_home_pose)
            self.go(self._right_arm, self.r_advert)
            
            string = self.getRandomFromArray(self.invite_strings)
            self.snd_handle.say(string)
            
            # TODO wait some min. time + say something
        
        # after 20 seconds of no detected face, let's continue 
        if self.face.header.stamp + rospy.Duration(20) < rospy.Time.now():
            
            string = self.getRandomFromArray(self.goodbye_strings)
            self.snd_handle.say(string)
            
            self.init_head()
            
            self.go(self._left_arm, self.l_home_pose)
            self.go(self._right_arm, self.r_home_pose)
            self.face = None
            return
        
        self._head_buff.put(self.face)
            
     
    def init_head(self):
        
        p = PointStamped()
        p.header.stamp = rospy.Time.now()
        p.header.frame_id = "/base_link"
        
        p.point.x = 2.0
        p.point.y = 0.0
        p.point.z = 1.7
        
        self._head_buff.put(p)
        
    def face_cb(self,point):
        
        # transform point
        
        try:
        
            self._tf.waitForTransform(point.header.frame_id, self._robot_frame, point.header.stamp, rospy.Duration(2))
            pt = self._tf.transformPoint(self._robot_frame, point)
            
        except:
            
            rospy.logerr("Transform error")
            return
        
        if self.face is not None:
        
            cd = self.getPointDist(pt) # current distance
            dd = fabs(self.face_last_dist - cd) # change in distance
            
            if dd < 0.2:
        
                self.face.header = pt.header
                
                # filter x,y,z values a bit (simple IIR low-pass: new = (15*old + measurement)/16)
                self.face.point.x = (15*self.face.point.x + pt.point.x)/16
                self.face.point.y = (15*self.face.point.y + pt.point.y)/16
                self.face.point.z = (15*self.face.point.z + pt.point.z)/16
                
            else:
                
               self.new_face = True
               self.face = pt
               
            self.face_last_dist = cd
            
        else:
            
            self.new_face = True
            self.face = pt
            
        
        if self.face_from == rospy.Time(0):
            
            #self.snd_handle.say('Hello. Come closer.')
            rospy.loginfo("New face.")
            self.face_from = self.face.header.stamp
Example #35
class HandoverAdaptionInit(EventState):

    ''' Returns which hand of the handover partner is detected in the robot's workspace. '''

    def __init__(self, topic='/hace/people', x_min=0, x_max=0.5, y_min=-0.75, y_max=0.05, z_min=-0.25, z_max=0.55):

        super(HandoverAdaptionInit, self).__init__(outcomes=['right_hand_in_ws', 'left_hand_in_ws', 'error'])

        rospy.loginfo('Starting initiation state.')

        # define workspace
        self._x_min = x_min
        self._x_max = x_max
        self._y_min = y_min
        self._y_max = y_max
        self._z_min = z_min
        self._z_max = z_max

        self._ws_transformed = False
        self._tf = TransformListener()
        self._topic = topic

        self._person = None
        self._persons = None

        self._sub = ProxySubscriberCached({self._topic: MinimalHumans})

        self._error = False

    def execute(self, userdata):

        self._persons = self._sub.get_last_msg(self._topic)

        if self._persons is not None:

            self._person = self._persons.humans[0]

            if self._person.right_hand is not None:

                hh_position = PointStamped()
                hh_position.header.frame_id = 'camera_depth_frame'
                hh_position.point.x = self._person.right_hand.position.x
                hh_position.point.y = self._person.right_hand.position.y
                hh_position.point.z = self._person.right_hand.position.z

                t_hh_position = self._tf.transformPoint('/upper', hh_position)

                if t_hh_position.point.x > self._x_min and t_hh_position.point.x < self._x_max and \
                   t_hh_position.point.y > self._y_min and t_hh_position.point.y < self._y_max and \
                   t_hh_position.point.z > self._z_min and t_hh_position.point.z < self._z_max:
                    return 'right_hand_in_ws'

            elif self._person.left_hand is not None:
                left_hand_pos_x = self._person.left_hand.position.x
                left_hand_pos_y = self._person.left_hand.position.y
                left_hand_pos_z = self._person.left_hand.position.z

                if left_hand_pos_x > self._x_min and left_hand_pos_x < self._x_max and \
                   left_hand_pos_y > self._y_min and left_hand_pos_y < self._y_max and \
                   left_hand_pos_z > self._z_min and left_hand_pos_z < self._z_max:
                    return 'left_hand_in_ws'

        if self._error:
            return 'error'

    def on_enter(self, userdata):

        self._persons = self._sub.get_last_msg(self._topic)

        if self._persons is not None:
            self._person = self._persons.humans[0]

        self._error = False

    def on_exit(self, userdata):

        rospy.loginfo('Exit initiation of adaption.')
Example #36
#!/usr/bin/env python

import rospy
from tf import TransformListener

rospy.init_node('arm_to_pos', anonymous=True)
# Initialize the listener (needs some time to subscribe internally to TF and fill its buffer)
tl = TransformListener()
# Our point would look like this
from geometry_msgs.msg import PointStamped
while not rospy.is_shutdown():
    rospy.sleep(1)  # give the listener time after initialization so its TF buffer is filled before the point is stamped
    p = PointStamped()
    p.header.stamp = rospy.Time.now()
    rospy.sleep(1)  # extra margin so the lookup below is less likely to hit an extrapolation error
    p.header.frame_id = '/base_footprint'
    p.point.x = 1.0
    p.point.y = 0.5
    p.point.z = 0.0
    # Transform the point from base_footprint to map
    map_p = tl.transformPoint('map', p)
    print(map_p)
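# --- Hedged sketch (not part of the original example) -------------------------
# The fixed sleeps above only give the listener's buffer time to fill. A variant
# that waits explicitly for the transform and stamps the point with Time(0)
# (i.e. "use the latest available data"); the 4-second timeout is an
# illustrative assumption:
import rospy
from tf import TransformListener
from geometry_msgs.msg import PointStamped

rospy.init_node('arm_to_pos_wait', anonymous=True)
tl = TransformListener()
p = PointStamped()
p.header.frame_id = '/base_footprint'
p.header.stamp = rospy.Time(0)  # Time(0): transform using the latest available transform
p.point.x, p.point.y, p.point.z = 1.0, 0.5, 0.0
tl.waitForTransform('map', '/base_footprint', rospy.Time(0), rospy.Duration(4.0))
print(tl.transformPoint('map', p))
# -------------------------------------------------------------------------------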
Example #37
class ray_to_points(object):
  # Alright, this thing is going to take in a ray (in the camera frame) from
  # a monocular camera or a stereo region lacking a disparity match. It's going
  # to intersect that ray with a presumed flat ground plane and generate a point
  # cloud around that intersection point that incorporates some of our
  # geometrical uncertainty (most notably, pitch)

  def __init__(self):
    rospy.init_node('ray_to_points',log_level=rospy.DEBUG)

    self.named_point_sub = rospy.Subscriber('named_point', NamedPoint, self.handle_named_point)

    self.points_pub = rospy.Publisher('points', PointCloud2)
    self.named_point_pub = rospy.Publisher('point', NamedPoint)
    self.marker_pub = rospy.Publisher('marker', Marker)

    self.fence_line_sub = message_filters.Subscriber('fence_line',PolygonStamped)
    self.cache = message_filters.Cache(self.fence_line_sub,120)

    self.tf = TransformListener()
    rospy.sleep(2.0)

    self.pitch_error = rospy.get_param("~pitch_error",0.1)
    self.yaw_error = rospy.get_param("~yaw_error",0.1)

    self.count = 0

  def handle_named_point(self, point_in):
    rospy.logdebug("handle_named_point x: %s y: %s z: %s",
        point_in.point.x,
        point_in.point.y,
        point_in.point.z)
    point_stamped = PointStamped()
    point_stamped.header = point_in.header
    point_stamped.point = point_in.point

    ground_named_point, odom_named_point = self.cast_ray(point_stamped,self.tf,point_in.name)
    intersection = self.check_fence_intersection(ground_named_point)
    rospy.logdebug("ground_named_point %s",ground_named_point)
    rospy.logdebug("odom_named_point %s",odom_named_point)
    if ground_named_point.point.x > 25.0 or np.abs(ground_named_point.point.y) > 15.0:
      return
    elif intersection:
      rospy.logdebug("Detection intersects fence")
      return
    self.named_point_pub.publish(odom_named_point)
    self.send_marker(odom_named_point)

  def check_fence_intersection(self, ground_named_point):
      # Line-line intersection of the fence line and the line between base_link
      # and the cast DSLR detection. If there is an intersection, and it lies
      # between base_link and the detection, reject the detection
      fence_line_msg = self.cache.getElemAfterTime(ground_named_point.header.stamp)
      if fence_line_msg is None:
          return False
      x1,y1 = 0.0,0.0
      x2,y2 = ground_named_point.point.x,ground_named_point.point.y
      x3,y3 = fence_line_msg.polygon.points[0].x,fence_line_msg.polygon.points[0].y
      x4,y4 = fence_line_msg.polygon.points[1].x,fence_line_msg.polygon.points[1].y

      px = ((x1*y2-y1*x2)*(x3-x4) - (x1-x2)*(x3*y4-y3*x4))/((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4))
      py = ((x1*y2-y1*x2)*(y3-y4) - (y1-y2)*(x3*y4-y3*x4))/((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4))

      if (np.abs(px) < np.abs(x2)) and ((y2<py<0) or (0<py<y2)):
          return True
      else:
          return False
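  # Worked example for check_fence_intersection(): with the robot at (0, 0), a
  # detection at (4, 2) and a fence segment from (2, -1) to (2, 3), the two
  # lines intersect at (2, 1); that point lies between the robot and the
  # detection (|px| < |x2| and 0 < py < y2), so the detection is rejected.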

  def send_marker(self, named_pt):
    m=Marker()
    m.header = copy.deepcopy(named_pt.header)
    m.type=Marker.CYLINDER
    m.pose.position = named_pt.point
    m.pose.orientation.x=0.707
    m.pose.orientation.y=0.0
    m.pose.orientation.z=0.0
    m.pose.orientation.w=0.707
    m.scale.x=0.2
    m.scale.y=0.2
    m.scale.z=0.2
    m.color.r=0.8
    m.color.g=0.8
    m.color.b=0.8
    m.color.a=1.0
    m.id = self.count
    #m.text=named_pt.name
    self.marker_pub.publish(m)
    self.count += 1

    t=Marker()
    t.header = copy.deepcopy(named_pt.header)
    t.type = Marker.TEXT_VIEW_FACING
    # copy the point so bumping z for the label does not modify named_pt
    t.pose.position = copy.deepcopy(named_pt.point)
    t.pose.position.z += 0.1
    t.pose.orientation.x=0.707
    t.pose.orientation.y=0.0
    t.pose.orientation.z=0.0
    t.pose.orientation.w=0.707
    t.scale.x=0.2
    t.scale.y=0.2
    t.scale.z=0.2
    t.color.r=0.8
    t.color.g=0.8
    t.color.b=0.8
    t.color.a=1.0
    t.text = named_pt.name
    t.id = self.count
    self.marker_pub.publish(t)
    self.count += 1

  def cast_ray(self, point_in, tf, name):
    base_link_point = tf.transformPoint('/base_link', point_in)
    t = tf.getLatestCommonTime('/base_link', point_in.header.frame_id)
    pos, quat = tf.lookupTransform('/base_link', point_in.header.frame_id, t)
    height = pos[2]
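    # (added comment) the slopes below are the ray's horizontal travel per metre
    # of vertical drop; scaling by the camera height gives the z = 0 ground
    # intersection in base_link.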

    x_slope = (base_link_point.point.x - pos[0])/(pos[2]-base_link_point.point.z)
    y_slope = (base_link_point.point.y - pos[1])/(pos[2]-base_link_point.point.z)

    ground_point = np.array([0.,0.,0.])
    ground_point[0] = x_slope*height + pos[0]
    ground_point[1] = y_slope*height + pos[1]

    ground_named_point = NamedPoint()
    ground_named_point.point.x = ground_point[0]
    ground_named_point.point.y = ground_point[1]
    ground_named_point.point.z = ground_point[2]
    ground_named_point.header = point_in.header
    ground_named_point.header.frame_id = 'base_link'
    ground_named_point.header.stamp = point_in.header.stamp
    ground_named_point.name = name

    odom_named_point = NamedPoint()
    odom_point = self.tf.transformPoint('/odom',ground_named_point)
    odom_named_point.point = odom_point.point
    odom_named_point.header = point_in.header
    odom_named_point.header.frame_id = "/odom"
    odom_named_point.header.stamp = point_in.header.stamp
    odom_named_point.name = name

    return ground_named_point, odom_named_point

  def make_point_cloud(self, point):
    # Take a vector, nominally [x,y,1], and apply small rotations about x (pitch)
    # and about y (yaw) in the base_link frame. This makes a frustum that
    # can be sampled for a point cloud
    p = self.pitch_error
    pitch_mat = np.array([[1., 0., 0.],[0., np.cos(p), -np.sin(p)],[0., np.sin(p), np.cos(p)]])
    y = self.yaw_error
    yaw_mat = np.array([[np.cos(y), 0., np.sin(y)],[0., 1., 0.],[-np.sin(y), 0., np.cos(y)]])
    vec = np.array([point.x, point.y, point.z], dtype=np.float64)
    # A rotation matrix is orthogonal, so its transpose is the rotation by the
    # opposite angle; the four frustum corners combine +/- pitch with +/- yaw.
    down_left = np.dot(pitch_mat, np.dot(yaw_mat, vec))
    down_right = np.dot(pitch_mat, np.dot(yaw_mat.T, vec))
    up_left = np.dot(pitch_mat.T, np.dot(yaw_mat, vec))
    up_right = np.dot(pitch_mat.T, np.dot(yaw_mat.T, vec))
    return down_left, down_right, up_left, up_right
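# A minimal usage sketch (my addition, not from the original example) showing
# how the frustum corners from make_point_cloud() could be packed into a
# PointCloud2 on the node's 'points' publisher. It assumes `node` is a
# constructed ray_to_points instance and that rospy is already initialized.
from sensor_msgs import point_cloud2 as pc2_util
from std_msgs.msg import Header

def publish_frustum_corners(node, point, frame_id='base_link'):
    corners = node.make_point_cloud(point)
    header = Header(stamp=rospy.Time.now(), frame_id=frame_id)
    cloud = pc2_util.create_cloud_xyz32(header, [list(c) for c in corners])
    node.points_pub.publish(cloud)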
Example #38
0
class DepthImageCreator(object):
    def __init__(self, use_depth_only):
        self.use_depth_only = use_depth_only
        self.depth_image_lock = Lock()
        self.image_list_lock = Lock()
        self.image_list = []
        self.image_list_max_size = 100
        self.downsample_factor = 2
        self.tf = TransformListener()
        rospy.Subscriber("/color_camera/camera_info",
                         CameraInfo,
                         self.process_camera_info,
                         queue_size=10)
        rospy.Subscriber("/point_cloud",
                         PointCloud,
                         self.process_point_cloud,
                         queue_size=10)
        rospy.Subscriber("/color_camera/image_raw/compressed",
                         CompressedImage,
                         self.process_image,
                         queue_size=10)
        self.clicked_point_pub = rospy.Publisher("/clicked_point",
                                                 PointStamped,
                                                 queue_size=10)
        self.camera_info = None
        self.P = None
        self.depth_image = None
        self.image = None
        self.last_image_timestamp = None
        self.click_timestamp = None
        self.depth_timestamp = None
        cv2.namedWindow("depth_feed")
        cv2.namedWindow("image_feed")
        cv2.namedWindow("combined_feed")
        cv2.setMouseCallback('image_feed', self.handle_click)
        cv2.setMouseCallback('combined_feed', self.handle_combined_click)

    def handle_click(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            self.click_timestamp = self.last_image_timestamp
            self.click_coords = (x * self.downsample_factor,
                                 y * self.downsample_factor)

    def process_image(self, msg):
        self.image_list_lock.acquire()
        np_arr = np.fromstring(msg.data, np.uint8)
        self.last_image_timestamp = msg.header.stamp
        self.image = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
        if len(self.image_list) == self.image_list_max_size:
            self.image_list.pop(0)
        self.image_list.append((msg.header.stamp, self.image))
        self.image_list_lock.release()

    def process_camera_info(self, msg):
        self.camera_info = msg
        self.P = np.array(msg.P).reshape((3, 4))
        self.K = np.array(msg.K).reshape((3, 3))
        # TODO: this is necessary due to a mistake in intrinsics_server.py
        self.D = np.array([msg.D[0], msg.D[1], 0, 0, msg.D[2]])

    def get_nearest_image_temporally(self, timestamp):
        self.image_list_lock.acquire()
        diff_list = []
        for im_stamp, image in self.image_list:
            diff_list.append((abs((im_stamp - timestamp).to_sec()), image))
        closest_temporally = min(diff_list, key=lambda t: t[0])
        print closest_temporally[0]
        self.image_list_lock.release()
        return closest_temporally[1]

    def handle_combined_click(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            try:
                self.depth_image_lock.acquire()
                click_coords = (x * self.downsample_factor,
                                y * self.downsample_factor)
                distances = []
                for i in range(self.projected_points.shape[0]):
                    dist = (self.projected_points[i, 0, 0] - click_coords[0]
                            )**2 + (self.projected_points[i, 0, 1] -
                                    click_coords[1])**2
                    distances.append(dist)
                three_d_coord = self.points_3d[:, np.argmin(distances)]

                # again, we have to reshuffle the coordinates due to differences in ROS Tango coordinate systems
                point_msg = PointStamped(header=Header(
                    stamp=self.depth_image_timestamp, frame_id="depth_camera"),
                                         point=Point(y=three_d_coord[0],
                                                     z=three_d_coord[1],
                                                     x=three_d_coord[2]))
                self.tf.waitForTransform("depth_camera", "odom",
                                         self.depth_image_timestamp,
                                         rospy.Duration(1.0))
                transformed_coord = self.tf.transformPoint('odom', point_msg)
                self.clicked_point_pub.publish(transformed_coord)
                self.depth_image_lock.release()
            except Exception as ex:
                print "Encountered an errror! ", ex
                self.depth_image_lock.release()

    def process_point_cloud(self, msg):
        self.depth_image_lock.acquire()
        try:
            if self.P is None:
                self.depth_image_lock.release()
                return
            self.depth_image_timestamp = msg.header.stamp
            self.depth_image = np.zeros(
                (self.camera_info.height, self.camera_info.width, 3),
                dtype=np.uint8)
            self.points_3d = np.zeros((3, len(msg.points))).astype(np.float32)
            depths = []
            for i, p in enumerate(msg.points):
                # this is weird due to mismatches between Tango coordinate system and ROS
                self.points_3d[:, i] = np.array([p.y, p.z, p.x])
                depths.append(p.x)
            self.projected_points, dc = cv2.projectPoints(
                self.points_3d.T, (0, 0, 0), (0, 0, 0), self.K, self.D)

            if self.click_timestamp is not None and msg.header.stamp > self.click_timestamp:
                distances = []
                for i in range(self.projected_points.shape[0]):
                    dist = (self.projected_points[i, 0, 0] -
                            self.click_coords[0])**2 + (
                                self.projected_points[i, 0, 1] -
                                self.click_coords[1])**2
                    distances.append(dist)
                three_d_coord = self.points_3d[:, np.argmin(distances)]
                # again, we have to reshuffle the coordinates due to differences in ROS Tango coordinate systems
                point_msg = PointStamped(header=Header(
                    stamp=msg.header.stamp, frame_id="depth_camera"),
                                         point=Point(y=three_d_coord[0],
                                                     z=three_d_coord[1],
                                                     x=three_d_coord[2]))
                transformed_coord = self.tf.transformPoint('odom', point_msg)
                self.clicked_point_pub.publish(transformed_coord)
                self.click_timestamp = None

            # do equalization
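            # (added note) rank-based equalization: each point gets its rank in the
            # descending depth ordering, scaled to [0, 1], so nearer points are drawn
            # brighter regardless of the absolute depth range.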
            depths_equalized = np.zeros((len(depths), 1))
            for idx, (i, depth) in enumerate(
                    sorted(enumerate(depths), key=lambda x: -x[1])):
                depths_equalized[i] = idx / float(len(depths) - 1)
            for i in range(self.projected_points.shape[0]):
                if not (np.isnan(self.projected_points[i, 0, 0])) and not (
                        np.isnan(self.projected_points[i, 0, 1])):
                    self.depth_image[int(self.projected_points[i, 0, 1]),
                                     int(self.projected_points[
                                         i, 0, 0]), :] = int(
                                             depths_equalized[i] * 255.0)
            self.depth_image_lock.release()
        except Exception as ex:
            print "Encountered an errror! ", ex
            self.depth_image_lock.release()

    def run(self):
        r = rospy.Rate(10)
        while not (rospy.is_shutdown()):
            cv2.waitKey(5)
            if self.depth_image is not None:
                # dilate the depth image for display since it is so sparse
                if not (self.depth_image_lock.locked()):
                    kernel = np.ones((5, 5), 'uint8')
                    self.depth_image_lock.acquire()
                    cv2.imshow(
                        "depth_feed",
                        cv2.resize(cv2.dilate(self.depth_image, kernel),
                                   (self.depth_image.shape[1] /
                                    self.downsample_factor,
                                    self.depth_image.shape[0] /
                                    self.downsample_factor)))
                    self.depth_image_lock.release()

            if self.image is not None:
                cv2.imshow(
                    "image_feed",
                    cv2.resize(self.image,
                               (self.image.shape[1] / self.downsample_factor,
                                self.image.shape[0] / self.downsample_factor)))
            if not self.use_depth_only and self.image is not None and self.depth_image is not None:
                kernel = np.ones((3, 3), 'uint8')
                self.depth_image_lock.acquire()
                nearest_image = self.get_nearest_image_temporally(
                    self.depth_image_timestamp)
                ret, depth_threshed = cv2.threshold(self.depth_image, 1, 255,
                                                    cv2.THRESH_BINARY)
                combined_img = (cv2.dilate(depth_threshed, kernel) * 0.2 +
                                nearest_image * 0.8).astype(dtype=np.uint8)
                cv2.imshow(
                    "combined_feed",
                    cv2.resize(combined_img,
                               (self.image.shape[1] / self.downsample_factor,
                                self.image.shape[0] / self.downsample_factor)))

                self.depth_image_lock.release()

            if self.depth_image is not None and self.use_depth_only:
                kernel = np.ones((5, 5), 'uint8')
                self.depth_image_lock.acquire()
                ret, depth_threshed = cv2.threshold(self.depth_image, 1, 255,
                                                    cv2.THRESH_BINARY)
                combined_img = (cv2.dilate(depth_threshed,
                                           kernel)).astype(dtype=np.uint8)
                cv2.imshow(
                    "combined_feed",
                    cv2.resize(
                        combined_img,
                        (combined_img.shape[1] / self.downsample_factor,
                         combined_img.shape[0] / self.downsample_factor)))

                self.depth_image_lock.release()

            r.sleep()
Example #39
0
class DepthImageCreator(object):
	def __init__(self, use_depth_only):
		self.use_depth_only = use_depth_only
		self.depth_image_lock = Lock()
		self.image_list_lock = Lock()
		self.image_list = []
		self.image_list_max_size = 100
		self.downsample_factor = 2
		self.tf = TransformListener()
		rospy.Subscriber("/color_camera/camera_info",
						 CameraInfo,
						 self.process_camera_info,
						 queue_size=10)
		rospy.Subscriber("/point_cloud",
						 PointCloud,
						 self.process_point_cloud,
						 queue_size=10)
		rospy.Subscriber("/color_camera/image_raw/compressed",
						 CompressedImage,
						 self.process_image,
						 queue_size=10)
		self.clicked_point_pub = rospy.Publisher("/clicked_point",PointStamped,queue_size=10)
		self.camera_info = None
		self.P = None
		self.depth_image = None
		self.image = None
		self.last_image_timestamp = None
		self.click_timestamp = None
		self.depth_timestamp = None
		cv2.namedWindow("depth_feed")
		cv2.namedWindow("image_feed")
		cv2.namedWindow("combined_feed")
		cv2.setMouseCallback('image_feed',self.handle_click)
		cv2.setMouseCallback('combined_feed',self.handle_combined_click)

	def handle_click(self,event,x,y,flags,param):
		if event == cv2.EVENT_LBUTTONDOWN:
			self.click_timestamp = self.last_image_timestamp
			self.click_coords = (x*self.downsample_factor,y*self.downsample_factor)

	def process_image(self,msg):
		self.image_list_lock.acquire()
		np_arr = np.fromstring(msg.data, np.uint8)
		self.last_image_timestamp = msg.header.stamp
		self.image = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
		if len(self.image_list) == self.image_list_max_size:
			self.image_list.pop(0)
		self.image_list.append((msg.header.stamp,self.image))
		self.image_list_lock.release()

	def process_camera_info(self, msg):
		self.camera_info = msg
		self.P = np.array(msg.P).reshape((3,4))
		self.K = np.array(msg.K).reshape((3,3))
		# TODO: this is necessary due to a mistake in intrinsics_server.py
		self.D = np.array([msg.D[0],msg.D[1],0,0,msg.D[2]])

	def get_nearest_image_temporally(self,timestamp):
		self.image_list_lock.acquire()
		diff_list = []
		for im_stamp,image in self.image_list:
			diff_list.append((abs((im_stamp-timestamp).to_sec()),image))
		closest_temporally = min(diff_list,key=lambda t: t[0])
		print closest_temporally[0]
		self.image_list_lock.release()
		return closest_temporally[1]

	def handle_combined_click(self,event,x,y,flags,param):
		if event == cv2.EVENT_LBUTTONDOWN:
			try:
				self.depth_image_lock.acquire()
				click_coords = (x*self.downsample_factor,y*self.downsample_factor)
				distances = []
				for i in range(self.projected_points.shape[0]):
					dist = (self.projected_points[i,0,0] - click_coords[0])**2 + (self.projected_points[i,0,1] - click_coords[1])**2
					distances.append(dist)
				three_d_coord = self.points_3d[:,np.argmin(distances)]

				# again, we have to reshuffle the coordinates due to differences in ROS Tango coordinate systems
				point_msg = PointStamped(header=Header(stamp=self.depth_image_timestamp,
													   frame_id="depth_camera"),
										 point=Point(y=three_d_coord[0],
												 	 z=three_d_coord[1],
												 	 x=three_d_coord[2]))
				self.tf.waitForTransform("depth_camera",
										 "odom",
										 self.depth_image_timestamp,
										 rospy.Duration(1.0))
				transformed_coord = self.tf.transformPoint('odom', point_msg)
				self.clicked_point_pub.publish(transformed_coord)
				self.depth_image_lock.release()
			except Exception as ex:
				print "Encountered an errror! ", ex
				self.depth_image_lock.release()


	def process_point_cloud(self, msg):
		self.depth_image_lock.acquire()
		try:
			if self.P is None:
				self.depth_image_lock.release()
				return
			self.depth_image_timestamp = msg.header.stamp
			self.depth_image = np.zeros((self.camera_info.height, self.camera_info.width, 3),dtype=np.uint8)
			self.points_3d = np.zeros((3,len(msg.points))).astype(np.float32)
			depths = []
			for i,p in enumerate(msg.points):
				# this is weird due to mismatches between Tango coordinate system and ROS
				self.points_3d[:,i] = np.array([p.y, p.z, p.x])
				depths.append(p.x)
			self.projected_points, dc = cv2.projectPoints(self.points_3d.T,
												 		  (0,0,0),
												 		  (0,0,0),
												 		  self.K,
														  self.D)

			if self.click_timestamp is not None and msg.header.stamp > self.click_timestamp:
				distances = []
				for i in range(self.projected_points.shape[0]):
					dist = (self.projected_points[i,0,0] - self.click_coords[0])**2 + (self.projected_points[i,0,1] - self.click_coords[1])**2
					distances.append(dist)
				three_d_coord = self.points_3d[:,np.argmin(distances)]
				# again, we have to reshuffle the coordinates due to differences in ROS Tango coordinate systems
				point_msg = PointStamped(header=Header(stamp=msg.header.stamp,
													   frame_id="depth_camera"),
										 point=Point(y=three_d_coord[0],
													 z=three_d_coord[1],
													 x=three_d_coord[2]))
				transformed_coord = self.tf.transformPoint('odom', point_msg)
				self.clicked_point_pub.publish(transformed_coord)
				self.click_timestamp = None

			# do equalization
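			# (added note) rank-based equalization: each point gets its rank in the
			# descending depth ordering, scaled to [0, 1], so nearer points are drawn
			# brighter regardless of the absolute depth range.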
			depths_equalized = np.zeros((len(depths),1))
			for idx,(i,depth) in enumerate(sorted(enumerate(depths), key=lambda x: -x[1])):
				depths_equalized[i] = idx/float(len(depths)-1)
			for i in range(self.projected_points.shape[0]):
				if not(np.isnan(self.projected_points[i,0,0])) and not(np.isnan(self.projected_points[i,0,1])):
					self.depth_image[int(self.projected_points[i,0,1]),int(self.projected_points[i,0,0]),:] = int(depths_equalized[i]*255.0)
			self.depth_image_lock.release()
		except Exception as ex:
			print "Encountered an errror! ", ex
			self.depth_image_lock.release()

	def run(self):
		r = rospy.Rate(10)
		while not(rospy.is_shutdown()):
			cv2.waitKey(5)
			if self.depth_image is not None:
				# dilate the depth image for display since it is so sparse
				if not(self.depth_image_lock.locked()):
					kernel = np.ones((5,5),'uint8')
					self.depth_image_lock.acquire()
					cv2.imshow("depth_feed", cv2.resize(cv2.dilate(self.depth_image, kernel),(self.depth_image.shape[1]/self.downsample_factor,
																	  					  self.depth_image.shape[0]/self.downsample_factor)))
					self.depth_image_lock.release()

			if self.image is not None:
				cv2.imshow("image_feed", cv2.resize(self.image,(self.image.shape[1]/self.downsample_factor,
															self.image.shape[0]/self.downsample_factor)))
			if not(self.use_depth_only) and self.image is not None and self.depth_image is not None:
				kernel = np.ones((3,3),'uint8')
				self.depth_image_lock.acquire()
				nearest_image = self.get_nearest_image_temporally(self.depth_image_timestamp)
				ret, depth_threshed = cv2.threshold(self.depth_image,1,255,cv2.THRESH_BINARY)
				combined_img = (cv2.dilate(depth_threshed,kernel)*0.2 + nearest_image*0.8).astype(dtype=np.uint8)
				cv2.imshow("combined_feed", cv2.resize(combined_img,(self.image.shape[1]/self.downsample_factor,
													   self.image.shape[0]/self.downsample_factor)))

				self.depth_image_lock.release()

			if self.depth_image is not None and self.use_depth_only:
				kernel = np.ones((5,5),'uint8')
				self.depth_image_lock.acquire()
				ret, depth_threshed = cv2.threshold(self.depth_image,1,255,cv2.THRESH_BINARY)
				combined_img = (cv2.dilate(depth_threshed,kernel)).astype(dtype=np.uint8)
				cv2.imshow("combined_feed", cv2.resize(combined_img,(combined_img.shape[1]/self.downsample_factor,
													   combined_img.shape[0]/self.downsample_factor)))

				self.depth_image_lock.release()

			r.sleep()
Example #40
0
class TestMotionExecutionBuffer(unittest.TestCase):

    def setUp(self):

        rospy.init_node('test_motion_execution_buffer')
        
        self.tf = TransformListener()        

        self.obj_pub = rospy.Publisher('collision_object',CollisionObject,latch=True)
        
        self.move_arm_action_client = actionlib.SimpleActionClient("move_right_arm", MoveArmAction)

        self.move_arm_action_client.wait_for_server()

        obj1 = CollisionObject()
    
        obj1.header.stamp = rospy.Time.now()
        obj1.header.frame_id = "base_link"
        obj1.id = "obj1";
        obj1.operation.operation = arm_navigation_msgs.msg.CollisionObjectOperation.ADD
        obj1.shapes = [Shape() for _ in range(1)]
        obj1.shapes[0].type = Shape.CYLINDER
        obj1.shapes[0].dimensions = [float() for _ in range(2)]
        obj1.shapes[0].dimensions[0] = .1
        obj1.shapes[0].dimensions[1] = 1.5
        obj1.poses = [Pose() for _ in range(1)]
        obj1.poses[0].position.x = .6
        obj1.poses[0].position.y = -.6
        obj1.poses[0].position.z = .75
        obj1.poses[0].orientation.x = 0
        obj1.poses[0].orientation.y = 0
        obj1.poses[0].orientation.z = 0
        obj1.poses[0].orientation.w = 1
        
        self.obj_pub.publish(obj1)

        rospy.sleep(2.0)
        
    def tearDown(self):
        obj1 = CollisionObject()
        obj1.header.stamp = rospy.Time.now()
        obj1.header.frame_id = "base_link"
        obj1.id = "all";
        obj1.operation.operation = arm_navigation_msgs.msg.CollisionObjectOperation.REMOVE

        self.obj_pub.publish(obj1)
        
        rospy.sleep(2.0)

    def testMotionExecutionBuffer(self):
        
        global padd_name
        global extra_buffer
        
        #too much trouble to read for now
        allow_padd = .05#rospy.get_param(padd_name)
        

        joint_names = ['%s_%s' % ('r', j) for j in ['shoulder_pan_joint', 'shoulder_lift_joint', 'upper_arm_roll_joint', 'elbow_flex_joint', 'forearm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']]
        goal = MoveArmGoal()

        goal.motion_plan_request.goal_constraints.joint_constraints = [JointConstraint() for i in range(len(joint_names))]

        goal.motion_plan_request.group_name = "right_arm"
        goal.motion_plan_request.num_planning_attempts = 1
        goal.motion_plan_request.allowed_planning_time = rospy.Duration(5.)
        goal.motion_plan_request.planner_id = ""
        goal.planner_service_name = "ompl_planning/plan_kinematic_path"

        goal.motion_plan_request.goal_constraints.joint_constraints = [JointConstraint() for i in range(len(joint_names))]
        for i in range(len(joint_names)):
            goal.motion_plan_request.goal_constraints.joint_constraints[i].joint_name = joint_names[i]
            goal.motion_plan_request.goal_constraints.joint_constraints[i].position = 0.0
            goal.motion_plan_request.goal_constraints.joint_constraints[i].tolerance_above = 0.08
            goal.motion_plan_request.goal_constraints.joint_constraints[i].tolerance_below = 0.08

        for z in range(2):

            min_dist = 1000
            
            if(z%2 == 0):
                goal.motion_plan_request.goal_constraints.joint_constraints[0].position = -2.0
                goal.motion_plan_request.goal_constraints.joint_constraints[3].position = -0.2
                goal.motion_plan_request.goal_constraints.joint_constraints[5].position = -0.2
            else:
                goal.motion_plan_request.goal_constraints.joint_constraints[0].position = 0.0
                goal.motion_plan_request.goal_constraints.joint_constraints[3].position = -0.2
                goal.motion_plan_request.goal_constraints.joint_constraints[5].position = -0.2

            for x in range(3):

                self.move_arm_action_client.send_goal(goal)

                r = rospy.Rate(10)

                while True:
                    cur_state = self.move_arm_action_client.get_state()
                    if(cur_state != actionlib_msgs.msg.GoalStatus.ACTIVE and
                       cur_state != actionlib_msgs.msg.GoalStatus.PENDING):
                        break

                    #getting right finger tip link in base_link frame
                    t = self.tf.getLatestCommonTime("/base_link", "/r_gripper_r_finger_tip_link") 
                    finger_point = PointStamped()
                    finger_point.header.frame_id = "/r_gripper_r_finger_tip_link"
                    finger_point.header.stamp = t
                    finger_point.point.x = 0
                    finger_point.point.y = 0
                    finger_point.point.z = 0
                    finger_point_base = self.tf.transformPoint("base_link",finger_point)

                    distance = math.sqrt(math.pow(finger_point_base.point.x-.6,2)+math.pow(finger_point_base.point.y+.6,2))

                    # pole is .1 in diameter
                    distance -= .1

                    if distance < min_dist:
                        rospy.loginfo("X: %g Y: %g Dist: %g",finger_point_base.point.x,finger_point_base.point.y, distance)  
                        min_dist = distance
                        
                    r.sleep()

                end_state = self.move_arm_action_client.get_state()

                if(end_state == actionlib_msgs.msg.GoalStatus.SUCCEEDED): break

            rospy.loginfo("Min dist %d is %g",z,min_dist)

            #should be a .5 buffer, allowing .1 buffer
            self.failIf(min_dist < (allow_padd-extra_buffer))

            final_state = self.move_arm_action_client.get_state()

            self.assertEqual(final_state,  actionlib_msgs.msg.GoalStatus.SUCCEEDED)

    def testAllowedNotAllowedInitialContact(self):

        #adding object in collision with base

        obj2 = CollisionObject()

        obj2.header.stamp = rospy.Time.now()
        obj2.header.frame_id = "base_link"
        obj2.id = "base_object"
        obj2.operation.operation = arm_navigation_msgs.msg.CollisionObjectOperation.ADD
        obj2.shapes = [Shape() for _ in range(1)]
        obj2.shapes[0].type = Shape.BOX
        obj2.shapes[0].dimensions = [float() for _ in range(3)]
        obj2.shapes[0].dimensions[0] = .1
        obj2.shapes[0].dimensions[1] = .1
        obj2.shapes[0].dimensions[2] = .1
        obj2.poses = [Pose() for _ in range(1)]
        obj2.poses[0].position.x = 0
        obj2.poses[0].position.y = 0
        obj2.poses[0].position.z = 0
        obj2.poses[0].orientation.x = 0
        obj2.poses[0].orientation.y = 0
        obj2.poses[0].orientation.z = 0
        obj2.poses[0].orientation.w = 1

        self.obj_pub.publish(obj2)

        rospy.sleep(5.)

        joint_names = ['%s_%s' % ('r', j) for j in ['shoulder_pan_joint', 'shoulder_lift_joint', 'upper_arm_roll_joint', 'elbow_flex_joint', 'forearm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']]
        goal = MoveArmGoal()

        goal.motion_plan_request.goal_constraints.joint_constraints = [JointConstraint() for i in range(len(joint_names))]

        goal.motion_plan_request.group_name = "right_arm"
        goal.motion_plan_request.num_planning_attempts = 1
        goal.motion_plan_request.allowed_planning_time = rospy.Duration(5.)
        goal.motion_plan_request.planner_id = ""
        goal.planner_service_name = "ompl_planning/plan_kinematic_path"

        goal.motion_plan_request.goal_constraints.joint_constraints = [JointConstraint() for i in range(len(joint_names))]
        for i in range(len(joint_names)):
            goal.motion_plan_request.goal_constraints.joint_constraints[i].joint_name = joint_names[i]
            goal.motion_plan_request.goal_constraints.joint_constraints[i].position = 0.0
            goal.motion_plan_request.goal_constraints.joint_constraints[i].tolerance_above = 0.08
            goal.motion_plan_request.goal_constraints.joint_constraints[i].tolerance_below = 0.08

        goal.motion_plan_request.goal_constraints.joint_constraints[0].position = -2.0
        goal.motion_plan_request.goal_constraints.joint_constraints[3].position = -0.2
        goal.motion_plan_request.goal_constraints.joint_constraints[5].position = -0.2

        self.move_arm_action_client.send_goal(goal)

        r = rospy.Rate(10)

        while True:
            cur_state = self.move_arm_action_client.get_state()
            if(cur_state != actionlib_msgs.msg.GoalStatus.ACTIVE and
               cur_state != actionlib_msgs.msg.GoalStatus.PENDING):
                break
            r.sleep()

        #should still have succeeded
        final_state = self.move_arm_action_client.get_state()
        self.failIf(final_state != actionlib_msgs.msg.GoalStatus.SUCCEEDED)

        # but we can still overwrite
        coll = CollisionOperation()
        coll.object1 = "base_link"
        coll.object2 = coll.COLLISION_SET_OBJECTS
        coll.operation = coll.ENABLE

        goal.motion_plan_request.ordered_collision_operations.collision_operations.append(coll)

        goal.motion_plan_request.goal_constraints.joint_constraints[0].position = 0.0
        goal.motion_plan_request.goal_constraints.joint_constraints[3].position = -0.2
        goal.motion_plan_request.goal_constraints.joint_constraints[5].position = -0.2

        self.move_arm_action_client.send_goal(goal)

        r = rospy.Rate(10)

        while True:
            cur_state = self.move_arm_action_client.get_state()
            if(cur_state != actionlib_msgs.msg.GoalStatus.ACTIVE and
               cur_state != actionlib_msgs.msg.GoalStatus.PENDING):
                break
            r.sleep()

        #with collisions enabled again, this should not have succeeded
        final_state = self.move_arm_action_client.get_state()
        self.failIf(final_state == actionlib_msgs.msg.GoalStatus.SUCCEEDED)
Example #41
0
class DepthImageCreator(object):
    def __init__(self):
        self.cloud_count = 0
        self.bridge = CvBridge()
        self.depth_image_lock = Lock()
        self.image_list_lock = Lock()
        self.image_list = []
        self.image_list_max_size = 100
        self.downsample_factor = 2
        self.tf = TransformListener()
        self.tf_buffer = tf2_ros.Buffer()
        self.tf2_listener = tf2_ros.TransformListener(self.tf_buffer)
        rospy.Subscriber("/color_camera/camera_info",
                         CameraInfo,
                         self.process_camera_info,
                         queue_size=10)
        rospy.Subscriber("/point_cloud",
                         PointCloud2,
                         self.process_point_cloud,
                         queue_size=10)
        rospy.Subscriber("/color_camera/image_raw",
                         Image,
                         self.process_image,
                         queue_size=10)
        self.clicked_point_pub = rospy.Publisher("/clicked_point",
                                                 PointStamped,
                                                 queue_size=10)
        self.camera_info = None
        self.depth_image = None
        self.image = None
        self.depth_timestamp = None
        cv2.namedWindow("combined_feed")
        cv2.setMouseCallback('combined_feed', self.handle_combined_click)

    def process_image(self, msg):
        self.image_list_lock.acquire()
        self.image = self.bridge.imgmsg_to_cv2(msg)

        if len(self.image_list) == self.image_list_max_size:
            self.image_list.pop(0)
        self.image_list.append((msg.header.stamp, self.image))
        self.image_list_lock.release()

    def process_camera_info(self, msg):
        if self.camera_info is not None:
            return
        self.camera_info = msg
        self.K = np.array(msg.K).reshape((3, 3))
        # TODO: this is necessary due to a mistake in intrinsics_server.py
        self.D = np.array([msg.D[0], msg.D[1], 0, 0, msg.D[2]])

    def get_nearest_image_temporally(self, timestamp):
        self.image_list_lock.acquire()
        diff_list = []
        for im_stamp, image in self.image_list:
            diff_list.append((abs(
                (im_stamp - timestamp).to_sec()), image, im_stamp))
        closest_temporally = min(diff_list, key=lambda t: t[0])
        print closest_temporally[0]
        self.image_list_lock.release()
        return closest_temporally[1], closest_temporally[2]

    def handle_combined_click(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            try:
                self.depth_image_lock.acquire()
                click_coords = (x * self.downsample_factor,
                                y * self.downsample_factor)

                distances = [(p[0, 0] - click_coords[0])**2 +
                             (p[0, 1] - click_coords[1])**2
                             for p in self.projected_points]
                three_d_coord = self.points_3d[np.argmin(distances), :]

                # again, we have to reshuffle the coordinates due to differences in ROS Tango coordinate systems
                point_msg = PointStamped(header=Header(
                    stamp=self.depth_image_timestamp, frame_id="color_camera"),
                                         point=Point(x=three_d_coord[0],
                                                     y=three_d_coord[1],
                                                     z=three_d_coord[2]))
                self.tf.waitForTransform("color_camera", "odom",
                                         self.depth_image_timestamp,
                                         rospy.Duration(1.0))
                transformed_coord = self.tf.transformPoint('odom', point_msg)
                self.clicked_point_pub.publish(transformed_coord)
                self.depth_image_lock.release()

            except Exception as ex:
                print "Encountered an errror! ", ex
                self.depth_image_lock.release()

    def process_point_cloud(self, msg):
        self.cloud_count += 1
        if self.cloud_count % 4 != 0:
            return
        # intrinsics are only set once process_camera_info has run
        if getattr(self, 'K', None) is None or getattr(self, 'D', None) is None:
            return
        self.depth_image_lock.acquire()
        try:
            self.depth_image_timestamp = msg.header.stamp
            _, nearest_image_timestamp = self.get_nearest_image_temporally(
                self.depth_image_timestamp)
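            # (added note) lookup_transform_full gives the transform between two frames
            # at two *different* times, chained through a fixed frame (odom here), so the
            # cloud at its stamp can be aligned with the color image taken at another stamp.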
            try:
                transform = self.tf_buffer.lookup_transform_full(
                    target_frame='depth_camera',
                    target_time=msg.header.stamp,
                    source_frame='color_camera',
                    source_time=nearest_image_timestamp,
                    fixed_frame='odom',
                    timeout=rospy.Duration(1.0))
            except Exception as inst:
                print "transform error", inst
                self.depth_image_lock.release()
                return

            msg = do_transform_cloud(msg, transform)

            self.depth_image = np.zeros(
                (self.camera_info.height, self.camera_info.width, 3),
                dtype=np.uint8)
            points = pc2.read_points(msg,
                                     skip_nans=True,
                                     field_names=('x', 'y', 'z', 'c'))
            depths = [p[2] for p in points]
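            # (added note) read_points returns a generator, which the list comprehension
            # above consumed, so it is re-created before building points_3d.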
            points = pc2.read_points(msg,
                                     skip_nans=True,
                                     field_names=('x', 'y', 'z', 'c'))

            self.points_3d = np.asarray([[p[0], p[1], p[2]] for p in points],
                                        dtype=np.float32)
            #self.points_3d = self.points_3d.dot(euler_matrix(0, 0, pi)[0:3,0:3])

            self.projected_points, dc = cv2.projectPoints(
                self.points_3d, (0, 0, 0), (0, 0, 0), self.K, self.D)

            for i in range(self.projected_points.shape[0]):
                if 0 <= int(self.projected_points[i,0,1]) < self.depth_image.shape[0] and \
                   0 <= int(self.projected_points[i,0,0]) < self.depth_image.shape[1] and \
                   not(np.isnan(self.projected_points[i,0,0])) and \
                   not(np.isnan(self.projected_points[i,0,1])):
                    try:
                        self.depth_image[int(self.projected_points[i, 0, 1]),
                                         int(self.projected_points[
                                             i, 0, 0]), :] = 255
                    except:
                        pass
            self.depth_image_lock.release()
        except Exception as ex:
            print "Encountered an errror! ", ex
            self.depth_image_lock.release()

    def run(self):
        kernel = np.ones((3, 3), 'uint8')
        r = rospy.Rate(10)
        processed = set()
        while not (rospy.is_shutdown()):
            cv2.waitKey(5)

            if self.image is not None and self.depth_image is not None:
                self.depth_image_lock.acquire()
                if self.depth_image_timestamp not in processed:
                    nearest_image, _ = self.get_nearest_image_temporally(
                        self.depth_image_timestamp)
                    ret, depth_threshed = cv2.threshold(
                        self.depth_image, 1, 255, cv2.THRESH_BINARY)
                    combined_img = (cv2.dilate(depth_threshed, kernel) * 0.5 +
                                    nearest_image * 0.5).astype(dtype=np.uint8)
                    cv2.imshow(
                        "combined_feed",
                        cv2.resize(
                            combined_img,
                            (self.image.shape[1] / self.downsample_factor,
                             self.image.shape[0] / self.downsample_factor)))

                    processed.add(self.depth_image_timestamp)
                self.depth_image_lock.release()

            r.sleep()
Example #42
0
class Grasper(object):
    """
    This class contains functionality to make the LoCoBot grasp objects placed in front of it.
    """
    def __init__(self,
                 url=MODEL_URL,
                 model_name="model.pth",
                 n_samples=N_SAMPLES,
                 patch_size=PATCH_SIZE,
                 *kargs,
                 **kwargs):
        """ 
        The constructor for :class:`Grasper` class. 

        :param url: Link to the grasp model file
        :param model_name: Name of the path where the grasp model will be saved
        :param n_samples: Number of samples for the grasp sampler
        :param patch_size: Size of the patch for the grasp sampler
        :type url: string
        :type model_name: string
        :type n_samples: int
        :type patch_size: int
        """

        # TODO - use planning_mode=no_plan, its better
        self.robot = Robot(
            "locobot_kobuki",
            arm_config={
                "use_moveit": True,
                "moveit_planner": "ESTkConfigDefault"
            },
        )
        self.grasp_model = GraspModel(model_name=model_name,
                                      url=url,
                                      nsamples=n_samples,
                                      patchsize=patch_size)
        self.pregrasp_height = 0.2
        self.grasp_height = 0.13
        self.default_Q = Quaternion(0.0, 0.0, 0.0, 1.0)
        self.grasp_Q = Quaternion(0.0, 0.707, 0.0, 0.707)
        self.retract_position = list([-1.5, 0.5, 0.3, -0.7, 0.0])
        self.reset_pan = 0.0
        self.reset_tilt = 0.8
        self.n_tries = 5
        self._sleep_time = 2
        self._transform_listener = TransformListener()

    def reset(self):
        """ 
        Resets the arm to its retract position.

        :returns: Success of the reset procedure
        :rtype: bool
        """
        success = False
        for _ in range(self.n_tries):
            success = self.robot.arm.set_joint_positions(self.retract_position)
            if success == True:
                break
        self.robot.gripper.open()
        self.robot.camera.set_pan(self.reset_pan)
        self.robot.camera.set_tilt(self.reset_tilt)
        return success

    def _process_depth(self, cur_depth=None):
        if cur_depth is None:
            cur_depth = self.robot.camera.get_depth()
        cur_depth = cur_depth / 1000.0  # conversion from mm to m
        cur_depth[cur_depth > MAX_DEPTH] = 0.0
        return cur_depth

    def _get_z_mean(self, depth, pt, bb=BB_SIZE):
        sum_z = 0.0
        nps = 0
        for i in range(bb * 2):
            for j in range(bb * 2):
                new_pt = [pt[0] - bb + i, pt[1] - bb + j]
                try:
                    new_z = depth[int(new_pt[0]), int(new_pt[1])]
                    if new_z > MIN_DEPTH:
                        sum_z += new_z
                        nps += 1
                except:
                    pass
        if nps == 0.0:
            return 0.0
        else:
            return sum_z / nps

    def _get_3D_camera(self, pt, norm_z=None):
        assert len(pt) == 2
        cur_depth = self._process_depth()
        z = self._get_z_mean(cur_depth, [pt[0], pt[1]])
        rospy.loginfo("depth of point is : {}".format(z))
        if z == 0.0:
            raise RuntimeError
        if norm_z is not None:
            z = z / norm_z
        u = pt[1]
        v = pt[0]
        P = copy.deepcopy(self.robot.camera.camera_P)
        rospy.loginfo("P is: {}".format(P))
        P_n = np.zeros((3, 3))
        P_n[:, :2] = P[:, :2]
        P_n[:, 2] = P[:, 3] + P[:, 2] * z
        P_n_inv = np.linalg.inv(P_n)
        temp_p = np.dot(P_n_inv, np.array([u, v, 1]))
        temp_p = temp_p / temp_p[-1]
        temp_p[-1] = z
        return temp_p

    def _convert_frames(self, pt):
        assert len(pt) == 3
        rospy.loginfo("Point to convert: {}".format(pt))
        ps = PointStamped()
        ps.header.frame_id = KINECT_FRAME
        ps.point.x, ps.point.y, ps.point.z = pt
        base_ps = self._transform_listener.transformPoint(BASE_FRAME, ps)
        rospy.loginfo("transform : {}".format(
            self._transform_listener.lookupTransform(BASE_FRAME, KINECT_FRAME,
                                                     rospy.Time(0))))
        base_pt = np.array([base_ps.point.x, base_ps.point.y, base_ps.point.z])
        rospy.loginfo("Base point to convert: {}".format(base_pt))
        return base_pt

    def get_3D(self, pt, z_norm=None):
        temp_p = self._get_3D_camera(pt, z_norm)
        rospy.loginfo("temp_p: {}".format(temp_p))
        base_pt = self._convert_frames(temp_p)
        return base_pt

    def compute_grasp(self,
                      dims=[(240, 480), (100, 540)],
                      display_grasp=False):
        """ 
        Runs the grasp model to generate the best predicted grasp.
        
        :param dims: List of tuples of min and max indices of the image axis.
        :param display_grasp: Displays image of the grasp.
        :type dims: list
        :type display_grasp: bool

        :returns: Grasp configuration
        :rtype: list
        """

        img = self.robot.camera.get_rgb()
        img = img[dims[0][0]:dims[0][1], dims[1][0]:dims[1][1]]
        # selected_grasp = [183, 221, -1.5707963267948966, 1.0422693]
        selected_grasp = list(self.grasp_model.predict(img))
        rospy.loginfo("Pixel grasp: {}".format(selected_grasp))
        img_grasp = copy.deepcopy(selected_grasp)
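        # (added note) the model ran on the cropped image, so the crop offsets are added
        # back before converting the pixel location into a 3D point in the base frame.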
        selected_grasp[0] += dims[0][0]
        selected_grasp[1] += dims[1][0]
        selected_grasp[:2] = self.get_3D(selected_grasp[:2])[:2]
        selected_grasp[2] = selected_grasp[2]
        if display_grasp:
            self.grasp_model.display_predicted_image()
            # im_name = '{}.png'.format(time.time())
            # cv2.imwrite('~/Desktop/grasp_images/{}'.format(im_name), self.grasp_model._disp_I)
        return selected_grasp

    def grasp(self, grasp_pose):
        """ 
        Performs manipulation operations to grasp at the desired pose.
        
        :param grasp_pose: Desired grasp pose for grasping.
        :type grasp_pose: list

        :returns: Success of grasping procedure.
        :rtype: bool
        """

        pregrasp_position = [
            grasp_pose[0], grasp_pose[1], self.pregrasp_height
        ]
        # pregrasp_pose = Pose(Point(*pregrasp_position), self.default_Q)
        grasp_angle = self.get_grasp_angle(grasp_pose)
        grasp_position = [grasp_pose[0], grasp_pose[1], self.grasp_height]
        # grasp_pose = Pose(Point(*grasp_position), self.grasp_Q)

        rospy.loginfo(
            "Going to pre-grasp pose:\n\n {} \n".format(pregrasp_position))
        result = self.set_pose(pregrasp_position, roll=grasp_angle)
        if not result:
            return False
        time.sleep(self._sleep_time)

        rospy.loginfo("Going to grasp pose:\n\n {} \n".format(grasp_position))
        result = self.set_pose(grasp_position, roll=grasp_angle)
        if not result:
            return False
        time.sleep(self._sleep_time)

        rospy.loginfo("Closing gripper")
        self.robot.gripper.close()
        time.sleep(self._sleep_time)

        rospy.loginfo("Going to pre-grasp pose")
        result = self.set_pose(pregrasp_position, roll=grasp_angle)
        if not result:
            return False
        time.sleep(self._sleep_time)

        rospy.loginfo("Opening gripper")
        self.robot.gripper.open()
        return True
        time.sleep(self._sleep_time)

    def set_pose(self, position, pitch=DEFAULT_PITCH, roll=0.0):
        """ 
        Sets desired end-effector pose.
        
        :param position: End-effector position to reach.
        :param pitch: Pitch angle of the end-effector.
        :param roll: Roll angle of the end-effector

        :type position: list
        :type pitch: float
        :type roll: float

        :returns: Success of pose setting process.
        :rtype: bool
        """

        success = 0
        for _ in range(self.n_tries):
            position = np.array(position)
            success = self.robot.arm.set_ee_pose_pitch_roll(position=position,
                                                            pitch=pitch,
                                                            roll=roll,
                                                            plan=False,
                                                            numerical=False)
            if success == 1:
                break
        return success

    def get_grasp_angle(self, grasp_pose):
        """ 
        Obtain normalized grasp angle from the grasp pose.

        This is needed since the grasp angle is relative to the end effector.
        
        :param grasp_pose: Desired grasp pose for grasping.
        :type grasp_pose: list

        :returns: Relative grasp angle
        :rtype: float
        """

        cur_angle = np.arctan2(grasp_pose[1], grasp_pose[0])
        delta_angle = grasp_pose[2] + cur_angle
        if delta_angle > np.pi / 2:
            delta_angle = delta_angle - np.pi
        elif delta_angle < -np.pi / 2:
            delta_angle = 2 * np.pi + delta_angle
        return delta_angle

    def exit(self):
        """
        Graceful exit.
        """

        rospy.loginfo("Exiting...")
        self.reset()
        sys.exit(0)

    def signal_handler(self, sig, frame):
        """
        Signal handling function.
        """
        self.exit()
Example #43
0
class Follower():
    def __init__(self, *args):
        self.tf = TransformListener()
        self.client = actionlib.SimpleActionClient(ACTION_CLIENT,
                                                   PointHeadAction)
        self.client.wait_for_server()
        self.person_tracked_pub = rospy.Publisher(AUDIO_TRACKED_TOPIC,
                                                  PointStamped,
                                                  queue_size=10)
        self.marker = rospy.Publisher(MARKER_TOPIC, Marker, queue_size=10)

        self.angle = np.zeros(FILTER_LENGTH)
        self.last_trigger_time = rospy.Time.now()
        # self.last_trigger_time = rospy.Time.now()
        rospy.Subscriber(AUDIO_TOPIC, SoundSourceAngle, self.AudioCallback)
        rospy.spin()

    def AudioCallback(self, msg):
        if msg.is_valid:
            point = PointStamped()
            marker = Marker()
            start_point = Point()
            end_point = Point()
            point.header.stamp = rospy.Time.now()
            point.header.frame_id = 'head_pan_link'
            marker.header.stamp = rospy.Time.now()
            marker.header.frame_id = 'head_pan_link'
            direction = 0

            #rospy.loginfo("Raw angles: %d", msg.angle)
            #rospy.loginfo(type(msg.angle))

            (direction, self.angle) = TemporalMedian(msg.angle, self.angle)

            rospy.loginfo("Filtered angles: %d", direction)

            (x, y) = PolToCart(3.0, (direction / 180.0) * np.pi)

            point.point.x = x
            point.point.y = y
            point.point.z = 0.0

            start_point.x = 0.0
            start_point.y = 0.0
            start_point.z = 0.0
            end_point.x = x
            end_point.y = y
            end_point.z = 0.0

            # define the marker parameters
            # marker is used to illustrate the source direction in RVIZ
            marker.type = marker.ARROW
            marker.action = marker.ADD
            marker.scale.x = 0.1  #shaft width
            marker.scale.y = 0.2  #arrow head width
            marker.scale.z = 1.0
            marker.color.a = 1.0
            marker.pose.orientation.w = 1.0
            marker.points.append(start_point)
            marker.points.append(end_point)
            if rospy.Time.now() - self.last_trigger_time > rospy.Duration(2):
                self.person_tracked_pub.publish(point)
                # Publish so we can see the marker
                self.marker.publish(marker)
                self.SetPointHead(point)

    def SetPointHead(self, point):
        goal = PointHeadGoal()
        corrected_point = self.tf.transformPoint("head_pan_link", point)
        rospy.loginfo(corrected_point)
        goal.target = corrected_point
        goal.min_duration = rospy.Duration(.5)
        goal.max_velocity = 1

        self.client.send_goal(goal)
        self.last_trigger_time = rospy.Time.now()
Example #44
0
class PR2Greeter:
    
    def __init__(self, debug=False, online = True, robot_frame="odom_combined"):
        
        self._tf = TransformListener()
        
        self._online = online
        
        # self.snd_handle = SoundClient()
        
        if self._online:
        
            #self._interface = ROSpeexInterface()
            #self._interface.init()
            self._speech_client = SpeechSynthesisClient_NICT()
            
        else:
            
            self.snd_handle = SoundClient()
        
        rospy.sleep(1)
        
        self.say('Hello world!')
        
        rospy.sleep(1)
        
        self._debug = debug
        self._robot_frame = robot_frame
        
        self._point_sub = rospy.Subscriber('nearest_face', PointStamped, self.face_cb)
        
        self._head_action_cl = actionlib.SimpleActionClient('/head_traj_controller/point_head_action', pr2_controllers_msgs.msg.PointHeadAction)
        self._torso_action_cl = actionlib.SimpleActionClient('/torso_controller/position_joint_action', pr2_controllers_msgs.msg.SingleJointPositionAction)
        
        self._left_arm = MoveGroupCommander("left_arm")
        self._right_arm = MoveGroupCommander("right_arm")
        
        print "r.f.: " + self._left_arm.get_pose_reference_frame()

        self.face = None
        # self.face_from = rospy.Time(0)
        self.face_last_dist = 0
        self.last_invited_at = rospy.Time(0)
        self._person_prob_left = 0
        
        self.l_home_pose = [0.283, 0.295, 0.537, -1.646, 0.468, -1.735]
        
        self.l_wave_1 = [-0.1, 0.6, 1.15, -1.7, -0.97, -1.6]
        self.l_wave_2 = [-0.1, 0.6, 1.15, 1.7, -0.97, 1.6]
        
        self.r_home_pose = [0.124, -0.481, 0.439, -1.548, 0.36, -0.035]
        self.r_advert = [0.521, -0.508, 0.845, -1.548, 0.36, -0.035]
        
        self.no_face_random_delay = None
        
        self._initialized = False
        
        self._timer = rospy.Timer(rospy.Duration(1.0), self.timer)
        
        
        self._move_buff = Queue.Queue()
        
        self._head_buff = Queue.Queue()
        
        self._move_thread = threading.Thread(target=self.movements)
        self._move_thread.daemon = True
        self._move_thread.start()
        
        self._head_thread = threading.Thread(target=self.head)
        self._head_thread.daemon = True
        self._head_thread.start()
        
        self.new_face = False
        self.face_last_dist = 0.0
        
        self.face_counter = 0
        
        self.actions = [self.stretchingAction,
                        self.introduceAction,
                        self.waveHandAction,
                        self.lookAroundAction,
                        self.lookAroundAction,
                        self.lookAroundAction,
                        self.advertAction,
                        self.numberOfFacesAction]
        
        self.goodbye_strings = ["Thanks for stopping by.",
                                "Enjoy the event.",
                                "See you later!",
                                "Have a nice day!"]
        
        self.invite_strings = ["Hello. It's nice to see you.",
                               "Come here and take some flyer.",
                               "I hope you are enjoying the event."]
        
        rospy.loginfo("Ready")
    
    def say(self, text):
        
        if self._online:
        
            #self._interface.say(text, 'en', 'nict')
            data = self._speech_client.request(text, 'en')
            
            try:
            
              tmp_file = open('output_tmp.dat', 'wb')
              tmp_file.write(data)
              tmp_file.close()

              # play sound
              subprocess.check_output(['ffplay', 'output_tmp.dat', '-autoexit', '-nodisp'], stderr=subprocess.STDOUT)
              
            except IOError:
              rospy.logerr('file io error.')
            except OSError:
              rospy.logerr('ffplay is not installed.')
            except subprocess.CalledProcessError:
              rospy.logerr('ffplay return error value.')
            except:
              rospy.logerr('unknown exception.')
            
        else:
            
            self.snd_handle.say(text)
    
    def getRandomFromArray(self, arr):
        
        idx = random.randint(0, len(arr) - 1)
        
        return arr[idx]
    
    def stretchingAction(self):
        
        self.say("I'm bit tired. Let's do some stretching.")
        
        self._torso_action_cl.wait_for_server()
        
        goal = pr2_controllers_msgs.msg.SingleJointPositionGoal()
        
        goal.position = 0.195
        goal.max_velocity = 0.5
        
        try:
        
          self._torso_action_cl.send_goal(goal)
          self._torso_action_cl.wait_for_result(rospy.Duration.from_sec(10.0))
          
        except:
            
            rospy.logerr("torso action error (up)")
        
        # TODO move arms
        
        self.say("I feel much better now!")
        
        goal.position = 0.0
        
        try:
        
          self._torso_action_cl.send_goal(goal)
          self._torso_action_cl.wait_for_result(rospy.Duration.from_sec(10.0))
          
        except:
            
            rospy.logerr("torso action error (down)")
    
    def numberOfFacesAction(self):
        
        string = "Today I already saw " + str(self.face_counter) + "face"
        
        if self.face_counter != 1:
            
            string = string + "s"
            
        string = string + "."
        
        self.say(string)
        rospy.sleep(1)
    
    def advertAction(self):
        
        self.say("Hello. Here are some posters for you.")
        self.go(self._right_arm, self.r_advert)
        rospy.sleep(1)
        
    def introduceAction(self):
        
        self.say("Hello. I'm PR2 robot. Come here to check me.")
        rospy.sleep(1)
        
        
    def waveHandAction(self):
        
        self.say("I'm here. Please come to see me.")
        
        rand = random.randint(1, 3)
        
        for _ in range(rand):
            
            self.wave()
            
        self.go(self._left_arm, self.l_home_pose)
        
        rospy.loginfo("Waving")
        rospy.sleep(1)
        
    def lookAroundAction(self):
        
        self.say("I'm looking for somebody. Please come closer.")
        
        p = PointStamped()
        p.header.stamp = rospy.Time.now()
        p.header.frame_id = "/base_link"
        
        p.point.x = 5.0
        
        sign = random.choice([-1, 1])
        
        p.point.y = sign * random.uniform(1.5, 0.5)
        p.point.z = random.uniform(1.7, 0.2)
        self._head_buff.put((copy.deepcopy(p), 0.1, True))
        
        p.point.y = -1 * sign * random.uniform(1.5, 0.5)
        p.point.z = random.uniform(1.7, 0.2)
        self._head_buff.put((copy.deepcopy(p), 0.1, True))
        
        p.point.y = sign * random.uniform(1.5, 0.5)
        p.point.z = random.uniform(1.7, 0.2)
        self._head_buff.put((copy.deepcopy(p), 0.1, True))
        
        p.point.y = -1 * sign * random.uniform(1.5, 0.5)
        p.point.z = random.uniform(1.7, 0.2)
        self._head_buff.put((copy.deepcopy(p), 0.1, True))
        
        rospy.loginfo("Looking around")
        rospy.sleep(1)
        
    def getPointDist(self, pt):
        
        assert(self.face is not None)
        
        # first, get my position
        p = PoseStamped()
        
        p.header.frame_id = "base_link"
        p.header.stamp = rospy.Time.now() - rospy.Duration(0.5)
        
        p.pose.position.x = 0
        p.pose.position.y = 0
        p.pose.position.z = 0
        p.pose.orientation.x = 0
        p.pose.orientation.y = 0
        p.pose.orientation.z = 0
        p.pose.orientation.w = 1
        
        try:
            
            self._tf.waitForTransform(p.header.frame_id, self._robot_frame, p.header.stamp, rospy.Duration(2))
            p = self._tf.transformPose(self._robot_frame, p)
            
        except:
            
            rospy.logerr("TF error!")
            return None
        
        return sqrt(pow(p.pose.position.x - pt.point.x, 2) + pow(p.pose.position.y - pt.point.y, 2) + pow(p.pose.position.z - pt.point.z, 2))
        
        
        
    def getPoseStamped(self, group, c):
        
        assert(len(c) == 6)
        
        p = PoseStamped()
        
        p.header.frame_id = "base_link"
        p.header.stamp = rospy.Time.now() - rospy.Duration(0.5)
        
        p.pose.position.x = c[0]
        p.pose.position.y = c[1]
        p.pose.position.z = c[2]
        
        quat = tf.transformations.quaternion_from_euler(c[3], c[4], c[5])
        
        p.pose.orientation.x = quat[0]
        p.pose.orientation.y = quat[1]
        p.pose.orientation.z = quat[2]
        p.pose.orientation.w = quat[3]
        
        try:
            
            self._tf.waitForTransform(p.header.frame_id, group.get_pose_reference_frame(), p.header.stamp, rospy.Duration(2))
            p = self._tf.transformPose(group.get_pose_reference_frame(), p)
            
        except:
            
            rospy.logerr("TF error!")
            return None
        
        return p
    
    def go(self, group, where):
        
        self._move_buff.put((group, where))
        
    def wave(self):
        
        self.go(self._left_arm, self.l_wave_1)
        self.go(self._left_arm, self.l_wave_2)
        self.go(self._left_arm, self.l_wave_1)
        
    def head(self):
        
        self._head_action_cl.wait_for_server()
        
        while not rospy.is_shutdown():
            
            (target, vel, imp) = self._head_buff.get()
            
            # we don't need to block it here
            self._head_buff.task_done()
            
            if (not imp) and (not self._head_buff.empty()):
              
              print "skipping head goal"
              
              # head target can be skipped (follow only the latest one)  
              continue
            
            # print "head point goal"
            # print target
            
            # point PR2's head there (http://wiki.ros.org/pr2_controllers/Tutorials/Moving%20the%20Head)
            goal = pr2_controllers_msgs.msg.PointHeadGoal()
        
            goal.target = target
            # goal.min_duration = rospy.Duration(3.0)
            goal.max_velocity = vel
            # goal.pointing_frame = "high_def_frame"
            goal.pointing_frame = "head_mount_kinect_rgb_link"
            goal.pointing_axis.x = 1
            goal.pointing_axis.y = 0
            goal.pointing_axis.z = 0
            
            try:
            
              self._head_action_cl.send_goal(goal)
              self._head_action_cl.wait_for_result(rospy.Duration.from_sec(5.0))

            except:

               rospy.logerr("head action error")
            
            #self._head_buff.task_done()
        
    def movements(self):
        
        while not rospy.is_shutdown():
            
            (group, where) = self._move_buff.get()
            
            group.set_start_state_to_current_state()
            p = self.getPoseStamped(group, where)
            if p is None:
                
                self._move_buff.task_done()
                continue
                
            group.set_pose_target(p)
            group.set_goal_tolerance(0.1)
            group.allow_replanning(True)
            
            self._move_buff.task_done()
            
            group.go(wait=True)
    
    def timer(self, event):
        
        if self._initialized is False:
            
            rospy.loginfo("Moving arms to home positions")
            
            self.init_head()
            self.go(self._left_arm, self.l_home_pose)
            self.go(self._right_arm, self.r_home_pose)
            self._move_buff.join()
            
            self.say("I'm ready for a great job.")
            self._initialized = True
        
        
        if self.face is None:
            
            if (self.no_face_random_delay is None):
                
                delay = random.uniform(30, 10)    
                self.no_face_random_delay = rospy.Time.now() + rospy.Duration(delay)
                
                rospy.loginfo("Random delay: " + str(delay))
                
                return
                
            else:
                
                if rospy.Time.now() < self.no_face_random_delay:
                    
                    return
            
            self.init_head()
            self.go(self._left_arm, self.l_home_pose)
            self.go(self._right_arm, self.r_home_pose)
                
            rospy.loginfo("Starting selected action")
                
            action = self.getRandomFromArray(self.actions)
            
            action()
            
            delay = random.uniform(30, 10)    
            self.no_face_random_delay = rospy.Time.now() + rospy.Duration(delay)
            
            return
        
        else:
            
            self.no_face_random_delay = None
        
        if self.new_face and (self.last_invited_at + rospy.Duration(10) < rospy.Time.now()):

            self.last_invited_at = rospy.Time.now()

            self.new_face = False
            rospy.loginfo("new person")            

            self.face_counter = self.face_counter + 1
            
            # cd = getPointDist(self.face)
            
            # TODO decide action based on distance ?
            self.go(self._left_arm, self.l_home_pose)  # if a person is too close, don't move the hand?
            self.go(self._right_arm, self.r_advert)
            
            string = self.getRandomFromArray(self.invite_strings)
            self.say(string)
            
            # TODO wait some min. time + say something
        
        # if the face has not been updated for 10 seconds, assume the person left and continue
        if self.face.header.stamp + rospy.Duration(10) < rospy.Time.now():

            rospy.loginfo("person left")
            
            string = self.getRandomFromArray(self.goodbye_strings)
            self.say(string)
            
            self.init_head()
            
            self.go(self._left_arm, self.l_home_pose)
            self.go(self._right_arm, self.r_home_pose)
            self.face = None
            
            return
        
        self._head_buff.put((copy.deepcopy(self.face), 0.4, False))
            
     
    def init_head(self):
        
        p = PointStamped()
        p.header.stamp = rospy.Time.now()
        p.header.frame_id = "/base_link"
        
        p.point.x = 2.0
        p.point.y = 0.0
        p.point.z = 1.7
        
        self._head_buff.put((p, 0.1, True))
        
    def face_cb(self, point):
        
        # transform point
        
        try:
        
            self._tf.waitForTransform(point.header.frame_id, self._robot_frame, point.header.stamp, rospy.Duration(2))
            pt = self._tf.transformPoint(self._robot_frame, point)
            
        except:
            
            rospy.logerr("Transform error")
            return
        
        if self.face is not None:
        
            cd = self.getPointDist(pt)  # current distance
            dd = fabs(self.face_last_dist - cd)  # change in distance
            
            if dd < 0.5:
        
                self.face.header = pt.header
                
                # take the new position directly (the averaging filter below is disabled)
                self.face.point = pt.point
                #self.face.point.x = (self.face.point.x + pt.point.x) / 2
                #self.face.point.y = (self.face.point.y + pt.point.y) / 2
                #self.face.point.z = (self.face.point.z + pt.point.z) / 2
                
            else:
                
               if self._person_prob_left < 2:
                   
                   self._person_prob_left += 1 
                   
               else:
                
                   self._person_prob_left = 0
                
                   print "new face ddist: " + str(dd)

                   self.new_face = True
                   self.face = pt
               
            self.face_last_dist = cd
            
        else:
            
            self._person_prob_left = 0
            self.new_face = True
            self.face = pt
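
The say() method in the example above writes the synthesized audio bytes to a hard-coded file (output_tmp.dat) and plays it back with ffplay. Below is a minimal stand-alone sketch of the same playback fallback, using a temporary file instead of a fixed name; the helper name play_audio_bytes and the use of tempfile are assumptions, not part of the original node.

# Sketch only: play raw audio bytes with ffplay, mirroring the fallback in say().
# Assumes ffplay (part of FFmpeg) is installed; play_audio_bytes is a hypothetical helper.
import os
import subprocess
import tempfile

def play_audio_bytes(data):
    tmp = tempfile.NamedTemporaryFile(suffix='.dat', delete=False)
    try:
        tmp.write(data)
        tmp.close()
        # -autoexit quits when playback finishes, -nodisp suppresses the video window
        subprocess.check_output(['ffplay', tmp.name, '-autoexit', '-nodisp'],
                                stderr=subprocess.STDOUT)
    except OSError:
        print('ffplay is not installed.')
    except subprocess.CalledProcessError:
        print('ffplay returned an error.')
    finally:
        os.remove(tmp.name)
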
예제 #45
0
class Navigator(object):
    def __init__(self):
        self.tf = TransformListener()
        # a 2-by-2 array, used to store (x,y) coordinates of the two triangulation points
        self.coords = np.zeros((2, 2))
        # a length-2 array, used to store the theta angle corresponding to each of the two points
        self.orients = np.zeros(2)
        # counter indicating how many (point, orientation) pairs we have obtained; we need 2 pairs to perform the simplest triangulation
        self.count = 0

    '''
	Method: CurrentPosition()
	Once the Fetch robot reaches a stable point, call this to record the robot's coordinates in the map (world) frame.
	'''

    def TransformPoint(self, point):
        now = rospy.Time.now()
        self.tf.waitForTransform('base_link', 'map', now, rospy.Duration(4.0))
        global_coordinate = self.tf.transformPoint('map', point)
        rospy.loginfo(global_coordinate)
        if self.count == 0:
            self.coords[0][0] = global_coordinate.point.x
            self.coords[0][1] = global_coordinate.point.y
            rospy.loginfo(self.coords)
        elif self.count == 1:
            self.coords[1][0] = global_coordinate.point.x
            self.coords[1][1] = global_coordinate.point.y
            rospy.loginfo(self.coords)
        else:
            pass

    def CurrentPosition(self):
        base_link_origin = PointStamped()
        base_link_origin.header.stamp = rospy.Time(0)  # rospy.Time(0) means "latest available transform"
        base_link_origin.header.frame_id = 'base_link'
        base_link_origin.point.x = 0.0
        base_link_origin.point.y = 0.0
        base_link_origin.point.z = 0.0

        self.TransformPoint(base_link_origin)

    def WaitForTransform(self):
        self.tf.waitForTransform('base_link', 'map', rospy.Time(),
                                 rospy.Duration(4.0))

    '''
	Method: CurrentDirection()
	Call together with CurrentPosition() to record the robot's heading (yaw) at the same pose.
	'''

    def CurrentDirection(self):
        t = self.tf.getLatestCommonTime('base_link', 'map')
        (position, quaternion) = self.tf.lookupTransform('base_link', 'map', t)
        rospy.loginfo(quaternion)
        rospy.loginfo(transformations.euler_from_quaternion(quaternion)
                      [2])  # theta angle
        if self.count == 0:
            self.orients[0] = transformations.euler_from_quaternion(
                quaternion)[2]
            rospy.loginfo(self.orients)
        elif self.count == 1:
            self.orients[1] = transformations.euler_from_quaternion(
                quaternion)[2]
            rospy.loginfo(self.orients)
        else:
            pass

    def IncreaseCounter(self):
        self.count = self.count + 1

    def Triangulation(self):
        # intersect the two bearing rays recorded at the two poses
        theta1 = self.orients[0]
        theta2 = self.orients[1]
        x1 = self.coords[0][0]
        y1 = self.coords[0][1]
        x2 = self.coords[1][0]
        y2 = self.coords[1][1]
        estimatedX = (np.tan(-theta1) * x1 - np.tan(-theta2) * x2 +
                      (y2 - y1)) / (np.tan(-theta1) - np.tan(-theta2))
        estimatedY = ((x1 - x2) * np.tan(-theta1) * np.tan(-theta2) +
                      y2 * np.tan(-theta1) - y1 * np.tan(-theta2)) / (
                          np.tan(-theta1) - np.tan(-theta2))

        rospy.loginfo(estimatedX)
        rospy.loginfo(estimatedY)

        return (estimatedX, estimatedY)
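
The Navigator above records two (position, heading) pairs and intersects the corresponding bearing rays to estimate a target's position in the map frame. A hedged usage sketch follows; the node name, the driving step, and the assumption that the robot faces the same target from both poses are mine, not part of the original example.

# Sketch only: triangulate a target by pointing the robot at it from two different poses.
import rospy

rospy.init_node('navigator_triangulation_demo')
nav = Navigator()
nav.WaitForTransform()

for _ in range(2):
    # ... drive the robot so that it faces the same target from a new pose ...
    nav.CurrentPosition()   # stores (x, y) of base_link in the map frame
    nav.CurrentDirection()  # stores the yaw from lookupTransform('base_link', 'map', t)
    nav.IncreaseCounter()

# Each pose defines a ray y = y_i + tan(heading_i) * (x - x_i). Because
# lookupTransform('base_link', 'map', ...) returns the map frame's pose expressed in
# base_link, the stored yaw is the negative of the robot's heading, which is why
# Triangulation() uses tan(-theta). Intersecting the two rays gives, with t_i = tan(-theta_i):
#   x = (t1*x1 - t2*x2 + y2 - y1) / (t1 - t2)
#   y = (t1*t2*(x1 - x2) + t1*y2 - t2*y1) / (t1 - t2)
x, y = nav.Triangulation()
rospy.loginfo('estimated target: (%f, %f)', x, y)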