class MergePoses:

    def __init__(self):

        self.avg_pose = None
        self.tl = TransformListener()

        self.topics = rospy.get_param('~poses',[])
        print self.topics
        if len(self.topics) == 0:
            rospy.logerr('Please provide a poses list as input parameter')
            return
        self.output_pose = rospy.get_param('~output_pose','ar_avg_pose')
        self.output_frame = rospy.get_param('~output_frame', 'rf_map')
        
        self.pub = rospy.Publisher(self.output_pose, PoseStamped, queue_size=1)

        self.mutex_avg = threading.Lock()
        self.mutex_t = threading.Lock()

        self.transformations = {}

        # Subscribe last so that callbacks cannot fire before the locks and
        # the transformation cache above exist.
        self.subscribers = []
        for topic in self.topics:
            sub = rospy.Subscriber(topic, PoseStamped, self.callback, queue_size=1)
            self.subscribers.append(sub)
        
    def callback(self, pose_msg):

        # get transformation to common frame
        position = None
        quaternion = None
        if pose_msg.header.frame_id in self.transformations:
            position, quaternion = self.transformations[pose_msg.header.frame_id]
        else:
            if self.tl.frameExists(pose_msg.header.frame_id) and \
            self.tl.frameExists(self.output_frame):
                t = self.tl.getLatestCommonTime(self.output_frame, pose_msg.header.frame_id)
                position, quaternion = self.tl.lookupTransform(self.output_frame,
                                                               pose_msg.header.frame_id, t)
                self.mutex_t.acquire()
                self.transformations[pose_msg.header.frame_id] = (position, quaternion)
                self.mutex_t.release()
                rospy.loginfo("Got static transform for %s" % pose_msg.header.frame_id)

        if position is None or quaternion is None:
            rospy.logwarn("No transform from %s to %s available yet, dropping pose"
                          % (pose_msg.header.frame_id, self.output_frame))
            return

        # transform pose into the common output frame
        framemat = self.tl.fromTranslationRotation(position, quaternion)

        posemat = self.tl.fromTranslationRotation([pose_msg.pose.position.x,
                                                   pose_msg.pose.position.y,
                                                   pose_msg.pose.position.z],
                                                  [pose_msg.pose.orientation.x,
                                                   pose_msg.pose.orientation.y,
                                                   pose_msg.pose.orientation.z,
                                                   pose_msg.pose.orientation.w])

        newmat = numpy.dot(framemat,posemat)
        
        xyz = tuple(translation_from_matrix(newmat))[:3]
        quat = tuple(quaternion_from_matrix(newmat))

        tmp_pose = PoseStamped()
        tmp_pose.header.stamp = pose_msg.header.stamp
        tmp_pose.header.frame_id = self.output_frame
        tmp_pose.pose = Pose(Point(*xyz),Quaternion(*quat))
        
        tmp_angles = euler_from_quaternion([tmp_pose.pose.orientation.x,
                                            tmp_pose.pose.orientation.y,
                                            tmp_pose.pose.orientation.z,
                                            tmp_pose.pose.orientation.w])
        
        # compute average
        self.mutex_avg.acquire()
        
        if self.avg_pose is None or (pose_msg.header.stamp - self.avg_pose.header.stamp).to_sec() > 0.5:
            self.avg_pose = tmp_pose
        else:            
            self.avg_pose.header.stamp = pose_msg.header.stamp
            a = 0.7
            b = 0.3
            self.avg_pose.pose.position.x = a*self.avg_pose.pose.position.x + b*tmp_pose.pose.position.x
            self.avg_pose.pose.position.y = a*self.avg_pose.pose.position.y + b*tmp_pose.pose.position.y
            self.avg_pose.pose.position.z = a*self.avg_pose.pose.position.z + b*tmp_pose.pose.position.z

            angles = euler_from_quaternion([self.avg_pose.pose.orientation.x,
                                            self.avg_pose.pose.orientation.y,
                                            self.avg_pose.pose.orientation.z,
                                            self.avg_pose.pose.orientation.w])
            angles = list(angles)
            angles[0] = avgAngles(angles[0], tmp_angles[0], a, b)
            angles[1] = avgAngles(angles[1], tmp_angles[1], a, b)
            angles[2] = avgAngles(angles[2], tmp_angles[2], a, b)

            q = quaternion_from_euler(angles[0], angles[1], angles[2])
            
            self.avg_pose.pose.orientation.x = q[0]
            self.avg_pose.pose.orientation.y = q[1]
            self.avg_pose.pose.orientation.z = q[2]
            self.avg_pose.pose.orientation.w = q[3]

        self.pub.publish(self.avg_pose)
            
        self.mutex_avg.release()
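
# avgAngles() is used by the callback above but is not defined in this snippet.
# A minimal sketch of what it presumably does -- a weighted average of two
# angles that behaves correctly across the -pi/pi wrap-around -- is given
# below; this is an assumption, not the original implementation, and `math`
# is expected to be imported at module level alongside the other dependencies:
def avgAngles(angle1, angle2, w1, w2):
    return math.atan2(w1 * math.sin(angle1) + w2 * math.sin(angle2),
                      w1 * math.cos(angle1) + w2 * math.cos(angle2))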
class ObjectDetector:
    def __init__(self):
        self.rgb_sub = rospy.Subscriber("/camera/rgb/image_rect_color", Image,
                                        self.rgb_callback)
        self.pcl_sub = rospy.Subscriber("/camera/depth_registered/points",
                                        PointCloud2, self.pointcloud_callback)
        self.depth_raw_sub = rospy.Subscriber(
            "/camera/depth_registered/image_raw", Image,
            self.raw_depth_callback)
        # TODO subscribe only once
        self.camera_info_sub = rospy.Subscriber("camera/rgb/camera_info",
                                                CameraInfo,
                                                self.camera_info_callback)

        self.object_pub = rospy.Publisher('/object_found',
                                          Detected_object,
                                          queue_size=10)
        self.pfh_pub = rospy.Publisher('/pfh_found',
                                       Point_feature_histogram,
                                       queue_size=10)

        self.tf = TransformListener()
        self.stop_callbacks_flag = False
        # Holds the time that the instance has been taken.
        self.current_time = rospy.Time(0)

        # Camera infos - they will be updated with the Callback, hopefully.
        self.cx_d = 0
        self.cy_d = 0
        self.fx_d = 0
        self.fy_d = 0

        self.bridge = CvBridge()
        initial_shape = (480, 640)
        # Divide it by a number, to scale the image and make computations faster.
        self.desired_divide_factor = 2
        self.desired_shape = map(lambda x: x / self.desired_divide_factor,
                                 initial_shape)

        self.rgb_img = np.ndarray(shape=self.desired_shape, dtype=np.uint8)
        self.depth_img = np.ndarray(shape=self.desired_shape, dtype=np.uint8)
        self.depth_raw_img = np.ndarray(shape=self.desired_shape,
                                        dtype=np.uint8)
        self.pcl = PointCloud2()
        # Stores the overall objects that have been found.
        self.detected_objects = []
        # Stores the objects that have been found in the current instance.
        self.newly_detected_objects = []
        print("\nPress R if you want to trigger GUI for object detection...")
        print("Press Esc if you want to end the suffer of this node...\n")

        # Read the parameters from the yaml file
        with open("../cfg/conf.yaml", 'r') as stream:
            try:
                doc = yaml.safe_load(stream)
                self.depth_weight = doc["clustering"]["depth_weight"]
                self.coordinates_weight = doc["clustering"][
                    "coordinates_weight"]
                self.n_clusters = doc["clustering"]["number_of_clusters"]
                self.depth_thresh_up = doc["clustering"]["depth_thresup"]
                self.depth_thresh_down = doc["clustering"]["depth_thresdown"]
            except yaml.YAMLError as exc:
                print(exc)
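        # The clustering parameters above are read from ../cfg/conf.yaml, which
        # is assumed (it is not shown in this snippet) to look roughly like:
        #
        #   clustering:
        #     depth_weight: ...
        #     coordinates_weight: ...
        #     number_of_clusters: ...
        #     depth_thresup: ...
        #     depth_thresdown: ...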

    def camera_info_callback(self, msg_info):
        self.cx_d = msg_info.K[2]
        self.cy_d = msg_info.K[5]
        self.fx_d = msg_info.K[0]
        self.fy_d = msg_info.K[4]

    def rgb_callback(self, msg_rgb):
        try:
            # TODO check if it is also ok with kinect
            cv_image = self.bridge.imgmsg_to_cv2(msg_rgb,
                                                 desired_encoding="bgr8")
            # Resize to the desired size
            cv_image_resized = cv2.resize(cv_image,
                                          tuple(reversed(self.desired_shape)),
                                          interpolation=cv2.INTER_AREA)
            self.rgb_img = cv_image_resized
            try:
                img = np.concatenate(
                    (self.rgb_img,
                     cv2.cvtColor(self.depth_img, cv2.COLOR_GRAY2BGR)),
                    axis=1)
                cv2.imshow("Combined image from my node", img)
            except ValueError as error:
                print(error)
                print("Images from channels are not ready yet...")
            k = cv2.waitKey(1) & 0xFF
            if k == 114:  # if you press r, trigger the processing
                self.stop_callbacks_flag = True
                self.current_time = rospy.Time(0)
                cv2.destroyAllWindows()
                try:
                    self.process()
                except NameError as warn:
                    cv2.destroyAllWindows()
                    print(warn)
                self.stop_callbacks_flag = False
                print(
                    "\nPress R if you want to trigger GUI for object detection..."
                )
                print(
                    "Press Esc if you want to end the suffer of this node...\n"
                )
            if k == 27:  # if you press Esc, kill the node
                rospy.signal_shutdown("Whatever...")
        except CvBridgeError as e:
            print(e)

    def raw_depth_callback(self, msg_raw_depth):
        # Raw image from device. Contains uint16 depths in mm.
        temp_img = self.bridge.imgmsg_to_cv2(msg_raw_depth, "passthrough")
        self.depth_raw_img = cv2.resize(temp_img,
                                        tuple(reversed(self.desired_shape)),
                                        interpolation=cv2.INTER_NEAREST)
        nans_positions = np.isnan(self.depth_raw_img)
        self.depth_raw_img[nans_positions] = 0
        self.depth_raw_img = cv2.convertScaleAbs(
            self.depth_raw_img, alpha=(255.0 / np.amax(self.depth_raw_img)))
        self.depth_img = self.depth_raw_img

    def pointcloud_callback(self, msg_pcl):
        # synchronize pcl with images
        if not self.stop_callbacks_flag:
            self.pcl = msg_pcl

    def process(self):
        bounding_boxes = gui_editor.gui_editor(self.rgb_img, self.depth_img,
                                               len(self.detected_objects))
        counter = len(self.detected_objects) + 1
        # For every newly found object.
        for c in bounding_boxes:
            x, y, w, h = cv2.boundingRect(c)
            # Take the center of the bounding box of the object.
            center_x = x + w / 2
            center_y = y + h / 2
            # Get the point from point cloud of the corresponding pixel.
            # Multiply by the desired factor the pixel's position, because I have scaled the images by this number.
            try:
                coords = return_pcl(center_x * self.desired_divide_factor,
                                    center_y * self.desired_divide_factor,
                                    self.pcl)
            except (NameError, IndexError):
                rospy.logwarn("WARNING: Found object" + str(counter) +
                              " with gaps, let's ignore it!")
                continue
            # Based on formula: x3D = (x * 2 - self.cx_d) * z3D/self.fx_d
            # Based on formula: y3D = (y * 2 - self.cy_d) * z3D/self.fy_d
            real_width = self.desired_divide_factor * w * coords[2] / self.fx_d
            real_height = self.desired_divide_factor * h * coords[2] / self.fy_d

            # Crop the image and get just the bounding box.
            crop_rgb_img = self.rgb_img[y:y + h, x:x + w]
            crop_depth_img = self.depth_img[y:y + h, x:x + w]

            # Find the position of detected object relative to world.
            try:
                # Credits: http://www.euclideanspace.com/maths/geometry/affine/matrix4x4/
                [translation, rotation
                 ] = self.tf.lookupTransform('map', 'camera_rgb_optical_frame',
                                             self.current_time)
                transformation_matrix = self.tf.fromTranslationRotation(
                    translation, rotation)
                old_point_vector = np.asanyarray(
                    [coords[0], coords[1], coords[2], 1])
                world_coordinates = transformation_matrix.dot(old_point_vector)
                [world_x, world_y, world_z] = world_coordinates[0:3]
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                # In case of not turtlebot and just kinect.
                rospy.logwarn(
                    "Cannot use transformation to the the frame of the world.")
                world_x, world_y, world_z = coords[0], coords[1], coords[2]
            det_object = DetectedObject(counter, coords[0], coords[1],
                                        coords[2], world_x, world_y, world_z,
                                        real_width, real_height, crop_rgb_img,
                                        crop_depth_img, self.pcl)
            self.newly_detected_objects.append(det_object)
            counter += 1
        self.update_world()

    def update_world(self):
        detected_objects_length = len(self.detected_objects)
        # For every new object that was found, first check whether it exists and then send it to the tf2_broadcaster.
        for new_det_object in self.newly_detected_objects:
            # Create the message for object_clusterer with the Point Feature Histogram and leave name_id for later.
            new_pfh_msg = Point_feature_histogram()
            new_pfh_msg.pfh = new_det_object.pfh[0]
            # First time with no already found objects.
            if detected_objects_length == 0:
                self.save_and_send(new_det_object)
                # Name_id is the same as the object's name_id, because this is the first time it has been seen.
                new_pfh_msg.name_id = new_det_object.name_id
                self.pfh_pub.publish(new_pfh_msg)
                continue
            for i in range(0, detected_objects_length):
                if self.detected_objects[i].is_the_same_object(new_det_object):
                    # It is the same object, so update its dimensions.
                    self.detected_objects[i].update_dimensions(new_det_object)
                    # Save another pfh of the current frame of the same object.
                    self.detected_objects[i].pfh.append(
                        new_det_object.crop_pointcloud_client())
                    # Name_id matches that of the already found object, since they are the same object.
                    new_pfh_msg.name_id = self.detected_objects[i].name_id
                    self.pfh_pub.publish(new_pfh_msg)
                    break
                # If you have checked the newly found object with all already found ones, save and send it.
                if i == detected_objects_length - 1:
                    self.save_and_send(new_det_object)
                    # Name_id is the same as the object's name_id, because this is the first time it has been seen.
                    new_pfh_msg.name_id = new_det_object.name_id
                    self.pfh_pub.publish(new_pfh_msg)
        # Empty the list newly_detected_objects
        del self.newly_detected_objects[:]
        print("Objects so far found: " + str(len(self.detected_objects)))

    # Saves the newly found object in the list of detected objects and sends it to tf2_broadcaster node.
    def save_and_send(self, to_send_object):
        print(to_send_object)
        self.detected_objects.append(to_send_object)
        msg = Detected_object()
        msg.name_id = to_send_object.name_id
        msg.x = to_send_object.x
        msg.y = to_send_object.y
        msg.z = to_send_object.z
        msg.width = to_send_object.width
        msg.height = to_send_object.height
        self.object_pub.publish(msg)
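
# return_pcl() is used in process() above but is defined elsewhere in this
# package.  A minimal sketch of the assumed behaviour -- looking up the 3-D
# point of an image pixel in the organized point cloud -- is given below; the
# error handling is an assumption, not the original code:
def return_pcl(u, v, cloud):
    from sensor_msgs import point_cloud2
    x, y, z = next(point_cloud2.read_points(cloud, field_names=("x", "y", "z"),
                                            skip_nans=False, uvs=[(u, v)]))
    if z != z:  # NaN check: the cloud has a gap at this pixel
        raise NameError("No depth available for pixel (%d, %d)" % (u, v))
    return [x, y, z]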
class get_pos(object):
    def __init__(self, limb_viewer, limb_checker, parameter_reset):
        '''
    Viewer - Limb that will end up reaching out to the user's hand
    Checker - Limb that checks whether the user's hand is still in position
    Parameter Reset - Determines whether the camera parameters should be reset. Reset is only needed once
    '''
        #Initializing for the translation
        self.left_arm = Limb('left')
        self.right_arm = Limb('right')
        self.limb = limb_viewer
        self.img_viewer = None
        self.arm_viewer = baxter_interface.Limb(
            limb_viewer)  # Assigning the viewer to the correct limb
        self.arm_checker = baxter_interface.Limb(
            limb_checker)  # Assigning the checker to the correct limb
        self.hand_area = 0  # Hand area is used by Checker to determine whether the hand is still detected
        self.bridge = CvBridge()  # ROS to Opencv
        # Coordinates and global variables setup
        self.torque = None  # Torque will be used as a detection method
        self.frame = None  # Will be used for image shape
        self.x = 0  # Will be assigned to x-pixel-coordinate of detected hand // Will later be converted to base frame
        self.y = 0  # Will be assigned to y-pixel-coordinate of detected hand // Will later be converted to base frame
        self.z_camera = 0  # Will be assigned to z spatial coordinate WRT to camera
        # Arm sensor setup
        self.distance = {}
        root_name = "/robot/range/"
        sensor_name = ["left_hand_range/state", "right_hand_range/state"]
        # Assigning the camera topics to viewer and checker depending on the user input
        if limb_viewer == 'left':
            self.camera_viewer = 'left_hand_camera'
            camera_checker = 'right_hand_camera'
            # Subscribing to the left sensor
            self.__sensor = rospy.Subscriber(root_name + sensor_name[0],
                                             Range,
                                             callback=self.sensorCallback,
                                             callback_args="left",
                                             queue_size=1)
        else:
            self.camera_viewer = 'right_hand_camera'
            camera_checker = 'left_hand_camera'
            # Subscribing to the right sensor
            self.__sensor = rospy.Subscriber(root_name + sensor_name[1],
                                             Range,
                                             callback=self.sensorCallback,
                                             callback_args="right",
                                             queue_size=1)
        # resetting the parameters of the viewer and checker if instructed
        if parameter_reset == True:
            settings = CameraController.createCameraSettings(width=640,
                                                             height=400,
                                                             exposure=-1)
            CameraController.openCameras(self.camera_viewer,
                                         camera_checker,
                                         settings=settings,
                                         settings2=settings)
            print "Viewer-camera, checker-camera parameter check"
            # self.left_camera = baxter_interface.CameraController(self.camera_viewer)
            # self.left_camera.resolution = (640,400)
            # self.left_camera.exposure = -1
            # print "Viewer-camera parameter check"
            # self.right_camera = baxter_interface.CameraController(camera_checker)
            # self.right_camera.resolution = (640,400)
            # self.right_camera.exposure = -1
            # print "Checker-camera parameter check"
        # Subscribing to the cameras
        self.viewer_image_sub = rospy.Subscriber(
            "/cameras/" + self.camera_viewer + "/image", Image,
            self.callback_viewer)  # Subscribing to Camera
        self.checker_image_sub = rospy.Subscriber(
            "/cameras/" + camera_checker + "/image", Image,
            self.callback_checker)  # Subscribing to Camera
        # Rotation matrix set up
        self.tf = TransformListener()
        # Orientation of the shaker will determine how the viewer's orientation will be once it reaches its final position
        self.left_orientation_shaker = [
            -0.520, 0.506, -0.453, 0.518
        ]  # Defined orientation // Obtained through tf_echo // ****GRIPPER****
        self.right_orientation_shaker = [0.510, 0.550, 0.457, 0.478]
        self.camera_gripper_offset = [
            0.038, 0.012, -0.142
        ]  # Offset of camera from the GRIPPER reference frame
        self._cur_joint = {
            'left_w0': 0.196733035822,
            'left_w1': 0.699878733673,
            'left_w2': 0,
            'left_e0': -0.303344700458,
            'left_e1': 1.90098568922,
            'left_s0': -0.263844695215,
            'left_s1': -1.03467004025
        }

    def follow_up(self, joint_solution=None):
        '''
    Any follow up instructions, after the hand is reached, should be in here
    '''
        if joint_solution is None:
            joint_solution = self._cur_joint
        self.__sensor.unregister()
        self.viewer_image_sub.unregister()
        self.checker_image_sub.unregister()
        self.arm_viewer.move_to_joint_positions(joint_solution)

    def sensorCallback(self, msg, side):
        self.distance[side] = msg.range
        if msg.range < 0.05:  # Only assigns a value if the hand is less than 5 cm away
            self.z_camera = msg.range  # Assign the z-coordinate
        else:
            self.z_camera = None

    def callback_viewer(self, data):
        '''
    This is the callback function of the viewer, i.e., the thread that the viewer creates once it's initialized.
    The viewer callback first runs a skin color detection, creates a mask from the given color range, and then
    the hand detection is run on the mask. The hand detection is done through a cascade classifier, and the
    coordinates of the hands are assigned to the global variables x and y. Note that the contour drawing
    is only there to provide a good display; it doesn't affect the skin detection, though it uses the same mask.
    '''
        try:
            # Creates an OpenCV image from the ROS image
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        # The endpoint effort is used as a method of checking whether the arm has reached the user
        self.torque = self.arm_viewer.endpoint_effort()
        self.torque = self.torque_mag(
            self.torque
        )  # The value assigned is the magnitude of the endpoint force vector

        img = cv_image
        converted = cv2.cvtColor(
            img, cv2.COLOR_BGR2YCR_CB)  # Convert image color scheme to YCrCb
        min_YCrCb = np.array(
            [0, 133, 77], np.uint8)  # Create a lower bound for the skin color
        max_YCrCb = np.array([255, 173, 127],
                             np.uint8)  # Create an upper bound for skin color

        skinRegion = cv2.inRange(converted, min_YCrCb,
                                 max_YCrCb)  # Create a mask with boundaries

        skinMask = cv2.inRange(converted, min_YCrCb,
                               max_YCrCb)  # Duplicate of the mask for comparison
        kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE,
            (11, 11))  # Kernel for a series of erosions and dilations on the mask
        #skinMask = cv2.erode(skinMask, kernel, iterations = 2) # Using an elliptical Kernel
        #skinMask = cv2.dilate(skinMask, kernel, iterations = 2)
        skinMask = cv2.GaussianBlur(skinMask, (3, 3),
                                    0)  # Blur the image to remove noise
        skin = cv2.bitwise_and(img, img,
                               mask=skinMask)  # Apply the mask to the frame

        height, width, depth = cv_image.shape  # Obtain the dimensions of the image
        self.frame = cv_image.shape
        hands_cascade = cv2.CascadeClassifier(
            '/home/steven/ros_ws/src/test_cam/haarcascade_hand.xml')
        hands = hands_cascade.detectMultiScale(
            skinMask, 1.3, 5)  # Detect the hands on the converted image
        contours, hierarchy = cv2.findContours(
            skinRegion, cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)  # Find the contour on the skin detection
        for i, c in enumerate(
                contours):  # Draw the contour on the source frame
            area = cv2.contourArea(c)
            if area > 10000:  # Noise isn't circled
                cv2.drawContours(img, contours, i, (255, 255, 0), 2)
        for (x, y, z, h) in hands:  # Get the coordinates of the hands
            d = h / 2
            self.x = x + d  # Gets the center of the detected hand
            self.y = y + d  # Gets the center of the detected hand
            cv2.circle(img, (self.x, self.y), 50, (0, 0, 255),
                       5)  # Circle the detected hand
        self.img_viewer = img

    def callback_checker(self, data):
        '''
    This is the callback function of the checker, i.e., the thread that the checker creates once it's initialized.
    The checker callback runs in the same way as the viewer callback, but its main use is to ensure that a hand
    is still detected. It does so by checking the area of the contour drawn on the image, and hence, unlike the
    viewer callback, the contour affects the hand detection. The contour area is however unstable, and might not
    produce the best results. The skin color detection has a high range of red color as well, making the contour
    detection less stable when a red color is in range. That is why the exposure of the checker is decreased,
    so as to reduce the noise colors.
    '''
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")

        except CvBridgeError as e:
            print(e)
        img = cv_image
        converted = cv2.cvtColor(
            img, cv2.COLOR_BGR2YCR_CB)  # Convert image color scheme to YCrCb

        min_YCrCb = np.array(
            [0, 133, 77], np.uint8)  # Create a lower bound for the skin color
        max_YCrCb = np.array([255, 173, 127],
                             np.uint8)  # Create an upper bound for skin color

        skinRegion = cv2.inRange(converted, min_YCrCb,
                                 max_YCrCb)  # Create a mask with boundaries
        skinMask = cv2.inRange(
            converted, min_YCrCb,
            max_YCrCb)  # Duplicate of the mask for comparison
        kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE,
            (11, 11))  # Kernel for a series of erosions and dilations on the mask
        #skinMask = cv2.erode(skinMask, kernel, iterations = 2) # Using an elliptical Kernel
        #skinMask = cv2.dilate(skinMask, kernel, iterations = 2)
        skinMask = cv2.GaussianBlur(skinMask, (3, 3),
                                    0)  # Blur the image to remove noise
        skin = cv2.bitwise_and(img, img,
                               mask=skinMask)  # Apply the mask to the frame

        height, width, depth = cv_image.shape
        hands_cascade = cv2.CascadeClassifier(
            '/home/steven/ros_ws/src/test_cam/haarcascade_hand.xml')
        hands = hands_cascade.detectMultiScale(
            skinMask, 1.3, 5)  # Detect the hands on the converted image
        for (x, y, z, h) in hands:  # Get the coordinates of the hands
            d = h / 2
            x = x + d
            y = y + d
            cv2.circle(img, (x, y), 50, (0, 0, 255), 5)

        contours, hierarchy = cv2.findContours(
            skinRegion, cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)  # Find the contour on the skin detection
        self.hand_area = 0
        for i, c in enumerate(
                contours):  # Draw the contour on the source frame
            area = cv2.contourArea(c)
            if area > 10000:
                cv2.drawContours(img, contours, i, (255, 255, 0), 2)
                self.hand_area = area  # - area_1

        # The following code creates a mask based on the red color scheme. It should be enabled whenever
        # a red color is detected by the checker: red in the scene alters the detected hand area, so a
        # large area may be reported even when no hand is in range. The red area should therefore be
        # subtracted from the entire detected area to obtain the actual area of the hand.
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        lower_red = np.array([0, 10, 10])
        upper_red = np.array([10, 240, 240])

        red_mask = cv2.inRange(hsv, lower_red, upper_red)
        red = cv2.bitwise_and(img, img, mask=red_mask)

        cv2.imshow("Viewer Camera // Checker Camera",
                   np.hstack([img, self.img_viewer]))
        cv2.waitKey(1)

    def torque_mag(self, a):
        # Returns the magnitude of the endpoint force vector (used as a contact check)
        mag_a = math.sqrt((a['force'].z * a['force'].z) +
                          (a['force'].y * a['force'].y) +
                          (a['force'].x * a['force'].x))
        return mag_a

    def get_starting_pos(self):
        # Sets the limbs to their correct starting position
        if self.limb == 'left':
            self.left_arm.move_to_joint_positions({
                'left_e0': -1.1339952974442922,
                'left_e1': 1.2954467753692318,
                'left_s0': 0.8252816638823526,
                'left_s1': 0.048703890015361885,
                'left_w0': -0.14879613642488512,
                'left_w1': 1.176179769111141,
                'left_w2': 0.5867476513661708
            }, timeout=15.0)
            self.right_arm.move_to_joint_positions({
                'right_e0': 1.0438739261560241,
                'right_e1': 1.2797234722934063,
                'right_s0': -0.1257864246066039,
                'right_s1': -0.3171505278953093,
                'right_w0': 0.3186845086831947,
                'right_w1': 1.1278593742927505,
                'right_w2': -0.215907795894872
            }, timeout=15.0)
        else:
            self.left_arm.move_to_joint_positions({
                'left_e0': -1.2475098757478127,
                'left_e1': 1.1830826826566254,
                'left_s0': 0.292990330486114,
                'left_s1': -0.12540292940963257,
                'left_w0': -0.024927187803137973,
                'left_w1': 1.301966193717745,
                'left_w2': 0.15953400194008302
            }, timeout=15.0)
            self.right_arm.move_to_joint_positions({
                'right_e0': 1.0933448065653286,
                'right_e1': 1.2609322076418101,
                'right_s0': -0.6024709544419963,
                'right_s1': -0.08053399136398422,
                'right_w0': 0.2906893593042859,
                'right_w1': 1.212995308020391,
                'right_w2': -0.19251458887961942
            }, timeout=15.0)

    def unit_vector_to_point(self, x, y):
        '''
    Creates a unit vector from the camera's frame, given a pixel point from the image
    '''
        height, width, depth = self.frame  # Obtain the dimensions of the frame
        cam_info = rospy.wait_for_message("/cameras/" + self.camera_viewer +
                                          "/camera_info",
                                          CameraInfo,
                                          timeout=None)
        img_proc = PinholeCameraModel()
        img_proc.fromCameraInfo(cam_info)
        # The origin of the camera image is initially set to the top left corner
        # However the projectPixelTo3dRay uses the origin at the center of the camera. Hence the coordinates have to be converted
        x = x - width / 2
        y = height / 2 - y
        # Creates a unit vector to the given point. Unit vector passes through point from the center of the camera
        n = img_proc.projectPixelTo3dRay((x, y))
        return n

    def unit_vector_gripper_frame(self, u_vector):
        '''
    Converts the unit vector from the camera's frame to that of the gripper
    '''
        pose = self.arm_viewer.endpoint_pose()
        pos_gripper = [
            pose['position'].x, pose['position'].y, pose['position'].z
        ]
        u_vector_gripper = [
            u_vector[0] + self.camera_gripper_offset[0],
            u_vector[1] + self.camera_gripper_offset[1],
            u_vector[2] + self.camera_gripper_offset[2]
        ]
        return u_vector_gripper

    def align(self, alignment_vector=None):
        '''
    To ensure that the inverse kinematics will return a valid joint solution set, a first alignment is needed.
    Aligning the gripper with the unit vector to the hand's position ensures that.
    '''
        if alignment_vector is not None:
            alignment_vector = alignment_vector[:3]  # Ensure that the vector is a 3x1
        height, width, depth = self.frame  # Gets the dimension of the image
        pose = self.arm_viewer.endpoint_pose(
        )  # Gets the current pose of the end effector
        pos = [pose['position'].x, pose['position'].y,
               pose['position'].z]  # Gets the position
        quat = [
            pose['orientation'].x, pose['orientation'].y,
            pose['orientation'].z, pose['orientation'].w
        ]  # Gets the orientation
        __matrix = self.tf.fromTranslationRotation(
            pos, quat)  # Rotational matrix is obtained from pos and quat
        # __matrix contains the rotational and translational components, along with a last row of 0,0,0,1, i.e. it is 4x4.
        # Only the 3x3 rotational part is needed for alignment purposes.
        __matrix = __matrix[:3, :3]

        def alignment_to_vec(
            b, a
        ):  # b = z-axis vector // a = alignment vector (unit vector to point)
            '''
        Returns the rotation matrix that will align the vector b to a
        '''
            a_dot_b = sum(imap(mul, a, b))  #np.dot(a,b)
            n_a = np.array([float(a[0]), float(a[1]), float(a[2])])
            n_b = np.array([float(b[0]), float(b[1]), float(b[2])])
            a_x_b = np.cross(n_a, n_b)
            a_x_b = np.matrix([[float(a_x_b[0])], [float(a_x_b[1])],
                               [float(a_x_b[2])]])
            mod_a = math.sqrt((float(a[0]) * float(a[0])) +
                              (float(a[1]) * float(a[1])) +
                              (float(a[2]) * float(a[2])))
            mod_b = math.sqrt((float(b[0]) * float(b[0])) +
                              (float(b[1]) * float(b[1])) +
                              (float(b[2]) * float(b[2])))
            mod_a_x_b = math.sqrt((float(a_x_b[0]) * float(a_x_b[0])) +
                                  (float(a_x_b[1]) * float(a_x_b[1])) +
                                  (float(a_x_b[2]) * float(a_x_b[2])))
            x = a_x_b / mod_a_x_b
            alpha = a_dot_b / (mod_a * mod_b)
            theta = math.acos(alpha)
            a_matrix = np.matrix([[0,
                                   float(-x[2]), float(x[1])],
                                  [float(x[2]), 0,
                                   float(-x[0])],
                                  [float(-x[1]), float(x[0]), 0]])
            rotation_matrix = np.identity(3) + (math.sin(theta) * a_matrix) + (
                (1 - (math.cos(theta))) * (np.dot(a_matrix, a_matrix)))
            return rotation_matrix
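
        # The construction above is Rodrigues' rotation formula,
        #   R = I + sin(theta) * K + (1 - cos(theta)) * K^2,
        # where K (a_matrix) is the skew-symmetric cross-product matrix of the
        # unit rotation axis x and theta is the angle between b and a.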

        def create_from_rot_matrix(rot):
            """
        Quaternion (x, y, z, w) created from a rotation matrix,
        modified from
        https://github.com/CMU-ARM/ARBT-Baxter-Nav-Task/blob/stable/scripts/Quaternion.py
        """
            tr = np.trace(rot)
            if (tr > 0):
                s = np.sqrt(tr + 1) * 2
                w = 0.25 * s
                x = (rot[2, 1] - rot[1, 2]) / s
                y = (rot[0, 2] - rot[2, 0]) / s
                z = (rot[1, 0] - rot[0, 1]) / s
            elif (rot[0, 0] > rot[1, 1] and rot[0, 0] > rot[2, 2]):
                s = np.sqrt(1 + rot[0, 0] - rot[1, 1] - rot[2, 2]) * 2
                w = (rot[2, 1] - rot[1, 2]) / s
                x = 0.25 * s
                y = (rot[0, 1] + rot[1, 0]) / s
                z = (rot[0, 2] + rot[2, 0]) / s
            elif (rot[1, 1] > rot[2, 2]):
                s = np.sqrt(1 + rot[1, 1] - rot[0, 0] - rot[2, 2]) * 2
                w = (rot[0, 2] - rot[2, 0]) / s
                x = (rot[0, 1] + rot[1, 0]) / s
                y = 0.25 * s
                z = (rot[1, 2] + rot[2, 1]) / s
            else:
                s = np.sqrt(1 + rot[2, 2] - rot[1, 1] - rot[0, 0]) * 2
                w = (rot[1, 0] - rot[0, 1]) / s
                x = (rot[0, 2] + rot[2, 0]) / s
                y = (rot[1, 2] + rot[2, 1]) / s
                z = 0.25 * s
            return x, y, z, w

        def hamilton_product(b, a):
            # Hamilton product a * b of two quaternions in [x, y, z, w] form
            q = [0.0, 0.0, 0.0, 0.0]
            q[3] = a[3] * b[3] - a[0] * b[0] - a[1] * b[1] - a[2] * b[2]
            q[0] = a[3] * b[0] + a[0] * b[3] + a[1] * b[2] - a[2] * b[1]
            q[1] = a[3] * b[1] - a[0] * b[2] + a[1] * b[3] + a[2] * b[0]
            q[2] = a[3] * b[2] + a[0] * b[1] - a[1] * b[0] + a[2] * b[3]
            return q

        z_vector = np.dot(__matrix, np.matrix(
            [[0], [0],
             [1]]))  # Converts the z-axis of the camera to the base frame
        print "Z-vector BF: ", z_vector
        rotation_matrix = alignment_to_vec(
            z_vector, alignment_vector
        )  # Rotation matrix that aligns the z-axis to the unit vector, pointing towards the hand
        x, y, z, w = create_from_rot_matrix(
            rotation_matrix)  # Gets the orientation of alignment
        rot_quat = [x, y, z, w]
        final_quat = hamilton_product(
            quat, rot_quat)  # Gets the final orientation of alignment
        self.ik(self.limb, pos, final_quat, True)  # Aligns the viewer
        print "Aligned"

    def pixel_translation(self, uv):
        # Converts the pixel coordinates to spatial coordinates WRT the camera's frame
        pose = self.arm_viewer.endpoint_pose()
        xy = [pose['position'].x, pose['position'].y, pose['position'].z]
        height, width, depth = self.frame  #camera frame dimensions
        print "\nx-pixel: ", uv[0], "\ty-pixel: ", uv[1]
        cx = uv[0]  # x pixel of hand
        cy = uv[1]  # y pixel of hand
        pix_size = 0.0025  # Camera calibration (meter/pixels); Pixel size at 1 meter height of camera
        h = self.z_camera  #Height from hand to camera, when at vision place
        x0b = xy[
            0]  # x position of camera in baxter's base frame, when at vision place
        y0b = xy[
            1]  # y position of camera in baxter's base frame, when at vision place
        x_camera_offset = .045  # x camera offset from center of the gripper
        y_camera_offset = -.01  # y camera offset from center of the gripper
        # Position of the object in the baxter's stationary base frame
        x_camera = (cy - (height / 2)
                    ) * pix_size * h - x_camera_offset  # x-coordinate in camera's frame
        y_camera = (cx - (width / 2)
                    ) * pix_size * h - y_camera_offset  # y-coordinate in camera's frame
        return x_camera, y_camera, h

    def xy_translation(self):
        uv = (self.x, self.y)  # pixel coordinates
        if self.x != 0 and self.y != 0 and self.z_camera is not None:  # If a hand has been detected and a position recorded
            x_camera, y_camera, h = self.pixel_translation(
                uv)  # Obtains the spatial x,y coordinates
            self.depth_detection(float(x_camera), float(y_camera),
                                 h)  # proceeds to coordinates translation
        else:
            self.depth_detection(
                0, 0, 0
            )  #If a depth hasn't been detected, proceed to the depth detection

    def depth_detection(self, x, y, z):
        pose = self.arm_viewer.endpoint_pose()
        pos = [pose['position'].x, pose['position'].y, pose['position'].z]
        quat = [
            pose['orientation'].x, pose['orientation'].y,
            pose['orientation'].z, pose['orientation'].w
        ]

        height, width, depth = self.frame  #camera frame dimensions
        __matrix = self.tf.fromTranslationRotation(pos,
                                                   quat)  #self.orientation_cam
        # If a depth has already been detected
        # Proceed to coordinates translation, and use inverse kinematics to move to position

        if self.z_camera is not None:
            print "depth detected"
            z = self.z_camera
            xyz = np.dot(__matrix, np.matrix([[x], [y], [z], [1]]))
            print "\nx-coordinate obtained: ", xyz[
                0], "\ny-coordinate obtained: ", xyz[
                    1], "\nz-coordinate obtained: ", xyz[2]
            pos = [xyz[0], xyz[1], xyz[2]]
            if self.limb == 'left':
                self.ik(self.limb, pos, self.left_orientation_shaker)
            else:
                self.ik(self.limb, pos, self.right_orientation_shaker)
        # Else, move the arm towards unit vector until a depth has been detected
        else:
            print "...Moving arm..."
            __matrix = __matrix[:3, :3]
            # Aligns the end effector to the detected hand before moving it
            n = self.unit_vector_to_point(self.x, self.y)
            if self.x > width / 2:  # If self.x > width/2, the hand is above the camera
                up = True  # Will pass this as an argument to generate the unit vector, indicating that the hand is above
            else:
                up = False
            u_vector_gripper = self.unit_vector_gripper_frame(n)
            u_vector = np.dot(
                __matrix,
                np.matrix([[u_vector_gripper[0]], [u_vector_gripper[1]],
                           [u_vector_gripper[2]]]))
            print "...Aligning arm..."
            self.align(u_vector)
            #self.with_check(__matrix,up)
            self.without_check(__matrix, up)

    def without_check(self, __matrix, up=False):
        height, width, depth = self.frame  #camera frame dimensions
        while self.z_camera is None and self.torque < 20:  # While no depth is detected and no torque is detected
            n = self.unit_vector_to_point(
                width / 2,
                height / 2)  # Unit vector to the center of the camera
            # the unit vector to the center of the camera is generated, since after alignment, the hand should be almost in line with the center
            u_vector_gripper = self.unit_vector_gripper_frame(
                n)  # converts the unit vector to the gripper's frame
            u_vector = np.dot(__matrix,
                              np.matrix([[u_vector_gripper[0]],
                                         [u_vector_gripper[1]],
                                         [u_vector_gripper[2]]
                                         ]))  # converts to the base frame
            if self.limb == 'left':
                if up == True:  # Accounts for the position of the hand, above or below the camera
                    # Unit vector seems to always have a negative z-component, hence if the hand is detected above the camera, the z-component is negated
                    u_vector[2] = -u_vector[2]  # negating the z-component
            elif self.limb == 'right':
                if up != True:  # The inverse is true for the right arm
                    u_vector[2] = -u_vector[2]
            pose = self.arm_viewer.endpoint_pose()
            pos = [
                pose['position'].x + (u_vector[0] / 20),
                pose['position'].y + (u_vector[1] / 20),
                pose['position'].z + (u_vector[2] / 20)
            ]  # Move the arm by increments
            quat = [
                pose['orientation'].x, pose['orientation'].y,
                pose['orientation'].z, pose['orientation'].w
            ]
            self.ik(self.limb, pos, quat)
        # Once depth has been detected, change orientation of gripper to that of the shaker, defined when initialized
        pose = self.arm_viewer.endpoint_pose()
        pos = [pose['position'].x, pose['position'].y, pose['position'].z]
        if self.limb == 'left':
            self.ik('left', pos, self.left_orientation_shaker,
                    True)  # Hand-shaking position for left arm
        else:
            self.ik('right', pos, self.right_orientation_shaker,
                    True)  # Hand-shaking position for right arm
        print "Position reached...Moving to Instructor position"
        self.follow_up()  # Follow_up instructions

    def with_check(self, __matrix, up):
        height, width, depth = self.frame  #camera frame dimensions
        print self.hand_area
        while self.z_camera is None and self.torque < 20 and self.hand_area > 5000:  # As long as hand area is detected
            if self.hand_area > 5000:
                n = self.unit_vector_to_point(width / 2, height / 2)
                u_vector_gripper = self.unit_vector_gripper_frame(n)
                u_vector = np.dot(
                    __matrix,
                    np.matrix([[u_vector_gripper[0]], [u_vector_gripper[1]],
                               [u_vector_gripper[2]]]))
                if self.limb == 'left':
                    if up == True:
                        u_vector[2] = -u_vector[2]
                elif self.limb == 'right':
                    print self.limb
                    if up != True:
                        u_vector[2] = -u_vector[2]
                pose = self.arm_viewer.endpoint_pose()
                pos = [
                    pose['position'].x + (u_vector[0] / 20),
                    pose['position'].y + (u_vector[1] / 20),
                    pose['position'].z + (u_vector[2] / 20)
                ]
                quat = [
                    pose['orientation'].x, pose['orientation'].y,
                    pose['orientation'].z, pose['orientation'].w
                ]
                self.ik(self.limb, pos, quat)
            else:
                print "No hands detected..."
                return 0

        if self.hand_area > 5000:
            pose = self.arm_viewer.endpoint_pose()
            pos = [pose['position'].x, pose['position'].y, pose['position'].z]
            self.ik('left', pos, self.left_orientation_shaker, True)
            print "Position reached...Moving to Instructor position"
            #self.follow_up()
        else:
            print "No hands detected for final alignment"

    def ik(self, limb, pos, quat, block=None, arm=None):
        '''
    This function uses inverse kinematics to calculate the joint states given a certain pose.
    It also applies the joint states to the specified arms, i.e moves the arm. Arguments:
    - limb : the limb which is to be moved. If not specified, limb is viewer limb
    - pos,quat : pose for which joint solutions are desired
    - block - if block is None, the motion of the joints will be done by set_joint_positions.
    Else, it will be done by move_to_joint_positions.
    set_joint_positions allows the operation to be interrupted (used when moving the arm towards the hand).
    move_to_joint_positions cannot be interrupted, and is used when aligning the end effector.

    ******* CORE OF FUNCTION IS FROM /BAXTER_EXAMPLES/IK_SERVICE_CLIENT.PY********
    '''
        if arm is None:
            arm = self.arm_viewer

        ns = "ExternalTools/" + limb + "/PositionKinematicsNode/IKService"
        iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
        ikreq = SolvePositionIKRequest()
        hdr = Header(stamp=rospy.Time.now(), frame_id='base')
        poses = {
            'left':
            PoseStamped(
                header=hdr,
                pose=Pose(
                    position=Point(
                        x=pos[0],
                        y=pos[1],
                        z=pos[2],
                    ),
                    orientation=Quaternion(
                        x=quat[0],
                        y=quat[1],
                        z=quat[2],
                        w=quat[3],
                    ),
                ),
            ),
            'right':
            PoseStamped(
                header=hdr,
                pose=Pose(
                    position=Point(
                        x=pos[0],
                        y=pos[1],
                        z=pos[2],
                    ),
                    orientation=Quaternion(
                        x=quat[0],
                        y=quat[1],
                        z=quat[2],
                        w=quat[3],
                    ),
                ),
            ),
        }

        ikreq.pose_stamp.append(poses[limb])
        try:
            rospy.wait_for_service(ns, 5.0)
            resp = iksvc(ikreq)

        except (rospy.ServiceException, rospy.ROSException) as e:
            rospy.logerr("Service call failed: %s" % (e, ))
            return 1

        # Check if result valid, and type of seed ultimately used to get solution
        # convert rospy's string representation of uint8[]'s to int's
        resp_seeds = struct.unpack('<%dB' % len(resp.result_type),
                                   resp.result_type)
        if (resp_seeds[0] != resp.RESULT_INVALID):
            seed_str = {
                ikreq.SEED_USER: 'User Provided Seed',
                ikreq.SEED_CURRENT: 'Current Joint Angles',
                ikreq.SEED_NS_MAP: 'Nullspace Setpoints',
            }.get(resp_seeds[0], 'None')
            #print("SUCCESS - Valid Joint Solution Found from Seed Type: %s" %
            #      (seed_str,))
            # Format the solution into a Limb-API-compatible dictionary
            joint_solution = dict(
                zip(resp.joints[0].name, resp.joints[0].position))
            if block == None:
                arm.set_joint_positions(joint_solution)
            else:
                arm.move_to_joint_positions(joint_solution)
        else:
            print("INVALID POSE - No Valid Joint Solution Found.")
        return 0
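
# A minimal usage sketch for the get_pos class above (not part of the original
# file); the node name, limb choice and settle delay are assumptions:
if __name__ == '__main__':
    rospy.init_node('hand_reach_demo')
    reacher = get_pos('left', 'right', True)   # viewer arm, checker arm, reset cameras
    reacher.get_starting_pos()
    rospy.sleep(2.0)          # give the camera and sensor callbacks time to run
    reacher.xy_translation()  # drive the viewer arm towards the detected hand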
class GridSonarSrv():
    def __init__(self, SONAR_FRAME, MAP_FRAME, ROBOT_FRAME):
        # GOAL_LIST_DIR = rospy.get_param('~goal_list_dir')
        # self.path = MAP_DIR
        self.sonar_frame = SONAR_FRAME
        self.map_frame = MAP_FRAME
        self.robot_frame = ROBOT_FRAME
        self.mapinfo = OccupancyGrid()
        self.pose_msg = PoseStamped()
        self.sonar_msg = []

        self.tf_listener = TransformListener()
        rospy.Service('api/grid_sonar', GridSonar, self.handle_gridsonar)
        rospy.Service('api/rt_grid_sonar', GridSonar, self.handle_realtimesonar)
        rospy.Subscriber("map", OccupancyGrid, self.map_callback, queue_size=1)
        rospy.Subscriber('range_dist', Sonar, self.sonar_callback, queue_size=1)
        rospy.Subscriber('pose', PoseStamped, self.pose_callback, queue_size=1)

    def pose_callback(self, msg):
        self.pose_msg = msg

    def handle_realtimesonar(self, req):
        i = 0
        sonar = {}
        rot = self.get_rotate_matrix()[0:3]

        for t in self.sonar_frame:
            pt = {}
            sonar_pt = PointStamped()
            sonar_pt.header.frame_id = t
            # transform sonar center point from sonar to base_link
            # self.tf_listener.waitForTransform(self.map_frame, t, rospy.Time.now(),
            #                                   rospy.Duration(1.0))
            temp = self.tf_listener.transformPoint(self.robot_frame, sonar_pt)

            # calc sonar center coordinate in map
            # p = np.dot([temp.point.x, temp.point.y, 0], rot)
            # p = [p[0]+self.pose_msg.pose.position.x, p[1]+self.pose_msg.pose.position.y, 0]
            p = [temp.point.x * rot[0][0] + temp.point.y * rot[1][0] + self.pose_msg.pose.position.x,
                 temp.point.x * rot[0][1] + temp.point.y * rot[1][1] + self.pose_msg.pose.position.y]

            # calc sonar center grid in map
            start_x = int((p[0] - self.mapinfo.info.origin.position.x)
                          / self.mapinfo.info.resolution)
            start_y = int((p[1] - self.mapinfo.info.origin.position.y)
                          / self.mapinfo.info.resolution)

            # calc sonar end point from sonar to base_link
            sonar_pt.point.x = self.sonar_msg[i] / 100.0
            i += 1
            temp = self.tf_listener.transformPoint(self.robot_frame, sonar_pt)
            # calc sonar end coordinate in map
            # p = np.dot([temp.point.x, temp.point.y, 0], rot)
            # p = [p[0]+self.pose_msg.pose.position.x, p[1]+self.pose_msg.pose.position.y, 0]
            p = [temp.point.x * rot[0][0] + temp.point.y * rot[1][0] + self.pose_msg.pose.position.x,
                 temp.point.x * rot[0][1] + temp.point.y * rot[1][1] + self.pose_msg.pose.position.y]

            # calc sonar end grid in map
            end_x = int((p[0] - self.mapinfo.info.origin.position.x)
                        / self.mapinfo.info.resolution)
            end_y = int((p[1] - self.mapinfo.info.origin.position.y)
                        / self.mapinfo.info.resolution)

            pt['start'] = [start_x, start_y]
            pt['end'] = [end_x, end_y]
            sonar[t] = pt

        mapInfo = {}
        mapInfo['gridWidth'] = self.mapinfo.info.width
        mapInfo['gridHeight'] = self.mapinfo.info.height
        mapInfo['resolution'] = self.mapinfo.info.resolution

        res = GridSonarResponse()
        res.gridPoint = json.dumps(sonar)
        res.mapInfo = json.dumps(mapInfo)
        res.msg = 'success'
        return res

    def get_rotate_matrix(self):
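        # Negating x, y and z yields the conjugate (inverse) quaternion, so the
        # returned 4x4 matrix holds the transpose of the robot's rotation; the
        # callers above index it in transposed order (rot[row][col] swapped),
        # which amounts to applying the robot's map-frame rotation to the
        # base_link points before adding the pose translation.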
        quat = (-self.pose_msg.pose.orientation.x, -self.pose_msg.pose.orientation.y,
                -self.pose_msg.pose.orientation.z, self.pose_msg.pose.orientation.w)
        return self.tf_listener.fromTranslationRotation(translation=(0, 0, 0),
                                                        rotation=quat)

    def handle_gridsonar(self, req):
        i = 0
        sonar = {}
        for t in self.sonar_frame:
            pt = {}
            sonar_pt = PointStamped()
            sonar_pt.header.frame_id = t
            # transform sonar center point from sonar to base_link
            self.tf_listener.waitForTransform(self.robot_frame, t, rospy.Time.now(),
                                              rospy.Duration(1.0))
            temp = self.tf_listener.transformPoint(self.robot_frame, sonar_pt)
            # calc sonar center grid in map
            # start_x = (int)((temp.point.x - self.mapinfo.info.origin.position.x)
            #                 / self.mapinfo.info.resolution)
            # start_y = (int)((temp.point.y - self.mapinfo.info.origin.position.y)
            #                 / self.mapinfo.info.resolution)
            start_x = int(temp.point.x / 0.01)
            start_y = int(temp.point.y / 0.01)

            # calc sonar end point from sonar to base_link
            sonar_pt.point.x = self.sonar_msg[i] / 100.0
            i += 1
            temp = self.tf_listener.transformPoint(self.robot_frame, sonar_pt)
            # calc sonar end grid in map
            # end_x = (int)((temp.point.x - self.mapinfo.info.origin.position.x)
            #               / self.mapinfo.info.resolution)
            # end_y = (int)((temp.point.y - self.mapinfo.info.origin.position.y)
            #               / self.mapinfo.info.resolution)
            end_x = (int)(temp.point.x / 0.01)
            end_y = (int)(temp.point.y / 0.01)

            pt['start'] = [start_x, start_y]
            pt['end'] = [end_x, end_y]
            pt['range'] = sonar_pt.point.x
            sonar[t] = pt

        mapInfo = {}
        # mapInfo['gridWidth'] = self.mapinfo.info.width
        # mapInfo['gridHeight'] = self.mapinfo.info.height
        # mapInfo['resolution'] = self.mapinfo.info.resolution

        res = GridSonarResponse()
        res.gridPoint = json.dumps(sonar)
        res.mapInfo = json.dumps(mapInfo)
        res.msg = 'success'
        return res

    def map_callback(self, msg):
        self.mapinfo.info = msg.info

    def sonar_callback(self, msg):
        self.sonar_msg = map(int, msg.ranges)
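
# A minimal helper sketch of the world-to-grid-cell conversion used in the
# handler above (and in the commented-out lines of handle_gridsonar):
# cell = int((world - map_origin) / resolution). `map_info` stands for a
# nav_msgs/MapMetaData such as self.mapinfo.info.
def world_to_grid(world_x, world_y, map_info):
    gx = int((world_x - map_info.origin.position.x) / map_info.resolution)
    gy = int((world_y - map_info.origin.position.y) / map_info.resolution)
    return gx, gy
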
Example #5
class Grid:
    """Occupancy Grid."""
    def __init__(self, map_msg):
        self.header = map_msg.header
        self.origin_translation = [
            map_msg.info.origin.position.x, map_msg.info.origin.position.y,
            map_msg.info.origin.position.z
        ]
        self.origin_quaternion = [
            map_msg.info.origin.orientation.x,
            map_msg.info.origin.orientation.y,
            map_msg.info.origin.orientation.z,
            map_msg.info.origin.orientation.w
        ]
        self.grid = np.reshape(
            map_msg.data, (map_msg.info.height,
                           map_msg.info.width))  # shape: 0: height, 1: width.
        self.resolution = map_msg.info.resolution  # cell size in meters.

        self.tf_listener = TransformListener()  # Transformation listener.

        self.transformation_matrix_map_grid = self.tf_listener.fromTranslationRotation(
            self.origin_translation, self.origin_quaternion)

        self.transformation_matrix_grid_map = np.linalg.inv(
            self.transformation_matrix_map_grid)

        # Array to check neighbors.
        self.cell_radius = int(10 / self.resolution)  # TODO parameter
        self.footprint = np.ones((self.cell_radius + 1, self.cell_radius + 1))
        # self.plot()

    def plot(self):
        fig1 = plt.gcf()
        plt.imshow(self.grid)
        plt.colorbar()
        plt.gca().invert_yaxis()
        plt.xlabel('xlabel', fontsize=18)
        plt.ylabel('ylabel', fontsize=16)
        fig1.savefig('map.png', dpi=100)

    def cell_at(self, x, y):
        """Return cell value at x (column), y (row)."""
        return self.grid[int(y), int(x)]
        # return self.grid[x + y * self.width]

    def is_free(self, x, y):
        if self.within_boundaries(x, y):
            return 0 <= self.cell_at(x, y) < 50  # TODO: set values.
        else:
            return False

    def is_obstacle(self, x, y):
        if self.within_boundaries(x, y):
            return self.cell_at(x, y) >= 50  # TODO: set values.
        else:
            return False

    def is_unknown(self, x, y):
        if self.within_boundaries(x, y):
            return self.cell_at(x, y) < 0  # TODO: set values.
        else:
            return True

    def within_boundaries(self, x, y):
        if 0 <= y < self.grid.shape[0] and 0 <= x < self.grid.shape[1]:
            return True
        else:
            return False

    def convert_coordinates_i_to_xy(self, i):
        """Convert a flattened-array index into (x, y) grid coordinates."""
        x = i % self.grid.shape[1]   # column
        y = i // self.grid.shape[1]  # row
        return x, y

    def wall_cells(self):
        """
        Return only *wall cells* -- i.e. obstacle cells that have free or
            unknown cells as neighbors -- as columns and rows.
        """

        # Array to check neighbors.
        window = np.asarray([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
        # Return all cells that are obstacles and satisfy the neighbor_condition
        neighbor_condition = self.grid > minimum_filter(
            self.grid, footprint=window, mode='constant',
            cval=10000)  # TODO: value.
        obstacles = np.nonzero((self.grid >= OCCUPIED) & neighbor_condition)
        obstacles = np.stack((obstacles[1], obstacles[0]),
                             axis=1)  # 1st column x, 2nd column, y.

        return obstacles

    def is_frontier(self, previous_cell, current_cell, distance):
        """current_cell a frontier?"""
        v = previous_cell - current_cell
        u = v / np.linalg.norm(v)

        end_cell = current_cell - distance * u

        x1, y1 = current_cell.astype(int)
        x2, y2 = end_cell.astype(int)
        # rospy.logerr("FP cell: {}, CP cell: {}".format((x1,y1,),(x2,y2)))
        for p in list(bresenham(x1, y1, x2, y2)):
            if self.is_unknown(*p):
                return True
            elif self.is_obstacle(*p):
                return False
        return False

    def line_in_unknown(self, start_cell, end_cell):
        x1, y1 = start_cell.astype(int)
        x2, y2 = end_cell.astype(int)

        unknown_cells_counter = 0
        for p in list(bresenham(x1, y1, x2, y2)):
            if self.is_unknown(*p):
                unknown_cells_counter += 1
            elif self.is_obstacle(*p):
                return False
        return unknown_cells_counter

    def unknown_area_approximate(self, cell):
        """Approximate unknown area with the robot at cell."""
        cell_x = int(cell[INDEX_FOR_X])
        cell_y = int(cell[INDEX_FOR_Y])

        min_x = np.max((0, cell_x - self.cell_radius))
        max_x = np.min((self.grid.shape[1], cell_x + self.cell_radius + 1))
        min_y = np.max((0, cell_y - self.cell_radius))
        max_y = np.min((self.grid.shape[0], cell_y + self.cell_radius + 1))

        return (self.grid[min_y:max_y, min_x:max_x] < FREE).sum()

    def unknown_area(self, cell):  # TODO orientation of the robot if fov is not 360 degrees
        """Return unknown area with the robot at cell"""
        unknown_cells = set()

        shadow_angle = set()
        cell_x = int(cell[INDEX_FOR_X])
        cell_y = int(cell[INDEX_FOR_Y])
        for d in np.arange(1, self.cell_radius):  # TODO orientation
            for x in xrange(cell_x - d, cell_x + d + 1):  # go over x axis
                for y in range(cell_y - d, cell_y + d + 1):  # go over y axis
                    if self.within_boundaries(x, y):
                        angle = np.around(np.rad2deg(pu.theta(cell, [x, y])),
                                          decimals=1)  # TODO parameter
                        if angle not in shadow_angle:
                            if self.is_obstacle(x, y):
                                shadow_angle.add(angle)
                            elif self.is_unknown(x, y):
                                unknown_cells.add((x, y))

        return len(unknown_cells)

    def pose_to_grid(self, pose):
        """Pose (x,y) in header.frame_id to grid coordinates"""
        # Get transformation matrix map-occupancy grid.
        return (self.transformation_matrix_grid_map.dot([
            pose[0], pose[1], 0, 1
        ]))[0:2] / self.resolution  # TODO check int.

    def grid_to_pose(self, grid_coordinate):
        """Pose (x,y) in grid coordinates to pose in frame_id"""
        # Get transformation matrix map-occupancy grid.
        return (self.transformation_matrix_map_grid.dot(
            np.array([
                grid_coordinate[0] * self.resolution,
                grid_coordinate[1] * self.resolution, 0, 1
            ])))[0:2]

    def get_explored_region(self):
        """ Get all the explored cells on the grid map"""

        # TODO refactor the code, right now hardcoded to quickly solve the problem of non-common areas.
        # TODO more in general use matrices.
        def nearest_multiple(number, res=0.2):
            return np.round(res * np.floor(round(number / res, 2)), 1)

        p_in_sender = PointStamped()
        p_in_sender.header = self.header

        poses = set()
        self.tf_listener.waitForTransform("robot_0/map", self.header.frame_id,
                                          rospy.Time(), rospy.Duration(4.0))
        p_in_sender.header.stamp = rospy.Time()
        for x in range(self.grid.shape[1]):
            for y in range(self.grid.shape[0]):
                if self.is_free(x, y):
                    p = self.grid_to_pose((x, y))
                    p_in_sender.point.x = p[0]
                    p_in_sender.point.y = p[1]

                    p_in_common_ref_frame = self.tf_listener.transformPoint(
                        "robot_0/map", p_in_sender).point
                    poses.add((nearest_multiple(p_in_common_ref_frame.x),
                               nearest_multiple(p_in_common_ref_frame.y)))
        return poses
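
# Minimal usage sketch for Grid: build it from a nav_msgs/OccupancyGrid message
# and query a few cells. The '/map' topic and the demo callback name are
# illustrative assumptions, not taken from the class above.
def _grid_demo(map_msg):
    grid = Grid(map_msg)
    cx, cy = grid.pose_to_grid((0.0, 0.0))  # map-frame metres -> grid cells
    print grid.is_free(cx, cy), grid.is_obstacle(cx, cy), grid.is_unknown(cx, cy)
    print 'wall cells:', grid.wall_cells().shape

# rospy.init_node('grid_demo')
# rospy.Subscriber('/map', OccupancyGrid, _grid_demo, queue_size=1)
# rospy.spin()
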
class SimAdapter(object):

    def __init__(self):
        pass
        
    def start(self):
        rospy.init_node('sim_adapter')
        self.init_params()
        self.init_state()
        self.init_vars()
        self.init_publishers()
        self.init_subscribers()
        self.init_timers()
        rospy.spin()
        
    def init_params(self):
        self.param_model = rospy.get_param("~model", "simple_nonlinear")
        self.rate = rospy.get_param("~rate", 40)
        self.publish_state_estimate = rospy.get_param("~publish_state_estimate", True)
        self.publish_odometry_messages = rospy.get_param("~publish_odometry", True)
        self.sim_odom_topic = rospy.get_param("~sim_odom_topic", "odom")
        
    def init_state(self):
        if self.param_model == "simple_nonlinear":
            self.model = SimpleNonlinearModel()
        elif self.param_model == "linear":
            self.model = LinearModel()
        else:
            rospy.logerr("Model type '%s' unknown" % self.param_model)
            raise Exception("Model type '%s' unknown" % self.param_model)

    def init_vars(self):
        self.latest_cmd_msg = control_mode_output()
        self.motor_enable = False
        self.thrust_cmd = 0.0
        self.tfl = TransformListener()
        self.T_vicon_imu = None
        self.T_imu_vicon = None
        self.T_enu_ned = None
        
    def init_publishers(self):
        # Publishers
        if self.publish_odometry_messages:
            self.pub_odom = rospy.Publisher(self.sim_odom_topic, Odometry, queue_size=1)
        if self.publish_state_estimate:
            self.pub_state = rospy.Publisher('state', TransformStamped, queue_size=1)
        
    def init_subscribers(self):
        # Subscribers
        self.control_input_sub = rospy.Subscriber('controller_mux/output', control_mode_output, self.control_input_callback)
        self.motor_enable_sub = rospy.Subscriber('teleop_flyer/motor_enable', Bool, self.motor_enable_callback)
      
    def init_timers(self):
        self.simulation_timer = rospy.Timer(rospy.Duration(1.0/self.rate), self.simulation_timer_callback)
                                      
    # Subscriber callbacks:
    def control_input_callback(self, msg):
        rospy.logdebug('Current command is: ' + str(msg))
        self.latest_cmd_msg = msg
    
    def motor_enable_callback(self, msg):
        if msg.data != self.motor_enable:
            #rospy.loginfo('Motor enable: ' + str(msg.data))
            self.motor_enable = msg.data
    
    # Timer callbacks:
    def simulation_timer_callback(self, event):
        if False:
            print event.__dict__
#            print 'last_expected:        ', event.last_expected
#            print 'last_real:            ', event.last_real
#            print 'current_expected:     ', event.current_expected
#            print 'current_real:         ', event.current_real
#            print 'current_error:        ', (event.current_real - event.current_expected).to_sec()
#            print 'profile.last_duration:', event.last_duration.to_sec()
#            if event.last_real:
#                print 'last_error:           ', (event.last_real - event.last_expected).to_sec(), 'secs'
#            print
        if event.last_real is None:
            dt = 0.0
        else:
            dt = (event.current_real - event.last_real).to_sec()
            self.update_controller(dt)
            self.update_state(dt)
        #rospy.loginfo("position: " + str(self.position) + " velocity: " + str(self.velocity) + " thrust_cmd: " + str(self.thrust_cmd))
        if self.publish_odometry_messages:
            self.publish_odometry()
        if self.publish_state_estimate:
            self.publish_state(event.current_real)
        
    def update_state(self, dt):
        # The following model is completely arbitrary and should not be taken to be representative of
        # real vehicle performance!
        # But, it should be good enough to test out control modes etc.
        self.model.update(dt)
            
    def update_controller(self, dt):
        lcm = self.latest_cmd_msg
        self.model.set_input(lcm.motors_on, lcm.roll_cmd, lcm.pitch_cmd, lcm.direct_yaw_rate_commands, 
                  lcm.yaw_cmd, lcm.yaw_rate_cmd, lcm.direct_thrust_commands, lcm.alt_cmd, lcm.thrust_cmd)
        #rospy.loginfo("thrust_cmd = %f, dt = %f" % (self.thrust_cmd, dt))
                    
    def publish_odometry(self):
        odom_msg = Odometry()
        odom_msg.header.stamp = rospy.Time.now()
        odom_msg.header.frame_id = "/ned"
        odom_msg.child_frame_id = "/simflyer1/flyer_imu"
        oppp = odom_msg.pose.pose.position
        oppp.x, oppp.y, oppp.z  = self.model.get_position()
        ottl = odom_msg.twist.twist.linear
        ottl.x, ottl.y, ottl.z = self.model.get_velocity()
        oppo = odom_msg.pose.pose.orientation
        oppo.x, oppo.y, oppo.z, oppo.w = self.model.get_orientation()
        otta = odom_msg.twist.twist.angular
        otta.x, otta.y, otta.z = self.model.get_angular_velocity()
        self.pub_odom.publish(odom_msg)
        
    def publish_state(self, t):
        state_msg = TransformStamped()
        t_ned_imu = tft.translation_matrix(self.model.get_position())
        R_ned_imu = tft.quaternion_matrix(self.model.get_orientation())
        T_ned_imu = tft.concatenate_matrices(t_ned_imu, R_ned_imu)
        #rospy.loginfo("T_ned_imu = \n" + str(T_ned_imu))
        if self.T_imu_vicon is None:
            # grab the static transform from imu to vicon frame from param server:
            frames = rospy.get_param("frames", None)
            ypr = frames['vicon_to_imu']['rotation']
            q_vicon_imu = tft.quaternion_from_euler(*[radians(x) for x in ypr], axes='rzyx') # xyzw
            R_vicon_imu = tft.quaternion_matrix(q_vicon_imu)
            t_vicon_imu = tft.translation_matrix(frames['vicon_to_imu']['translation'])
#            rospy.loginfo(str(R_vicon_imu))
#            rospy.loginfo(str(t_vicon_imu))
            self.T_vicon_imu = tft.concatenate_matrices(t_vicon_imu, R_vicon_imu)
            self.T_imu_vicon = tft.inverse_matrix(self.T_vicon_imu)
            self.T_enu_ned = tft.euler_matrix(radians(90), 0, radians(180), 'rzyx')
            rospy.loginfo(str(self.T_enu_ned))
            rospy.loginfo(str(self.T_imu_vicon))
            #rospy.loginfo(str(T_vicon_imu))
        # we have /ned -> /imu, need to output /enu -> /vicon:
        T_enu_vicon = tft.concatenate_matrices(self.T_enu_ned, T_ned_imu, self.T_imu_vicon )
        state_msg.header.stamp  = t
        state_msg.header.frame_id = "/enu"
        state_msg.child_frame_id = "/simflyer1/flyer_vicon"
        stt = state_msg.transform.translation
        stt.x, stt.y, stt.z = T_enu_vicon[:3,3]
        stro = state_msg.transform.rotation
        stro.x, stro.y, stro.z, stro.w = tft.quaternion_from_matrix(T_enu_vicon)
        
        self.pub_state.publish(state_msg)
        
    def get_transform(self, tgt, src):
        t = self.tfl.getLatestCommonTime(tgt, src)
        t_src_tgt, q_src_tgt = self.tfl.lookupTransform(tgt, src, t)
        T_src_tgt = self.tfl.fromTranslationRotation(t_src_tgt, q_src_tgt)
        return T_src_tgt
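
# A minimal entry-point sketch: SimAdapter.start() initialises the node and
# spins, so launching it only requires constructing the adapter.
if __name__ == '__main__':
    adapter = SimAdapter()
    adapter.start()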
Example #7
class FindObstacles(object):
    # we increment self.id at every complete scan.

    # per complete scan, we publish (potentially) multiple PolygonStamped
    # msgs to /obstacles, all with the same header.seq value (self.id)
    def __init__(self, viz=True, experiment=None):
        self.id = 1
        rospy.init_node('obstacles_node')
        self.tf = TransformListener()
        self.num_obj = 11

        pub = rospy.Publisher('/obstacles', Polygons, queue_size=50)

        obstacle_viz = rospy.Publisher('/obstacles_viz',
                                       MarkerArray,
                                       queue_size=50)

        if experiment:
            if experiment == "-b":
                #print("INIT EXPERIMENT")
                goal_pub = rospy.Publisher('/goal_state',
                                           Point32,
                                           queue_size=10)
                human_obs_pub = rospy.Publisher('obstacle2_poly',
                                                PolygonStamped,
                                                queue_size=1)
                human_v_pub = rospy.Publisher('obstacle2_v',
                                              Twist,
                                              queue_size=1)
                self.num_obj = 1
            if experiment == '-6':
                #print("INIT EXPERIMENT")
                human_pub = rospy.Publisher('/human', Point32, queue_size=10)
                human_obs_pub = rospy.Publisher('obstacle2_poly',
                                                PolygonStamped,
                                                queue_size=1)
                human_v_pub = rospy.Publisher('obstacle2_v',
                                              Twist,
                                              queue_size=1)
                self.num_obj = 1
            if experiment == '-g':
                #print("MODE SWITCHING ENABLED")
                goal_pub = rospy.Publisher('/goal_state',
                                           Point32,
                                           queue_size=10)
                human_pub = rospy.Publisher('/human', Point32, queue_size=10)
                human_obs_pub = rospy.Publisher('obstacle2_poly',
                                                Polygons,
                                                queue_size=1)
                human_v_pub = rospy.Publisher('obstacle2_v',
                                              Twist,
                                              queue_size=1)
                self.num_obj = 3

        # keep track of real lines from the last frame
        self.prev_real_lines = set()
        # keep track of img and lines from the last frame
        self.latest_img, self.latest_lines = np.zeros((GRID, GRID)), []

        self.x = [0, 0, 0]
        self.prev = None
        self.prevHuman = None

        # MODE 0: Goal Following, 1: Assistant
        self.mode = 0

        def odom_callback(msg):
            _, _, yaw = euler_from_quaternion([
                msg.pose.pose.orientation.x, msg.pose.pose.orientation.y,
                msg.pose.pose.orientation.z, msg.pose.pose.orientation.w
            ])
            self.x = [msg.pose.pose.position.x, msg.pose.pose.position.y, yaw]
            ##print("turtlebot ODOM", self.x)

        def mode_callback(msg):
            self.mode = msg.data

        rospy.Subscriber('/odom', Odometry, odom_callback)
        rospy.Subscriber('/mode', UInt16, mode_callback)

        def find_lines_and_pub(lidar_data):
            #lines, img = find_lines(lidar_data, self.prev_real_lines, self.x)

            # apply transformation
            try:
                pos, qu = self.tf.lookupTransform("/odom", "/base_scan",
                                                  rospy.Time())
                ##print("TRANS", pos, qu)
                t_mat = self.tf.fromTranslationRotation(pos, qu)

                n = len(lidar_data.points)
                p_mat = np.ones((4, n))
                for i in range(n):
                    pt = lidar_data.points[i]
                    p_mat[0, i] = pt.x
                    p_mat[1, i] = pt.y
                    p_mat[2, i] = 0
                p_trans = np.dot(t_mat, p_mat)

            except Exception:
                # the tf lookup can fail transiently (extrapolation/connectivity); skip this frame
                return

            # process line data into usable form
            lines = set()
            for i in range(n / 2):
                line = ((p_trans[0, 2 * i], p_trans[1, 2 * i]),
                        (p_trans[0, 2 * i + 1], p_trans[1, 2 * i + 1]))
                lines.add(line)

            polys, obs = make_polys(lines, pos)

            # associates meas obs with previous obs
            obs_fil, flag = data_association(obs, self.prev, self.num_obj)

            pts = []
            visual = MarkerArray()

            ctr = 1
            for o in obs_fil:

                # inits kf if one doesn't exist for this obs
                if o.kf == None:
                    #print("INITIALIZING KF")
                    o.kf = o.init_kalman()

                # update kalman
                meas = np.append(o.center, o.area)
                o.mean, o.cov = o.kf.filter_update(o.mean,
                                                   o.cov,
                                                   observation=meas)

                # update
                o.center = [o.mean[0], o.mean[3]]
                o.area = o.mean[6]

                # visualization
                add = Point()
                add.x = o.mean[0]
                add.y = o.mean[3]
                pts.append(add)

                visual.markers.append(o.get_marker_meas(ctr))
                o.update_poly()
                visual.markers.append(o.get_marker(ctr))
                ctr += 1

            marker = Marker()
            marker.header.frame_id = "/odom"
            marker.id = 0
            marker.type = marker.POINTS
            marker.action = marker.ADD
            marker.scale.x = 0.2
            marker.scale.y = 0.2
            marker.scale.z = 0.2
            marker.color.b = 1.0
            marker.color.a = 1.0
            marker.points = pts
            visual.markers.append(marker)
            obstacle_viz.publish(visual)

            # only allow "good" measurements to be the references
            if len(obs_fil) == self.num_obj and not flag:
                self.prev = obs_fil

            # #print("Kalman est:", self.mean)
            if experiment:
                if experiment == "-b":
                    if len(obs_fil) == self.num_obj:
                        o = obs_fil[0]
                        cen = o.center_sq()
                        goal = Point32()
                        goal.x = cen[0]
                        goal.y = cen[1]
                        #goal_pub.publish(goal)
                        #human_pub.publish(o.poly)
                        #human_v_pub.publish(o.toVel())
                if experiment == "-6":
                    if len(obs_fil) == self.num_obj:
                        o = obs_fil[0]
                        cen = o.center_sq()
                        goal = Point32()
                        goal.x = cen[0]
                        goal.y = cen[1]
                        human_pub.publish(goal)
                        human_obs_pub.publish(o.poly)
                        human_v_pub.publish(o.toVel())
                if experiment == "-g":
                    if len(
                            obs_fil
                    ) >= self.num_obj:  # currently hard-coded to assume num_obj objects
                        polys = Polygons()
                        o = checkHuman(obs_fil, prevHuman=self.prevHuman)
                        self.prevHuman = o
                        cen = o.center_sq()
                        human_cen = Point32()
                        human_cen.x = cen[0]
                        human_cen.y = cen[1]
                        for obj in obs_fil:
                            polys.polygons.append(obj.poly)
                            polys.vels.append(obj.toVel())

                        if self.mode == 0:
                            human_pub.publish(human_cen)
                            human_obs_pub.publish(polys)
                            human_v_pub.publish(o.toVel())
                        elif self.mode == 1:
                            goal = Point32()
                            goal.x = human_cen.x
                            goal.y = human_cen.y - 1.5
                            goal_pub.publish(goal)
                            human_pub.publish(human_cen)
                            human_obs_pub.publish(polys)
                            human_v_pub.publish(o.toVel())

        rospy.Subscriber('/publish_line_markers', Marker, find_lines_and_pub)
        #rospy.Subscriber('/scan', LaserScan, find_lines_and_pub)
        #rospy.Subscriber('/scan_filtered', LaserScan, find_lines_and_pub)
        rospy.spin()
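
# A minimal entry-point sketch: FindObstacles.__init__ already calls
# rospy.init_node() and rospy.spin(). Reading the experiment flag from the
# command line is an illustrative assumption, not taken from the class above.
import sys

if __name__ == '__main__':
    flag = sys.argv[1] if len(sys.argv) > 1 else None  # e.g. '-b', '-6' or '-g'
    FindObstacles(viz=True, experiment=flag)
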
class StatePredictionNode:
    """ This class contains the methods for state prediction """
    def __init__(self):

        rospy.init_node('odometry', anonymous=True)
        self.dt = .02
        self.control_voltages = np.zeros([2, 1])

        self.vl_filter = MedianFilter()
        self.vr_filter = MedianFilter()
        self.vb_filter = MedianFilter()

        self.vl = 0  # left wheel velocity (after filter)
        self.vr = 0  # right wheel velocity (after filter)
        self.vb = 0  # base angular velocity (after filter)

        self.base_imu_ready = False  # base imu is slower than the other IMU's
        # we can't update base velocity all the time

        Q = np.zeros([7, 7])
        Q[0, 0] = 1 * self.dt
        Q[1, 1] = 1 * self.dt
        R = np.eye(3) * .00001
        starting_state = np.zeros([7, 1])
        self.r = .15
        self.l = .55
        self.ekf = ExtendedWMRKalmanFilter(self.r, self.l, Q, R, 2.3364e-04,
                                           -.65, 2.3364e-04, -.65,
                                           starting_state, self.dt)
        self.listener = TransformListener()

        self.i = 0  # print index
        self.j = 0  # move camera index

        self.imus_observed_list = []
        self.ar_observed_list = []
        # map from tag_id sensed to np.array([dx, dy, dtheta_z])
        self.sensed_ar_diff = {}
        self.landmark_map = {}  # map from id to AR TAG obj

        ## ADD PREDEFINED MAPS
        #(tag_num, slam_id, x, y, z, theta_x, theta_y, theta_z, fixed = True)
        self.landmark_map[0] = ARTAG_landmark(0, 1, 0, 1.37, .10, math.pi / 2,
                                              0, 0)
        self.landmark_map[1] = ARTAG_landmark(13, 2, -1.74, 0, .15,
                                              math.pi / 2, 0, math.pi / 2)
        self.landmark_map[3] = ARTAG_landmark(5, 3, 2.84, .15, .32,
                                              math.pi / 2, 0, -math.pi / 2)
        self.landmark_map[2] = ARTAG_landmark(9, 4, 1.37, 1.88, .06,
                                              math.pi / 2, 0, 0)
        self.ekf.add_AR_tag(self.landmark_map[0], np.zeros([3, 3]))
        self.ekf.add_AR_tag(self.landmark_map[1], np.zeros([3, 3]))
        self.ekf.add_AR_tag(self.landmark_map[2], np.zeros([3, 3]))
        self.ekf.add_AR_tag(self.landmark_map[3], np.zeros([3, 3]))
        self.br = tf.TransformBroadcaster()

        ## FILE WRITING INFO
        self.f = open('ekf_data.csv', 'w+')
        self.f.write('time, vl,vr,vx,vy,vtheta,x,y,theta,raw_vl,raw_vr\n')
        self.t0 = time.time()
        self.transform_written = False

        self.write_imu_measurements = [0, 0, 0]

        self.pub = rospy.Publisher('odometry', Odometry, queue_size=1)
        rospy.Subscriber("/imu1", Imu, self._imu1Callback)
        rospy.Subscriber("/imu2", Imu, self._imu2Callback)
        rospy.Subscriber("/imu3", Imu, self._imu3Callback)
        rospy.Subscriber("left_voltage_pwm", Int16, self._leftMotorCallback)
        rospy.Subscriber("right_voltage_pwm", Int16, self._rightMotorCallback)
        rospy.Subscriber("ar_pose_marker", AlvarMarkers, self._arCallback)
        rospy.Subscriber("scan", LaserScan, self._scanCallback)

        self.last_time = rospy.Time.now()

    def _scanCallback(self, data):
        return

    def _leftMotorCallback(self, data):
        self.control_voltages[0] = data.data

    def _rightMotorCallback(self, data):
        self.control_voltages[1] = data.data

    def _imu1Callback(self, data):
        self.imus_observed_list.append('imu1')
        left_wheel_v = -data.angular_velocity.x
        self.write_imu_measurements[0] = left_wheel_v
        if abs(left_wheel_v) < .05:
            self.vl_filter.add_data(0)
            self.vl = self.vl_filter.get_median()
            return

        self.vl_filter.add_data(left_wheel_v)
        self.vl = self.vl_filter.get_median()

    def _imu2Callback(self, data):
        self.imus_observed_list.append('imu2')
        right_wheel_v = -data.angular_velocity.x
        self.write_imu_measurements[1] = right_wheel_v
        if abs(right_wheel_v) < .05:
            self.vr_filter.add_data(0)
            self.vr = self.vr_filter.get_median()
            return

        self.vr_filter.add_data(right_wheel_v)
        self.vr = self.vr_filter.get_median()

    def _imu3Callback(self, data):
        self.imus_observed_list.append('imu3')
        self.vb = data.angular_velocity.z - .01
        self.write_imu_measurements[2] = self.vb
        if abs(self.vb) < .05:
            self.vb = 0
            return

    def _arCallback(self, data):
        for marker in data.markers:
            if marker.id in self.landmark_map:

                tag_frame = 'ar_marker_' + str(marker.id)
                t = self.listener.getLatestCommonTime(tag_frame, 'base')
                p_tr, q_tr = self.listener.lookupTransform(
                    tag_frame, 'base', t)
                H_tr = self.listener.fromTranslationRotation(p_tr, q_tr)

                # IGNORE READING IF IT IS TOO FAR AWAY
                if np.linalg.norm(p_tr) > 2.5:
                    return

                t = self.listener.getLatestCommonTime(tag_frame + '_ref',
                                                      'odom')
                p_ot, q_ot = self.listener.lookupTransform(
                    'odom', tag_frame + '_ref', t)
                H_ot = self.listener.fromTranslationRotation(p_ot, q_ot)

                H_ot = np.dot(H_ot, H_tr)
                angles = transformations.euler_from_matrix(H_ot[0:3, 0:3],
                                                           axes='sxyz')
                self.sensed_ar_diff[marker.id] = np.array([[H_ot[0, 3]],
                                                           [H_ot[1, 3]],
                                                           [angles[2]]])
                self.ar_observed_list.append(
                    marker.id)  # we observed an AR tag!

        return

    def _publish_odom(self):
        state = copy.deepcopy(self.ekf.current_state_estimate)

        ## BUILD AND SEND MSG
        quaternion = tf.transformations.quaternion_from_euler(0, 0, state[6])

        msg = Odometry()
        msg.header.stamp = rospy.Time.now()
        msg.pose.pose.position.x = state[4]
        msg.pose.pose.position.y = state[5]
        msg.pose.pose.position.z = 0
        msg.pose.pose.orientation.x = quaternion[0]
        msg.pose.pose.orientation.y = quaternion[1]
        msg.pose.pose.orientation.z = quaternion[2]
        msg.pose.pose.orientation.w = quaternion[3]

        msg.twist.twist.linear.x = state[2] * math.cos(state[6])
        msg.twist.twist.linear.y = state[2] * math.sin(state[6])
        msg.twist.twist.linear.z = 0
        msg.twist.twist.angular.z = state[3]

        self.pub.publish(msg)

    def _update_with_imus(self):
        if len(self.imus_observed_list) == 0:
            return

        # Initialize intermediate variables
        measurement_list = []
        H_list = []
        R_list = []
        num_states = self.ekf.current_state_estimate.shape[0]

        ## CHECK EACH SENSOR
        if 'imu1' in self.imus_observed_list:
            H_row = np.zeros([1, num_states])
            H_row[0, 0] = 1
            H_list.append(H_row)
            measurement_list.append(self.vl)
            R_list.append(.05)

        if 'imu2' in self.imus_observed_list:
            H_row = np.zeros([1, num_states])
            H_row[0, 1] = 1
            H_list.append(H_row)
            measurement_list.append(self.vr)
            R_list.append(.05)

        if 'imu3' in self.imus_observed_list:
            H_row = np.zeros([1, num_states])
            H_row[0, 3] = 1
            H_list.append(H_row)
            measurement_list.append(self.vb)
            R_list.append(4e-5)

        # BUILD H
        H = H_list[0]
        del H_list[0]
        for h_block in H_list:
            H = np.vstack((H, h_block))

        # BUILD MEASUREMENTS
        measurements = measurement_list[0]
        del measurement_list[0]
        for measurement in measurement_list:
            measurements = np.vstack((measurements, measurement))

        # BUILD R
        R = np.zeros([H.shape[0], H.shape[0]])
        r_index = 0  # element of diagonal to fill
        for r_elem in R_list:
            R[r_index, r_index] = r_elem
            r_index += 1

        # reset things
        self.imus_observed_list = []
        self.ekf.linear_measurement_update(measurements, H, R)

        # record imu measurements used to update kalman filter to write to a file

    def _update_with_landmarks(self):
        if len(self.ar_observed_list) == 0:
            return

        # CHECK ALL LANDMARKS
        for landmark_id in self.landmark_map.keys():
            if landmark_id in self.ar_observed_list:
                measurement = self.sensed_ar_diff[landmark_id]

                H = np.zeros([3, self.ekf.current_prob_estimate.shape[0]])
                H[:, 4:7] = np.eye(3)

                dist = np.linalg.norm(measurement[0:2])
                R = np.eye(3) * dist**3 * .1

                self.ekf.linear_measurement_update(measurement, H, R, True)
                #print measurement

        # reset things
        self.ar_observed_list = []
        self.sensed_ar_diff = {}

    def _publish_tf(self):
        # ROBOT TRANSFORM
        self.last_time = rospy.Time.now()
        state = copy.deepcopy(self.ekf.current_state_estimate)
        for landmark_id in self.landmark_map.keys():
            ar_obj = self.landmark_map[landmark_id]
            self.br.sendTransform((ar_obj.x, ar_obj.y, ar_obj.z),
                 tf.transformations.quaternion_from_euler(ar_obj.theta_x, ar_obj.theta_y, ar_obj.theta_z),\
                 self.last_time,\
                 "ar_marker_" + str(ar_obj.tag_num) + "_ref",\
                 "odom")

        self.br.sendTransform((state[4], state[5], 0),\
                 tf.transformations.quaternion_from_euler(0, 0, state[6]),\
                 self.last_time,\
                 "base_link",\
                 "odom")

        self.br.sendTransform(
            (0, 0, .1), tf.transformations.quaternion_from_euler(0, 0, 0),
            self.last_time, "laser", "base_link")
        self.br.sendTransform(
            (0, 0, 0), tf.transformations.quaternion_from_euler(0, 0, 0),
            self.last_time, "base", "base_link")
        return

    def _update_ekf(self):
        self.ekf.predict(self.control_voltages)
        self._update_with_imus()
        self._update_with_landmarks()

    def _print(self):
        """ Print info to the screen every once and a while """
        self.i += 1
        np.set_printoptions(precision=3, suppress=True)
        if self.i % 40 == 0:
            self.i = 0
            print self.ekf.current_state_estimate[4:7]

    def _write_to_file(self):
        state = copy.deepcopy(self.ekf.current_state_estimate)
        self.f.write(
            str(time.time() - self.t0) + ',' + str(state[0, 0]) + ',' +
            str(state[1, 0]) + ',' + str(state[2, 0]) + ',' +
            str(state[3, 0]) + ',' + str(state[4, 0]) + ',' +
            str(state[5, 0]) + ',' + str(state[6, 0]) + ',' +
            str(state[7, 0]) + ',' + str(self.write_imu_measurements[0]) +
            ',' + str(self.write_imu_measurements[1]) + ',' +
            str(self.write_imu_measurements[2]) + '\n')

    def spin(self):
        r = rospy.Rate(1 / self.dt)
        while not rospy.is_shutdown():
            self._publish_tf()
            self._update_ekf()
            self._print()
            self._publish_odom()
            self._write_to_file()

            self.control_voltages[0] = 0
            self.control_voltages[1] = 0
            r.sleep()
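
# A minimal entry-point sketch: StatePredictionNode.__init__ already calls
# rospy.init_node('odometry'), so the node only needs to be constructed and spun.
if __name__ == '__main__':
    node = StatePredictionNode()
    node.spin()
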
class GridLaserSrv():
    def __init__(self, map_frame, robot_frame):
        self.map_frame = map_frame
        self.robot_frame = robot_frame

        self.pose_msg = PoseStamped()
        self.mapinfo = OccupancyGrid()
        self.laser_data = LaserScan()

        self.tf_listener = TransformListener()
        rospy.Subscriber('map', OccupancyGrid, self.map_callback)
        rospy.Subscriber('pose', PoseStamped, self.pose_callback)
        rospy.Subscriber('scan', LaserScan, self.laser_callback)
        rospy.Service('api/rt_grid_laser', GridLaser,
                      self.handle_realtimelaser)
        rospy.Service('api/grid_laser', GridLaser, self.handle_gridlaser)

    def pose_callback(self, msg):
        self.pose_msg = msg

    def handle_realtimelaser(self, req):
        laser_point = PointStamped()
        laser_point.header.frame_id = self.laser_data.header.frame_id
        res = GridLaserResponse()
        index = 0
        self.tf_listener.waitForTransform(self.robot_frame,
                                          laser_point.header.frame_id,
                                          rospy.Time(), rospy.Duration(1.0))

        rot = self.get_rotate_matrix()[0:3]

        # now = rospy.Time.now().to_sec()
        for r in self.laser_data.ranges:
            # laser point coordinate in laser frame
            angle = self.laser_data.angle_min + self.laser_data.angle_increment * index
            laser_point.point.x = r * math.cos(angle)
            laser_point.point.y = r * math.sin(angle)
            index = index + 1

            # transform the laser point into the robot frame
            temp = self.tf_listener.transformPoint(self.robot_frame,
                                                   laser_point)
            # calc laser point coordinate in map frame
            # temp.point.x = self.pose_msg.pose.position.x + temp.point.x
            # temp.point.y = self.pose_msg.pose.position.y + temp.point.y
            p = [
                temp.point.x * rot[0][0] + temp.point.y * rot[1][0] +
                self.pose_msg.pose.position.x, temp.point.x * rot[0][1] +
                temp.point.y * rot[1][1] + self.pose_msg.pose.position.y
            ]
            # p = np.dot([temp.point.x, temp.point.y, 0], rot)
            # p = [p[0]+self.pose_msg.pose.position.x, p[1]+self.pose_msg.pose.position.y, 0]

            # calc grid point coordinate in grid map
            pp = GridPoint()
            pp.x = (int)((p[0] - self.mapinfo.info.origin.position.x) /
                         self.mapinfo.info.resolution)
            pp.y = (int)((p[1] - self.mapinfo.info.origin.position.y) /
                         self.mapinfo.info.resolution)

            res.gridPoint.append(pp)

        # rospy.logwarn(rospy.Time.now().to_sec() - now)
        mapInfo = {}
        mapInfo['gridWidth'] = self.mapinfo.info.width
        mapInfo['gridHeight'] = self.mapinfo.info.height
        mapInfo['resolution'] = self.mapinfo.info.resolution
        res.mapInfo = json.dumps(mapInfo)
        res.msg = 'success'

        return res

    def get_rotate_matrix(self):
        quat = (-self.pose_msg.pose.orientation.x,
                -self.pose_msg.pose.orientation.y,
                -self.pose_msg.pose.orientation.z,
                self.pose_msg.pose.orientation.w)
        return (self.tf_listener.fromTranslationRotation(translation=(0, 0, 0),
                                                         rotation=quat))

    def handle_gridlaser(self, req):
        laser_point = PointStamped()
        laser_point.header.frame_id = self.laser_data.header.frame_id
        res = GridLaserResponse()
        index = 0
        for r in self.laser_data.ranges:
            # laser point coordinate in laser frame
            laser_point.point.x = r * math.cos(
                self.laser_data.angle_min +
                self.laser_data.angle_increment * index)
            laser_point.point.y = r * math.sin(
                self.laser_data.angle_min +
                self.laser_data.angle_increment * index)
            index = index + 1

            # calc laser point coordinate in robot frame
            temp = self.tf_listener.transformPoint(self.robot_frame,
                                                   laser_point)

            # calc laser point coordinate in virtual grid map
            pp = GridPoint()
            # pp.x = (int)((temp.point.x - self.mapinfo.info.origin.position.x)
            #              / self.mapinfo.info.resolution)
            # pp.y = (int)((temp.point.y - self.mapinfo.info.origin.position.y)
            #              / self.mapinfo.info.resolution)

            pp.x = (int)(temp.point.x / 0.01)
            pp.y = (int)(temp.point.y / 0.01)
            res.gridPoint.append(pp)

        mapInfo = {}
        # mapInfo['gridWidth'] = self.mapinfo.info.width
        # mapInfo['gridHeight'] = self.mapinfo.info.height
        # mapInfo['resolution'] = self.mapinfo.info.resolution
        res.mapInfo = json.dumps(mapInfo)
        res.msg = 'success'

        return res

    # map callback: cache the map metadata (the static map rarely needs updating)
    def map_callback(self, msg):
        self.mapinfo.info = msg.info

    # laser callback: cache the latest scan data
    def laser_callback(self, msg):
        self.laser_data.header.frame_id = msg.header.frame_id
        self.laser_data.angle_increment = msg.angle_increment
        self.laser_data.angle_max = msg.angle_max
        self.laser_data.angle_min = msg.angle_min
        self.laser_data.ranges = msg.ranges
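
# A minimal launch sketch for GridLaserSrv; the node name and frame names are
# illustrative defaults, not taken from the class above.
if __name__ == '__main__':
    rospy.init_node('grid_laser_srv')
    GridLaserSrv(map_frame='map', robot_frame='base_link')
    rospy.spin()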