Example no. 3
    def __init__(self):
        # Initialize the move_group API
        moveit_commander.roscpp_initialize(sys.argv)

        # Initialize the ROS node
        rospy.init_node('moveit_demo')
        self.camera = RGBD()
        self.dis = 100
        # robot reset
        self.robot_pose = ModelState()
        self.robot_state = dict()
        self.pose_pub = rospy.Publisher("/gazebo/set_model_state",
                                        ModelState,
                                        queue_size=1)
        # Initialize the 'arm' move group of the manipulator, controlled via MoveIt
        self.arm = moveit_commander.MoveGroupCommander('arm')
        self.gripper = moveit_commander.MoveGroupCommander('gripper')
        self._client = actionlib.SimpleActionClient(
            ACTION_SERVER, control_msgs.msg.GripperCommandAction)
        self._client.wait_for_server(rospy.Duration(10))
        self.Box_position = [0.6, 0.1, 0.65]
        # Get the name of the end-effector link
        self.end_effector_link = self.arm.get_end_effector_link()

        # Set the reference frame used for target poses
        self.reference_frame = 'base_link'

        self.arm.set_pose_reference_frame(self.reference_frame)
        # Get the objects in the scene
        self.head_action = PointHeadClient()
        self.grasping_client = GraspingClient()
        # self.move_base = MoveBaseClient()
        # Look downward
        self.head_action.look_at(1.2, 0.0, 0.0, "base_link")
        # Initialize the arm position
        self.arm_goal = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        self.end_goal = [0.0, 0.0, 0.0]
        self.reset()
        # Initialize the reward
        now_position = self.gripper.get_current_pose(
            "gripper_link").pose.position
        now_dis = math.sqrt(
            math.pow(now_position.x - self.Box_position[0], 2) +
            math.pow(now_position.y - self.Box_position[1], 2) +
            math.pow(now_position.z - self.Box_position[2], 2))
        self.reward = math.exp(-now_dis)
        # Update the scene in RViz

        # Set the reference frame used for target poses
        reference_frame = 'base_link'
        self.arm.set_pose_reference_frame(reference_frame)

        # Allow replanning when motion planning fails
        self.arm.allow_replanning(True)

        # Allowed error for position (meters) and orientation (radians)
        self.arm.set_goal_position_tolerance(0.01)
        self.arm.set_goal_orientation_tolerance(0.05)
        self.target_pose = PoseStamped()
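
The reward seeded near the end of this `__init__` is `exp(-distance)` between the gripper and the box. A minimal standalone sketch of that shaping term (the function name and coordinates are illustrative, not from the original):

import math

def shaped_reward(gripper_xyz, box_xyz):
    # Euclidean gripper-to-box distance with exponential decay:
    # reward is 1.0 when at the box and falls toward 0 with distance.
    d = math.sqrt(sum((g - b) ** 2 for g, b in zip(gripper_xyz, box_xyz)))
    return math.exp(-d)

# e.g. shaped_reward((0.5, 0.1, 0.7), (0.6, 0.1, 0.65)) ~= 0.894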
Example no. 4
class Robot_Skeleton(object):
    """Basic bare-bones solution for the Fetch robot interface.
    
    We recommend extending this class with additional convenience methods based
    on your application needs.
    """

    def __init__(self, simulation=True):
        """Initializes various aspects of the Fetch."""
        rospy.init_node("fetch")
        rospy.loginfo("initializing the Fetch...")
        self.arm = Arm()
        self.arm_joints = ArmJoints()
        self.base = Base()
        self.camera = RGBD()
        self.head = Head()
        self.gripper = Gripper(self.camera)
        self.torso = Torso()
        self.joint_reader = JointStateReader()

        # Tucked arm starting joint angle configuration
        self.names = ArmJoints().names()
        self.tucked = [1.3200, 1.3999, -0.1998, 1.7199, 0.0, 1.6600, 0.0]
        self.tucked_list = [(x, y) for (x, y) in zip(self.names, self.tucked)]

        # Initial (x,y,yaw) position of the robot wrt map origin. We keep this
        # fixed so that we can reset to this position as needed. The HSR's
        # `omni_base.pose` (i.e., the start pose) returns (x,y,yaw) where yaw is
        # the rotation about that axis (intuitively, the z axis). For the base,
        # `base.odom` supplies both `position` and `orientation` attributes.
        start = copy.deepcopy(self.base.odom.position)
        yaw = Base._yaw_from_quaternion(self.base.odom.orientation)
        self.start_pose = np.array([start.x, start.y, yaw])
        rospy.loginfo("...finished initialization!")


    def body_start_pose(self, start_height=0.10, end_height=0.10, velocity_factor=0.5):
        """Sets the robot's body to some initial configuration.

        Tucks the arm using motion planning. NEVER directly set joints as that
        often leads to collisions.

        Args:
            start_height: Height in meters for Fetch before arm-tuck.
            end_height: Height in meters for Fetch after arm-tuck.
            velocity_factor: controls the speed, closer to 0 means slower,
                closer to 1 means faster. (If 0.0, then it turns into 1.0 for
                some reason.) Values greater than 1.0 are cut to 1.0.
        """
        self.torso.set_height(start_height)
        self.arm.move_to_joint_goal(self.tucked_list, velocity_factor=velocity_factor)
        self.torso.set_height(end_height)


    def head_start_pose(self, pan=0.0, tilt=0.0):
        """Sets the robot's head to some initial configuration.

        Args: 
            pan: Value in radians for head sideways rotation, counterclockwise
                when looking at robot from an aerial view.
            tilt: Value in radians for head up/down movement, positive means
                looking downwards.
        """
        self.head.pan_tilt(pan=pan, tilt=tilt)


    def get_img_data(self):
        """Obtain camera and depth image.
        
        Returns:
            Tuple containing RGB camera image and corresponding depth image.
        """
        c_img = self.camera.read_color_data()
        d_img = self.camera.read_depth_data()
        return (c_img, d_img)


    def create_grasp_pose(self, x, y, z, rot_x, rot_y, rot_z):
        """Creates a pose in the world for the robot's end-effect to go to.
        
        Args:
            x, y, z, rot_x, rot_y, rot_z: A 6-D pose description.
        """
        pose_name = self.gripper.create_grasp_pose_intuitive(
                x, y, z, rot_x, rot_y, rot_z)
        return pose_name


    def move_to_pose(self, pose_name, velocity_factor=None):
        """Moves to a pose.
 
        On the HSR, this moved the `hand_palm_link` to the frame named `pose_name` at
        the correct pose. For the Fetch we should be able to extract the pose
        from `pose_name` and then call the Arm's `move_to_pose` method.
        
        Args:
            pose_name: A string name for the pose to go to.
            velocity_factor: controls the speed, closer to 0 means slower,
                closer to 1 means faster. (If 0.0, then it turns into 1.0 for
                some reason.) Values greater than 1.0 are cut to 1.0.
        """
        # See: 
        #   http://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20listener%20%28Python%29
        #   https://answers.ros.org/question/256354/does-tftransformlistenerlookuptransform-return-quaternion-position-or-translation-and-rotation/
        # First frame should be the reference frame, use `base_link`, not `odom`.
        point, quat = self.gripper.tl.lookupTransform('base_link', pose_name, rospy.Time(0))

        # See:
        #   https://github.com/cse481wi18/cse481wi18/blob/indigo-devel/applications/scripts/cart_arm_demo.py
        #   https://github.com/cse481wi18/cse481wi18/wiki/Lab-19%3A-Cartesian-space-manipulation
        ps = PoseStamped()
        ps.header.frame_id = 'base_link'
        ps.pose = Pose(
                Point(point[0], point[1], point[2]),
                Quaternion(quat[0], quat[1], quat[2], quat[3])
        )

        # See `arm.py` written by Justin Huang
        error = self.arm.move_to_pose(pose_stamped=ps, velocity_factor=velocity_factor)
        if error is not None:
            rospy.logerr(error)

    def open_gripper(self):
        self.gripper.open()

    def close_gripper(self, width=0.0, max_effort=100):
        self.gripper.close(width=width, max_effort=max_effort)
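
A minimal usage sketch of this skeleton class, assuming it lives in a module named `robot_skeleton` (the module name and every pose value below are illustrative assumptions, not from the original):

from robot_skeleton import Robot_Skeleton  # hypothetical module name

robot = Robot_Skeleton()
robot.body_start_pose(start_height=0.10, end_height=0.20, velocity_factor=0.4)
robot.head_start_pose(pan=0.0, tilt=0.8)   # look slightly downward
c_img, d_img = robot.get_img_data()        # RGB and depth frames
# Illustrative 6-D grasp pose in front of the robot.
name = robot.create_grasp_pose(0.6, 0.0, 0.8, 0.0, 1.57, 0.0)
robot.open_gripper()
robot.move_to_pose(name, velocity_factor=0.4)
robot.close_gripper(width=0.0, max_effort=60)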
Example no. 5
class Robot(object):
    MIN_EFFORT = 35  # Min grasp force, in Newtons
    MAX_EFFORT = 100  # Max grasp force, in Newtons
    dt = 0.005  # rotation speed depends on dt
    action_bound = [-1, 1]  # range of rotation angles
    state_dim = 7  # 7 observations
    action_dim = 7  # 7 actions
    Robot = {'fetch': {'init': [0, 0, 0]}}
    def __init__(self):
        # Initialize the move_group API
        moveit_commander.roscpp_initialize(sys.argv)

        # Initialize the ROS node
        rospy.init_node('moveit_demo')
        self.camera = RGBD()
        # robot reset
        self.robot_pose = ModelState()
        self.robot_state = dict()
        self.pose_pub = rospy.Publisher("/gazebo/set_model_state", ModelState, queue_size=1)
        # Initialize the 'arm' move group of the manipulator, controlled via MoveIt
        self.arm = moveit_commander.MoveGroupCommander('arm')
        self.gripper = moveit_commander.MoveGroupCommander('gripper')
        self._client = actionlib.SimpleActionClient(ACTION_SERVER, control_msgs.msg.GripperCommandAction)
        self._client.wait_for_server(rospy.Duration(10))
        self.Box_position = [0.8, 0.1, 0.75]
        # Get the name of the end-effector link
        self.end_effector_link = self.arm.get_end_effector_link()

        # Set the reference frame used for target poses
        self.reference_frame = 'base_link'

        self.arm.set_pose_reference_frame(self.reference_frame)
        # Get the objects in the scene
        self.head_action = PointHeadClient()
        self.grasping_client = GraspingClient()
        # self.move_base = MoveBaseClient()
        # Look downward
        self.head_action.look_at(1.2, 0.0, 0.0, "base_link")
        # Initialize the arm position
        self.arm_goal = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        self.end_goal = np.array([0.0, 0.0, 1.0])  # ndarray so that += in step() adds elementwise (a list would be extended)
        self.reset()
        # Initialize the reward
        # now_position = self.gripper.get_current_pose("gripper_link").pose.position
        # now_dis = math.sqrt(math.pow(now_position.x - self.Box_position[0], 2)
        #                       + math.pow(now_position.y - self.Box_position[1], 2)
        #                       + math.pow(now_position.z - self.Box_position[2], 2))
        # self.reward = math.exp(-now_dis)
        self.reward = 0  # step() reads self.reward before assigning it, so seed it here
        self.dis = 0
        # Update the scene in RViz

        # Set the reference frame used for target poses
        reference_frame = 'base_link'
        self.arm.set_pose_reference_frame(reference_frame)

        # Allow replanning when motion planning fails
        self.arm.allow_replanning(True)

        # Allowed error for position (meters) and orientation (radians)
        self.arm.set_goal_position_tolerance(0.01)
        self.arm.set_goal_orientation_tolerance(0.05)
        self.target_pose = PoseStamped()

    def open(self):
        """Opens the gripper.
        """
        goal = control_msgs.msg.GripperCommandGoal()
        goal.command.position = OPENED_POS
        self._client.send_goal_and_wait(goal, rospy.Duration(10))

    def close(self, width=0.0, max_effort=MAX_EFFORT):
        """Closes the gripper.

        Args:
            width: The target gripper width, in meters. (Might need to tune to
                make sure the gripper won't damage itself or whatever it's
                gripping.)
            max_effort: The maximum effort, in Newtons, to use. Note that this
                should not be less than 35N, or else the gripper may not close.
        """
        assert CLOSED_POS <= width <= OPENED_POS
        goal = control_msgs.msg.GripperCommandGoal()
        goal.command.position = width
        goal.command.max_effort = max_effort
        self._client.send_goal_and_wait(goal, rospy.Duration(10))

    def set_end_pose(self, x, y, z):
        a = copy.deepcopy(x)
        b = copy.deepcopy(y)
        c = copy.deepcopy(z)
        self.target_pose.header.frame_id = self.reference_frame
        self.target_pose.header.stamp = rospy.Time.now()
        self.target_pose.pose.position.x = a % 0.3 + 0.4  # 0.70
        self.target_pose.pose.position.y = b % 0.7 - 0.35  # 0.0
        self.target_pose.pose.position.z = c
        self.target_pose.pose.orientation.x = -0.0000
        self.target_pose.pose.orientation.y = 0.681666289017
        self.target_pose.pose.orientation.z = 0
        self.target_pose.pose.orientation.w = 0.73166304586
        self.end_goal[0] = self.target_pose.pose.position.x
        self.end_goal[1] = self.target_pose.pose.position.y
        self.end_goal[2] = self.target_pose.pose.position.z
        # print self.target_pose.pose.position

    def go_end_pose(self):
        self.arm.set_start_state_to_current_state()
        self.arm.set_pose_target(self.target_pose, self.end_effector_link)
        traj = self.arm.plan()
        success = self.arm.execute(traj)
        rospy.sleep(1)
        return success

    def step(self, action):
        done = False
        state = None
        success = True
        # Rotate by some angle (execute the action)
        # self.arm_goal[0] = self.arm_goal[0] % (np.pi/4)
        # self.arm_goal = [(-0.6~0.6), (0~-0.8), (0), (0~1.25), 0, 1.7, 0]
        limit = [0.6, -0.8, 10, 1.25, 3.14, 2.16, 3.14]
        # self.arm_goal = action[0] % (np.pi/4)
        # if action.shape[0] == 1:
        self.end_goal += action[0]
        target = copy.deepcopy(self.end_goal)

        you_want_to_pick_now = True
        if you_want_to_pick_now:  # todo
            target = copy.deepcopy(self.Box_position)
        # if action.shape[0] == 7:
        #     self.arm_goal += action
        # for i in range(7):
        #     self.arm_goal[i] = self.arm_goal[i] % (limit[i] * 2)  # change the limit for each joint todo
        #     self.arm_goal[i] -= limit[i]
        # self.arm_goal[1] = -math.fabs(self.arm_goal[1])
        # self.arm_goal[2] = 0
        # self.arm_goal[3] = math.fabs(self.arm_goal[3])
        # [1.32, 0.7, 0.0, -2.0, 0.0, -0.57, 0.0]
        # self.arm_goal[0] = 1
        # self.arm_goal[1] = 0
        # self.arm_goal[2] = 2
        # self.arm_goal[3] = 0
        # self.arm_goal[4] = 0
        # self.arm_goal[5] = 1
        # self.arm_goal[6] = 0
        # print "action: ", action  # todo
        # self.arm_goal = np.clip(self.arm_goal, *self.action_bound)
        # print("GOAL_ARM:", self.arm_goal)

        print "---frist---"
        self.set_end_pose(temporarity[0]-0.17, temporarity[1], 1.0)
        print temporarity
        try:
            success = self.go_end_pose()
            print "---second---"
            self.set_end_pose(temporarity[0] - 0.17, temporarity[1], 0.91)
            print temporarity
            success = self.go_end_pose()
            # self.arm.set_joint_value_target(self.arm_goal.tolist())
            # success = self.arm.go()
            # rospy.sleep(1)
        except Exception as e:
            print e.message
            done = True


        print(success)
        if not success or done:  # planning failed or a collision occurred
            reward = -10
            done = True
        else:  # planning succeeded; start moving
            # Compute the distance to the cube to derive the reward
            af_position = self.gripper.get_current_pose("gripper_link").pose.position
            af_dis = math.sqrt(math.pow(af_position.x - self.Box_position[0], 2)
                                + math.pow(af_position.y - self.Box_position[1], 2)
                                + math.pow(af_position.z - self.Box_position[2], 2))
            reward = (-af_dis)*10 - self.reward
            self.reward = (-af_dis)*10
            # Attempt a grasp
            self.close()
            # Either a successful grasp or an environment collision ends the episode
            l = self.gripper.get_current_pose("l_gripper_finger_link").pose.position
            r = self.gripper.get_current_pose("r_gripper_finger_link").pose.position
            # Finger separation distance (range: 0.03 ~ 0.13)
            dis = math.sqrt(pow(l.x-r.x, 2)+pow(l.y-r.y, 2)+pow(l.z-r.z, 2))
            if dis > 0.031:  # grasp succeeded
                self.pick_up()
                rospy.sleep(1)
                l = self.gripper.get_current_pose("l_gripper_finger_link").pose.position
                r = self.gripper.get_current_pose("r_gripper_finger_link").pose.position
                # Re-measure the finger separation (range: 0.03 ~ 0.13)
                dis = math.sqrt(pow(l.x - r.x, 2) + pow(l.y - r.y, 2) + pow(l.z - r.z, 2))
                if dis > 0.031:
                    reward += 100
                    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!success!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
                done = True
            else:
                self.open()
            # Get the state
        state = self.get_state()
        return state, reward, done
        # print(end_x, end_y, end_z)

    def read_depth_data(self):
        # Read the depth camera data
        rospy.sleep(2)
        rgb = self.camera.read_color_data()
        dep = self.camera.read_depth_data()
        dep[np.isnan(dep)] = 0  # zero out NaN depth values
        rgb = np.array(rgb)
        dep = np.array(dep)
        dep = dep[:, :, np.newaxis]
        rgbd = np.concatenate((rgb, dep), axis=2)
        new_rgbd = cv2.resize(rgbd, (224, 224))
        return new_rgbd

    def get_state(self):
        # self.camera = RGBD()
        # return self.arm_goal, self.read_depth_data()
        return self.end_goal, self.read_depth_data()

    # Reset the arm, the cube positions, and the RViz scene
    def reset(self):
        # self.arm_goal = [(-0.6~0.6), (0~-0.8), (0), (0~1.25), 0, 1.7, 0]
        # [0.0, -0.8, 0, 1.25, 0, 0, 0]

        # self.arm_goal = [0.0, -0.8, 0, 0.7, 0, 1.7, 0]  # dead assignment, kept for reference
        self.arm_goal = [1.32, 0.7, 0.0, -2.0, 0.0, -0.57, 0.0]
        print("reset: {}".format(type(self.arm_goal[0])))
        # self.arm_goal = [0, 0, 0, 0, 0, 0, 0]
        self.arm.set_joint_value_target(self.arm_goal)
        self.arm.go()
        rospy.sleep(2)
        self.reset_robot()
        self.open()
        rospy.sleep(1)
        while not self.grasping_client.updateScene():
            print "-----update scence fail-----"
            self.reset_robot()
            cubmanager = CubesManager()
            cubmanager.reset_cube(True)
        rospy.sleep(3)
        # print "--------------test--------------"
        # self.test()

    def pick_up(self):
        self.arm_goal = [1.32, 0.7, 0.0, -2.0, 0.0, -0.57, 0.0]
        print "reset:", type(self.arm_goal[0])
        self.arm.set_joint_value_target(self.arm_goal)
        self.arm.go()
        rospy.sleep(2)

    def reset_robot(self):
        for k, v in self.Robot.items():
            self.set_robot_pose(k, v['init'])

    def set_robot_pose(self, name, pose, orient=None):
        """
        :param name: model name, a string
        :param pose: model position, a list of three floats, [x, y, z]
        :param orient: model orientation (Euler angles), a list of three floats, [ix, iy, iz]
        :return:
        """
        self.robot_pose.model_name = name
        p = self.robot_pose.pose
        # cube_init = self.CubeMap[name]["init"]
        p.position.x = pose[0] + 0.15
        p.position.y = pose[1]
        p.position.z = pose[2]
        if orient is None:
            orient = [0, 0, 0]
        q = quaternion_from_euler(orient[0], orient[1], orient[2])
        p.orientation = Quaternion(*q)
        self.pose_pub.publish(self.robot_pose)

    def sample(self):
        # Uniform random joint angles in [-pi, pi); the original chained a
        # torch-only .to(device) onto a numpy array, which would raise.
        return 2 * np.pi * np.random.rand(7) - np.pi

    def test(self):
        target_pose = PoseStamped()
        target_pose.header.frame_id = self.reference_frame
        target_pose.header.stamp = rospy.Time.now()
        target_pose.pose.position.x = 0.70
        target_pose.pose.position.y = 0.0
        target_pose.pose.position.z = 0.9
        target_pose.pose.orientation.x = -0.0000
        target_pose.pose.orientation.y = 0.681666289017
        target_pose.pose.orientation.z = 0
        target_pose.pose.orientation.w = 0.73166304586
        self.arm.set_start_state_to_current_state()
        self.arm.set_pose_target(target_pose, self.end_effector_link)
        traj = self.arm.plan()
        # print "it :", traj
        self.arm.execute(traj)
        rospy.sleep(1)

    def test1(self):
        self.set_end_pose(0.8 - 0.17, 0.1, 1)
        self.go_end_pose()
        rospy.sleep(1)
        self.set_end_pose(0.8 - 0.17, 0.1, 0.91)
        self.go_end_pose()
        rospy.sleep(1)
        self.close()
        rospy.sleep(1)
        self.set_end_pose(0.8 - 0.17, 0.1, 1)
        self.go_end_pose()
        rospy.sleep(1)
        self.set_end_pose(0.8 - 0.17, -0.3, 1)
        self.go_end_pose()
        rospy.sleep(1)
        self.set_end_pose(0.8 - 0.17, -0.3, 0.91)
        self.go_end_pose()
        rospy.sleep(1)
        self.open()

    def test_step(self, action):
        done = False
        self.end_goal += action[0]
        # print "---frist---"
        x = self.end_goal[0] % 0.3+0.4
        y = self.end_goal[1] % 0.7-0.35
        # print x, y
        dis = math.sqrt(math.pow(x - self.Box_position[0], 2)
                            + math.pow(y - self.Box_position[1], 2))

        if dis < 0.05:  # threshold, tunable
            done = True
            reward = 100
        elif dis <= self.dis:
            reward = 5
        else:
            reward = -5
        self.dis = dis
        new_position = [x, y, 1.0]
        return new_position, reward, done
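
# A hedged sketch (not in the original) of how the Robot environment above
# might be driven in an RL-style loop; the action magnitudes, action shape,
# and episode length are illustrative assumptions.
def _demo_episode(robot, steps=10):
    state = robot.get_state()  # (end_goal, 224x224x4 RGB-D array)
    for _ in range(steps):
        # Small random end-effector deltas; step() consumes action[0].
        action = np.random.uniform(-0.05, 0.05, size=(1, 3))
        state, reward, done = robot.step(action)
        if done:
            robot.reset()
            break

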
class Robot_Interface(object):
    """For usage with the Fetch robot."""
    def __init__(self, simulation=True):
        """Initializes various aspects of the Fetch.
        
        TODOs: get things working, also use `simulation` flag to change ROS
        topic names if necessary (especially for the cameras!). UPDATE: actually
        I don't think this is necessary now, they have the same topic names.
        """
        rospy.init_node("fetch")
        self.arm = Arm()
        self.arm_joints = ArmJoints()
        self.base = Base()
        self.camera = RGBD()
        self.head = Head()
        self.gripper = Gripper(self.camera)
        self.torso = Torso()
        self.joint_reader = JointStateReader()

        # Tucked arm starting joint angle configuration
        self.names = ArmJoints().names()
        self.tucked = [1.3200, 1.3999, -0.1998, 1.7199, 0.0, 1.6600, 0.0]
        self.tucked_list = [(x, y) for (x, y) in zip(self.names, self.tucked)]

        # Initial (x,y,yaw) position of the robot wrt map origin. We keep this
        # fixed so that we can reset to this position as needed. The HSR's
        # `omni_base.pose` (i.e., the start pose) returns (x,y,yaw) where yaw is
        # the rotation about that axis (intuitively, the z axis). For the base,
        # `base.odom` supplies both `position` and `orientation` attributes.
        start = copy.deepcopy(self.base.odom.position)
        yaw = Base._yaw_from_quaternion(self.base.odom.orientation)
        self.start_pose = np.array([start.x, start.y, yaw])
        self.TURN_SPEED = 0.3

        self.num_restarts = 0

    def body_start_pose(self,
                        start_height=0.10,
                        end_height=0.10,
                        velocity_factor=None):
        """Sets the robot's body to some initial configuration.
        
        The HSR uses `whole_body.move_to_go()` which initializes an appropriate
        posture so that the hand doesn't collide with movement. For the Fetch,
        we should probably make the torso extend, so the arms can extend more
        easily without collisions. Use `move_to_joint_goal` since that uses
        motion planning. Do NOT directly set the joints without planning!!
        """
        self.torso.set_height(start_height)
        self.arm.move_to_joint_goal(self.tucked_list,
                                    velocity_factor=velocity_factor)
        self.torso.set_height(end_height)
        # Specific to the Siemens challenge (actually a lot of this stuff is ...)
        if self.num_restarts == 0:
            self.base.turn(angular_distance=45 * DEG_TO_RAD)
            self.num_restarts += 1

    def head_start_pose(self):
        """Hard-coded starting pose for the robot's head.
        
        These values are from the HSR. The Fetch needs a different pan and tilt.
        Positive pan means rotating counterclockwise when looking at robot from
        an aerial view.
        """
        self.head.pan_tilt(pan=0.0, tilt=0.8)

    def position_start_pose(self, offsets=None, do_something=False):
        """Assigns the robot's base to some starting pose.

        Mainly to "reset" the robot to the original starting position (and also,
        rotation about base axis) after it has moved, usually w/no offsets.
        
        Ugly workaround: we have target (x,y), and compute the distance to the
        point and the angle. We turn the Fetch according to that angle, and go
        forward. Finally, we do a second turn which corresponds to the target
        yaw at the end. This turns w.r.t. the current angle, so we undo the
        effect of the first turn.  See `examples/test_position_start_pose.py`
        for tests.
        
        Args:
            offsets: a list of length 3, indicating offsets in the x, y, and
                yaw, respectively, to be added onto the starting pose.
        """
        # Causing problems during my tests of the Siemens demo.
        if not do_something:
            return

        current_pos = copy.deepcopy(self.base.odom.position)
        current_theta = Base._yaw_from_quaternion(
            self.base.odom.orientation)  # [-pi, pi]
        ss = np.array([current_pos.x, current_pos.y, current_theta])

        # Absolute target position and orientation specified with `pp`.
        pp = np.copy(self.start_pose)
        if offsets:
            pp += np.array(offsets)

        # Get distance to travel, critically assumes `pp` is starting position.
        dist = np.sqrt((ss[0] - pp[0])**2 + (ss[1] - pp[1])**2)
        if dist < 1e-6:
            dist = 1e-6  # avoid 0/0 in the asserts when already at the start pose
        rel_x = ss[0] - pp[0]
        rel_y = ss[1] - pp[1]
        assert -1 <= rel_x / dist <= 1
        assert -1 <= rel_y / dist <= 1

        # But we also need to be *facing* the correct direction, w/input [-1,1].
        # First, get the opposite view (facing "outwards"), then flip by 180.
        desired_facing = np.arctan2(rel_y, rel_x)  # [-pi, pi], facing outward
        desired_theta = math.pi + desired_facing  # [0, 2*pi], flip by 180
        if desired_theta > math.pi:
            desired_theta -= 2 * math.pi  # [-pi, pi]

        # Reconcile with the current theta. Got this by basically trial/error
        angle = desired_theta - current_theta  # [-2*pi, 2*pi]
        if angle > math.pi:
            angle -= 2 * math.pi
        elif angle < -math.pi:
            angle += 2 * math.pi

        self.base.turn(angular_distance=angle, speed=self.TURN_SPEED)
        self.base.go_forward(distance=dist, speed=0.2)

        # Back at the start x, y, but now need to consider the _second_ turn.
        # The robot is facing at `desired_theta` rads, but wants `pp[2]` rads.
        final_angle = pp[2] - desired_theta
        if final_angle > math.pi:
            final_angle -= 2 * math.pi
        elif final_angle < -math.pi:
            final_angle += 2 * math.pi
        self.base.turn(angular_distance=final_angle, speed=self.TURN_SPEED)
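
    @staticmethod
    def _wrap_to_pi(angle):
        """Hypothetical helper, not part of the original class: a closed-form
        version of the branch-based wrap-to-[-pi, pi) normalization that
        `position_start_pose` above applies three times."""
        return (angle + math.pi) % (2 * math.pi) - math.pi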

    def get_img_data(self):
        """Obtain camera and depth image.
        
        Returns:
            Tuple containing RGB camera image and corresponding depth image.
        """
        c_img = self.camera.read_color_data()
        d_img = self.camera.read_depth_data()
        return (c_img, d_img)

    def get_depth(self, point, d_img):
        """Compute mean depth near grasp point.

        NOTE: assumes that we have a similar `cfg.ZRANGE` as with the HSR. I'm
        not sure where exactly this comes from.
        """
        y, x = int(point[0]), int(point[1])
        # z_box = d_img[y-ZRANGE:y+ZRANGE, x-ZRANGE:x+ZRANGE]
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        indx = np.nonzero(z_box)
        z = np.mean(z_box[indx])
        return z
        # y, x = int(point[0]), int(point[1])
        # # z_box = d_img[y-ZRANGE:y+ZRANGE, x-ZRANGE:x+ZRANGE]
        # z_box = d_img[y-10:y + 10, x - 10:x + 10]
        # indx = np.nonzero(z_box)
        # z = np.mean(z_box[indx])
        # return z

    def get_rot(self, direction):
        """Compute rotation of gripper such that given vector is grasped.

        Currently this directly follows the HSR code as there's nothing
        Fetch-dependent.
        """
        dy, dx = direction[0], direction[1]
        dx *= -1
        if dy < 0:
            dx *= -1
            dy *= -1
        rot = np.arctan2(dy, dx)
        rot = np.pi - rot
        return rot

    def create_grasp_pose(self, x, y, z, rot):
        """ If `intuitive=True` then x,y,z,rot interpreted wrt some link in the
        world, e.g., 'odom' for the Fetch. It's False by default to maintain
        backwards compatibility w/Siemens-based code.
        """
        pose_name = self.gripper.create_grasp_pose(x, y, z, rot)
        return pose_name

    def open_gripper(self):
        self.gripper.open()

    def close_gripper(self):
        self.gripper.close()

    def move_to_pose(self, pose_name, z_offset, velocity_factor=None):
        """Moves to a pose.
 
        On the HSR, this moved the `hand_palm_link` to the frame named `pose_name` at
        the correct pose. For the Fetch we should be able to extract the pose
        from `pose_name` and then call the Arm's `move_to_pose` method.
        
        Args:
            pose_name: A string name for the pose to go to.
            z_offset: Scalar offset in z-direction, offset is wrt the pose
                specified by `pose_name`.
            velocity_factor: controls the speed, closer to 0 means slower,
                closer to 1 means faster. (If 0.0, then it turns into 1.0 for
                some reason.) Values greater than 1.0 are cut to 1.0.
        """
        # See:
        #   http://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20listener%20%28Python%29
        #   https://answers.ros.org/question/256354/does-tftransformlistenerlookuptransform-return-quaternion-position-or-translation-and-rotation/
        # First frame should be the reference frame, use `base_link`, not `odom`.
        point, quat = self.gripper.tl.lookupTransform('base_link', pose_name,
                                                      rospy.Time(0))
        z_point = point[2] + z_offset

        # See:
        #   https://github.com/cse481wi18/cse481wi18/blob/indigo-devel/applications/scripts/cart_arm_demo.py
        #   https://github.com/cse481wi18/cse481wi18/wiki/Lab-19%3A-Cartesian-space-manipulation
        ps = PoseStamped()
        ps.header.frame_id = 'base_link'
        ps.pose = Pose(Point(point[0], point[1], z_point),
                       Quaternion(quat[0], quat[1], quat[2], quat[3]))

        # See `arm.py` written by Justin Huang
        error = self.arm.move_to_pose(pose_stamped=ps,
                                      velocity_factor=velocity_factor)
        if error is not None:
            rospy.logerr(error)

    def find_ar(self, ar_number, velocity_factor=None):
        try:
            ar_name = 'ar_marker/' + str(ar_number)

            # HSR code, with two hard-coded offsets?
            #self.whole_body.move_end_effector_pose(geometry.pose(y=0.08, z=-0.3), ar_name)

            # Fetch 'translation'. Note the `ar_name` for pose name.
            point, quat = self.gripper.tl.lookupTransform(
                'base_link', ar_name, rospy.Time(0))
            y_point = point[1] + 0.08
            z_point = point[2] - 0.3

            ps = PoseStamped()
            ps.header.frame_id = 'base_link'
            ps.pose = Pose(Point(point[0], y_point, z_point),
                           Quaternion(quat[0], quat[1], quat[2], quat[3]))

            error = self.arm.move_to_pose(pose_stamped=ps,
                                          velocity_factor=velocity_factor)
            if error is not None:
                rospy.logerr(error)

            return True
        except Exception:
            return False

    def pan_head(self, tilt):
        """Adjusts tilt of the robot, AND set pan at zero.
        
        Args: 
            tilt: Value in radians, positive means looking downwards.
        """
        self.head.pan_tilt(pan=0, tilt=tilt)
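
Taken together, `Robot_Interface`'s methods form a grasping pipeline. A hedged sketch of the intended call order (the grasp pixel, world coordinates, direction vector, and offsets below are illustrative assumptions, not from the original):

robot = Robot_Interface()
robot.body_start_pose()
robot.head_start_pose()

c_img, d_img = robot.get_img_data()
point = (240, 320)                      # hypothetical grasp pixel (y, x)
z = robot.get_depth(point, d_img)       # mean nonzero depth near the pixel
rot = robot.get_rot((0.0, 1.0))         # gripper rotation from a 2-D direction
name = robot.create_grasp_pose(0.6, 0.0, z, rot)  # illustrative x, y
robot.open_gripper()
robot.move_to_pose(name, z_offset=0.05)  # approach from slightly above
robot.move_to_pose(name, z_offset=0.0)
robot.close_gripper()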