コード例 #1
0
ファイル: can_solver.py プロジェクト: dhadfieldmenell/tampy
    def sample_rp_from_ee(self, env, rs_param, ee_pose):
        """
            Use inverse kinematics to enumerate robot configurations
            (base pose + arm poses) that reach the given end-effector pose.

            env: OpenRAVE environment to spawn the sampling body into
            rs_param: robot-pose parameter supplying name/lArmPose/lGripper/rGripper
            ee_pose: tuple of (ee_pos, ee_rot)
            return: list of full robot poses with format of
                    (basepose, backHeight, lArmPose, lGripper, rArmPose, rGripper)
        """
        ee_pos, ee_rot = ee_pose[0], ee_pose[1]
        # Grid of candidate base offsets around the end-effector x/y position.
        base_raidus = np.linspace(-1, 1, num=BASE_SAMPLE_SIZE)
        poses = list(itertools.product(base_raidus, base_raidus))
        # sample
        dummy_body = OpenRAVEBody(env, rs_param.name, PR2())
        possible_robot_pose = []
        for pos in poses:
            bp = pos + ee_pos[:2]
            vec = ee_pos[:2] - bp
            vec = vec / np.linalg.norm(vec)
            # Orient the base to face the end-effector position.
            theta = math.atan2(vec[1], vec[0])
            bp = np.array([bp[0], bp[1], theta])
            dummy_body.set_pose(bp)
            # BUGFIX: run IK on the sampling body (`robot_body` was undefined).
            arm_poses = dummy_body.ik_arm_pose(ee_pos, ee_rot)
            for arm_pose in arm_poses:
                # BUGFIX: index the current IK solution (`ee_arm_poses` was
                # undefined); arm_pose[0] is backHeight, arm_pose[1:] the arm.
                possible_robot_pose.append(
                    (bp, arm_pose[0], rs_param.lArmPose, rs_param.lGripper,
                     arm_pose[1:], rs_param.rGripper)
                )

        # BUGFIX: removed leftover ipdb.set_trace() debugging breakpoint.
        dummy_body.delete()
        return possible_robot_pose
コード例 #2
0
    def test_add_obstacle(self):
        """Attach an Obstacle parameter to a fresh env and verify its transform."""
        attrs = {"geom": [], "pose": [(3, 5)], "_type": ["Obstacle"], "name": ["obstacle"]}
        attr_types = {"geom": Obstacle, "pose": Vector2d, "_type": str, "name": str}
        obstacle = parameter.Object(attrs, attr_types)

        env = Environment()
        """
        to ensure the _add_obstacle is working, uncomment the line below to make
        sure the obstacle is being created
        """
        # env.SetViewer('qtcoin')

        obstacle_body = OpenRAVEBody(env, obstacle.name, obstacle.geom)
        obstacle_body.set_pose([2, 0])
        expected = np.eye(4)
        expected[0, 3] = 2
        self.assertTrue(
            np.allclose(obstacle_body.env_body.GetTransform(), expected))
コード例 #3
0
    def test_get_ik_arm_pose(self):
        """Fixture: place a robot and a can at known poses ahead of an IK query."""
        robot = self.setup_robot()
        can = self.setup_can()
        test_env = self.setup_environment()

        robot_body = OpenRAVEBody(test_env, robot.name, robot.geom)

        # robot.pose = np.array([-.5, 0, 0]).reshape((3,1))
        robot.pose = np.array([-0., 0, 0]).reshape((3, 1))
        robot_body.set_pose(robot.pose)
        # Map every DOF group of the robot onto the OpenRAVE body.
        dof_value_map = {
            "backHeight": robot.backHeight,
            "lArmPose": robot.lArmPose.flatten(),
            "lGripper": robot.lGripper,
            "rArmPose": robot.rArmPose.flatten(),
            "rGripper": robot.rGripper
        }
        robot_body.set_dof(dof_value_map)
        # can.pose = np.array([-0.31622543, -0.38142561,  1.19321209]).reshape((3,1))
        # can.pose = np.array([0.5, -0.2,  .8]).reshape((3,1))
        can.pose = np.array([.5, .2, .8]).reshape((3, 1))
        # can.rotation = np.array([ 0.04588155, -0.38504402,  0.19207589]).reshape((3,1))
        can.rotation = np.array([-.0, -0., 0.]).reshape((3, 1))
        can_body = OpenRAVEBody(test_env, can.name, can.geom)
        can_body.set_pose(can.pose, can.rotation)
コード例 #4
0
 def get_robot_info(self, robot_body):
     """Return the right-gripper IK transform and the arm DOF indices."""
     gripper_link = robot_body.env_body.GetLink("right_gripper")
     # The raw link transform is off by 90 degrees from the IK frame, so
     # round-trip it through obj_pose_from_transform / get_ik_transform.
     gripper_pose = OpenRAVEBody.obj_pose_from_transform(gripper_link.GetTransform())
     robot_trans = OpenRAVEBody.get_ik_transform(gripper_pose[:3], gripper_pose[3:])
     return robot_trans, list(range(10, 17))
コード例 #5
0
    def test_ee_reachable(self):
        """End-to-end check of BaxterEEReachablePos/Rot along a grasp trajectory."""

        # InGripper, Robot, Can

        robot = ParamSetup.setup_baxter()
        test_env = ParamSetup.setup_env()
        rPose = ParamSetup.setup_baxter_pose()
        ee_pose = ParamSetup.setup_ee_pose()

        pred = baxter_predicates.BaxterEEReachablePos("ee_reachable", [robot, rPose, ee_pose], ["Robot", "RobotPose", "EEPose"], test_env)
        pred2 = baxter_predicates.BaxterEEReachableRot("ee_reachable_rot", [robot, rPose, ee_pose], ["Robot", "RobotPose", "EEPose"], test_env)
        baxter = pred._param_to_body[robot]
        # Since this predicate is not yet concrete
        self.assertFalse(pred.test(0))

        ee_pose.value = np.array([[1.2, -0.1, 0.925]]).T
        ee_pose.rotation = np.array([[0,0,0]]).T
        # Visualize the target end-effector pose with a green-can body.
        ee_targ = ParamSetup.setup_green_can()
        ee_body = OpenRAVEBody(test_env, "EE_Pose", ee_targ.geom)
        ee_body.set_pose(ee_pose.value[:, 0], ee_pose.rotation[:, 0])

        # Seed a 7-timestep trajectory with neutral values.
        robot.lArmPose = np.zeros((7,7))
        robot.lGripper = np.ones((1, 7))*0.02
        robot.rGripper = np.ones((1, 7))*0.02
        robot.pose = np.zeros((1,7))
        robot.rArmPose = np.zeros((7,7))
        # initialized pose value is not right
        self.assertFalse(pred.test(0))

        # Find IK Solution
        # Approach along x toward the target, then retreat straight up in z.
        trajectory = []
        trajectory.append(baxter.get_ik_from_pose([1.2-3*const.APPROACH_DIST, -0.1, 0.925], [0,0,0], "right_arm")[0])    #s=-3
        trajectory.append(baxter.get_ik_from_pose([1.2-2*const.APPROACH_DIST, -0.1, 0.925], [0,0,0], "right_arm")[0])    #s=-2
        trajectory.append(baxter.get_ik_from_pose([1.2-const.APPROACH_DIST, -0.1, 0.925], [0,0,0], "right_arm")[0])       #s=-1
        trajectory.append(baxter.get_ik_from_pose([1.2, -0.1, 0.925], [0,0,0], "right_arm")[0])                     #s=0
        trajectory.append(baxter.get_ik_from_pose([1.2, -0.1, 0.925+const.RETREAT_DIST], [0,0,0], "right_arm")[0])        #s=1
        trajectory.append(baxter.get_ik_from_pose([1.2, -0.1, 0.925+2*const.RETREAT_DIST], [0,0,0], "right_arm")[0])      #s=2
        trajectory.append(baxter.get_ik_from_pose([1.2, -0.1, 0.925+3*const.RETREAT_DIST], [0,0,0], "right_arm")[0])      #s=3
        trajectory = np.array(trajectory)


        robot.rArmPose = trajectory.T
        # Predicate should succeed in the grasping post at t=3,
        # EEreachableRot should always pass since rotation is right all the time
        self.assertFalse(pred.test(0))
        self.assertTrue(pred2.test(0))
        self.assertFalse(pred.test(1))
        self.assertTrue(pred2.test(1))
        self.assertFalse(pred.test(2))
        self.assertTrue(pred2.test(2))
        self.assertTrue(pred.test(3))
        self.assertTrue(pred2.test(3))
        # Since finding ik introduced some error, we relax the tolerance to 1e-3
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(3), True, 1e-3)

        if const.TEST_GRAD: pred2.expr.expr.grad(pred2.get_param_vector(3), True, 1e-3)
コード例 #6
0
    def test_ee_reachable(self):
        """End-to-end check of PR2EEReachablePos/Rot along a grasp trajectory."""

        # InGripper, Robot, Can

        robot = ParamSetup.setup_pr2()
        test_env = ParamSetup.setup_env()
        rPose = ParamSetup.setup_pr2_pose()
        ee_pose = ParamSetup.setup_ee_pose()

        pred = pr2_predicates.PR2EEReachablePos("ee_reachable", [robot, rPose, ee_pose], ["Robot", "RobotPose", "EEPose"], test_env)
        pred2 = pr2_predicates.PR2EEReachableRot("ee_reachable_rot", [robot, rPose, ee_pose], ["Robot", "RobotPose", "EEPose"], test_env)
        pr2 = pred._param_to_body[robot]
        # Since this predicate is not yet concrete
        self.assertFalse(pred.test(0))
        # ee_pose.value = np.array([[0.951, -0.188, 0.790675]]).T
        ee_pose.value = np.array([[0.440, -0.339, 0.791]]).T
        ee_pose.rotation = np.array([[0,0,0]]).T
        # Visualize the target end-effector pose with a green-can body.
        ee_targ = ParamSetup.setup_green_can()
        ee_body = OpenRAVEBody(test_env, "EE_Pose", ee_targ.geom)
        ee_body.set_pose(ee_pose.value[:, 0], ee_pose.rotation[:, 0])

        # Seed a 7-timestep trajectory with neutral values.
        robot.lArmPose = np.zeros((7,7))
        robot.lGripper = np.ones((1, 7))*0.528
        robot.rArmPose = np.zeros((7,7))
        robot.rGripper = np.ones((1, 7))*0.528
        robot.pose = np.zeros((3,7))
        robot.backHeight = np.zeros((1,7))
        # initialized pose value is not right
        self.assertFalse(pred.test(0))

        # Find IK Solution
        # Approach along x toward the target, then retreat straight up in z.
        trajectory = []
        trajectory.append(pr2.get_ik_from_pose([0.440-3*const.APPROACH_DIST, -0.339, 0.791], [0,0,0], "rightarm_torso")[0])    #s=-3
        trajectory.append(pr2.get_ik_from_pose([0.440-2*const.APPROACH_DIST, -0.339, 0.791], [0,0,0], "rightarm_torso")[0])    #s=-2
        trajectory.append(pr2.get_ik_from_pose([0.440-const.APPROACH_DIST, -0.339, 0.791], [0,0,0], "rightarm_torso")[0])       #s=-1
        trajectory.append(pr2.get_ik_from_pose([0.440, -0.339, 0.791], [0,0,0], "rightarm_torso")[0])                     #s=0
        trajectory.append(pr2.get_ik_from_pose([0.440, -0.339, 0.791+const.RETREAT_DIST], [0,0,0], "rightarm_torso")[0])        #s=1
        trajectory.append(pr2.get_ik_from_pose([0.440, -0.339, 0.791+2*const.RETREAT_DIST], [0,0,0], "rightarm_torso")[0])      #s=2
        trajectory.append(pr2.get_ik_from_pose([0.440, -0.339, 0.791+3*const.RETREAT_DIST], [0,0,0], "rightarm_torso")[0])      #s=3
        trajectory = np.array(trajectory).T

        # Row 0 of the IK solution is the torso height; rows 1+ are the arm.
        robot.backHeight = trajectory[[0], :]
        robot.rArmPose = trajectory[1:, :]
        # Predicate should succeed in the grasping post at t=3,
        # EEreachableRot should always pass since rotation is right all the time
        self.assertFalse(pred.test(0))
        self.assertTrue(pred2.test(0))
        self.assertFalse(pred.test(1))
        self.assertTrue(pred2.test(1))
        self.assertFalse(pred.test(2))
        self.assertTrue(pred2.test(2))
        self.assertTrue(pred.test(3))
        self.assertTrue(pred2.test(3))
コード例 #7
0
ファイル: test_baxter.py プロジェクト: Simon0xzx/tampy
    def test_baxter(self):
        """Smoke-test creating a Baxter OpenRAVE body and reading its DOFs."""
        env = Environment()
        attrs = {"name": ['baxter'], "pose": ['undefined'], "_type": ["Robot"], "geom": []}
        attr_types = {"name": str, "pose": matrix.Vector3d, "_type": str, "geom": Baxter}
        baxter = parameter.Object(attrs, attr_types)
        baxter.pose = np.zeros((1,1))
        baxter_body = OpenRAVEBody(env, baxter.name, baxter.geom)
        baxter_body.set_transparency(0.5)
        body = baxter_body.env_body
        # NOTE(review): `dof` is unused here; the remainder of this test
        # appears to be truncated in this snippet.
        dof = body.GetActiveDOFValues()
        """
コード例 #8
0
def get_ik_transform(pos, rot):
    """Convert an object pose (pos, rot) into the IK goal transform.

    Openravepy's tool frame is rotated 90 degrees about y relative to the
    object frame, so the rotation block is corrected here.
    """
    trans = OpenRAVEBody.transform_from_obj_pose(pos, rot)
    correction = matrixFromAxisAngle([0, PI / 2, 0])
    trans[:3, :3] = trans[:3, :3].dot(correction[:3, :3])
    return trans
コード例 #9
0
def get_col_free_torso_arm_pose(t,
                                pos,
                                rot,
                                robot_param,
                                robot_body,
                                arm_pose_seed=None,
                                save=False,
                                callback=None):
    """Solve torso + right-arm IK reaching end-effector pose (pos, rot) at t.

    t: timestep column of robot_param to read (and possibly write)
    pos, rot: target end-effector position and rotation
    robot_param: parameter holding rArmPose / backHeight trajectories
    robot_body: OpenRAVE body used by the IK solver
    arm_pose_seed: optional IK seed (defaults to the current arm pose at t)
    save: when True, keep a found solution written into robot_param;
          when False (default) the old values are restored before returning
    callback: optional visualization hook, called with the target transform
    return: (torso_pose, arm_pose); torso_pose is None when IK failed
    """
    target_trans = get_ee_transform_from_pose(pos, rot)

    # save arm pose and back height so they can be restored below
    old_arm_pose = robot_param.rArmPose[:, t].copy()
    old_back_height = robot_param.backHeight[:, t].copy()

    if arm_pose_seed is None:
        arm_pose_seed = old_arm_pose

    torso_pose, arm_pose = get_torso_arm_ik(robot_body,
                                            target_trans,
                                            old_arm_pose=arm_pose_seed)
    if torso_pose is not None:
        robot_param.rArmPose[:, t] = arm_pose
        robot_param.backHeight[:, t] = torso_pose
        if callback is not None:
            trans = OpenRAVEBody.transform_from_obj_pose(pos, rot)
            callback(trans)
            # callback(target_trans)

    # BUGFIX: the `save` flag was ignored and a found solution was always
    # wiped; only restore the old values when the caller did not ask to keep
    # the solution (or when IK failed).
    if not save or torso_pose is None:
        robot_param.rArmPose[:, t] = old_arm_pose
        robot_param.backHeight[:, t] = old_back_height
    return torso_pose, arm_pose
コード例 #10
0
def get_ee_transform_from_pose(pose, rotation):
    """Return the end-effector transform for the given object pose.

    The tool frame is rotated 90 degrees about y relative to the end
    effector, so that correction is folded into the rotation block.
    """
    ee_trans = OpenRAVEBody.transform_from_obj_pose(pose, rotation)
    axis_fix = matrixFromAxisAngle([0, PI / 2, 0])
    ee_trans[:3, :3] = ee_trans[:3, :3].dot(axis_fix[:3, :3])
    return ee_trans
コード例 #11
0
 def lazy_spawn_or_body(self, param, name, geom):
     """Return param's OpenRAVE body, creating it on first use."""
     body = param.openrave_body
     if body is None:
         param.openrave_body = OpenRAVEBody(self._env, name, geom)
     else:
         # An existing body must match the requested geometry and must live
         # in this solver's environment.
         assert geom == body._geom
         assert self._env == body.env_body.GetEnv()
     return param.openrave_body
コード例 #12
0
    def test_add_obstacle(self):
        """Create an Obstacle parameter and verify its body transform."""
        attrs = {
            "geom": [],
            "pose": [(3, 5)],
            "_type": ["Obstacle"],
            "name": ["obstacle"],
        }
        attr_types = {
            "geom": Obstacle,
            "pose": Vector2d,
            "_type": str,
            "name": str,
        }
        obstacle = parameter.Object(attrs, attr_types)

        env = Environment()
        """
        to ensure the _add_obstacle is working, uncomment the line below to make
        sure the obstacle is being created
        """
        # env.SetViewer('qtcoin')

        obstacle_body = OpenRAVEBody(env, obstacle.name, obstacle.geom)
        obstacle_body.set_pose([2, 0])
        expected = np.eye(4)
        expected[0, 3] = 2
        self.assertTrue(
            np.allclose(obstacle_body.env_body.GetTransform(), expected))
コード例 #13
0
ファイル: can_solver.py プロジェクト: Simon0xzx/tampy
    def sample_rp_from_ee(t, plan, ee_poses, robot):
        """Sample full robot poses that reach each candidate end-effector pose.

        t: timestep whose current robot values seed the sampling
        plan: plan providing the OpenRAVE environment
        ee_poses: iterable of end-effector pose parameters
        robot: robot parameter supplying geometry and current DOF values
        return: list of stacked robot poses
                (base, torso, lArmPose, lGripper, rArmPose, rGripper)
        """
        # BUGFIX: removed dead `target_pose = target.value[:, 0]` — `target`
        # was undefined in this scope and the value was never used.
        dummy_body = OpenRAVEBody(plan.env, robot.name + '_dummy', robot.geom)
        possible_rps = []
        for ee_pose in ee_poses:
            base_pose = sampling.get_col_free_base_pose_around_target(
                t, plan, ee_pose.value, robot).reshape((1, 3))
            ik_arm_poses = dummy_body.ik_arm_pose(ee_pose[0], ee_pose[1])
            # Pick the IK solution closest to the current torso+arm config.
            cur_arm_poses = np.c_[robot.backHeight, robot.rArmPose].reshape((8,))
            chosen_pose = sampling.closest_arm_pose(ik_arm_poses, cur_arm_poses)
            torso_pose, arm_pose = chosen_pose[0], chosen_pose[1:]
            # BUGFIX: use the chosen IK arm pose (`rArmPose` was undefined).
            possible_rp = np.hstack(
                (base_pose, torso_pose, robot.lArmPose[:, t],
                 robot.lGripper[:, t], arm_pose, robot.rGripper[:, t]))
            possible_rps.append(possible_rp)

        dummy_body.delete()
        # BUGFIX: removed the leftover ipdb breakpoint; an empty list is
        # simply returned to the caller.
        return possible_rps
コード例 #14
0
def get_ee_transform_from_pose(pose, rotation):
    """Build the end-effector transform whose rotation axis is perpendicular
    to the gripper side for the given object pose."""
    transform = OpenRAVEBody.transform_from_obj_pose(pose, rotation)
    # A 90-degree rotation about y maps the tool frame onto the end-effector frame.
    quarter_turn = matrixFromAxisAngle([0, PI / 2, 0])
    transform[:3, :3] = transform[:3, :3].dot(quarter_turn[:3, :3])
    return transform
コード例 #15
0
ファイル: can_solver.py プロジェクト: dhadfieldmenell/tampy
    def sample_ee_from_target(self, target):
        """
            Sample all possible EE Pose that pr2 can grasp with

            target: parameter of type Target
            return: list of tuple in the format of (ee_pos, ee_rot)
        """
        possible_ee_poses = []
        targ_pos, targ_rot = target.value.flatten(), target.rotation.flatten()
        ee_pos = targ_pos.copy()
        # The target transform is loop-invariant: compute it once (the
        # original rebuilt it every iteration and also had a dead copy
        # before the loop).
        target_trans = OpenRAVEBody.transform_from_obj_pose(targ_pos, targ_rot)
        # rotate can's local z-axis by the amount of linear spacing between 0 to 2pi
        # NOTE(review): linspace includes both endpoints, so the first and
        # last samples coincide — confirm whether endpoint=False was intended.
        angle_range = np.linspace(0, np.pi * 2, num=SAMPLE_SIZE)
        for rot in angle_range:
            # rotate new ee_pose around can's rotation axis
            rot_mat = matrixFromAxisAngle([0, 0, rot])
            ee_trans = target_trans.dot(rot_mat)
            ee_rot = OpenRAVEBody.obj_pose_from_transform(ee_trans)[3:]
            possible_ee_poses.append((ee_pos, ee_rot))
        return possible_ee_poses
コード例 #16
0
def get_ee_from_target(targ_pos, targ_rot):
    """
        Sample all possible EE Pose that pr2 can grasp with

        target_pos: position of target we want to sample ee_pose from
        target_rot: rotation of target we want to sample ee_pose from
        return: list of ee_pose tuple in the format of (ee_pos, ee_rot) around target axis
    """
    possible_ee_poses = []
    ee_pos = targ_pos.copy()
    # The target transform is loop-invariant: compute it once (the original
    # rebuilt it on every iteration).
    target_trans = OpenRAVEBody.transform_from_obj_pose(targ_pos, targ_rot)
    # rotate can's local z-axis by the amount of linear spacing between 0 to 2pi
    # NOTE(review): linspace includes both endpoints, so the first and last
    # samples coincide — confirm whether endpoint=False was intended.
    angle_range = np.linspace(0, PI * 2, num=const.EE_ANGLE_SAMPLE_SIZE)
    for rot in angle_range:
        # rotate new ee_pose around can's rotation axis
        rot_mat = matrixFromAxisAngle([0, 0, rot])
        ee_trans = target_trans.dot(rot_mat)
        ee_rot = OpenRAVEBody.obj_pose_from_transform(ee_trans)[3:]
        possible_ee_poses.append((ee_pos, ee_rot))
    return possible_ee_poses
コード例 #17
0
    def test_add_circle(self):
        """Create two colored cylinders and verify each body's transform."""
        green_attrs = {"geom": [1], "pose": [(3, 5)], "_type": ["Can"], "name": ["can0"]}
        green_types = {"geom": circle.GreenCircle, "pose": Vector2d, "_type": str, "name": str}
        green_can = parameter.Object(green_attrs, green_types)

        blue_attrs = {"geom": [1], "pose": [(3, 5)], "_type": ["Can"], "name": ["can0"]}
        blue_types = {"geom": circle.BlueCircle, "pose": Vector2d, "_type": str, "name": str}
        blue_can = parameter.Object(blue_attrs, blue_types)

        env = Environment()
        """
        to ensure the _add_circle and create_cylinder is working, uncomment the
        line below to make sure cylinders are being created
        """
        # env.SetViewer('qtcoin')

        green_body = OpenRAVEBody(env, "can0", green_can.geom)
        blue_body = OpenRAVEBody(env, "can1", blue_can.geom)

        green_body.set_pose([2, 0])
        want = np.eye(4)
        want[0, 3] = 2
        self.assertTrue(np.allclose(green_body.env_body.GetTransform(), want))

        blue_body.set_pose(np.array([0, -1]))
        want = np.eye(4)
        want[1, 3] = -1
        self.assertTrue(np.allclose(blue_body.env_body.GetTransform(), want))
コード例 #18
0
ファイル: can_solver.py プロジェクト: m-j-mcdonald/tampy
    def sample_rp_from_ee(self, env, rs_param, ee_pose):
        """
            Use inverse kinematics to enumerate robot configurations
            (base pose + arm poses) that reach the given end-effector pose.

            env: OpenRAVE environment to spawn the sampling body into
            rs_param: robot-pose parameter supplying name/lArmPose/lGripper/rGripper
            ee_pose: tuple of (ee_pos, ee_rot)
            return: list of full robot poses with format of
                    (basepose, backHeight, lArmPose, lGripper, rArmPose, rGripper)
        """
        ee_pos, ee_rot = ee_pose[0], ee_pose[1]
        # Grid of candidate base offsets around the end-effector x/y position.
        base_raidus = np.linspace(-1, 1, num=BASE_SAMPLE_SIZE)
        poses = list(itertools.product(base_raidus, base_raidus))
        # sample
        dummy_body = OpenRAVEBody(env, rs_param.name, PR2())
        possible_robot_pose = []
        for pos in poses:
            bp = pos + ee_pos[:2]
            vec = ee_pos[:2] - bp
            vec = vec / np.linalg.norm(vec)
            # Orient the base to face the end-effector position.
            theta = math.atan2(vec[1], vec[0])
            bp = np.array([bp[0], bp[1], theta])
            dummy_body.set_pose(bp)
            # BUGFIX: run IK on the sampling body (`robot_body` was undefined).
            arm_poses = dummy_body.ik_arm_pose(ee_pos, ee_rot)
            for arm_pose in arm_poses:
                # BUGFIX: index the current IK solution (`ee_arm_poses` was
                # undefined); arm_pose[0] is backHeight, arm_pose[1:] the arm.
                possible_robot_pose.append(
                    (bp, arm_pose[0], rs_param.lArmPose, rs_param.lGripper,
                     arm_pose[1:], rs_param.rGripper))

        # BUGFIX: removed leftover ipdb.set_trace() debugging breakpoint.
        dummy_body.delete()
        return possible_robot_pose
コード例 #19
0
ファイル: can_solver.py プロジェクト: m-j-mcdonald/tampy
    def sample_ee_from_target(self, target):
        """
            Sample all possible EE Pose that pr2 can grasp with

            target: parameter of type Target
            return: list of tuple in the format of (ee_pos, ee_rot)
        """
        possible_ee_poses = []
        targ_pos, targ_rot = target.value.flatten(), target.rotation.flatten()
        ee_pos = targ_pos.copy()
        # The target transform is loop-invariant: compute it once (the
        # original rebuilt it every iteration and also had a dead copy
        # before the loop).
        target_trans = OpenRAVEBody.transform_from_obj_pose(targ_pos, targ_rot)
        # rotate can's local z-axis by the amount of linear spacing between 0 to 2pi
        # NOTE(review): linspace includes both endpoints, so the first and
        # last samples coincide — confirm whether endpoint=False was intended.
        angle_range = np.linspace(0, np.pi * 2, num=SAMPLE_SIZE)
        for rot in angle_range:
            # rotate new ee_pose around can's rotation axis
            rot_mat = matrixFromAxisAngle([0, 0, rot])
            ee_trans = target_trans.dot(rot_mat)
            ee_rot = OpenRAVEBody.obj_pose_from_transform(ee_trans)[3:]
            possible_ee_poses.append((ee_pos, ee_rot))
        return possible_ee_poses
コード例 #20
0
def resample_rrt_planner(pred, netgated, t, plan):
    """Replan a failing right-arm trajectory segment with an RRT.

    pred: predicate whose failing timesteps bound the segment to replan
    netgated: negation flag forwarded to pred.test (parameter name kept —
              although misspelled — for interface compatibility)
    t: timestep (unused here; kept for resampler signature compatibility)
    plan: plan providing the environment for the RRT
    return: (array of resampled values, attribute index map)
    """
    # BUGFIX: the body referenced `negated`, which was undefined — the
    # parameter is (mis)spelled `netgated`.
    negated = netgated
    robot = pred.robot
    body = pred._param_to_body[robot].env_body
    attr_inds = OrderedDict()
    res = []
    # Timesteps where the predicate fails delimit the segment to replan.
    pred_test = [not pred.test(k, negated) for k in range(20)]
    resample_ts = np.where(pred_test)[0]
    start, end = resample_ts[0] - 1, resample_ts[-1] + 1

    # Position the robot body at the segment start before planning.
    rave_body = pred._param_to_body[pred.robot]
    dof_value_map = {
        "lArmPose": pred.robot.lArmPose[:, start],
        "lGripper": 0.02,
        "rArmPose": pred.robot.rArmPose[:, start],
        "rGripper": 0.02
    }
    rave_body.set_dof(dof_value_map)
    rave_body.set_pose([0, 0, pred.robot.pose[:, start][0]])

    # Plan an RRT trajectory for the right arm between segment endpoints.
    # (Removed unused locals: startp/endp, the manipulator-transform/gripper
    # direction computation, and a duplicated body/active_dof lookup.)
    active_dof = body.GetManipulator('right_arm').GetArmIndices()
    r_arm = pred.robot.rArmPose
    traj = get_rrt_traj(plan.env, body, active_dof, r_arm[:, start],
                        r_arm[:, end])
    result = process_traj(traj, end - start)
    body.SetActiveDOFs(range(18))
    for time in range(start + 1, end):
        robot_attr_name_val_tuples = [('rArmPose', result[:,
                                                          time - start - 1])]
        add_to_attr_inds_and_res(time, attr_inds, res, pred.robot,
                                 robot_attr_name_val_tuples)
    return np.array(res), attr_inds
コード例 #21
0
    def test_add_circle(self):
        """Spawn a green and a blue cylinder and check both body transforms."""
        attrs = {"geom": [1], "pose": [(3, 5)], "_type": ["Can"], "name": ["can0"]}
        attr_types = {"geom": circle.GreenCircle, "pose": Vector2d, "_type": str, "name": str}
        green_can = parameter.Object(attrs, attr_types)

        attrs = {"geom": [1], "pose": [(3, 5)], "_type": ["Can"], "name": ["can0"]}
        attr_types = {"geom": circle.BlueCircle, "pose": Vector2d, "_type": str, "name": str}
        blue_can = parameter.Object(attrs, attr_types)

        env = Environment()
        """
        to ensure the _add_circle and create_cylinder is working, uncomment the
        line below to make sure cylinders are being created
        """
        # env.SetViewer('qtcoin')

        green_body = OpenRAVEBody(env, "can0", green_can.geom)
        blue_body = OpenRAVEBody(env, "can1", blue_can.geom)

        green_body.set_pose([2, 0])
        expected = np.eye(4)
        expected[0, 3] = 2
        self.assertTrue(np.allclose(green_body.env_body.GetTransform(), expected))

        blue_body.set_pose(np.array([0, -1]))
        expected = np.eye(4)
        expected[1, 3] = -1
        self.assertTrue(np.allclose(blue_body.env_body.GetTransform(), expected))
コード例 #22
0
    def sample_rp_from_ee(t, plan, ee_poses, robot):
        """Sample full robot poses that reach each candidate end-effector pose.

        t: timestep whose current robot values seed the sampling
        plan: plan providing the OpenRAVE environment
        ee_poses: iterable of end-effector pose parameters
        robot: robot parameter supplying geometry and current DOF values
        return: list of stacked robot poses
                (base, torso, lArmPose, lGripper, rArmPose, rGripper)
        """
        # BUGFIX: removed dead `target_pose = target.value[:, 0]` — `target`
        # was undefined in this scope and the value was never used.
        dummy_body = OpenRAVEBody(plan.env, robot.name + '_dummy', robot.geom)
        possible_rps = []
        for ee_pose in ee_poses:
            base_pose = sampling.get_col_free_base_pose_around_target(
                t, plan, ee_pose.value, robot).reshape((1, 3))
            ik_arm_poses = dummy_body.ik_arm_pose(ee_pose[0], ee_pose[1])
            # Pick the IK solution closest to the current torso+arm config.
            cur_arm_poses = np.c_[robot.backHeight, robot.rArmPose].reshape(
                (8, ))
            chosen_pose = sampling.closest_arm_pose(ik_arm_poses,
                                                    cur_arm_poses)
            torso_pose, arm_pose = chosen_pose[0], chosen_pose[1:]
            # BUGFIX: use the chosen IK arm pose (`rArmPose` was undefined).
            possible_rp = np.hstack(
                (base_pose, torso_pose, robot.lArmPose[:, t],
                 robot.lGripper[:, t], arm_pose, robot.rGripper[:, t]))
            possible_rps.append(possible_rp)

        dummy_body.delete()
        if len(possible_rps) <= 0:
            # BUGFIX: py2 print statement made this a py3 syntax error.
            print("something went wrong")
            # import ipdb; ipdb.set_trace()

        return possible_rps
コード例 #23
0
ファイル: test_openrave_body.py プロジェクト: Simon0xzx/tampy
    def test_get_ik_arm_pose(self):
        """Fixture: place a PR2 and a can at known poses ahead of an IK query."""
        robot = self.setup_robot()
        can = self.setup_can()
        test_env = self.setup_environment()

        robot_body = OpenRAVEBody(test_env, robot.name, robot.geom)

        # robot.pose = np.array([-.5, 0, 0]).reshape((3,1))
        robot.pose = np.array([-0., 0, 0]).reshape((3,1))
        robot_body.set_pose(robot.pose)
        # NOTE(review): positional set_dof signature; other snippets in this
        # project pass a dict of DOF values — confirm which API version this
        # file targets.
        robot_body.set_dof(robot.backHeight, robot.lArmPose, robot.lGripper, robot.rArmPose, robot.rGripper)
        # can.pose = np.array([-0.31622543, -0.38142561,  1.19321209]).reshape((3,1))
        # can.pose = np.array([0.5, -0.2,  .8]).reshape((3,1))
        can.pose = np.array([.5, .2, .8]).reshape((3,1))
        # can.rotation = np.array([ 0.04588155, -0.38504402,  0.19207589]).reshape((3,1))
        can.rotation = np.array([ -.0, -0.,  0.]).reshape((3,1))
        can_body = OpenRAVEBody(test_env, can.name, can.geom)
        can_body.set_pose(can.pose, can.rotation)
コード例 #24
0
def get_col_free_armPose_ik(pred, negated, t, plan):
    """Randomized IK resampling around the current right-arm gripper pose.

    Perturbs the end-effector pose with uniform position/rotation noise until
    an IK solution is found or const.MAX_ITERATION_STEP attempts are used.

    pred: predicate providing the robot body via _param_to_body
    negated, t, plan: kept for resampler signature compatibility (unused)
    return: the arm pose found, or None when no solution was found
    """
    # BUGFIX: `body`, `arm_pose` and `iteration` were used uninitialized.
    # Presumably the body comes from the predicate, as in the other
    # resamplers in this project — TODO confirm.
    body = pred._param_to_body[pred.robot]
    ee_pose = OpenRAVEBody.obj_pose_from_transform(
        body.env_body.GetManipulator('right_arm').GetTransform())
    pos, rot = ee_pose[:3], ee_pose[3:]
    arm_pose = None
    iteration = 0
    while arm_pose is None and iteration < const.MAX_ITERATION_STEP:
        # Sample uniform noise in [-BIAS_RADIUS, BIAS_RADIUS] (position) and
        # [-ROT_BIAS, ROT_BIAS] (rotation).
        pos_bias = np.random.random_sample(
            (3, )) * const.BIAS_RADIUS * 2 - const.BIAS_RADIUS
        rot_bias = np.random.random_sample(
            (3, )) * const.ROT_BIAS * 2 - const.ROT_BIAS
        iteration += 1
        arm_pose = get_ik_from_pose(pos + pos_bias, rot + rot_bias,
                                    body.env_body, 'right_arm')
        if arm_pose is not None:
            body.set_dof({'rArmPose': arm_pose})
    # BUGFIX: removed py2 print-statement debug output (py3 syntax errors)
    # and added the missing return.
    return arm_pose
コード例 #25
0
 def test_exception(self):
     """Constructing a body with an unsupported geometry must raise."""
     env = Environment()
     attrs = {"geom": [], "pose": [(3, 5)], "_type": ["Can"], "name": ["can0"]}
     attr_types = {"geom": DummyGeom, "pose": Vector2d, "_type": str, "name": str}
     green_can = parameter.Object(attrs, attr_types)

     with self.assertRaises(OpenRAVEException) as cm:
         green_body = OpenRAVEBody(env, "can0", green_can.geom)
     self.assertTrue("Geometry not supported" in cm.exception.message)
コード例 #26
0
ファイル: test_sampling.py プロジェクト: Simon0xzx/ml_tampy
    def test_sample_ee_from_target(self):
        """Visual sanity check: render every sampled EE pose around a target can."""
        # NOTE(review): `solver` is unused below — possibly a leftover from an
        # earlier version of this test.
        solver = can_solver.CanSolver()
        env = ParamSetup.setup_env()
        # env.SetViewer('qtcoin')
        target = ParamSetup.setup_target()
        target.value = np.array([[0, 0, 0]]).T
        target.rotation = np.array([[1.1, .3, 0]]).T

        # Render the target as a semi-transparent blue can.
        dummy_targ_geom = can.BlueCan(0.04, 0.25)
        target_body = OpenRAVEBody(env, target.name, dummy_targ_geom)
        target_body.set_pose(target.value.flatten(), target.rotation.flatten())
        target_body.set_transparency(.7)

        robot = ParamSetup.setup_pr2()
        robot_body = OpenRAVEBody(env, robot.name, robot.geom)
        robot_body.set_transparency(.7)
        robot_body.set_pose(robot.pose.flatten())
        # Map every DOF group of the PR2 onto the OpenRAVE body.
        dof_value_map = {
            "backHeight": robot.backHeight,
            "lArmPose": robot.lArmPose.flatten(),
            "lGripper": robot.lGripper,
            "rArmPose": robot.rArmPose.flatten(),
            "rGripper": robot.rGripper
        }
        robot_body.set_dof(dof_value_map)

        # Spawn one semi-transparent green can per sampled end-effector pose.
        dummy_ee_pose_geom = can.GreenCan(.03, .3)
        ee_list = list(
            enumerate(
                sampling.get_ee_from_target(target.value, target.rotation)))
        for ee_pose in ee_list:
            ee_pos, ee_rot = ee_pose[1]
            body = OpenRAVEBody(env, "dummy" + str(ee_pose[0]),
                                dummy_ee_pose_geom)
            body.set_pose(ee_pos, ee_rot)
            body.set_transparency(.9)
コード例 #27
0
def resample_eereachable_rrt(pred, negated, t, plan, inv=False):
    """Resample arm poses and a joint-space trajectory for an EEReachable pred.

    IK-solves the end-effector pose of ``pred`` at timestep ``t`` together
    with the approach and retreat waypoints around it, locks the resampled
    attributes, then RRT-plans a base+right-arm trajectory from the owning
    action's first timestep to the start of the approach phase.

    Args:
        pred: EEReachable predicate; supplies robot, ee_pose and ee_resample.
        negated: unused; present for the resampler calling convention.
        t: timestep at which the gripper must reach pred.ee_pose.
        plan: Plan whose params are posed at t and whose sampling_trace is
            appended to.
        inv: when True, use the inverted (putdown) approach/retreat
            directions.

    Returns:
        (res, attr_inds) on success, or (None, None) when IK or RRT fails.
    """
    # Preparing the variables
    attr_inds, res = OrderedDict(), []
    robot, rave_body = pred.robot, pred.robot.openrave_body
    target_pos, target_rot = pred.ee_pose.value.flatten(
    ), pred.ee_pose.rotation.flatten()
    body = rave_body.env_body
    manip_name = "right_arm"
    active_dof = body.GetManipulator(manip_name).GetArmIndices()
    # DOF 0 is planned together with the arm joints.
    active_dof = np.hstack([[0], active_dof])
    # Make sure baxter is well positioned in the env
    rave_body.set_pose([0, 0, robot.pose[:, t]])
    rave_body.set_dof({
        'lArmPose': robot.lArmPose[:, t].flatten(),
        'rArmPose': robot.rArmPose[:, t].flatten(),
        "lGripper": np.array([0.02]),
        "rGripper": np.array([0.02])
    })
    for param in plan.params.values():
        if not param.is_symbol() and param != robot:
            param.openrave_body.set_pose(param.pose[:, t].flatten(),
                                         param.rotation[:, t].flatten())
    # Resample poses at grasping time
    grasp_arm_pose = get_ik_from_pose(target_pos, target_rot, body, manip_name)

    # When Ik infeasible
    if grasp_arm_pose is None:
        return None, None
    add_to_attr_inds_and_res(t, attr_inds, res, robot,
                             [('rArmPose', grasp_arm_pose.copy()),
                              ('pose', robot.pose[:, t])])
    # Store sampled pose
    plan.sampling_trace.append({
        'type': robot.get_type(),
        'data': {
            'rArmPose': grasp_arm_pose
        },
        'timestep': t,
        'pred': pred,
        'action': "grasp"
    })
    # Prepare grasping direction and lifting direction
    manip_trans = body.GetManipulator("right_arm").GetTransform()
    pose = OpenRAVEBody.obj_pose_from_transform(manip_trans)
    manip_trans = OpenRAVEBody.get_ik_transform(pose[:3], pose[3:])
    if inv:
        # inverse resample_eereachable used in putdown action
        approach_dir = manip_trans[:3, :3].dot(np.array([0, 0, -1]))
        retreat_dir = manip_trans[:3, :3].dot(np.array([-1, 0, 0]))
        approach_dir = approach_dir / np.linalg.norm(
            approach_dir) * const.APPROACH_DIST
        retreat_dir = -retreat_dir / np.linalg.norm(
            retreat_dir) * const.RETREAT_DIST
    else:
        # Normal resample eereachable used in grasp action
        approach_dir = manip_trans[:3, :3].dot(np.array([-1, 0, 0]))
        retreat_dir = manip_trans[:3, :3].dot(np.array([0, 0, -1]))
        approach_dir = -approach_dir / np.linalg.norm(
            approach_dir) * const.APPROACH_DIST
        retreat_dir = retreat_dir / np.linalg.norm(
            retreat_dir) * const.RETREAT_DIST

    resample_failure = False
    # Resample entire approaching and retreating traj
    # NOTE(review): the literal 3 below assumes const.EEREACHABLE_STEPS == 3;
    # confirm if that constant ever changes.
    for i in range(const.EEREACHABLE_STEPS):
        approach_pos = target_pos + approach_dir * (3 - i)
        approach_arm_pose = get_ik_from_pose(approach_pos, target_rot, body,
                                             'right_arm')
        retreat_pos = target_pos + retreat_dir * (i + 1)
        retreat_arm_pose = get_ik_from_pose(retreat_pos, target_rot, body,
                                            'right_arm')

        if approach_arm_pose is None or retreat_arm_pose is None:
            # IK failed for a waypoint: skip the writes below so a None pose
            # is never pushed into res (the whole resample is rejected after
            # the loop anyway).
            resample_failure = True
            continue
        add_to_attr_inds_and_res(t - 3 + i, attr_inds, res, robot,
                                 [('rArmPose', approach_arm_pose)])
        add_to_attr_inds_and_res(t + 1 + i, attr_inds, res, robot,
                                 [('rArmPose', retreat_arm_pose)])
    # Ik infeasible
    if resample_failure:
        plan.sampling_trace[-1]['reward'] = -1
        return None, None
    # lock the variables
    robot._free_attrs['rArmPose'][:, t - const.EEREACHABLE_STEPS:t +
                                  const.EEREACHABLE_STEPS + 1] = 0
    robot._free_attrs['pose'][:, t - const.EEREACHABLE_STEPS:t +
                              const.EEREACHABLE_STEPS + 1] = 0
    # finding initial pose
    init_timestep, ref_index = 0, 0
    for i in range(len(plan.actions)):
        act_range = plan.actions[i].active_timesteps
        if act_range[0] <= t <= act_range[1]:
            init_timestep = act_range[0]
            ref_index = i

    if pred.ee_resample is True and ref_index > 0:
        init_timestep = plan.actions[ref_index - 1].active_timesteps[0]

    init_dof = robot.rArmPose[:, init_timestep].flatten()
    init_dof = np.hstack([robot.pose[:, init_timestep], init_dof])
    end_dof = robot.rArmPose[:, t - const.EEREACHABLE_STEPS].flatten()
    end_dof = np.hstack([robot.pose[:, t - const.EEREACHABLE_STEPS], end_dof])
    timesteps = t - const.EEREACHABLE_STEPS - init_timestep + 2

    raw_traj = get_rrt_traj(plan.env, body, active_dof, init_dof, end_dof)
    # Restore dof0
    dof = body.GetActiveDOFValues()
    dof[0] = 0
    body.SetActiveDOFValues(dof)
    # trajectory is infeasible
    # `is None` rather than `== None`: an ndarray compared to None yields an
    # elementwise array whose truth value is ambiguous.
    if raw_traj is None:
        plan.sampling_trace[-1]['reward'] = -1
        return None, None
    # initialize feasible trajectory
    result_traj = process_traj(raw_traj, timesteps).T[1:-1]
    ts = 1
    for traj in result_traj:
        add_to_attr_inds_and_res(init_timestep + ts, attr_inds, res, robot,
                                 [('rArmPose', traj[1:]), ('pose', traj[:1])])
        ts += 1

    pred.ee_resample = True
    can = plan.params['can0']
    can.openrave_body.set_pose(can.pose[:, t], can.rotation[:, t])
    rave_body.set_dof({'rArmPose': robot.rArmPose[:, t]})
    return np.array(res), attr_inds
コード例 #28
0
ファイル: test_sampling.py プロジェクト: Simon0xzx/ml_tampy
 def test_closest_arm_pose(self):
     """Visual check: torso+arm IK toward a can grasped side-on.

     Poses a blue can and a PR2, rotates the grasp frame a quarter turn
     about y, solves get_torso_arm_ik, then restores the robot's initial
     DOF configuration.
     """
     env = ParamSetup.setup_env()
     # env.SetViewer('qtcoin')
     can = ParamSetup.setup_blue_can()
     robot = ParamSetup.setup_pr2()
     can.pose = np.array([[0, -.2, .8]]).T
     can_body = OpenRAVEBody(env, can.name, can.geom)
     can_body.set_pose(can.pose.flatten(), can.rotation.flatten())
     can_body.set_transparency(.7)
     robot.pose = np.array([[-.5, 0, 0]]).T
     robot_body = OpenRAVEBody(env, robot.name, robot.geom)
     robot_body.set_transparency(.7)
     robot_body.set_pose(robot.pose.flatten())
     # Same DOF configuration is applied before and after the IK query.
     initial_dof = {
         "backHeight": robot.backHeight,
         "lArmPose": robot.lArmPose.flatten(),
         "lGripper": robot.lGripper,
         "rArmPose": robot.rArmPose.flatten(),
         "rGripper": robot.rGripper
     }
     robot_body.set_dof(initial_dof)
     grasp_trans = OpenRAVEBody.transform_from_obj_pose(can.pose, can.rotation)
     # Tilt the grasp frame 90 degrees about the y axis.
     spin = matrixFromAxisAngle([0, np.pi / 2, 0])
     grasp_trans[:3, :3] = grasp_trans[:3, :3].dot(spin[:3, :3])
     torso_pose, arm_pose = sampling.get_torso_arm_ik(
         robot_body, grasp_trans, robot.rArmPose)
     robot_body.set_dof(initial_dof)
コード例 #29
0
ファイル: test_sampling.py プロジェクト: Simon0xzx/tampy
    def test_sample_ee_from_target(self):
        """Visual sanity check of end-effector poses sampled for a target.

        Renders the target as a blue can, a transparent PR2, and one green
        marker can per sampled end-effector pose.
        """
        solver = can_solver.CanSolver()
        env = ParamSetup.setup_env()
        # env.SetViewer('qtcoin')
        target = ParamSetup.setup_target()
        target.value = np.array([[0, 0, 0]]).T
        target.rotation = np.array([[1.1, .3, 0]]).T

        # Blue can marks the target itself.
        targ_body = OpenRAVEBody(env, target.name, can.BlueCan(0.04, 0.25))
        targ_body.set_pose(target.value.flatten(), target.rotation.flatten())
        targ_body.set_transparency(.7)

        robot = ParamSetup.setup_pr2()
        pr2_body = OpenRAVEBody(env, robot.name, robot.geom)
        pr2_body.set_transparency(.7)
        pr2_body.set_pose(robot.pose.flatten())
        pr2_body.set_dof(robot.backHeight, robot.lArmPose.flatten(),
                         robot.lGripper, robot.rArmPose.flatten(),
                         robot.rGripper)

        # Green marker can for each sampled end-effector pose.
        marker_geom = GreenCan(.03, .3)
        for idx, (ee_pos, ee_rot) in enumerate(
                sampling.get_ee_from_target(target)):
            marker = OpenRAVEBody(env, "dummy" + str(idx), marker_geom)
            marker.set_pose(ee_pos, ee_rot)
            marker.set_transparency(.9)
コード例 #30
0
    def test_sample_ee_from_target(self):
        """Visualize sampled EE poses for a target and IK-animate them.

        Builds a Target symbol and a PR2 Object by hand, draws a green marker
        can at every pose returned by solver.sample_ee_from_target, and
        defines a helper that cycles through the IK solutions for one sample
        (intended for interactive use with the viewer enabled).
        """
        from openravepy import Environment
        from core.util_classes.can import BlueCan, GreenCan
        from core.util_classes.openrave_body import OpenRAVEBody
        from core.util_classes import matrix
        from core.util_classes.pr2 import PR2
        solver = can_solver.CanSolver()
        env = Environment()
        # env.SetViewer('qtcoin')
        attrs = {"name": ['targ'], "value": [(0, 1, .8)], "rotation": [(0,0,0)], "_type": ["Target"]}
        attr_types = {"name": str, "value": matrix.Vector3d, "rotation": matrix.Vector3d, "_type": str}
        target = parameter.Symbol(attrs, attr_types)
        target.rotation = np.array([[1.1,.3,0]]).T
        # Blue can marks the target; faint green cans mark each EE sample.
        dummy_targ_geom = BlueCan(0.04, 0.25)
        target_body = OpenRAVEBody(env, target.name, dummy_targ_geom)
        target_body.set_pose(target.value.flatten(), target.rotation.flatten())
        target_body.set_transparency(.7)
        dummy_ee_pose_geom = GreenCan(.03,.3)
        ee_list = list(enumerate(solver.sample_ee_from_target(target)))
        for ee_pose in ee_list:
            ee_pos, ee_rot = ee_pose[1]
            body = OpenRAVEBody(env, "dummy"+str(ee_pose[0]), dummy_ee_pose_geom)
            body.set_pose(ee_pos, ee_rot)
            body.set_transparency(.9)

        attrs = {"name": ['pr2'], "pose": [(-.45, 1.19,-.1)], "_type": ["Robot"], "geom": [], "backHeight": [0.2], "lGripper": [0.5], "rGripper": [0.5]}
        attrs["lArmPose"] = [(np.pi/4, np.pi/8, np.pi/2, -np.pi/2, np.pi/8, -np.pi/8, np.pi/2)]
        attrs["rArmPose"] = [(-np.pi/4, np.pi/8, -np.pi/2, -np.pi/2, -np.pi/8, -np.pi/8, np.pi/2)]
        attr_types = {"name": str, "pose": matrix.Vector3d, "_type": str, "geom": PR2, "backHeight": matrix.Value, "lArmPose": matrix.Vector7d, "rArmPose": matrix.Vector7d, "lGripper": matrix.Value, "rGripper": matrix.Value}
        robot = parameter.Object(attrs, attr_types)
        robot_body = OpenRAVEBody(env, robot.name, robot.geom)
        robot_body.set_transparency(.7)
        robot_body.set_pose(robot.pose.flatten())
        robot_body.set_dof(robot.backHeight, robot.lArmPose.flatten(), robot.lGripper, robot.rArmPose.flatten(), robot.rGripper)
        # Interactive helper: animate every IK solution for sample n.
        # NOTE(review): never called in this test — presumably meant to be
        # invoked from a debugger/viewer session; confirm.
        def set_arm(n):
            pos, rot = ee_list[n][1][0], ee_list[n][1][1]
            iksol = robot_body.ik_arm_pose(pos, rot)
            for k in range(len(iksol)):
                robot_body.set_pose(robot.pose.flatten())
                robot_body.set_dof(iksol[k][0], robot.lArmPose.flatten(), robot.lGripper, iksol[k][1:], robot.rGripper)
                time.sleep(0.03)
コード例 #31
0
    def test_ik_arm_pose(self):
        """Solve right-arm IK for a rotated can and verify the gripper frame.

        Builds a PR2 (arms pre-posed away from joint limits) and a blue can,
        applies the first ik_arm_pose solution, and asserts the
        r_gripper_tool_frame matches the can's pose and rotation.

        NOTE(review): the trailing triple-quote at the end of this method is
        unmatched in this excerpt — presumably the assertions were toggled
        via a docstring comment in the original file; confirm against the
        full source.
        """
        env = Environment()
        attrs = {"name": ["pr2"], "pose": [(0, 0, 0)], "_type": ["Robot"], "geom": [], "backHeight": [0.2], "lGripper": [0.5], "rGripper": [0.5]}
        attrs["lArmPose"] = [(0,0,0,0,0,0,0)]
        attrs["rArmPose"] = [(0,0,0,0,0,0,0)]
        attr_types = {"name": str, "pose": matrix.Vector3d, "_type": str, "geom": PR2, "backHeight": matrix.Value, "lArmPose": matrix.Vector7d, "rArmPose": matrix.Vector7d, "lGripper": matrix.Value, "rGripper": matrix.Value}
        robot = parameter.Object(attrs, attr_types)
        # Set the initial arm pose so that pose is not close to joint limit
        robot.lArmPose = np.array([[np.pi/4, np.pi/8, np.pi/2, -np.pi/2, np.pi/8, -np.pi/8, np.pi/2]]).T
        robot.rArmPose = np.array([[-np.pi/4, np.pi/8, -np.pi/2, -np.pi/2, -np.pi/8, -np.pi/8, np.pi/2]]).T

        attrs = {"name": ["can"], "geom": (0.04, 0.25), "pose": ["undefined"], "rotation": [(0, 0, 0)], "_type": ["Can"]}
        attr_types = {"name": str, "geom": BlueCan, "pose": matrix.Vector3d, "rotation": matrix.Vector3d, "_type": str}
        can = parameter.Object(attrs, attr_types)
        can.pose = np.array([[5.77887566e-01,  -1.26743678e-01,   8.37601627e-01]]).T
        can.rotation = np.array([[np.pi/4, np.pi/4, np.pi/4]]).T
        # Create openRAVEBody for each parameter
        can_body = OpenRAVEBody(env, can.name, can.geom)
        robot_body = OpenRAVEBody(env, robot.name, robot.geom)
        # Set the poses and dof values for each body
        can_body.set_pose(can.pose, can.rotation)
        robot_body.set_pose(robot.pose)
        robot_body.set_dof(robot.backHeight, robot.lArmPose, robot.lGripper, robot.rArmPose, robot.rGripper)
        # Solve the IK solution
        ik_arm = robot_body.ik_arm_pose(can.pose, can.rotation)[0]
        # First element is torso height; the rest are the 7 arm joints.
        robot_body.set_dof(ik_arm[:1], robot.lArmPose, robot.lGripper, ik_arm[1:], robot.rGripper)
        robot_trans = robot_body.env_body.GetLink("r_gripper_tool_frame").GetTransform()
        robot_pos = OpenRAVEBody.obj_pose_from_transform(robot_trans)
        # resulted robot eepose should be exactly the same as that can pose
        self.assertTrue(np.allclose(can.pose.flatten(), robot_pos[:3], atol = 1e-4))
        self.assertTrue(np.allclose(can.rotation.flatten(), robot_pos[3:], atol = 1e-4))
        """
コード例 #32
0
 def test_base_pose_mat_trans(self):
     """Round-trip random base poses through the matrix conversion helpers."""
     for _ in range(N):
         original = np.random.rand(3)
         recovered = OpenRAVEBody.mat_to_base_pose(
             OpenRAVEBody.base_pose_to_mat(original))
         self.assertTrue(np.allclose(original, recovered))
コード例 #33
0
    def test_ik_arm_pose(self):
        """Solve right-arm IK for a rotated can and verify the gripper frame.

        Builds a PR2 (arms pre-posed away from joint limits) and a blue can,
        applies the first ik_arm_pose solution, and asserts the
        r_gripper_tool_frame matches the can's pose and rotation.

        NOTE(review): the trailing triple-quote at the end of this method is
        unmatched in this excerpt — presumably the assertions were toggled
        via a docstring comment in the original file; confirm against the
        full source.
        """
        env = Environment()
        attrs = {
            "name": ["pr2"],
            "pose": [(0, 0, 0)],
            "_type": ["Robot"],
            "geom": [],
            "backHeight": [0.2],
            "lGripper": [0.5],
            "rGripper": [0.5]
        }
        attrs["lArmPose"] = [(0, 0, 0, 0, 0, 0, 0)]
        attrs["rArmPose"] = [(0, 0, 0, 0, 0, 0, 0)]
        attr_types = {
            "name": str,
            "pose": matrix.Vector3d,
            "_type": str,
            "geom": PR2,
            "backHeight": matrix.Value,
            "lArmPose": matrix.Vector7d,
            "rArmPose": matrix.Vector7d,
            "lGripper": matrix.Value,
            "rGripper": matrix.Value
        }
        robot = parameter.Object(attrs, attr_types)
        # Set the initial arm pose so that pose is not close to joint limit
        robot.lArmPose = np.array([[
            np.pi / 4, np.pi / 8, np.pi / 2, -np.pi / 2, np.pi / 8, -np.pi / 8,
            np.pi / 2
        ]]).T
        robot.rArmPose = np.array([[
            -np.pi / 4, np.pi / 8, -np.pi / 2, -np.pi / 2, -np.pi / 8,
            -np.pi / 8, np.pi / 2
        ]]).T

        attrs = {
            "name": ["can"],
            "geom": (0.04, 0.25),
            "pose": ["undefined"],
            "rotation": [(0, 0, 0)],
            "_type": ["Can"]
        }
        attr_types = {
            "name": str,
            "geom": BlueCan,
            "pose": matrix.Vector3d,
            "rotation": matrix.Vector3d,
            "_type": str
        }
        can = parameter.Object(attrs, attr_types)
        can.pose = np.array([[5.77887566e-01, -1.26743678e-01,
                              8.37601627e-01]]).T
        can.rotation = np.array([[np.pi / 4, np.pi / 4, np.pi / 4]]).T
        # Create openRAVEBody for each parameter
        can_body = OpenRAVEBody(env, can.name, can.geom)
        robot_body = OpenRAVEBody(env, robot.name, robot.geom)
        # Set the poses and dof values for each body
        can_body.set_pose(can.pose, can.rotation)
        robot_body.set_pose(robot.pose)
        robot_body.set_dof(robot.backHeight, robot.lArmPose, robot.lGripper,
                           robot.rArmPose, robot.rGripper)
        # Solve the IK solution
        ik_arm = robot_body.ik_arm_pose(can.pose, can.rotation)[0]
        # First element is torso height; the rest are the 7 arm joints.
        robot_body.set_dof(ik_arm[:1], robot.lArmPose, robot.lGripper,
                           ik_arm[1:], robot.rGripper)
        robot_trans = robot_body.env_body.GetLink(
            "r_gripper_tool_frame").GetTransform()
        robot_pos = OpenRAVEBody.obj_pose_from_transform(robot_trans)
        # resulted robot eepose should be exactly the same as that can pose
        self.assertTrue(
            np.allclose(can.pose.flatten(), robot_pos[:3], atol=1e-4))
        self.assertTrue(
            np.allclose(can.rotation.flatten(), robot_pos[3:], atol=1e-4))
        """
コード例 #34
0
 def test_baxter_ik(self):
     """Sweep grasp-roll angles and check Baxter right-arm IK solutions.

     For each roll angle theta, asks the right_arm manipulator for
     collision-free Transform6D IK solutions at a fixed target position,
     applies each solution, and asserts the gripper lands on the target.
     """
     env = ParamSetup.setup_env()
     baxter = ParamSetup.setup_baxter()
     can = ParamSetup.setup_green_can(geom=(0.02, 0.25))
     baxter_body = OpenRAVEBody(env, baxter.name, baxter.geom)
     can_body = OpenRAVEBody(env, can.name, can.geom)
     baxter_body.set_transparency(0.5)
     can_body.set_transparency(0.5)
     manip = baxter_body.env_body.GetManipulator('right_arm')
     robot = baxter_body.env_body
     can = can_body.env_body
     dof = robot.GetActiveDOFValues()
     #Open the Gripper so there won't be collisions between gripper and can
     dof[9], dof[-1] = 0.02, 0.02
     robot.SetActiveDOFValues(dof)
     iktype = IkParameterizationType.Transform6D
     thetas = np.linspace(0, np.pi * 2, 10)
     target_trans = OpenRAVEBody.transform_from_obj_pose([0.9, -0.23, 0.93],
                                                         [0, 0, 0])
     can_body.env_body.SetTransform(target_trans)
     # Rotate the grasp frame a quarter turn about y before the sweep.
     target_trans[:3, :3] = target_trans[:3, :3].dot(
         matrixFromAxisAngle([0, np.pi / 2, 0])[:3, :3])
     # NOTE(review): can_trans aliases target_trans (no copy), so the
     # in-place assignment inside the loop mutates target_trans too and the
     # rotations compose across iterations — confirm this is intended.
     can_trans = target_trans
     body_trans = np.eye(4)
     """
     To check whether baxter ik model works, uncomment the following
     """
     # env.SetViewer('qtcoin')
     for theta in thetas:
         can_trans[:3, :3] = target_trans[:3, :3].dot(
             matrixFromAxisAngle([theta, 0, 0])[:3, :3])
         solution = manip.FindIKSolutions(
             IkParameterization(can_trans, iktype),
             IkFilterOptions.CheckEnvCollisions)
         if len(solution) > 0:
             print "Solution found with pose and rotation:"
             print OpenRAVEBody.obj_pose_from_transform(can_trans)
         else:
             print "Solution not found with pose and rotation:"
             print OpenRAVEBody.obj_pose_from_transform(can_trans)
         for sols in solution:
             # Joints 10-16 are the right-arm DOFs being driven here.
             dof[10:17] = sols
             robot.SetActiveDOFValues(dof)
             time.sleep(.2)
             body_trans[:3, :3] = can_trans[:3, :3].dot(
                 matrixFromAxisAngle([0, -np.pi / 2, 0])[:3, :3])
             can_body.env_body.SetTransform(body_trans)
             self.assertTrue(
                 np.allclose([0.9, -0.23, 0.93],
                             manip.GetTransform()[:3, 3]))
コード例 #35
0
def ee_reachable_resample(pred, negated, t, plan):
    """Resample base/torso/arm values so the EEReachable predicate holds at t.

    Samples a collision-free base pose around the grasp target, IK-solves the
    grasp plus its approach and retreat waypoints, then writes linearly
    interpolated approach/retreat trajectories into the plan attributes.

    Args:
        pred: EEReachable predicate (provides robot, ee_pose, _steps).
        negated: must be False; resampling a negated EEReachable is
            unsupported here.
        t: timestep at which the end-effector pose must be reached.
        plan: the Plan whose parameters are read and resampled.

    Returns:
        (res, attr_inds): resampled values and their attribute indices.

    NOTE(review): the st() calls drop into a debugger and a viewer is created
    unconditionally — presumably leftover interactive debugging; confirm
    before relying on this in automated runs.
    """
    assert not negated
    handles = []
    v = OpenRAVEViewer.create_viewer()

    def target_trans_callback(target_trans):
        # Draw the candidate gripper transform and refresh the scene at t.
        handles.append(plot_transform(v.env, target_trans))
        v.draw_plan_ts(plan, t)

    def plot_time_step_callback():
        v.draw_plan_ts(plan, t)

    plot_time_step_callback()

    targets = plan.get_param('GraspValid', 1, {0: pred.ee_pose})
    assert len(targets) == 1
    # confirm target is correct
    target_pose = targets[0].value[:, 0]
    set_robot_body_to_pred_values(pred, t)

    # NOTE(review): theta is never used below.
    theta = 0
    robot = pred.robot
    robot_body = pred._param_to_body[robot]
    for _ in range(NUM_EEREACHABLE_RESAMPLE_ATTEMPTS):
        # generate collision free base pose
        base_pose = get_col_free_base_pose_around_target(
            t,
            plan,
            target_pose,
            robot,
            save=True,
            dist=OBJ_RING_SAMPLING_RADIUS,
            callback=plot_time_step_callback)
        if base_pose is None:
            print "we should always be able to sample a collision-free base pose"
            st()
        # generate collision free arm pose
        target_rot = np.array([get_random_theta(), 0, 0])

        torso_pose, arm_pose = get_col_free_torso_arm_pose(
            t,
            target_pose,
            target_rot,
            robot,
            robot_body,
            save=True,
            arm_pose_seed=None,
            callback=target_trans_callback)
        st()
        if torso_pose is None:
            print "we should be able to find an IK"
            continue

        # generate approach IK
        ee_trans = OpenRAVEBody.transform_from_obj_pose(
            target_pose, target_rot)
        rel_pt = pred.get_rel_pt(-pred._steps)
        target_pose_approach = np.dot(ee_trans, np.r_[rel_pt, 1])[:3]

        torso_pose_approach, arm_pose_approach = get_col_free_torso_arm_pose(
            t,
            target_pose_approach,
            target_rot,
            robot,
            robot_body,
            save=True,
            arm_pose_seed=arm_pose,
            callback=target_trans_callback)
        st()
        if torso_pose_approach is None:
            continue

        # generate retreat IK
        ee_trans = OpenRAVEBody.transform_from_obj_pose(
            target_pose, target_rot)
        rel_pt = pred.get_rel_pt(pred._steps)
        target_pose_retreat = np.dot(ee_trans, np.r_[rel_pt, 1])[:3]

        torso_pose_retreat, arm_pose_retreat = get_col_free_torso_arm_pose(
            t,
            target_pose_retreat,
            target_rot,
            robot,
            robot_body,
            save=True,
            arm_pose_seed=arm_pose,
            callback=target_trans_callback)
        st()
        if torso_pose_retreat is not None:
            break
    else:
        # for/else: reached only when every attempt failed to break out.
        print "we should always be able to sample a collision-free base and arm pose"
        st()

    attr_inds = OrderedDict()
    res = []
    # Linearly interpolate approach (pre-grasp -> grasp) and retreat
    # (grasp -> post-grasp) segments; base pose is held constant.
    arm_approach_traj = lin_interp_traj(arm_pose_approach, arm_pose,
                                        pred._steps)
    torso_approach_traj = lin_interp_traj(torso_pose_approach, torso_pose,
                                          pred._steps)
    base_approach_traj = lin_interp_traj(base_pose, base_pose, pred._steps)

    arm_retreat_traj = lin_interp_traj(arm_pose, arm_pose_retreat, pred._steps)
    torso_retreat_traj = lin_interp_traj(torso_pose, torso_pose_retreat,
                                         pred._steps)
    base_retreat_traj = lin_interp_traj(base_pose, base_pose, pred._steps)

    # Drop the duplicated grasp column when stitching the two segments.
    arm_traj = np.hstack((arm_approach_traj, arm_retreat_traj[:, 1:]))
    torso_traj = np.hstack((torso_approach_traj, torso_retreat_traj[:, 1:]))
    base_traj = np.hstack((base_approach_traj, base_retreat_traj[:, 1:]))

    # add attributes for approach and retreat
    for ind in range(2 * pred._steps + 1):
        robot_attr_name_val_tuples = [('rArmPose', arm_traj[:, ind]),
                                      ('backHeight', torso_traj[:, ind]),
                                      ('pose', base_traj[:, ind])]
        add_to_attr_inds_and_res(t + ind - pred._steps, attr_inds, res,
                                 pred.robot, robot_attr_name_val_tuples)
    st()

    ee_pose_attr_name_val_tuples = [('value', target_pose),
                                    ('rotation', target_rot)]
    add_to_attr_inds_and_res(t, attr_inds, res, pred.ee_pose,
                             ee_pose_attr_name_val_tuples)
    # v.draw_plan_ts(plan, t)
    v.animate_range(plan, (t - pred._steps, t + pred._steps))
    # check that indexes are correct
    # import ipdb; ipdb.set_trace()

    return np.array(res), attr_inds
コード例 #36
0
def resample_eereachable(pred, negated, t, plan):
    """Resample right-arm poses for an EEReachable grasp at timestep t.

    IK-solves the grasp pose, records it in plan.sampling_trace, then
    IK-solves linearly spaced approach (along the gripper approach axis) and
    retreat (lift) waypoints around t and locks the affected attributes.

    Args:
        pred: EEReachable predicate (provides robot and ee_pose).
        negated: unused; present for the resampler calling convention.
        t: timestep at which the gripper must reach pred.ee_pose.
        plan: Plan whose sampling_trace is appended to.

    Returns:
        (res, attr_inds): resampled values and their attribute indices.
    """
    attr_inds, res = OrderedDict(), []
    robot, rave_body = pred.robot, pred._param_to_body[pred.robot]
    target_pos, target_rot = pred.ee_pose.value.flatten(
    ), pred.ee_pose.rotation.flatten()
    body = rave_body.env_body
    rave_body.set_pose([0, 0, robot.pose[0, t]])
    # Resample poses at grasping time
    grasp_arm_pose = get_ik_from_pose(target_pos, target_rot, body,
                                      'right_arm')
    # NOTE(review): a None IK result is not handled here — .copy() below
    # would raise; confirm callers guarantee feasibility (compare the guard
    # in resample_eereachable_rrt).
    add_to_attr_inds_and_res(t, attr_inds, res, robot,
                             [('rArmPose', grasp_arm_pose.copy())])

    plan.sampling_trace.append({
        'type': robot.get_type(),
        'data': {
            'rArmPose': grasp_arm_pose
        },
        'timestep': t,
        'pred': pred,
        'action': "grasp"
    })

    # Setting poses for environments to extract transform infos
    dof_value_map = {
        "lArmPose": robot.lArmPose[:, t].reshape((7, )),
        "lGripper": 0.02,
        "rArmPose": grasp_arm_pose,
        "rGripper": 0.02
    }
    rave_body.set_dof(dof_value_map)
    # Prepare grasping direction and lifting direction
    manip_trans = body.GetManipulator("right_arm").GetTransform()
    pose = OpenRAVEBody.obj_pose_from_transform(manip_trans)
    manip_trans = OpenRAVEBody.get_ik_transform(pose[:3], pose[3:])
    gripper_direction = manip_trans[:3, :3].dot(np.array([-1, 0, 0]))
    lift_direction = manip_trans[:3, :3].dot(np.array([0, 0, -1]))
    # Resample grasping and retreating traj
    # NOTE(review): the literal 3 assumes const.EEREACHABLE_STEPS == 3;
    # confirm if that constant ever changes.
    for i in range(const.EEREACHABLE_STEPS):
        approach_pos = target_pos - gripper_direction / np.linalg.norm(
            gripper_direction) * const.APPROACH_DIST * (3 - i)
        # rave_body.set_pose([0,0,robot.pose[0, t-3+i]])
        approach_arm_pose = get_ik_from_pose(approach_pos, target_rot, body,
                                             'right_arm')
        # rave_body.set_dof({"rArmPose": approach_arm_pose})
        add_to_attr_inds_and_res(t - 3 + i, attr_inds, res, robot,
                                 [('rArmPose', approach_arm_pose)])

        retreat_pos = target_pos + lift_direction / np.linalg.norm(
            lift_direction) * const.RETREAT_DIST * (i + 1)
        # rave_body.set_pose([0,0,robot.pose[0, t+1+i]])
        retreat_arm_pose = get_ik_from_pose(retreat_pos, target_rot, body,
                                            'right_arm')
        add_to_attr_inds_and_res(t + 1 + i, attr_inds, res, robot,
                                 [('rArmPose', retreat_arm_pose)])

    # Lock the resampled window so the optimizer keeps these values.
    robot._free_attrs['rArmPose'][:, t - const.EEREACHABLE_STEPS:t +
                                  const.EEREACHABLE_STEPS + 1] = 0
    robot._free_attrs['pose'][:, t - const.EEREACHABLE_STEPS:t +
                              const.EEREACHABLE_STEPS + 1] = 0
    return np.array(res), attr_inds
コード例 #37
0
ファイル: test_sampling.py プロジェクト: Simon0xzx/tampy
 def test_closest_arm_pose(self):
     """Visual check of get_torso_arm_ik against a side-on can grasp."""
     env = ParamSetup.setup_env()
     # env.SetViewer('qtcoin')
     can = ParamSetup.setup_blue_can()
     robot = ParamSetup.setup_pr2()
     can.pose = np.array([[0, -.2, .8]]).T
     can_body = OpenRAVEBody(env, can.name, can.geom)
     can_body.set_pose(can.pose.flatten(), can.rotation.flatten())
     can_body.set_transparency(.7)
     robot.pose = np.array([[-.5, 0, 0]]).T
     robot_body = OpenRAVEBody(env, robot.name, robot.geom)
     robot_body.set_transparency(.7)
     robot_body.set_pose(robot.pose.flatten())
     robot_body.set_dof(robot.backHeight, robot.lArmPose.flatten(),
                        robot.lGripper, robot.rArmPose.flatten(),
                        robot.rGripper)
     # Tilt the grasp frame a quarter turn about y before solving IK.
     grasp_trans = OpenRAVEBody.transform_from_obj_pose(can.pose, can.rotation)
     tilt = matrixFromAxisAngle([0, np.pi / 2, 0])
     grasp_trans[:3, :3] = grasp_trans[:3, :3].dot(tilt[:3, :3])
     torso_pose, arm_pose = sampling.get_torso_arm_ik(robot_body, grasp_trans,
                                                      robot.rArmPose)
     # Apply the solved right-arm configuration.
     robot_body.set_dof(robot.backHeight, robot.lArmPose.flatten(),
                        robot.lGripper, arm_pose, robot.rGripper)
コード例 #38
0
 def test_base_pose_mat_trans(self):
     """Base pose -> matrix -> base pose must be the identity map."""
     for _ in range(N):
         sample = np.random.rand(3)
         as_mat = OpenRAVEBody.base_pose_to_mat(sample)
         self.assertTrue(
             np.allclose(sample, OpenRAVEBody.mat_to_base_pose(as_mat)))