Example #1
    def reset(self, force_reset=False):
        """
        Reset the simulation environment.
        """
        if self._first_reset or force_reset:
            self._pb.resetSimulation()

            yumi_pos = self.cfgs.ARM.PYBULLET_RESET_POS
            yumi_ori = arutil.euler2quat(self.cfgs.ARM.PYBULLET_RESET_ORI)
            if self._self_collision:
                colli_flag = {'flags': self._pb.URDF_USE_SELF_COLLISION}
                self.robot_id = self._pb.loadURDF(self.cfgs.PYBULLET_URDF,
                                                  yumi_pos, yumi_ori,
                                                  **colli_flag)
            else:
                self.robot_id = self._pb.loadURDF(self.cfgs.PYBULLET_URDF,
                                                  yumi_pos, yumi_ori)

            self._build_jnt_id()

            self.setup_single_arms(right_arm=self.right_arm,
                                   left_arm=self.left_arm)

            for arm in self.arms.values():
                if hasattr(arm, 'eetool'):
                    arm.eetool.feed_robot_info(self.robot_id, self.jnt_to_id)
                    arm.eetool.activate()
                    if arm._self_collision:
                        arm.eetool.disable_gripper_self_collision()
        else:
            self.go_home(ignore_physics=True)
            for arm in self.arms.values():
                if hasattr(arm, 'eetool'):
                    arm.eetool.close(ignore_physics=True)
        self._first_reset = False
Example #2
def auto_control(args, obj_id=None):
    """
    Automatically read grasp poses from a grasp pose file. This function only supports grasping a
    single object, and the object type, position, and orientation must match data_x/info.txt.
    The position passed into control_robot() is taken uniformly as the center point of the contact points.
    Args:
        args: parsed command-line arguments
        obj_id: pybullet object ID, used to reset the arena
    """
    # Use the center point of the contact points as the input position
    if args.method == 'GPNet':
        poses, scores = load_pose_GPNet(
            os.path.join(args.data_path, args.pose_file))
    elif args.method == '6dof-graspnet':
        poses, scores = load_pose_6dofgraspnet(
            os.path.join(args.data_path, args.pose_file))
    elif args.method == 'graspnet_baseline':
        poses, scores = load_pose_graspnet_baseline(
            os.path.join(args.data_path, args.pose_file))
    else:
        raise NotImplementedError

    for pose in poses:
        control_robot(pose,
                      robot_category=args.robot_arm,
                      control_mode=args.control_mode,
                      move_up=True)
        time.sleep(3)
        robot.pb_client.reset_body(obj_id,
                                   base_pos=args.object_pos,
                                   base_quat=ut.euler2quat(args.object_ori))
        time.sleep(0.8)
    print('all done!!!!')
    input()
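A minimal, hypothetical argument setup for auto_control, using the attribute names referenced above (method, data_path, pose_file, robot_arm, control_mode, object_pos, object_ori); the default values are purely illustrative and obj_id is assumed to come from an earlier object-loading call:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--method', default='GPNet',
                    choices=['GPNet', '6dof-graspnet', 'graspnet_baseline'])
parser.add_argument('--data_path', default='data_0')        # folder with the pose file and info.txt
parser.add_argument('--pose_file', default='poses.txt')     # illustrative file name
parser.add_argument('--robot_arm', default='ur5e_2f140')    # illustrative robot name
parser.add_argument('--control_mode', default='linear')     # illustrative control mode
parser.add_argument('--object_pos', nargs=3, type=float, default=[0.5, 0.0, 1.0])
parser.add_argument('--object_ori', nargs=3, type=float, default=[0.0, 0.0, 0.0])
args = parser.parse_args()
auto_control(args, obj_id=obj_id)  # obj_id returned by a prior load call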
Example #3
def load_objects(robot):
    ori = euler2quat([0, 0, np.pi / 2])
    robot.pb_client.load_urdf('table/table.urdf', [0.46, 0, 0],
                              ori,
                              scaling=0.9)
    # sphere_id = robot.pb_client.load_geom('sphere',
    #                                       size=0.05,
    #                                       mass=1,
    #                                       base_pos=[0.5, 0, 0.55],
    #                                       rgba=[0, 1, 0, 1])
    box_1_id = robot.pb_client.load_geom('box',
                                         size=0.03,
                                         mass=1,
                                         base_pos=[0.35, -0.23, 0.55],
                                         rgba=[1, 0, 0, 1])
    box_2_id = robot.pb_client.load_geom('box',
                                         size=[0.03, 0.012, 0.015],
                                         mass=1,
                                         base_pos=[0.53, 0.17, 0.55],
                                         rgba=[0, 0, 1, 1])
    cylinder_id = robot.pb_client.load_geom('cylinder',
                                            size=[0.03, 0.04],
                                            mass=1,
                                            base_pos=[0.49, -0.12, 0.55],
                                            rgba=[0, 1, 1, 1])
    duck_id = robot.pb_client.load_geom('mesh',
                                        mass=1,
                                        visualfile='duck.obj',
                                        mesh_scale=0.06,
                                        base_pos=[0.63, 0.3, 0.55],
                                        rgba=[0.5, 0.2, 1, 1])
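A minimal usage sketch for load_objects, assuming the airobot Robot constructor and imports used in the other examples here:

import numpy as np
from airobot import Robot
from airobot.utils.common import euler2quat

robot = Robot('ur5e_stick')   # pybullet backend, as in the examples above
robot.arm.go_home()
load_objects(robot)           # loads the table plus the boxes, cylinder, and duck mesh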
Example #4
    def reset(self, force_reset=False):
        """
        Reset the simulation environment.
        """
        if self._first_reset or force_reset:
            self._pb.resetSimulation()

            yumi_pos = self.cfgs.ARM.PYBULLET_RESET_POS
            yumi_ori = arutil.euler2quat(self.cfgs.ARM.PYBULLET_RESET_ORI)
            if self._self_collision:
                colli_flag = {'flags': self._pb.URDF_USE_SELF_COLLISION}
                self.robot_id = self._pb.loadURDF(self.cfgs.PYBULLET_URDF,
                                                  yumi_pos, yumi_ori,
                                                  **colli_flag)
            else:
                self.robot_id = self._pb.loadURDF(self.cfgs.PYBULLET_URDF,
                                                  yumi_pos, yumi_ori)

            self._build_jnt_id()

            self.setup_single_arms(right_arm=self.right_arm,
                                   left_arm=self.left_arm)
        else:
            self.go_home(ignore_physics=True)
        self._first_reset = False
Example #5
	def __init__(self, action_repeat=10, render=False):
		self._action_repeat = action_repeat		
		self.robot = Robot('ur5e_stick', pb=True, pb_cfg={'gui': render, 'realtime':False})
		self.ee_ori = [-np.sqrt(2) / 2, np.sqrt(2) / 2, 0, 0]
		self._action_bound = 1.0
		self._ee_pos_scale = 0.02
		self._ee_ori_scale = np.pi / 36.0
		self._action_high = np.array([self._action_bound] * 2)
		self.action_space = spaces.Box(low=-self._action_high,
									   high=self._action_high,
									   dtype=np.float32)
		
		self.goal = np.array([0.75, -0.3, 1.0])
		self.init = np.array([0.5, 0.1, 1.0])
		self.robot.arm.reset()
		
		ori = euler2quat([0, 0, np.pi / 2])
		self.table_id = self.robot.pb_client.load_urdf('table/table.urdf',
													   [.5, 0, 0.4],
													   ori,
													   scaling=0.9)
		self.marker_id = self.robot.pb_client.load_geom('box', size=0.05, mass=1,
													 base_pos=self.goal.tolist(),
													 rgba=[0, 1, 0, 0.4])
		client_id = self.robot.pb_client.get_client_id()
		
		p.setCollisionFilterGroupMask(self.marker_id, -1, 0, 0, physicsClientId=client_id)
		p.setCollisionFilterPair(self.marker_id, self.table_id, -1, -1, 1, physicsClientId=client_id)

		self.reset()
		state_low = np.full(len(self._get_obs()), -float('inf'))
		state_high = np.full(len(self._get_obs()), float('inf'))
		self.observation_space = spaces.Box(state_low,
											state_high,
											dtype=np.float32)
Example #6
def main():
    """
    This function demonstrates how to load different kinds of
    objects and get state information of objects.
    """
    np.set_printoptions(precision=3, suppress=True)
    robot = Robot('ur5e_stick')
    robot.arm.go_home()

    ori = euler2quat([0, 0, np.pi / 2])
    robot.pb_client.load_urdf('table/table.urdf',
                              [1, 0, 0.4],
                              ori,
                              scaling=0.9)
    sphere_id = robot.pb_client.load_geom('sphere',
                                          size=0.05,
                                          mass=1,
                                          base_pos=[1, 0, 1.0],
                                          rgba=[0, 1, 0, 1])
    robot.pb_client.load_geom('box',
                              size=0.05,
                              mass=1,
                              base_pos=[1, 0.12, 1.0],
                              rgba=[1, 0, 0, 1])
    robot.pb_client.load_geom('box',
                              size=[0.06, 0.02, 0.03],
                              mass=1,
                              base_pos=[1.3, 0.12, 1.0],
                              rgba=[0, 0, 1, 1])
    cylinder_id = robot.pb_client.load_geom('cylinder',
                                            size=[0.06, 0.08],
                                            mass=1,
                                            base_pos=[0.8, -0.12, 1.0],
                                            rgba=[0, 1, 1, 1])
    duck_id = robot.pb_client.load_geom('mesh',
                                        mass=1,
                                        visualfile='duck.obj',
                                        mesh_scale=0.1,
                                        base_pos=[0.9, -0.4, 1.0],
                                        rgba=[0.5, 0.2, 1, 1])
    pos, quat, lin_vel, ang_vel = robot.pb_client.get_body_state(cylinder_id)
    log_info('Cylinder:')
    log_info('         position: %s' % np.array2string(pos,
                                                       precision=2))
    log_info('         quaternion: %s' % np.array2string(quat,
                                                         precision=2))
    log_info('         linear vel: %s' % np.array2string(lin_vel,
                                                         precision=2))
    log_info('         angular vel: %s' % np.array2string(ang_vel,
                                                          precision=2))
    log_info('Removing sphere')
    robot.pb_client.remove_body(sphere_id)
    time.sleep(2)
    log_info('Reset duck')
    robot.pb_client.reset_body(duck_id, base_pos=[0.9, -0.4, 1.0],
                               base_quat=[0, 0, 0, 1],
                               lin_vel=[0, 2, 0],
                               ang_vel=[0, 0, 2])
    time.sleep(10)
Example #7
def reach_pose_goal(pos, ori, get_func, object_id, pos_tol=0.01, ori_tol=0.02):
    """
    Check whether the end effector has reached the goal pose. The goal is
    considered reached if both the position and orientation errors are
    within the specified tolerances.

    Args:
        pos (list or np.ndarray): goal position
        ori (list or np.ndarray): goal orientation. It can be:
            **quaternion** ([qx, qy, qz, qw], shape: :math:`[4]`)
            **rotation matrix** (shape: :math:`[3, 3]`)
            **euler angles** ([roll, pitch, yaw], shape: :math:`[3]`)
        get_func (function): callable that returns the current pose
            (position, quaternion, ...) of a body given its ID
        object_id (int): body ID passed to get_func to query the current pose
        pos_tol (float): tolerance on the position error
        ori_tol (float): tolerance on the orientation error

    Returns:
        tuple: (reached, pos_error, ori_error), where reached is True if both
        the position and orientation goals are met within tolerance
    """
    if not isinstance(pos, np.ndarray):
        goal_pos = np.array(pos)
    else:
        goal_pos = pos
    if not isinstance(ori, np.ndarray):
        goal_ori = np.array(ori)
    else:
        goal_ori = ori

    if goal_ori.size == 3:
        goal_ori = common.euler2quat(goal_ori)
    elif goal_ori.shape == (3, 3):
        goal_ori = common.rot2quat(goal_ori)
    elif goal_ori.size != 4:
        raise TypeError('Orientation must be in one '
                        'of the following forms:'
                        'rotation matrix, euler angles, or quaternion')
    goal_ori = goal_ori.flatten()
    goal_pos = goal_pos.flatten()

    new_ee_pos = np.array(get_func(object_id)[0])
    new_ee_quat = get_func(object_id)[1]

    pos_diff = new_ee_pos.flatten() - goal_pos
    pos_error = np.max(np.abs(pos_diff))

    quat_diff = common.quat_multiply(common.quat_inverse(goal_ori),
                                     new_ee_quat)
    rot_similarity = np.abs(quat_diff[3])

    if pos_error < pos_tol and \
            rot_similarity > 1 - ori_tol:
        return True, pos_error, 1 - rot_similarity
    else:
        print("pos error: " + str(pos_error))
        print("ori_error: " + str(1 - rot_similarity))
        return False, pos_error, 1 - rot_similarity
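A minimal usage sketch, assuming the numpy import used by the function above, a robot and box_id created as in the other examples here, and pb_client.get_body_state as the pose getter (the goal values are illustrative):

goal_pos = [0.5, 0.12, 1.0]
goal_ori = [0, 0, np.pi / 2]   # euler angles; converted to a quaternion inside reach_pose_goal
reached, pos_err, ori_err = reach_pose_goal(goal_pos, goal_ori,
                                            robot.pb_client.get_body_state,
                                            box_id,
                                            pos_tol=0.01, ori_tol=0.02)
if reached:
    print('goal pose reached (pos error %.4f, ori error %.4f)' % (pos_err, ori_err))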
Example #8
 def __init__(self, ifRender=False):
     # np.set_printoptions(precision=3, suppress=True)
     # table scaling and table center location
     self.table_scaling = 0.6  # tabletop x ~ (0.3, 0.9); y ~ (-0.45, 0.45)
     self.table_x = 0.6
     self.table_y = 0
     self.table_z = 0.6
     self.table_surface_height = 0.975  # obtained by running robot.cam.get_pix_3dpt
     self.table_ori = euler2quat([0, 0, np.pi / 2])
     # task space x ~ (0.4, 0.8); y ~ (-0.3, 0.3)
     self.max_arm_reach = 0.91
     self.workspace_max_x = 0.75  # 0.8 discouraged, as box can move past max arm reach
     self.workspace_min_x = 0.4
     self.workspace_max_y = 0.3
     self.workspace_min_y = -0.3
     # robot end-effector
     self.ee_min_height = 0.99
     self.ee_rest_height = 1.1  # stick scale="0.0001 0.0001 0.0007"
     self.ee_home = [self.table_x, self.table_y, self.ee_rest_height]
     # initial object location
     self.box_z = 1 - 0.005
     self.box_pos = [self.table_x, self.table_y, self.box_z]
     self.box_size = 0.02  # half-extent from the box center to a face; full box size is 0.04
     # push config: push_len by default [0.06-0.1]
     self.push_len_min = 0.06  # 0.06 empirically ensures no contact with the box
     self.push_len_range = 0.04
     # image processing config
     self.row_min = 40
     self.row_max = 360
     self.col_min = 0
     self.col_max = 640
     self.output_row = 100
     self.output_col = 200
     self.row_scale = (self.row_max - self.row_min) / float(self.output_row)
     self.col_scale = (self.col_max - self.col_min) / float(self.output_col)
     assert self.col_scale == self.row_scale
     # load robot
     self.robot = Robot('ur5e_stick', pb=True, pb_cfg={'gui': ifRender})
     self.robot.arm.go_home()
     self.ee_origin = self.robot.arm.get_ee_pose()
     self.go_home()
     self._home_jpos = self.robot.arm.get_jpos()
     # load table
     self.table_id = self.load_table()
     # load box
     self.box_id = self.load_box()
     # initialize camera matrices
     self.robot.cam.setup_camera(focus_pt=[0.7, 0, 1.],
                                 dist=0.5,
                                 yaw=90,
                                 pitch=-60,
                                 roll=0)
     self.ext_mat = self.robot.cam.get_cam_ext()
     self.int_mat = self.robot.cam.get_cam_int()
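A short sketch of how the cached camera matrices can be used to project a world point into pixel coordinates (the same math used by get_pixel in the last example below; the class name PushEnv is hypothetical and the point is illustrative):

env = PushEnv()
pt_world = np.array([env.table_x, env.table_y, env.table_surface_height, 1.0])
pt_cam = np.linalg.inv(env.ext_mat).dot(pt_world)[:3]   # world frame -> camera frame
pix = env.int_mat.dot(pt_cam)
u, v = pix[0] / pix[2], pix[1] / pix[2]                  # pixel column and row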
Example #9
def main():
    """
    This function demonstrates how to randomize the visual textures
    of the robot and the objects in the scene using TextureModder.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    texture_path = os.path.join(dir_path, 'textures')
    robot = Robot('ur5e')
    robot.arm.go_home()

    modder = TextureModder(pb_client_id=robot.pb_client.get_client_id())

    ori = euler2quat([0, 0, np.pi / 2])
    table_id = robot.pb_client.load_urdf('table/table.urdf',
                                         [1, 0, 0.4],
                                         ori,
                                         scaling=0.9)
    sphere_id = robot.pb_client.load_geom('sphere',
                                          size=0.05,
                                          mass=1,
                                          base_pos=[1, 0, 1.0],
                                          rgba=[0, 1, 0, 1])
    box_id = robot.pb_client.load_geom('box',
                                       size=0.05,
                                       mass=1,
                                       base_pos=[1, 0.12, 1.0],
                                       rgba=[1, 0, 0, 1])
    duck_id = robot.pb_client.load_geom('mesh',
                                        mass=1,
                                        visualfile='duck.obj',
                                        mesh_scale=0.1,
                                        base_pos=[0.9, -0.4, 1.0],
                                        rgba=[0.5, 0.2, 1, 1])
    modder.set_texture(table_id, -1, os.path.join(texture_path, '1.jpg'))
    modder.set_texture(sphere_id, -1, os.path.join(texture_path, '2.jpg'))
    modder.set_texture(box_id, -1, os.path.join(texture_path, '3.jpg'))
    modder.set_texture(duck_id, -1, os.path.join(texture_path, '4.jpg'))
    modder.set_texture_path(texture_path)

    while True:
        start = time.time()
        # modder.randomize('rgb')
        # modder.randomize('gradient')
        # modder.randomize('noise')
        # modder.randomize('texture', exclude={robot.arm.robot_id: []})
        # modder.randomize('texture',
        #                  exclude={robot.arm.robot_id: [3, 4, 5, 6]})
        modder.randomize('all')
        print('Time cost (s): ', time.time() - start)
        time.sleep(1)
Example #10
def main():
    """
    This function shows an example of block stacking.
    """
    np.set_printoptions(precision=4, suppress=True)
    robot = Robot('franka')
    success = robot.arm.go_home()
    if not success:
        log_warn('Robot go_home failed!!!')
    ori = euler2quat([0, 0, np.pi / 2])
    robot.pb_client.load_urdf('table/table.urdf',
                              [.6, 0, 0.4],
                              ori,
                              scaling=0.9)
    box_size = 0.03
    box_id1 = robot.pb_client.load_geom('box', size=box_size,
                                        mass=0.1,
                                        base_pos=[.5, 0.12, 1.0],
                                        rgba=[1, 0, 0, 1])
    box_id2 = robot.pb_client.load_geom('box',
                                        size=box_size,
                                        mass=0.1,
                                        base_pos=[0.3, 0.12, 1.0],
                                        rgba=[0, 0, 1, 1])
    robot.arm.eetool.open()
    obj_pos = robot.pb_client.get_body_state(box_id1)[0]
    move_dir = obj_pos - robot.arm.get_ee_pose()[0]
    move_dir[2] = 0
    eef_step = 0.025

    # an example of using IK with nullspace enabled
    ik_kwargs = dict(ns=True)
    robot.arm.move_ee_xyz(move_dir, eef_step=eef_step, ik_kwargs=ik_kwargs)

    move_dir = np.zeros(3)
    move_dir[2] = obj_pos[2] - robot.arm.get_ee_pose()[0][2]
    robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
    robot.arm.eetool.close(wait=False)
    robot.arm.move_ee_xyz([0, 0, 0.3], eef_step=eef_step)
    obj_pos = robot.pb_client.get_body_state(box_id2)[0]
    move_dir = obj_pos - robot.arm.get_ee_pose()[0]
    move_dir[2] = 0
    robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
    move_dir = obj_pos - robot.arm.get_ee_pose()[0]
    move_dir[2] += box_size * 2
    robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
    robot.arm.eetool.open()
    move_dir[2] = 0.2
    robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
    time.sleep(10)
Example #11
 def reset(self):
     self.robot.arm.reset()
     self.robot.arm.go_home(ignore_physics=True)
     ori = euler2quat([0, 0, np.pi / 2])
     self.table_id = self.robot.pb_client.load_urdf('table/table.urdf',
                                                    [.5, 0, 0.4],
                                                    ori,
                                                    scaling=0.9)
     self.box_id = self.robot.pb_client.load_geom('box', size=0.05, mass=1,
                                                  base_pos=[0.5, 0.12, 1.0],
                                                  rgba=[1, 0, 0, 1])
     self.ref_ee_ori = self.robot.arm.get_ee_pose()[1]
     self.gripper_ori = 0
     return self._get_obs()
Example #12
def perturb_box(yumi, box_id, delta_pos, delta_ori_euler=None):
    object_pos = list(yumi.arm.p.getBasePositionAndOrientation(box_id)[0])
    object_ori = list(yumi.arm.p.getBasePositionAndOrientation(box_id)[1])

    new_pos = np.array(object_pos) + np.array(delta_pos)
    if delta_ori_euler is not None:
        delta_ori_quat = common.euler2quat(delta_ori_euler)
        new_ori = common.quat_multiply(object_ori, delta_ori_quat)
    else:
        new_ori = object_ori

    yumi.arm.p.resetBasePositionAndOrientation(box_id, new_pos.tolist(),
                                               new_ori)
Example #13
def load_single_object(robot, args):
    """
    Load a single object into the scene.

    Warning: the object type and size are hard-coded below; set them to
    match the target object.

    Args:
        robot: airobot Robot instance
        args: parsed arguments providing args.object_pos and args.object_ori
    """
    obj_id = robot.pb_client.load_geom(
        shape_type='box',
        size=0.025,
        # shape_type='cylinder',   # object type
        # size=[0.022, 0.1],  # object size
        mass=0.3,
        base_pos=args.object_pos,
        base_ori=euler2quat(args.object_ori),
        rgba=[1, 0, 0, 1])
    return obj_id
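A minimal usage sketch; the pose values are illustrative and any object exposing object_pos and object_ori attributes (e.g. the parsed args) works:

from types import SimpleNamespace

args = SimpleNamespace(object_pos=[0.5, 0.0, 1.0],   # illustrative position
                       object_ori=[0.0, 0.0, 0.0])   # illustrative euler orientation
obj_id = load_single_object(robot, args)             # robot: an airobot Robot instance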
Example #14
def main():
    """
    This function demonstrates how to load a custom
    end effector to the UR5e robot in pybullet.

    NOTE: This implementation using PyBullet's createConstraint
    function is known to have issues with fixed joints (fixed joints
    are not actually rigidly fixed).
    """
    robot = Robot('ur5e',
                  pb=True,
                  use_eetool=False,
                  arm_cfg={'self_collision': False})
    robot.arm.go_home()

    # a custom EE tool can be added either by making a new separate URDF
    # or by using the load_geom capability in the pb_util file

    # the lines below load an example custom URDF
    root_path = os.path.dirname(os.path.realpath(__file__))
    urdf_path = '../../../src/airobot/urdfs/custom_ee_tools/stick_ee_tool.urdf'
    stick_id = robot.pb_client.load_urdf(
        os.path.join(root_path, urdf_path),
        [0.3, 0.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    )

    # uncomment below to load a basic geometry with load_geom
    # from airobot.utils.pb_util import load_geom
    # stick_id = load_geom('cylinder', size=[0.015, 0.1524], mass=0.01,
    #                      base_pos=[0.3, 0.0, 0.0], rgba=[0, 1, 1, 1])

    robot.pb_client.createConstraint(robot.arm.robot_id,
                                     robot.arm.ee_link_id,
                                     stick_id,
                                     -1,
                                     p.JOINT_FIXED,
                                     jointAxis=[1, 1, 1],
                                     parentFramePosition=[0, 0, 0],
                                     childFramePosition=[0, 0, -0.077],
                                     childFrameOrientation=euler2quat(
                                         [0, 0, 0]))

    time.sleep(5)
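As a hedged alternative to the stick URDF, the commented-out load_geom option above can be adapted to the pb_client API used in the other examples; the same createConstraint call then attaches the primitive to the EE link:

stick_id = robot.pb_client.load_geom('cylinder', size=[0.015, 0.1524], mass=0.01,
                                     base_pos=[0.3, 0.0, 0.0], rgba=[0, 1, 1, 1])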
Example #15
    def reset(self, force_reset=False):
        """
        Reset the simulation environment.
        """
        self._pb.configureDebugVisualizer(self._pb.COV_ENABLE_RENDERING, 0)
        if self._first_reset or force_reset:
            self._pb.resetSimulation()
            self.floor_id = self._pb.load_geom('box',
                                               size=[10, 10, 0.01],
                                               mass=0,
                                               base_pos=[0, 0, 0],
                                               rgba=[0.7, 0.77, 0.7, 1],
                                               specular=[1, 1, 1, 1])

            self.robot_base_pos = self.cfgs.ARM.PYBULLET_RESET_POS
            robot_base_ori = self.cfgs.ARM.PYBULLET_RESET_ORI
            self.robot_base_ori = arutil.euler2quat(robot_base_ori).tolist()
            if self._self_collision:
                colli_flag = self._pb.URDF_USE_SELF_COLLISION
                self.robot_id = self._pb.loadURDF(self.cfgs.PYBULLET_URDF,
                                                  self.robot_base_pos,
                                                  self.robot_base_ori,
                                                  useFixedBase=True,
                                                  flags=colli_flag)
            else:
                self.robot_id = self._pb.loadURDF(self.cfgs.PYBULLET_URDF,
                                                  self.robot_base_pos,
                                                  self.robot_base_ori,
                                                  useFixedBase=True)
            self._build_jnt_id()
            # self.set_visual_shape()
            if hasattr(self, 'eetool'):
                self.eetool.feed_robot_info(self.robot_id, self.jnt_to_id)
                self.eetool.activate()
                if self._self_collision:
                    # weird behavior occurs on the gripper
                    # when self-collision is enforced
                    self.eetool.disable_gripper_self_collision()
        else:
            self.go_home(ignore_physics=True)
            if hasattr(self, 'eetool'):
                self.eetool.close(ignore_physics=True)
        self._pb.configureDebugVisualizer(self._pb.COV_ENABLE_RENDERING, 1)
        self._first_reset = False
Example #16
def main():
    """
    This function shows an example of picking up a block and placing it
    while recording the scene to a video file.
    """
    np.set_printoptions(precision=4, suppress=True)
    robot = Robot('ur5e_2f140', pb=True, pb_cfg={'gui': False})
    success = robot.arm.go_home()
    if not success:
        ar.log_warn('Robot go_home failed!!!')

    #setup cam
    robot_base = robot.arm.robot_base_pos
    robot.cam.setup_camera(focus_pt=robot_base,
                           dist=3,
                           yaw=55,
                           pitch=-30,
                           roll=0)
    out = cv2.VideoWriter('pickandplace.mp4', cv2.VideoWriter_fourcc(*'mp4v'),
                          30, (640, 480))

    img = robot.cam.get_images(get_rgb=True, get_depth=False)[0]
    out.write(np.array(img))

    ori = euler2quat([0, 0, np.pi / 2])
    robot.pb_client.load_urdf('table/table.urdf', [.5, 0, 0.4],
                              ori,
                              scaling=0.9)
    box_size = 0.05
    box_id1 = robot.pb_client.load_geom('box',
                                        size=box_size,
                                        mass=1,
                                        base_pos=[.5, 0.12, 1.0],
                                        rgba=[1, 0, 0, 1])
    robot.arm.eetool.open()
    record(robot, out)

    obj_pos = robot.pb_client.get_body_state(box_id1)[0]
    move_dir = obj_pos - robot.arm.get_ee_pose()[0]
    move_dir[2] = 0
    eef_step = 0.025

    for i in range(3):
        robot.arm.move_ee_xyz(move_dir / 3, eef_step=eef_step)
        record(robot, out)

    move_dir = np.zeros(3)
    move_dir[2] = obj_pos[2] - robot.arm.get_ee_pose()[0][2]

    for i in range(4):
        robot.arm.move_ee_xyz(move_dir / 4, eef_step=eef_step)
        record(robot, out)

    robot.arm.eetool.close(wait=False)
    record(robot, out)

    for i in range(10):
        robot.arm.move_ee_xyz([0, 0, 0.03], eef_step=eef_step)
        record(robot, out)

    for i in range(10):
        robot.arm.move_ee_xyz([0.025, 0, 0.0], eef_step=eef_step)
        record(robot, out)

    for i in range(10):
        robot.arm.move_ee_xyz([0, 0, -0.03], eef_step=eef_step)
        record(robot, out)

    robot.arm.eetool.open()
    record(robot, out)

    time.sleep(1)
    record(robot, out)

    for i in range(10):
        robot.arm.move_ee_xyz([0, 0, 0.03], eef_step=eef_step)
        record(robot, out)

    time.sleep(3)
    record(robot, out)

    out.release()
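The record helper used throughout this example is not shown; a minimal sketch of what it presumably does, based on the get_images/VideoWriter calls at the top of main:

def record(robot, out):
    # hypothetical helper: grab an RGB frame and append it to the video writer,
    # mirroring the get_images/out.write calls at the top of main
    img = robot.cam.get_images(get_rgb=True, get_depth=False)[0]
    out.write(np.array(img))  # cv2 expects BGR; cv2.cvtColor(img, cv2.COLOR_RGB2BGR) may be needed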
Example #17
def main(ifRender=False):
    """
    This function loads an initial object on a table and defines helper
    routines for collecting, testing, and evaluating poking data.
    """
    np.set_printoptions(precision=3, suppress=True)


    # load robot
    robot = Robot('ur5e', use_eetool=False, arm_cfg={'render': ifRender, 'self_collision': True})
    robot.arm.go_home()
    origin = robot.arm.get_ee_pose()

    def go_home():
        robot.arm.set_ee_pose(home, origin[1])
        # robot.arm.eetool.close()
    # init robot
    go_home()

    # load table
    table_ori = euler2quat([0, 0, np.pi/2.0])
    table_pos = [table_x, table_y, table_z]
    table_id = load_urdf('table/table.urdf', table_pos, table_ori, scaling=table_scaling)

    # load box
    box_id = load_geom('box', size=box_size, mass=1, base_pos=box_pos, rgba=[1, 0, 0, 1])

    # init camera
    def get_img():
        # screenshot camera images
        focus_pt = [0.7, 0, 1.]
        robot.cam.setup_camera(focus_pt=focus_pt, dist=0.5, yaw=90, pitch=-60, roll=0)
        rgb, depth = robot.cam.get_images(get_rgb=True, get_depth=True)
        # crop the rgb
        img = rgb[40:360, 0:640]
        # dep = depth[40:360, 0:640]
        # low pass filter : Gaussian blur
        # blurred_img = cv2.GaussianBlur(img.astype('float32'), (5, 5), 0)
        small_img = cv2.resize(img.astype('float32'), dsize=(200, 100),
                    interpolation=cv2.INTER_CUBIC) # numpy array dtype numpy int64 by default
        # small_dep = cv2.resize(dep.astype('float32'), dsize=(200, 100),
        #             interpolation=cv2.INTER_CUBIC) # numpy array dtype numpy int64 by default
        return small_img, depth

    # test run
    def test():
        time_to_sleep = 0.5
        go_home()
        pose = robot.arm.get_ee_pose()
        robot.arm.set_ee_pose([pose[0][0], pose[0][1], min_height], origin[1])
        time.sleep(time_to_sleep)
        get_img()
        # test boundary
        robot.arm.set_ee_pose([pose[0][0], pose[0][1]+table_length/2.0, min_height], origin[1])
        # robot.arm.eetool.close()
        time.sleep(time_to_sleep)
        get_img()
        robot.arm.move_ee_xyz([0, -table_length, 0])
        time.sleep(time_to_sleep)
        get_img()
        pose = robot.arm.get_ee_pose()
        robot.arm.set_ee_pose([pose[0][0], pose[0][1], rest_height], origin[1])
        time.sleep(time_to_sleep)
        get_img()
        robot.arm.move_ee_xyz([0, table_length, 0])
        time.sleep(time_to_sleep)
        get_img()
        pose = robot.arm.get_ee_pose()
        robot.arm.set_ee_pose([pose[0][0], pose[0][1], min_height], origin[1])
        time.sleep(time_to_sleep)
        get_img()
        # test arc
        for i in list([np.pi/3.0, np.pi*2/5.0, np.pi*3/7.0, np.pi/2.0,
                       np.pi*4/7.0, np.pi*3/5.0, np.pi*2/3.0]):
            robot.arm.set_ee_pose([arm_span * np.sin(i),
                                   arm_span * np.cos(i), rest_height],
                                  origin[1])
            time.sleep(time_to_sleep)
            get_img()
            robot.arm.set_ee_pose([arm_span * np.sin(i),
                                   arm_span * np.cos(i), min_height],
                                  origin[1])
            time.sleep(time_to_sleep)
            get_img()
            robot.arm.set_ee_pose([arm_span * np.sin(i),
                                   arm_span * np.cos(i), rest_height],
                                  origin[1])
            time.sleep(time_to_sleep)
            get_img()

    def get_pixel(X, Y, Z=0.975):
        """return fractional pixels representations
           Z values comes from running robot.cam.get_pix.3dpt
           representing the table surface height"""
        ext_mat = robot.cam.get_cam_ext()
        int_mat = robot.cam.get_cam_int()
        XYZ = np.array([X, Y, Z, 1])
        xyz = np.linalg.inv(ext_mat).dot(XYZ)[:3]
        pixel_xyz = int_mat.dot(xyz)
        pixel_xyz = pixel_xyz / pixel_xyz[2]
        pixel_col = pixel_xyz[0] / 3.2 # due to image cropping and scaling
        pixel_row = (pixel_xyz[1] - 40) / 3.2
        return pixel_row, pixel_col

    # # log object
    # pos, quat, lin_vel, ang_vel = get_body_state(box_id)
    # ar.log_info('Box:')
    # ar.log_info('     position: %s' % np.array2string(pos, precision=2))
    # ar.log_info('     quaternion: %s' % np.array2string(quat, precision=2))
    # ar.log_info('     linear vel: %s' % np.array2string(lin_vel, precision=2))
    # ar.log_info('     angular vel: %s' % np.array2string(ang_vel, precision=2))

    # plt.figure()
    # plt.imshow(rgb)
    # plt.figure()
    # plt.imshow(depth * 25, cmap='gray', vmin=0, vmax=255)
    # print('Maximum Depth (m): %f' % np.max(depth))
    # print('Minimum Depth (m): %f' % np.min(depth))
    # plt.show()

    def poke(save_dir='data/image'):
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        save_depth_dir = save_dir + '_depth'
        if not os.path.exists(save_depth_dir):
            os.makedirs(save_depth_dir)
        index = 0
        curr_img, curr_dep = get_img()
        while True:
            obj_pos, obj_quat, lin_vel, _ = get_body_state(box_id)
            obj_ang = quat2euler(obj_quat)[2] # -pi ~ pi
            obj_x, obj_y, obj_z = obj_pos
            # check if cube is on table
            if obj_z < box_z / 2.0 or lin_vel[0] > 1e-3: # important that box is still
                print(obj_z, lin_vel[0])
                reset_body(box_id, box_pos)
                continue
            while True:
                # choose random poke point on the object
                poke_x = np.random.random()*box_size + obj_x - box_size/2.0
                poke_y = np.random.random()*box_size + obj_y - box_size/2.0
                # choose poke angle along the z axis
                poke_ang = np.random.random() * np.pi * 2 - np.pi
                # choose poke length
                poke_len = np.random.random() * poke_len_std + poke_len_mean
                # compute the starting and ending poke locations
                start_x = poke_x - poke_len * np.cos(poke_ang)
                start_y = poke_y - poke_len * np.sin(poke_ang)
                end_x = poke_x + poke_len * np.cos(poke_ang)
                end_y = poke_y + poke_len * np.sin(poke_ang)
                if end_x > workspace_min_x and end_x < workspace_max_x \
                    and end_y > workspace_min_y and end_y < workspace_max_y:
                    break
            robot.arm.move_ee_xyz([start_x-home[0], start_y-home[1], 0], 0.015)
            robot.arm.move_ee_xyz([0, 0, min_height-rest_height], 0.015)
            js1, js2, js3, js4, js5, js6 = robot.arm.get_jpos()
            robot.arm.move_ee_xyz([end_x-start_x, end_y-start_y, 0], 0.015)
            je1, je2, je3, je4, je5, je6 = robot.arm.get_jpos()
            # important that we use move_ee_xyz, as set_ee_pose can throw obj in motion
            robot.arm.move_ee_xyz([0, 0, rest_height-min_height], 0.015)
            # move arm away from camera view
            go_home()  # important to have one set_ee_pose every loop to reset accumulated errors
            next_img, next_dep = get_img()
            # calc poke and obj locations in image pixel space
            start_r, start_c = get_pixel(start_x, start_y)
            end_r, end_c = get_pixel(end_x, end_y)
            obj_r, obj_c = get_pixel(obj_x, obj_y)
            with open(save_dir + '.txt', 'a') as file:
                file.write('%d %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f\n' % \
                    (index, start_x, start_y, end_x, end_y, obj_x, obj_y,
                    obj_quat[0], obj_quat[1], obj_quat[2], obj_quat[3],
                    js1, js2, js3, js4, js5, js6, je1, je2, je3, je4, je5, je6,
                    start_r, start_c, end_r, end_c, obj_r, obj_c))
            cv2.imwrite(save_dir + '/' + str(index) +'.png',
                        cv2.cvtColor(curr_img, cv2.COLOR_RGB2BGR))
            # cv2.imwrite(save_dir + '_depth/' + str(index) +'.png', curr_dep)
            np.save(save_dir + '_depth/' + str(index), curr_dep)
            curr_img = next_img
            curr_dep = next_dep
            if index % 1000 == 0:
                ar.log_info('number of pokes: %dk' % (index // 1000))
            index += 1

    def test_data(filename):
        data = np.loadtxt(filename, unpack=True)
        idx, stx, sty, edx, edy, obx, oby, qt1, qt2, qt3, qt4, js1, js2, js3, js4, js5, js6, je1, je2, je3, je4, je5, je6 = data
        go_home()
        reset_body(box_id, box_pos)
        _, _ = get_img()
        for i in range(5):
            robot.arm.set_ee_pose([stx[i], sty[i], rest_height], origin[1])
            robot.arm.move_ee_xyz([0, 0, min_height-rest_height], 0.015)
            robot.arm.move_ee_xyz([edx[i]-stx[i], edy[i]-sty[i], 0], 0.015)
            robot.arm.move_ee_xyz([0, 0, rest_height-min_height], 0.015)
            _, _ = get_img()
            start_jpos = robot.arm.compute_ik([stx[i], sty[i], min_height])
            end_jpos = robot.arm.compute_ik([edx[i], edy[i], min_height])
            print('start')
            print(js1[i], js2[i], js3[i], js4[i], js5[i], js6[i])
            print(start_jpos)
            print('end')
            print(je1[i], je2[i], je3[i], je4[i], je5[i], je6[i])
            print(end_jpos)
            time.sleep(1)

    def eval_poke(tag, ground_truth, img_idx, poke):
        # ground_truth is the entire matrix from 'image_xx.txt' file
        # img_idx is image index, first column of ground_truth
        # poke is the predicted 4 vector for world, 8 for pixel and joint
        # second half of the 8 vector are world coordinates computed from gt pixel and joint values

        # label index in the poke data array (29, 1)
        # index 0 is the poke index corresponding to starting image
        stx, sty, edx, edy = 1, 2, 3, 4 # ee pos of start and end poke
        obx, oby, qt1, qt2, qt3, qt4 = 5, 6, 7, 8, 9, 10 # obj pose before poke
        js1, js2, js3, js4, js5, js6 = 11, 12, 13, 14, 15, 16 # jpos before poke
        je1, je2, je3, je4, je5, je6 = 17, 18, 19, 20, 21, 22 # jpos after poke
        sr, stc, edr, edc, obr, obc = 23, 24, 25, 26, 27, 28 # row and col locations in image

        gt = ground_truth[img_idx:img_idx+2]
        tgt_posi = gt[1, 5:8]
        tgt_posi[-1] = box_z
        tgt_quat = gt[1, 7:11]
        init_posi = gt[0, 5:8]
        init_posi[-1] = box_z
        init_quat = gt[0, 7:11]
        gt_poke = gt[0, 1:5]
        go_home()
        reset_body(box_id, init_posi, init_quat)

        box_id2 = load_geom('box', size=box_size, mass=1,
            base_pos=tgt_posi, base_ori=tgt_quat, rgba=[0,0,1,0.5])
        p.setCollisionFilterPair(box_id, box_id2, -1, -1, enableCollision=0)
        # to check robot_id and link_id
        # robot.arm.robot_id
        # robot.arm.p.getNumJoints(robot_id)
        # robot.arm.p.getJointInfo(robot_id, 0-max_lnik_id)
        p.setCollisionFilterPair(box_id2, 1, -1, 11, enableCollision=0)
        _, _ = get_img()
        # time.sleep(1)
        # reset_body(box_id, init_posi, init_quat)
        # robot.arm.move_ee_xyz([gt_poke[0]-home[0], gt_poke[1]-home[1], 0], 0.015)
        # robot.arm.move_ee_xyz([0, 0, min_height-rest_height], 0.015)
        # robot.arm.move_ee_xyz([gt_poke[2]-gt_poke[0], gt_poke[3]-gt_poke[1], 0], 0.015)
        # robot.arm.move_ee_xyz([0, 0, rest_height-min_height], 0.015)
        # go_home()
        # _, _ = get_img()
        # reset_body(box_id, init_posi, init_quat)
        # poke = ground_truth[img_idx, 1:5]
        robot.arm.move_ee_xyz([poke[0]-home[0], poke[1]-home[1], 0], 0.015)
        robot.arm.move_ee_xyz([0, 0, min_height-rest_height], 0.015)
        robot.arm.move_ee_xyz([poke[2]-poke[0], poke[3]-poke[1], 0], 0.015)
        robot.arm.move_ee_xyz([0, 0, rest_height-min_height], 0.015)
        go_home()
        _, _ = get_img()
        curr_posi, _, _, _ = get_body_state(box_id)
        dist = np.sqrt((tgt_posi[0]-curr_posi[0])**2 + (tgt_posi[1]-curr_posi[1])**2)
        print(dist)
        # time.sleep(0.5)
        remove_body(box_id2)
        return dist

    def calc_dist(gtfile, pdfile, tag, test_num=50):
        # evaluate test_num pokes and return the average distance to the target
        true = np.loadtxt(gtfile)
        pred = np.loadtxt(pdfile)
        accu_dist = 0.0
        # query = random.sample(range(0, pred.shape[0]), test_num)
        total = pred.shape[0]
        query = range(total)[::total // test_num]
        for i in query:
            img_idx = int(pred[i][0])
            if tag == 'world':
                poke = pred[i][1:5]
            elif tag == 'pixel':
                rows = np.array([pred[i, 1], pred[i, 3], pred[i, 7], pred[i, 9]])
                cols = np.array([pred[i, 2], pred[i, 4], pred[i, 8], pred[i, 10]])
                rows = ((rows*3.2 + 40) + 0.5).astype(int)
                cols = ((cols*3.2) + 0.5).astype(int)
                depth_file = gtfile[:-4] + '_depth/' + str(img_idx) + '.npy'
                depth_im = np.load(depth_file)
                pokes_3d = get_pix_3dpt(depth_im, rows, cols)
                poke = pokes_3d[:, :2].flatten()
            else:
                raise Exception("experiment_tag has to be 'world', 'joint', or 'pixel'")

            accu_dist += eval_poke(tag, true, img_idx, poke)
        return accu_dist / len(query)


    def get_pix_3dpt(depth_im, rs, cs, in_world=True, filter_depth=False,
                     k=1, ktype='median', depth_min=None, depth_max=None):

        if not isinstance(rs, int) and not isinstance(rs, list) and \
                not isinstance(rs, np.ndarray):
            raise TypeError('rs should be an int, a list or a numpy array')
        if not isinstance(cs, int) and not isinstance(cs, list) and \
                not isinstance(cs, np.ndarray):
            raise TypeError('cs should be an int, a list or a numpy array')
        if isinstance(rs, int):
            rs = [rs]
        if isinstance(cs, int):
            cs = [cs]
        if isinstance(rs, np.ndarray):
            rs = rs.flatten()
        if isinstance(cs, np.ndarray):
            cs = cs.flatten()
        if not (isinstance(k, int) and (k % 2) == 1):
            raise TypeError('k should be a positive odd integer.')
        # _, depth_im = self.get_images(get_rgb=False, get_depth=True)
        if k == 1:
            depth_im = depth_im[rs, cs]
        else:
            depth_im_list = []
            if ktype == 'min':
                ktype_func = np.min
            elif ktype == 'max':
                ktype_func = np.max
            elif ktype == 'median':
                ktype_func = np.median
            elif ktype == 'mean':
                ktype_func = np.mean
            else:
                raise TypeError('Unsupported ktype:[%s]' % ktype)
            for r, c in zip(rs, cs):
                s = k // 2
                rmin = max(0, r - s)
                rmax = min(robot.cam.img_height, r + s + 1)
                cmin = max(0, c - s)
                cmax = min(robot.cam.img_width, c + s + 1)
                depth_im_list.append(ktype_func(depth_im[rmin:rmax,
                                                cmin:cmax]))
            depth_im = np.array(depth_im_list)

        depth = depth_im.reshape(-1) * 1 #self.depth_scale
        img_pixs = np.stack((rs, cs)).reshape(2, -1)
        img_pixs[[0, 1], :] = img_pixs[[1, 0], :]
        depth_min = depth_min if depth_min else 0.01 #self.depth_min
        depth_max = depth_max if depth_max else 10 #self.depth_max
        if filter_depth:
            valid = depth > depth_min
            valid = np.logical_and(valid,
                                   depth < depth_max)
            depth = depth[valid]
            img_pixs = img_pixs[:, valid]

        get_img() # to set up camera matrices
        cam_int_mat_inv = robot.cam.cam_int_mat_inv
        cam_ext_mat = robot.cam.cam_ext_mat
        uv_one = np.concatenate((img_pixs,
                                 np.ones((1, img_pixs.shape[1]))))
        uv_one_in_cam = np.dot(cam_int_mat_inv, uv_one)
        pts_in_cam = np.multiply(uv_one_in_cam, depth)
        if in_world:
            if cam_ext_mat is None:
                raise ValueError('Please call set_cam_ext() first to set up'
                                 ' the camera extrinsic matrix')
            pts_in_cam = np.concatenate((pts_in_cam,
                                         np.ones((1, pts_in_cam.shape[1]))),
                                        axis=0)
            pts_in_world = np.dot(cam_ext_mat, pts_in_cam)
            pts_in_world = pts_in_world[:3, :].T
            return pts_in_world
        else:
            return pts_in_cam.T


    from IPython import embed
    embed()