def point_down(env):
    """Incrementally rotate the right hand until it points straight down.

    The target orientation is the composition X(0) * Y(pi) * Z(pi) about the
    hand's starting position; the loop slerps toward it in 1% steps while
    holding position fixed and the gripper open.
    """
    anchor = env._right_hand_pos
    rot_x = T.rotation_matrix(0, np.array([1, 0, 0]), point=anchor)
    rot_y = T.rotation_matrix(math.pi, np.array([0, 1, 0]), point=anchor)
    rot_z = T.rotation_matrix(math.pi, np.array([0, 0, 1]), point=anchor)
    goal_quat = T.mat2quat(np.matmul(rot_z, np.matmul(rot_y, rot_x)))

    while True:
        cur_quat = env._right_hand_quat
        # Small slerp step toward the goal, expressed as a delta quaternion
        # relative to the current orientation.
        step_quat = T.quat_slerp(cur_quat, goal_quat, 0.01)
        dquat = T.quat_multiply(step_quat, T.quat_inverse(cur_quat))
        # A near-identity delta (w ~= 1) means the goal has been reached;
        # one final step is still taken with that delta, as before.
        finished = np.abs(dquat[3] - 1.0) < 1e-4
        action = np.concatenate([np.zeros(3), dquat, [-1]])
        obs, reward, done, info = env.step(action)
        if RENDER:
            env.render()
        if finished:
            break
def get_controller_state(self):
    """
    Grabs the current state of the 3D mouse.

    Returns:
        dict: A dictionary containing dpos, orn, unmodified orn, grasp, and reset
    """
    translation = self.control[:3] * 0.005 * self.pos_sensitivity
    roll, pitch, yaw = self.control[3:] * 0.005 * self.rot_sensitivity

    # Turn the RPY deltas into incremental rotation matrices and fold them
    # onto the stored absolute orientation.
    step_x = rotation_matrix(angle=-pitch, direction=[1.0, 0, 0], point=None)[:3, :3]
    step_y = rotation_matrix(angle=roll, direction=[0, 1.0, 0], point=None)[:3, :3]
    step_z = rotation_matrix(angle=yaw, direction=[0, 0, 1.0], point=None)[:3, :3]
    self.rotation = self.rotation.dot(step_x.dot(step_y.dot(step_z)))

    return {
        "dpos": translation,
        "rotation": self.rotation,
        "raw_drotation": np.array([roll, pitch, yaw]),
        "grasp": self.control_gripper,
        "reset": self._reset_state,
    }
def rotate_camera(self, point, axis, angle):
    """
    Rotate the camera view about a direction (in the camera frame).

    Args:
        point (None or 3-array): (x,y,z) cartesian coordinates about which to rotate camera in camera frame. If None,
            assumes the point is the current location of the camera
        axis (3-array): (ax,ay,az) axis about which to rotate camera in camera frame
        angle (float): how much to rotate about that direction (in degrees)

    Returns:
        2-tuple:
            pos: (x,y,z) updated camera position
            quat: (x,y,z,w) updated camera quaternion orientation
    """
    # current camera rotation + pos (mocap quat is stored wxyz; convert to xyzw)
    camera_pos = np.array(self.env.sim.data.get_mocap_pos(self.mover_body_name))
    camera_rot = T.quat2mat(T.convert_quat(self.env.sim.data.get_mocap_quat(self.mover_body_name), to="xyzw"))

    # rotate by angle and direction to get new camera rotation
    rad = np.pi * angle / 180.0
    R = T.rotation_matrix(rad, axis, point=point)
    # Build a proper homogeneous transform. The previous version used
    # np.zeros((4, 4)), leaving the bottom row all zeros — a degenerate
    # homogeneous matrix. The returned pos/quat happened to be unaffected
    # (only the top three rows are read below), but np.eye(4) keeps the
    # intermediate pose a valid SE(3) matrix.
    camera_pose = np.eye(4)
    camera_pose[:3, :3] = camera_rot
    camera_pose[:3, 3] = camera_pos
    camera_pose = camera_pose @ R

    # Update camera pose
    pos, quat = camera_pose[:3, 3], T.mat2quat(camera_pose[:3, :3])
    self.set_camera_pose(pos=pos, quat=quat)

    return pos, quat
def get_controller_state(self):
    """Returns the current state of the 3d mouse, a dictionary of pos, orn, grasp, and reset."""
    translation = self.control[:3] * 0.005
    roll, pitch, yaw = self.control[3:] * 0.005
    self.grasp = self.control_gripper

    # Accumulate the RPY increments onto the absolute rotation matrix.
    inc_x = rotation_matrix(angle=-pitch, direction=[1., 0, 0], point=None)[:3, :3]
    inc_y = rotation_matrix(angle=roll, direction=[0, 1., 0], point=None)[:3, :3]
    inc_z = rotation_matrix(angle=yaw, direction=[0, 0, 1.], point=None)[:3, :3]
    self.rotation = self.rotation.dot(inc_x.dot(inc_y.dot(inc_z)))

    return {
        "dpos": translation,
        "rotation": self.rotation,
        "grasp": self.grasp,
        "reset": self._reset_state,
    }
def get_sim2real_posquat(env):
    """Return the sim eef pose expressed in the real-robot frame.

    The simulated gripper pose (in the robot base frame) is rotated by
    -90 degrees about the world z axis, then split into position and
    quaternion.
    """
    eef_pose = deepcopy(env.robots[0].pose_in_base_from_name('gripper0_eef'))
    # Homogeneous rotation of -90 degrees about z.
    z_rot = transform_utils.rotation_matrix(np.deg2rad(-90), [0, 0, 1])
    rotated_pose = np.dot(z_rot, eef_pose)
    pos = deepcopy(rotated_pose)[:3, 3]
    quat = deepcopy(transform_utils.mat2quat(rotated_pose))
    return pos, quat
def get_real2sim_posquat(pos, quat):
    """Return the real eef pose expressed in the simulation frame.

    Inverse of the sim->real mapping: the (pos, quat) pose is rotated by
    +90 degrees about the world z axis.
    """
    eef_pose = transform_utils.pose2mat((pos, quat))
    # Homogeneous rotation of +90 degrees about z.
    z_rot = transform_utils.rotation_matrix(np.deg2rad(90), [0, 0, 1])
    rotated_pose = np.dot(z_rot, eef_pose)
    out_pos = deepcopy(rotated_pose)[:3, 3]
    out_quat = deepcopy(transform_utils.mat2quat(rotated_pose))
    return out_pos, out_quat
def on_press(self, window, key, scancode, action, mods):
    """
    Key handler for key presses.

    Args:
        window: [NOT USED]
        key (int): keycode corresponding to the key that was pressed
        scancode: [NOT USED]
        action: [NOT USED]
        mods: [NOT USED]
    """
    # Translation keys: keycode -> (pos index, motion direction).
    pos_bindings = {
        glfw.KEY_W: (0, -1.0),  # dec x
        glfw.KEY_S: (0, 1.0),   # inc x
        glfw.KEY_A: (1, -1.0),  # dec y
        glfw.KEY_D: (1, 1.0),   # inc y
        glfw.KEY_F: (2, -1.0),  # dec z
        glfw.KEY_R: (2, 1.0),   # inc z
    }
    # Rotation keys: keycode -> (axis, signed angle step, raw index, signed raw step).
    rot_bindings = {
        glfw.KEY_Z: ([1.0, 0.0, 0.0], 0.1, 1, -0.1),   # rotates x
        glfw.KEY_X: ([1.0, 0.0, 0.0], -0.1, 1, 0.1),   # rotates x
        glfw.KEY_T: ([0.0, 1.0, 0.0], 0.1, 0, 0.1),    # rotates y
        glfw.KEY_G: ([0.0, 1.0, 0.0], -0.1, 0, -0.1),  # rotates y
        glfw.KEY_C: ([0.0, 0.0, 1.0], 0.1, 2, 0.1),    # rotates z
        glfw.KEY_V: ([0.0, 0.0, 1.0], -0.1, 2, -0.1),  # rotates z
    }

    if key in pos_bindings:
        axis_idx, sign = pos_bindings[key]
        self.pos[axis_idx] += sign * self._pos_step * self.pos_sensitivity
    elif key in rot_bindings:
        direction, angle_step, raw_idx, raw_step = rot_bindings[key]
        drot = rotation_matrix(angle=angle_step * self.rot_sensitivity,
                               direction=direction)[:3, :3]
        self.rotation = self.rotation.dot(drot)
        self.raw_drotation[raw_idx] += raw_step * self.rot_sensitivity
def _step(self, target_qpos):
    """Drive the arm toward ``target_qpos`` over ``substeps`` incremental env steps.

    Args:
        target_qpos: array-like command; [:3] is the target eef position,
            [3] is a target yaw angle, and [-1] is the gripper command.

    Returns:
        The processed observation from the final substep.
    """
    o = None
    # Split the remaining translation evenly across the substeps.
    delta_xyz = (target_qpos[:3] - self._eef_pos) / self._hp.substeps
    for i in range(self._hp.substeps):
        current_rot = self._env._right_hand_orn
        # Only yaw is commanded; pitch and roll deltas are held at zero.
        pitch, roll, yaw = 0, 0, target_qpos[3]
        drot1 = rotation_matrix(angle=-pitch, direction=[1., 0, 0], point=None)[:3, :3]
        drot2 = rotation_matrix(angle=roll, direction=[0, 1., 0], point=None)[:3, :3]
        drot3 = rotation_matrix(angle=yaw, direction=[0, 0, 1.], point=None)[:3, :3]
        # NOTE(review): `start_rot` is not defined anywhere in this method —
        # it must come from an enclosing scope (module global or closure).
        # Presumably it is the reference orientation captured at the start of
        # the trajectory; confirm, otherwise this raises NameError at runtime.
        desired_rot = start_rot.dot(drot1.dot(drot2.dot(drot3)))
        # Relative rotation taking the current orientation to the desired one.
        drotation = current_rot.T.dot(desired_rot)
        dquat = mat2quat(drotation)
        o = self._env.step(
            np.concatenate((delta_xyz, dquat, [target_qpos[-1]])))[0]
    self._previous_target_qpos = target_qpos
    return self._proc_obs(o)
def on_press(self, key):
    """
    Key handler for key presses.

    Args:
        key (int): keycode corresponding to the key that was pressed
    """
    # Translation keys: KeyCode -> (pos index, motion direction).
    pos_bindings = {
        keyboard.KeyCode.from_char('w'): (0, -1.0),  # dec x
        keyboard.KeyCode.from_char('s'): (0, 1.0),   # inc x
        keyboard.KeyCode.from_char('a'): (1, -1.0),  # dec y
        keyboard.KeyCode.from_char('d'): (1, 1.0),   # inc y
        keyboard.KeyCode.from_char('f'): (2, -1.0),  # dec z
        keyboard.KeyCode.from_char('r'): (2, 1.0),   # inc z
    }
    # Rotation keys: KeyCode -> (axis, signed angle step, raw index, signed raw step).
    rot_bindings = {
        keyboard.KeyCode.from_char('z'): ([1., 0., 0.], 0.1, 1, -0.1),   # rotates x
        keyboard.KeyCode.from_char('x'): ([1., 0., 0.], -0.1, 1, 0.1),   # rotates x
        keyboard.KeyCode.from_char('t'): ([0., 1., 0.], 0.1, 0, 0.1),    # rotates y
        keyboard.KeyCode.from_char('g'): ([0., 1., 0.], -0.1, 0, -0.1),  # rotates y
        keyboard.KeyCode.from_char('c'): ([0., 0., 1.], 0.1, 2, 0.1),    # rotates z
        keyboard.KeyCode.from_char('v'): ([0., 0., 1.], -0.1, 2, -0.1),  # rotates z
    }

    if key in pos_bindings:
        axis_idx, sign = pos_bindings[key]
        self.pos[axis_idx] += sign * self._pos_step * self.pos_sensitivity
    elif key in rot_bindings:
        direction, angle_step, raw_idx, raw_step = rot_bindings[key]
        drot = rotation_matrix(angle=angle_step * self.rot_sensitivity,
                               direction=direction)[:3, :3]
        self.rotation = self.rotation.dot(drot)
        self.raw_drotation[raw_idx] += raw_step * self.rot_sensitivity
def joint_positions_for_eef_command(self, dpos, rotation):
    """
    This function runs inverse kinematics to back out target joint positions
    from the provided end effector command.

    Same arguments as @get_control.

    Returns:
        A list of size @num_joints corresponding to the target joint angles.
    """
    self.ik_robot_target_pos += dpos * self.user_sensitivity

    # Offset the commanded rotation by the fixed rotation between the final
    # arm joint (the pybullet eef frame) and the hand link (the mujoco eef
    # frame), given by the 'quat' value for 'right_hand' in panda/robot.xml.
    eef_offset = T.rotation_matrix(angle=-np.pi / 4, direction=[0., 0., 1.], point=None)[:3, :3]
    self.ik_robot_target_orn = T.mat2quat(rotation.dot(eef_offset))

    # Express the target pose in the bullet world frame.
    world_targets = self.bullet_base_pose_to_world_pose(
        (self.ik_robot_target_pos, self.ik_robot_target_orn)
    )

    # Neutral down-position over the center of the table, used to bias IK.
    rest_poses = [0, np.pi / 6, 0.00, -(np.pi - 2 * np.pi / 6), 0.00, (np.pi - np.pi / 6), np.pi / 4]

    # Solve and re-sync repeatedly so the iterative solution settles.
    arm_joint_pos = None
    for _ in range(100):
        arm_joint_pos = self.inverse_kinematics(
            world_targets[0], world_targets[1], rest_poses=rest_poses
        )
        self.sync_ik_robot(arm_joint_pos, sync_last=True)

    return arm_joint_pos
def rotate_camera(env, direction, angle, camera_id):
    """
    Rotate the camera view about a direction (in the camera frame).

    :param direction: a 3-dim numpy array for where to move camera in camera frame
    :param angle: a float for how much to rotate about that direction (degrees)
    :param camera_id: which camera to modify
    """
    # Current camera orientation (mocap quat is stored wxyz; convert to xyzw).
    cam_mat = T.quat2mat(
        T.convert_quat(env.sim.data.get_mocap_quat("cameramover"), to='xyzw'))

    # Apply the incremental rotation (degrees -> radians).
    delta = T.rotation_matrix(np.pi * angle / 180.0, direction, point=None)
    cam_mat = cam_mat.dot(delta[:3, :3])

    # Write the updated orientation back and propagate it through the sim.
    env.sim.data.set_mocap_quat(
        "cameramover", T.convert_quat(T.mat2quat(cam_mat), to='wxyz'))
    env.sim.forward()
def joint_positions_for_eef_command(self, dpos, rotation):
    """
    This function runs inverse kinematics to back out target joint positions
    from the provided end effector command.

    Same arguments as @get_control.

    Returns:
        A list of size @num_joints corresponding to the target joint angles.
    """
    self.ik_robot_target_pos += dpos * self.user_sensitivity

    # Rotate the commanded orientation by -90 degrees about z, accounting for
    # the end effector's rest configuration. The corresponding line in most
    # demo scripts is:
    # `env.set_robot_joint_positions([0, -1.18, 0.00, 2.18, 0.00, 0.57, 1.5708])`
    eef_offset = T.rotation_matrix(angle=-np.pi / 2, direction=[0., 0., 1.], point=None)[:3, :3]
    self.ik_robot_target_orn = T.mat2quat(rotation.dot(eef_offset))

    # Express the target pose in the bullet world frame.
    world_targets = self.bullet_base_pose_to_world_pose(
        (self.ik_robot_target_pos, self.ik_robot_target_orn)
    )

    # Rest configuration used to bias the IK solver.
    rest_poses = [0, -1.18, 0.00, 2.18, 0.00, 0.57, 3.3161]

    # Solve and re-sync repeatedly so the iterative solution settles.
    arm_joint_pos = None
    for _ in range(100):
        arm_joint_pos = self.inverse_kinematics(
            world_targets[0], world_targets[1], rest_poses=rest_poses
        )
        self.sync_ik_robot(arm_joint_pos, sync_last=True)

    return arm_joint_pos
def rotate_camera(env, direction, angle, cam_body_id):
    """
    Rotate the camera view about a direction (in the camera frame).

    Args:
        direction (np.array): 3-array for where to move camera in camera frame
        angle (float): how much to rotate about that direction (degrees)
        cam_body_id (int): id corresponding to parent body of camera element
    """
    # Current camera pose from the model (orientation stored as wxyz).
    camera_pos = np.array(env.sim.model.body_pos[cam_body_id])
    cam_mat = T.quat2mat(T.convert_quat(env.sim.model.body_quat[cam_body_id], to='xyzw'))

    # Compose with the incremental rotation (degrees -> radians).
    delta = T.rotation_matrix(np.pi * angle / 180.0, direction, point=None)
    cam_mat = cam_mat.dot(delta[:3, :3])

    # Store the updated orientation and propagate it through the sim.
    env.sim.model.body_quat[cam_body_id] = T.convert_quat(T.mat2quat(cam_mat), to='wxyz')
    env.sim.forward()
def eef_pos_orn_2_joint_pos(self, eef_pos, orientation):
    """Run iterative inverse kinematics for a target end-effector pose.

    Args:
        eef_pos: target eef position in the robot base frame.
        orientation: target eef rotation matrix in the robot base frame.

    Returns:
        Joint angles realizing the target pose.
    """
    self.ik_robot_target_pos = eef_pos
    # Offset by the fixed -45 degree z rotation between the pybullet and
    # mujoco end-effector frames.
    z_offset = T.rotation_matrix(angle=-np.pi / 4, direction=[0., 0., 1.], point=None)[:3, :3]
    self.ik_robot_target_orn = T.mat2quat(orientation.dot(z_offset))

    # convert from target pose in base frame to target pose in bullet world frame
    world_targets = self.bullet_base_pose_to_world_pose(
        (self.ik_robot_target_pos, self.ik_robot_target_orn)
    )

    # Neutral down-position over the center of the table, used to bias IK.
    rest_poses = [0, np.pi / 6, 0.00, -(np.pi - 2 * np.pi / 6), 0.00, (np.pi - np.pi / 6), np.pi / 4]

    # Solve and re-sync repeatedly so the iterative solution settles.
    arm_joint_pos = None
    for _ in range(100):
        arm_joint_pos = self.inverse_kinematics(
            world_targets[0], world_targets[1], rest_poses=rest_poses
        )
        self.sync_ik_robot(arm_joint_pos, sync_last=True)

    return arm_joint_pos
    gripper_type="TwoFingerGripper",
    use_camera_obs=False,  # do not use pixel observations
    has_offscreen_renderer=False,  # not needed since not using pixel obs
    has_renderer=True,  # make sure we can render to the screen
    reward_shaping=False,  # NOTE(review): False disables shaping (sparse reward); the original comment said "dense" — likely stale
    control_freq=30,  # control should happen fast enough so that simulation looks smooth
)
# NOTE(review): the constructor these keyword arguments belong to begins
# before this excerpt — presumably a robosuite environment factory; confirm.
world.mode = 'human'
world.reset()
world.viewer.set_camera(camera_id=2)
world.render()

# Drive the hand straight down with zero commanded rotation, printing the
# current orientation and the (identity) rotation delta each step.
for i in range(5000):
    dpos, rotation, grasp = [0, 0, -0.01], T.rotation_matrix(angle=0, direction=[1., 0., 0.])[:3, :3], 0
    current = world._right_hand_orn
    drotation = current.T.dot(rotation)  # relative rotation of desired from current
    dquat = T.mat2quat(drotation)
    grasp = grasp - 1  # command becomes -1 — presumably "open gripper"; confirm convention
    print(current)
    print(drotation)
    print(dquat)
    # print(grasp)
    action = np.concatenate([dpos, dquat, [grasp]])
    world.step(action)
    world.render()
    pass
# Build the 3x3 camera intrinsics matrix from focal length and principal point.
f, cx, cy = env._get_cam_K(CAM_ID)
K = np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])
print(K)

# Get original camera location
cam_pos, cam_quat = env._get_cam_pos(CAM_ID)
cam_rot = T.quat2mat(cam_quat)
cam_pose = np.eye(4)
cam_pose[:3, :3] = cam_rot
cam_pose[:3, 3] = cam_pos
print(cam_pose)

# Set new camera location: rotate the camera pose 90 degrees about the
# vertical axis passing through the table's position.
table_pos, table_quat = env._get_body_pos("table")
print("table:", table_pos)
cam_pose_new = T.rotation_matrix(math.pi / 2, np.array([0, 0, 1]), table_pos)
cam_pose_new = np.matmul(cam_pose_new, cam_pose)
cam_quat_new = T.mat2quat(cam_pose_new[:3, :3])
cam_pos_new = cam_pose_new[:3, 3]
env._set_cam_pos(CAM_ID, cam_pos_new, cam_quat_new)
print(cam_pose_new)

# Re-read the camera pose and verify the update round-trips exactly.
cam_pos, cam_quat = env._get_cam_pos(CAM_ID)
cam_rot = T.quat2mat(cam_quat)
cam_pose = np.eye(4)
cam_pose[:3, :3] = cam_rot
cam_pose[:3, 3] = cam_pos
print(cam_pose)
assert np.allclose(cam_pose, cam_pose_new, rtol=1e-05, atol=1e-05)
pos_traj = []
angle_traj = []
# Servo the hand toward (target_pos + table_top_center) while slerping its
# orientation toward a point-down pose yawed by target_angle.
for a in range(1000):
    current_pos = env._right_hand_pos
    grasp = -1  # presumably keeps the gripper open — confirm convention
    # Positional error, scaled down per step; the z component is zeroed
    # (height is not commanded here).
    dpos = target_pos + table_top_center - current_pos
    dpos = dpos * np.array([.01, .01, 0])
    current_pos = env._right_hand_pos
    current_quat = env._right_hand_quat
    # Desired orientation: composition X(0) * Y(pi) * Z(pi + target_angle)
    # about target_pos.
    target_rot_X = T.rotation_matrix(0, np.array([1, 0, 0]), point=target_pos)
    target_rot_Y = T.rotation_matrix(math.pi, np.array([0, 1, 0]), point=target_pos)
    target_rot_Z = T.rotation_matrix(math.pi + target_angle, np.array([0, 0, 1]), point=target_pos)
    target_rot = np.matmul(target_rot_Z, np.matmul(target_rot_Y, target_rot_X))
    target_quat = T.mat2quat(target_rot)
    # Small slerp step toward the target, expressed as a delta quaternion.
    dquat = T.quat_slerp(current_quat, target_quat, 0.01)
    dquat = T.quat_multiply(dquat, T.quat_inverse(current_quat))
    action = np.concatenate([dpos, dquat, [grasp]])
    # NOTE(review): the loop body appears to continue beyond this excerpt —
    # the action is presumably passed to env.step; confirm downstream.