    action_direction = forward
elif 'up' in primact_type:
    action_direction = left

if action_direction is not None:
    end_rotmat = np.array(rotmat, dtype=np.float32)
    end_rotmat[:3, 3] = position_world - action_direction_world * final_dist + action_direction * 0.05
    out_info['end_rotmat_world'] = end_rotmat.tolist()

### viz the EE gripper position
# setup robot
robot_urdf_fn = './robots/panda_gripper.urdf'
robot_material = env.get_material(4, 4, 0.01)
robot = Robot(env, robot_urdf_fn, robot_material, open_gripper=('pulling' in primact_type))

# move to the final pose
robot.robot.set_root_pose(final_pose)
env.render()
rgb_final_pose, _, _, _ = cam.get_observation()
Image.fromarray((rgb_final_pose * 255).astype(np.uint8)).save(
    os.path.join(out_dir, 'viz_target_pose.png'))

# move back
robot.robot.set_root_pose(start_pose)
env.render()

# activate contact checking
env.start_checking_contact(robot.hand_actor_id, robot.gripper_actor_ids,
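# Sketch (assumption, not in the original script): start_pose / final_pose are
# SAPIEN Pose objects derived from 4x4 world-frame matrices such as end_rotmat.
# A helper of this shape, using the sapien.core.Pose API, would perform the
# conversion consumed by robot.robot.set_root_pose() above; set_root_pose()
# teleports the free-floating gripper without simulating motion, which is why
# it is safe to visualize the target pose and move back before contact
# checking starts.
def mat44_to_pose(mat44):
    """Hypothetical helper: wrap a 4x4 world-frame transformation matrix as a Pose."""
    from sapien.core import Pose
    return Pose().from_transformation_matrix(mat44)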
pc[:, 0] -= 5
pc = torch.from_numpy(pc).unsqueeze(0).to(device)
input_pcid = furthest_point_sample(pc, train_conf.num_point_per_shape).long().reshape(-1)
pc = pc[:, input_pcid, :3]  # 1 x N x 3
pc_movable = pc_movable[input_pcid.cpu().numpy()]  # N
pcids = pcids[input_pcid.cpu().numpy()]
pccolors = rgb[pcids[:, 0], pcids[:, 1]]

# push through unet
feats = network.pointnet2(pc.repeat(1, 1, 2))[0].permute(1, 0)  # N x F

# setup robot
robot_urdf_fn = './robots/panda_gripper.urdf'
robot_material = env.get_material(4, 4, 0.01)
robot = Robot(env, robot_urdf_fn, robot_material)

def plot_figure(up, forward):
    # cam to world
    up = mat33 @ up
    forward = mat33 @ forward

    up = np.array(up, dtype=np.float32)
    forward = np.array(forward, dtype=np.float32)
    left = np.cross(up, forward)
    left /= np.linalg.norm(left)
    forward = np.cross(left, up)
    forward /= np.linalg.norm(forward)
    rotmat = np.eye(4).astype(np.float32)
    rotmat[:3, 0] = forward
    rotmat[:3, 1] = left
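    # Note (added, illustrative): the cross products above implement a
    # Gram-Schmidt-style orthonormalization. Given a unit `up` and a roughly
    # perpendicular `forward`, `left = up x forward` (normalized) is orthogonal
    # to both, and recomputing `forward = left x up` makes the triple exactly
    # orthonormal before it is packed into the rotation matrix, e.g.:
    #
    #     up = np.array([0., 0., 1.], dtype=np.float32)
    #     forward = np.array([1., 0.1, 0.], dtype=np.float32)
    #     left = np.cross(up, forward); left /= np.linalg.norm(left)
    #     forward = np.cross(left, up)  # now unit and orthogonal to up/left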