def visualize_samples(samples, problem_env, target_obj_name):
    target_obj = problem_env.env.GetKinBody(target_obj_name)
    orig_color = utils.get_color_of(target_obj)
    utils.set_color(target_obj, [1, 0, 0])
    utils.visualize_placements(samples, target_obj_name)
    utils.set_color(target_obj, orig_color)
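# Example call (sketch only; the sampler and object name below are illustrative,
# borrowed from the scripts further down in this file):
# visualize_samples(place_smpler(100), problem_env, 'square_packing_box1')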
def main():
    problem_idx = 0
    problem_env = Mover(problem_idx, problem_type='jobtalk')
    problem_env.set_motion_planner(BaseMotionPlanner(problem_env, 'rrt'))
    utils.viewer()

    seed = 1
    np.random.seed(seed)
    random.seed(seed)
    pick_smpler, place_smpler = create_sampler(problem_env)

    # Set camera view - to get camera transform: viewer.GetCameraTransform()
    cam_transform = np.array(
        [[-0.54866337, -0.70682829, 0.44650004, -1.45953619],
         [-0.83599448, 0.45806221, -0.30214604, 2.02016926],
         [0.00904058, -0.53904803, -0.8422265, 4.88620949],
         [0., 0., 0., 1.]])
    # NOTE: the assignment below overrides the transform above.
    cam_transform = np.array(
        [[0.76808539, 0.51022899, -0.38692533, 2.7075901],
         [0.63937823, -0.57785198, 0.50723029, -2.0267117],
         [0.03521803, -0.6369878, -0.77006898, 4.52542162],
         [0., 0., 0., 1.]])
    init_goal_cam_transform = np.array(
        [[0.99941927, -0.00186311, 0.03402425, 1.837726],
         [-0.02526303, -0.71058334, 0.70315937, -5.78141165],
         [0.022867, -0.70361058, -0.71021775, 6.03373909],
         [0., 0., 0., 1.]])

    goal_obj_poses = pickle.load(
        open('./test_scripts/jobtalk_figure_cache_files/goal_obj_poses.pkl', 'r'))
    for o in problem_env.objects:
        utils.set_obj_xytheta(goal_obj_poses[o.GetName()], o)

    viewer = problem_env.env.GetViewer()
    viewer.SetCamera(init_goal_cam_transform)

    """
    # how do you go from intrinsic params to [fx, fy, cx, cy]? What are these anyways?
    # (see the sketch after this function)
    # cam_intrinsic_params = viewer.GetCameraIntrinsics()
    I = problem_env.env.GetViewer().GetCameraImage(1920, 1080, cam_transform, cam_intrinsic_params)
    scipy.misc.imsave('test.png', I)
    """

    # NOTE: target_obj_name is not defined in this function; it is assumed to be set
    # elsewhere in this script.
    utils.set_obj_xytheta(np.array([0.01585576, 1.06516767, 5.77099297]), target_obj_name)
    pick_smpls = visualize_pick(pick_smpler, problem_env)
    feasible_pick_op = get_feasible_pick(problem_env, pick_smpls)
    feasible_pick_op.execute()
    utils.visualize_placements(place_smpler(100), problem_env.robot.GetGrabbed()[0])
    import pdb
    pdb.set_trace()

    # Visualize path
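# Sketch of one way the commented-out camera-image capture above might be completed.
# This is an assumption, not code from the original script: it presumes that
# viewer.GetCameraIntrinsics() exposes a 3x3 intrinsics matrix K, from which
# [fx, fy, cx, cy] (focal lengths and principal point, in pixels) can be read off.
def save_viewer_image_sketch(problem_env, cam_transform, fname='test.png'):
    import scipy.misc
    viewer = problem_env.env.GetViewer()
    K = viewer.GetCameraIntrinsics().K  # assumes a .K attribute holding the 3x3 intrinsics matrix
    cam_intrinsic_params = [K[0, 0], K[1, 1], K[0, 2], K[1, 2]]
    I = viewer.GetCameraImage(1920, 1080, cam_transform, cam_intrinsic_params)
    scipy.misc.imsave(fname, I)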
def main():
    problem_seed = 1
    states, konf_relevance, poses, rel_konfs, goal_flags, actions, sum_rewards = \
        get_data('n_objs_pack_4', 'place', 'loading_region')
    actions = actions[:, -4:]
    actions = [utils.decode_pose_with_sin_and_cos_angle(a) for a in actions]

    problem_env, openrave_env = create_environment(problem_seed)

    key_configs = pickle.load(open('prm.pkl', 'r'))[0]
    key_configs = np.delete(key_configs, [415, 586, 615, 618, 619], axis=0)
    indices_to_delete = sampler_utils.get_indices_to_delete('loading_region', key_configs)
    key_configs = np.delete(key_configs, indices_to_delete, axis=0)

    filtered_konfs = filter_configs_that_are_too_close(actions)
    import pdb
    pdb.set_trace()

    utils.viewer()
    utils.visualize_placements(filtered_konfs, 'square_packing_box1')
    pdb.set_trace()
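# filter_configs_that_are_too_close is assumed to be defined elsewhere in this file.
# A minimal sketch of the kind of pruning it presumably performs (assumption; the
# real helper may use a different distance metric or threshold):
def filter_configs_that_are_too_close_sketch(configs, xy_threshold=0.2):
    # Greedily keep a configuration only if its x, y position is at least
    # xy_threshold away from every configuration kept so far.
    kept = []
    for q in configs:
        q = np.array(q)
        if all(np.linalg.norm(q[:2] - k[:2]) >= xy_threshold for k in kept):
            kept.append(q)
    return np.array(kept)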
def visualize_samples(samples, problem_env, target_obj_name, policy_mode):
    target_obj = problem_env.env.GetKinBody(target_obj_name)
    orig_color = utils.get_color_of(target_obj)
    utils.set_color(target_obj, [1, 0, 0])

    if policy_mode == 'full':
        picks, places = samples[0], samples[1]
        utils.visualize_path(picks[0:20, :])
        utils.visualize_path(picks[20:40, :])
        utils.visualize_path(picks[40:60, :])
        utils.visualize_path(picks[60:80, :])
    elif policy_mode == 'pick':
        # assumes that pick sampler is predicting ir parameters
        pick_base_poses = []
        for p in samples:
            _, pose = utils.get_pick_base_pose_and_grasp_from_pick_parameters(target_obj, p)
            pick_base_poses.append(pose)
        pick_base_poses = np.array(pick_base_poses)
        utils.visualize_path(pick_base_poses[0:10, :])
    else:
        utils.visualize_placements(samples, target_obj_name)

    utils.set_color(target_obj, orig_color)
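# Example calls (sketch only; the sample arrays and the object name are illustrative).
# Any policy_mode other than 'full' or 'pick' falls through to the else branch, which
# treats the samples as placements:
# visualize_samples((pick_smpls, place_smpls), problem_env, 'square_packing_box1', 'full')
# visualize_samples(pick_smpls, problem_env, 'square_packing_box1', 'pick')
# visualize_samples(place_smpls, problem_env, 'square_packing_box1', 'place')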