def set_problem_type(self, problem_type):
    """Arrange the scene for the requested problem variant.

    'normal' leaves the scene as-is; 'nonmonotonic' recolors two objects
    (white and black) and moves the robot plus the first eight objects to
    a fixed benchmark layout.
    """
    if problem_type == 'normal':
        return
    if problem_type == 'nonmonotonic':
        # from manipulation.primitives.display import set_viewer_options
        # self.env.SetViewer('qtcoin')
        # set_viewer_options(self.env)
        set_color(self.objects[0], [1, 1, 1])
        set_color(self.objects[4], [0, 0, 0])
        # Fixed robot base configuration for this layout.
        set_robot_config([2.3, -5.5, 0])
        fixed_xythetas = (
            [2.3, -6.4, 0],
            [3.9, -6.2, 0],
            [1.5, -6.3, 0],
            [3.9, -5.5, 0],
            [0.8, -5.5, 0],
            [3.2, -6.2, 0],
            [1.5, -5.5, 0],
            [3.2, -5.5, 0],
        )
        for body, xytheta in zip(self.objects, fixed_xythetas):
            set_obj_xytheta(xytheta, body)
def restore(self, problem_env=None):
    """Restore the saved object poses and robot configuration.

    Falls back to self.problem_env when no environment is given.
    """
    env_to_restore = self.problem_env if problem_env is None else problem_env
    for name in self.object_poses:
        saved_pose = self.object_poses[name]
        set_obj_xytheta(saved_pose, env_to_restore.env.GetKinBody(name))
    set_robot_config(self.robot_pose, env_to_restore.robot)
def __init__(self, problem_idx):
    # Build the one-arm mover variant on top of the base Mover problem.
    Mover.__init__(self, problem_idx)
    self.operator_names = ['one_arm_pick', 'one_arm_place']
    # Hard-coded initial robot base configuration and repositioned object
    # (values appear tuned for this benchmark scene — TODO confirm source).
    set_robot_config([4.19855789, 2.3236321, 5.2933337], self.robot)
    set_obj_xytheta([3.35744004, 2.19644156, 3.52741118], self.objects[1])
    # Keep the base-class objects as "boxes"; the manipulanda for this
    # variant are the shelf objects, flattened from the per-shelf dict.
    self.boxes = self.objects
    self.objects = self.problem_config['shelf_objects']
    self.objects = [k for v in self.objects.values() for k in v]
    # Swap the first two shelf objects (presumably to fix task ordering —
    # TODO confirm against the task definition).
    self.objects[0], self.objects[1] = self.objects[1], self.objects[0]
    self.target_box = self.env.GetKinBody('rectangular_packing_box1')
    # Place the target box somewhere random in the home region and
    # register a region covering it.
    utils.randomly_place_region(self.target_box, self.regions['home_region'])
    self.regions['rectangular_packing_box1_region'] = self.compute_box_region(self.target_box)
    self.shelf_regions = self.problem_config['shelf_regions']
    self.target_box_region = self.regions['rectangular_packing_box1_region']
    self.regions.update(self.shelf_regions)
    self.entity_names = [obj.GetName() for obj in self.objects] + \
        ['rectangular_packing_box1_region', 'center_shelf_region']
    self.name = 'one_arm_mover'
    self.init_saver = utils.CustomStateSaver(self.env)
    self.object_names = self.entity_names
    # fix incorrectly named regions: re-key the region dict by each
    # region object's own .name attribute.
    self.regions = {
        region.name: region
        for region in self.regions.values()
    }
def compute_and_cache_ik_solutions(self, ikcachename):
    """Sample and cache pick IK solutions for the obstacle u'c_obst0'.

    Repeatedly samples a pick; for each region, tries up to 10 random
    placements of the object on the region surface and re-checks pick
    feasibility there. A pick sample is kept only if it is feasible in
    every region. Sampling stops after 10000 attempts or once every
    region has at least 1000 cached (object transform, pick params)
    pairs. The per-region lists are stored in self.iksolutions and
    pickled to `ikcachename`. Environment state is saved up front and
    restored before returning.
    """
    before = CustomStateSaver(self.problem_env.env)
    utils.open_gripper()
    # Disable movable objects so sampling ignores their collisions.
    self.problem_env.disable_objects()
    self.iksolutions = {}
    # Only this obstacle is cached (this was once a loop over all objects).
    o = u'c_obst0'
    obj = self.problem_env.env.GetKinBody(o)
    self.iksolutions[o] = {r: [] for r in self.regions}
    pick_op = Operator(operator_type='one_arm_pick', discrete_parameters={'object': obj})
    pick_generator = UniformGenerator(pick_op, self.problem_env)
    pick_feasibility_checker = OneArmPickFeasibilityChecker(self.problem_env)
    for _ in range(10000):
        pick_params = pick_generator.sample_from_uniform()
        iks = []
        for r, region in self.regions.items():
            place_op = Operator(operator_type='one_arm_place',
                                discrete_parameters={'object': obj, 'region': region})
            place_generator = UniformGenerator(place_op, self.problem_env)
            status = 'NoSolution'
            for _ in range(10):
                # Drop the object at a random pose just above the region
                # surface, then re-check the sampled pick from there.
                obj_pose = place_generator.sample_from_uniform()
                set_obj_xytheta(obj_pose, obj)
                set_point(obj, np.hstack([obj_pose[0:2], region.z + 0.001]))
                params, status = pick_feasibility_checker.check_feasibility(pick_op, pick_params)
                if status == 'HasSolution':
                    iks.append((obj.GetTransform(), params))
                    break
            if status == 'NoSolution':
                # Pick fails in this region; discard the whole sample.
                break
        if len(iks) == len(self.regions):
            # NOTE(review): relies on iteration order of self.regions
            # matching the order iks was built in — true here because iks
            # was appended in the same .items() order on this unmodified dict.
            for r, ik in zip(self.regions, iks):
                self.iksolutions[o][r].append(ik)
        if all(len(self.iksolutions[o][r]) >= 1000 for r in self.regions):
            break
    self.iksolutions = self.iksolutions[o]
    self.problem_env.enable_objects()
    before.Restore()
    # Fix: the original passed an unclosed text-mode handle to
    # pickle.dump; open in binary mode and close it deterministically.
    with open(ikcachename, 'wb') as cache_file:
        pickle.dump(self.iksolutions, cache_file)
def main(): problem_idx = 0 problem_env = Mover(problem_idx, problem_type='jobtalk') problem_env.set_motion_planner(BaseMotionPlanner(problem_env, 'rrt')) utils.viewer() seed = 1 np.random.seed(seed) random.seed(seed) pick_smpler, place_smpler = create_sampler(problem_env) # Set camera view - to get camera transform: viewer.GetCameraTransform() cam_transform = np.array( [[-0.54866337, -0.70682829, 0.44650004, -1.45953619], [-0.83599448, 0.45806221, -0.30214604, 2.02016926], [0.00904058, -0.53904803, -0.8422265, 4.88620949], [0., 0., 0., 1.]]) cam_transform = np.array( [[0.76808539, 0.51022899, -0.38692533, 2.7075901], [0.63937823, -0.57785198, 0.50723029, -2.0267117], [0.03521803, -0.6369878, -0.77006898, 4.52542162], [0., 0., 0., 1.]]) init_goal_cam_transform = np.array( [[0.99941927, -0.00186311, 0.03402425, 1.837726], [-0.02526303, -0.71058334, 0.70315937, -5.78141165], [0.022867, -0.70361058, -0.71021775, 6.03373909], [0., 0., 0., 1.]]) goal_obj_poses = pickle.load( open('./test_scripts/jobtalk_figure_cache_files/goal_obj_poses.pkl', 'r')) for o in problem_env.objects: utils.set_obj_xytheta(goal_obj_poses[o.GetName()], o) viewer = problem_env.env.GetViewer() viewer.SetCamera(init_goal_cam_transform) """ # how do you go from intrinsic params to [fx, fy, cx, cy]? What are these anyways? # cam_intrinsic_params = viewer.GetCameraIntrinsics() I = problem_env.env.GetViewer().GetCameraImage(1920, 1080, cam_transform, cam_intrinsic_params) scipy.misc.imsave('test.png', I) """ utils.set_obj_xytheta(np.array([0.01585576, 1.06516767, 5.77099297]), target_obj_name) pick_smpls = visualize_pick(pick_smpler, problem_env) feasible_pick_op = get_feasible_pick(problem_env, pick_smpls) feasible_pick_op.execute() utils.visualize_placements(place_smpler(100), problem_env.robot.GetGrabbed()[0]) import pdb pdb.set_trace() # Visualize path """
def process_single_data(fname, problem_env, key_configs, save_file): mp_results = pickle.load(open(fname, 'r')) first_obj_poses = mp_results[0]['object_poses'] [ utils.set_obj_xytheta(first_obj_poses[obj.GetName()], obj.GetName()) for obj in problem_env.objects ] pick_q0s = [] pick_qgs = [] pick_labels = [] pick_collisions = [] place_q0s = [] place_qgs = [] place_labels = [] place_collisions = [] collision_vector = utils.compute_occ_vec(key_configs) for mp_result in mp_results: object_poses = mp_result['object_poses'] assert object_poses == first_obj_poses if mp_result['held_obj'] is None: pick_q0s.append(mp_result['q0']) pick_qgs.append(mp_result['qg']) pick_labels.append(mp_result['label']) pick_collisions.append(collision_vector) else: place_q0s.append(mp_result['q0']) place_qgs.append(mp_result['qg']) place_labels.append(mp_result['label']) utils.two_arm_pick_object(mp_result['held_obj'], {'q_goal': mp_result['q0']}) place_collision = utils.compute_occ_vec(key_configs) utils.two_arm_place_object({'q_goal': mp_result['q0']}) place_collisions.append(place_collision) pickle.dump( { 'pick_q0s': pick_q0s, 'pick_qgs': pick_qgs, 'pick_collisions': pick_collisions, 'pick_labels': pick_labels, 'place_q0s': place_q0s, 'place_qgs': place_qgs, 'place_collisions': place_collisions, 'place_labels': place_labels }, open(save_file, 'wb')) print "Done with file", fname
def set_body_poses(self, poses):
    """Move each named body in the environment to its recorded pose.

    poses: mapping from body name to xytheta pose.
    """
    # Idiom fix: iterate items() directly instead of the roundabout
    # zip(poses.keys(), poses.values()).
    for body_name, body_pose in poses.items():
        utils.set_obj_xytheta(body_pose, self.env.GetKinBody(body_name))
def main():
    # Interactive debugging script: builds a mover problem, restores (or
    # computes) a cached abstract state, and steps through visualizations
    # of the geometric predicates; pdb.set_trace() pauses between scenes.
    problem_idx = 0
    env_name = 'two_arm_mover'
    if env_name == 'one_arm_mover':
        problem_env = PaPOneArmMoverEnv(problem_idx)
        goal = ['rectangular_packing_box1_region'
                ] + [obj.GetName() for obj in problem_env.objects[:3]]
        state_fname = 'one_arm_state_gpredicate.pkl'
    else:
        problem_env = PaPMoverEnv(problem_idx)
        goal_objs = [
            'square_packing_box1', 'square_packing_box2',
            'rectangular_packing_box3', 'rectangular_packing_box4'
        ]
        goal_region = 'home_region'
        goal = [goal_region] + goal_objs
        state_fname = 'two_arm_state_gpredicate.pkl'
    problem_env.set_goal(goal)
    # Recompute the abstract state only when no cached pickle exists.
    if os.path.isfile(state_fname):
        state = pickle.load(open(state_fname, 'r'))
    else:
        statecls = helper.get_state_class(env_name)
        state = statecls(problem_env, problem_env.goal)
    utils.viewer()
    if env_name == 'one_arm_mover':
        obj_name = 'c_obst9'
        pick_op = Operator(
            operator_type='one_arm_pick',
            discrete_parameters={
                'object': problem_env.env.GetKinBody(obj_name)
            },
            continuous_parameters=state.pick_params[obj_name][0])
        pick_op.execute()
        problem_env.env.Remove(problem_env.env.GetKinBody('computer_table'))
        utils.set_color(obj_name, [0.9, 0.8, 0.0])
        utils.set_color('c_obst0', [0, 0, 0.8])
        utils.set_obj_xytheta(
            np.array([[4.47789478, -0.01477591, 4.76236795]]), 'c_obst0')
        # Camera transform captured from a live viewer session
        # (viewer.GetCameraTransform()).
        T_viewer = np.array(
            [[-0.69618481, -0.41674492, 0.58450867, 3.62774134],
             [-0.71319601, 0.30884202, -0.62925993, 0.39102399],
             [0.08172004, -0.85495045, -0.51223194, 1.70261502],
             [0., 0., 0., 1.]])
        viewer = problem_env.env.GetViewer()
        viewer.SetCamera(T_viewer)
        import pdb
        pdb.set_trace()
        utils.release_obj()
    else:
        T_viewer = np.array(
            [[0.99964468, -0.00577897, 0.02602139, 1.66357124],
             [-0.01521307, -0.92529857, 0.37893419, -7.65383244],
             [0.02188771, -0.37919541, -0.92505771, 6.7393589],
             [0., 0., 0., 1.]])
        viewer = problem_env.env.GetViewer()
        viewer.SetCamera(T_viewer)
        import pdb
        pdb.set_trace()

        # prefree and occludespre: move the target into the scene and
        # show its cached pick path.
        target = 'rectangular_packing_box4'
        utils.set_obj_xytheta(np.array([[0.1098148, -6.33305931, 0.22135689]]),
                              target)
        utils.get_body_xytheta(target)
        utils.visualize_path(
            state.cached_pick_paths['rectangular_packing_box4'])
        import pdb
        pdb.set_trace()

        # manipfree and occludesmanip: pick an object and show the cached
        # place path for it into home_region.
        pick_obj = 'square_packing_box2'
        pick_used = state.pick_used[pick_obj]
        utils.two_arm_pick_object(pick_obj, pick_used.continuous_parameters)
        utils.visualize_path(state.cached_place_paths[(u'square_packing_box2',
                                                       'home_region')])
        import pdb
        pdb.set_trace()
def generate_smpls(problem_env, sampler, plan): # make a prediction # Make a feasible pick sample for the target object idx = 0 plan_action = plan[0] while True: if plan_action.discrete_parameters['place_region'] == 'home_region': utils.set_obj_xytheta( plan_action.continuous_parameters['place']['object_pose'], plan_action.discrete_parameters['object']) else: if plan_action.discrete_parameters[ 'object'] == 'rectangular_packing_box2': break else: utils.set_obj_xytheta( plan_action.continuous_parameters['place']['object_pose'], plan_action.discrete_parameters['object']) idx += 1 plan_action = plan[idx] import pdb pdb.set_trace() target_obj_name = plan_action.discrete_parameters['object'] place_region = 'loading_region' abstract_action = Operator('two_arm_pick_two_arm_place', { 'object': target_obj_name, 'place_region': place_region }) abstract_action.continuous_parameters = plan_action.continuous_parameters pick_base_pose = plan_action.continuous_parameters['pick']['q_goal'] abstract_action.execute_pick() import pdb pdb.set_trace() utils.set_robot_config( plan_action.continuous_parameters['place']['q_goal']) goal_entities = [ 'square_packing_box1', 'square_packing_box2', 'rectangular_packing_box3', 'rectangular_packing_box4', 'home_region' ] sampler_state = ConcreteNodeState(problem_env, target_obj_name, place_region, goal_entities) inp = make_input_for_place(sampler_state, pick_base_pose) key_configs = pickle.load(open('prm.pkl', 'r'))[0] cols = inp['collisions'].squeeze() colliding_idxs = np.where(cols[:, 1] == 0)[0] colliding_key_configs = key_configs[colliding_idxs, :] samples = [] values = [] for _ in range(20): sample = sampler.sample_from_voo(inp['collisions'], inp['poses'], voo_iter=50, colliding_key_configs=None, tried_samples=np.array([])) values.append( sampler.value_network.predict( [sample[None, :], inp['collisions'], inp['poses']])[0, 0]) sample = utils.decode_pose_with_sin_and_cos_angle(sample) samples.append(sample) utils.visualize_path(samples) 
print values # visualize_samples(samples, problem_env, target_obj_name) # visualize_samples([samples[np.argmax(values)]], problem_env, target_obj_name) return samples