def get_uniform_sampler_place_feasibility_rate(pick_smpls, place_smpls, target_obj, problem_env):
    """Return the percentage of (pick, place) sample pairs that are fully feasible.

    A pair counts as a success only when the feasibility checker accepts it AND
    a base motion plan exists to the pick config AND, while holding the object,
    a base motion plan exists to the place config.

    NOTE(review): raises ZeroDivisionError if pick_smpls is empty — presumably
    callers guarantee non-empty sample lists; confirm.
    """
    feasibility_checker = two_arm_pap_feasiblity_checker.TwoArmPaPFeasibilityChecker(
        problem_env)
    op = Operator('two_arm_place', {
        "object": target_obj,
        "place_region": 'loading_region'
    })
    n_success = 0
    # remember the robot base pose so it can be restored after each trial
    orig_xytheta = utils.get_robot_xytheta(problem_env.robot)
    for pick_smpl, place_smpl in zip(pick_smpls, place_smpls):
        parameters = np.hstack([pick_smpl, place_smpl])
        param, status = feasibility_checker.check_feasibility(
            op, parameters, swept_volume_to_avoid=None, parameter_mode='obj_pose')
        if status == 'HasSolution':
            motion, status = problem_env.motion_planner.get_motion_plan(
                [param['pick']['q_goal']], cached_collisions=None)
            if status == 'HasSolution':
                # simulate holding the object while planning the place motion
                utils.two_arm_pick_object(target_obj, param['pick'])
                motion, status = problem_env.motion_planner.get_motion_plan(
                    [param['place']['q_goal']], cached_collisions=None)
                # NOTE(review): the place is undone using the *pick* parameters —
                # this matches the convention used elsewhere in this file
                # (two_arm_place_object releasing with the pick grasp); confirm.
                utils.two_arm_place_object(param['pick'])
                utils.set_robot_config(orig_xytheta, problem_env.robot)
        # `status` here reflects the last check that ran for this pair
        n_success += status == 'HasSolution'
    total_samples = len(pick_smpls)
    return n_success / float(total_samples) * 100
def get_konf_obstacles_while_holding(pick_samples, sampler_state, problem_env):
    """Compute key-configuration collision vectors while the robot holds the
    object, one vector per pick sample.

    Returns an array of shape (n_samples, 291, 2, 1) — 291 key configs with a
    one-hot (collision / no-collision) channel.
    """
    konf_obstacles_while_holding = []
    # axis-aligned crop bounds for the key configs (presumably the loading
    # region extents — TODO confirm against the environment definition)
    xmin = -0.7
    xmax = 4.3
    ymin = -8.55
    ymax = -4.85
    key_configs = pickle.load(open('prm.pkl', 'r'))[0]
    # drop a handful of known-bad PRM vertices by index
    key_configs = np.delete(key_configs, [415, 586, 615, 618, 619], axis=0)
    # indices of key configs outside the crop box
    indices_to_delete = np.hstack([
        np.where(key_configs[:, 1] > ymax)[0],
        np.where(key_configs[:, 1] < ymin)[0],
        np.where(key_configs[:, 0] > xmax)[0],
        np.where(key_configs[:, 0] < xmin)[0]
    ])
    sampler_state.key_configs = np.delete(key_configs, indices_to_delete, axis=0)
    for pick_smpl in pick_samples:
        # grab the object, record collisions at the key configs, then put it back
        utils.two_arm_pick_object(sampler_state.obj, pick_smpl)
        sampler_state.place_collision_vector = sampler_state.get_collison_vector(
            None)
        konf_obstacles_while_holding.append(
            sampler_state.place_collision_vector)
        utils.two_arm_place_object(pick_smpl)
    return np.array(konf_obstacles_while_holding).reshape(
        (len(pick_samples), 291, 2, 1))
def reset_to_init_state(self, node):
    """Restore the simulator to the given node's saved state, re-grabbing the
    object the robot was holding at that node.

    Restore() re-enables objects that were disabled, but it also destroys any
    existing grabs, so the held object (if any) must be re-picked based on the
    node's parent (or grand-parent) action.
    """
    saver = node.state_saver
    # print "Restoring from mover_env"
    saver.Restore()  # this call re-enables objects that are disabled
    # todo I need to re-grab the object if the object was grabbed, because Restore destroys the grabs
    if node.parent_action is not None:
        # todo
        # When we are switching to a place, none-operator skeleton node, then we need to pick the object
        # We need to determine the node's operator type. If it is a pick
        # Corner case: node is a non-operator skeleton node, and it is time to place
        # BUGFIX: these comparisons used `is` on string literals, which relies
        # on CPython small-string interning and can silently fail; use `==`.
        if node.parent_action.type == 'two_arm_pick' and node.is_operator_skeleton_node:
            # place op-skeleton node: re-pick the parent's object
            grabbed_object = node.parent_action.discrete_parameters['object']
            two_arm_pick_object(grabbed_object, self.robot,
                                node.parent_action.continuous_parameters)
        elif node.parent_action.type == 'two_arm_place' and not node.is_operator_skeleton_node:
            # place op-instance node: re-pick the grand-parent's object
            grabbed_object = node.parent.parent_action.discrete_parameters['object']
            two_arm_pick_object(grabbed_object, self.robot,
                                node.parent.parent_action.continuous_parameters)
    self.curr_state = self.get_state()
    # restrict active DOFs to planar base motion (x, y, rotation about z)
    self.robot.SetActiveDOFs([], DOFAffine.X | DOFAffine.Y | DOFAffine.RotationAxis,
                             [0, 0, 1])
def apply_two_arm_pick_action_stripstream(self, action, obj=None, do_check_reachability=False):
    """Execute a two-arm pick described by `action` = (pick_base_pose, grasp_params).

    Moves the robot base to the pick pose, computes the two-arm grasp and its
    IK solution, grabs the object, and returns
    (curr_state, reward, g_config, pick_path) with reward fixed at 0 and
    pick_path always None.

    :param action: tuple (pick_base_pose, grasp_params) where grasp_params is
        (theta, height_portion, depth_portion)
    :param obj: object to pick; defaults to self.curr_obj
    :param do_check_reachability: unused here (kept for interface compatibility)
    """
    if obj is None:
        obj_to_pick = self.curr_obj
    else:
        obj_to_pick = obj
    pick_base_pose, grasp_params = action
    set_robot_config(pick_base_pose, self.robot)
    grasps = compute_two_arm_grasp(depth_portion=grasp_params[2],
                                   height_portion=grasp_params[1],
                                   theta=grasp_params[0],
                                   obj=obj_to_pick,
                                   robot=self.robot)
    g_config = solveTwoArmIKs(self.env, self.robot, obj_to_pick, grasps)
    # BUGFIX: this used `try: assert g_config is not None / except: pass`,
    # which swallowed the failed assertion and did nothing. g_config may
    # legitimately be None when IK fails; it is returned as-is and downstream
    # code is expected to cope with it.
    # Also renamed the result dict so it no longer shadows the `action` parameter.
    pick_action = {'base_pose': pick_base_pose, 'g_config': g_config}
    two_arm_pick_object(obj_to_pick, self.robot, pick_action)
    curr_state = self.get_state()
    reward = 0
    pick_path = None
    return curr_state, reward, g_config, pick_path
def execute_pick(self):
    """Execute the pick half of this operator in the current OpenRAVE environment.

    Resolves the discrete 'object' parameter to a KinBody if it is given by
    name, then dispatches to the two-arm or one-arm pick primitive depending
    on self.type.
    """
    env = openravepy.RaveGetEnvironments()[0]
    target = self.discrete_parameters['object']
    # the discrete parameter may be either a KinBody or an object name
    obj_to_pick = target if isinstance(target, openravepy.KinBody) else env.GetKinBody(target)
    pick_params = self.continuous_parameters['pick']
    if self.type == 'two_arm_pick_two_arm_place':
        two_arm_pick_object(obj_to_pick, pick_params)
    else:
        one_arm_pick_object(obj_to_pick, pick_params)
def get_place_param_with_feasible_motion_plan(self, chosen_pick_param, candidate_pap_parameters,
                                              cached_holding_collisions):
    """Select a place parameter that admits a base motion plan while holding.

    Temporarily executes the chosen pick in simulation, runs motion planning
    over the candidate place parameters, then undoes the pick and restores the
    robot base pose before returning the chosen place parameter.
    """
    # remember the base pose so the simulator can be restored afterwards
    saved_config = utils.get_body_xytheta(self.problem_env.robot).squeeze()
    target_object = self.operator_skeleton.discrete_parameters['object']
    utils.two_arm_pick_object(target_object, chosen_pick_param)
    candidate_places = [candidate['place'] for candidate in candidate_pap_parameters]
    chosen_place_param = self.get_op_param_with_feasible_motion_plan(
        candidate_places, cached_holding_collisions)
    # undo the simulated pick and restore the base pose
    utils.two_arm_place_object(chosen_pick_param)
    utils.set_robot_config(saved_config)
    return chosen_place_param
def process_single_data(fname, problem_env, key_configs, save_file):
    """Convert one motion-planning experience file into pick/place training
    data and pickle it to save_file.

    Pick entries share one collision vector (nothing held); place entries get
    a collision vector recomputed while holding the recorded object.
    """
    mp_results = pickle.load(open(fname, 'r'))
    first_obj_poses = mp_results[0]['object_poses']
    # restore the scene to the object poses recorded in the file
    [
        utils.set_obj_xytheta(first_obj_poses[obj.GetName()], obj.GetName())
        for obj in problem_env.objects
    ]
    pick_q0s = []
    pick_qgs = []
    pick_labels = []
    pick_collisions = []
    place_q0s = []
    place_qgs = []
    place_labels = []
    place_collisions = []
    # collision vector with nothing held; shared by all pick entries
    collision_vector = utils.compute_occ_vec(key_configs)
    for mp_result in mp_results:
        object_poses = mp_result['object_poses']
        # all entries in one file must come from the same scene
        assert object_poses == first_obj_poses
        if mp_result['held_obj'] is None:
            pick_q0s.append(mp_result['q0'])
            pick_qgs.append(mp_result['qg'])
            pick_labels.append(mp_result['label'])
            pick_collisions.append(collision_vector)
        else:
            place_q0s.append(mp_result['q0'])
            place_qgs.append(mp_result['qg'])
            place_labels.append(mp_result['label'])
            # recompute collisions while holding the recorded object at q0
            utils.two_arm_pick_object(mp_result['held_obj'],
                                      {'q_goal': mp_result['q0']})
            place_collision = utils.compute_occ_vec(key_configs)
            utils.two_arm_place_object({'q_goal': mp_result['q0']})
            place_collisions.append(place_collision)
    pickle.dump(
        {
            'pick_q0s': pick_q0s,
            'pick_qgs': pick_qgs,
            'pick_collisions': pick_collisions,
            'pick_labels': pick_labels,
            'place_q0s': place_q0s,
            'place_qgs': place_qgs,
            'place_collisions': place_collisions,
            'place_labels': place_labels
        }, open(save_file, 'wb'))
    print "Done with file", fname
def is_grasp_config_feasible(self, obj, pick_base_pose, grasp_params, grasp_config):
    """Check whether a grasp configuration is collision-free and in-bounds.

    Temporarily executes the pick in simulation, checks that the robot is not
    in collision and that its bounding box lies in the home or loading region,
    then undoes the pick.
    """
    pick_action = {
        'operator_name': 'two_arm_pick',
        'q_goal': pick_base_pose,
        'grasp_params': grasp_params,
        'g_config': grasp_config
    }
    two_arm_pick_object(obj, pick_action)
    collision_free = not self.env.CheckCollision(self.robot)
    aabb = self.robot.ComputeAABB()
    in_allowed_region = (self.problem_env.regions['home_region'].contains(aabb)
                         or self.problem_env.regions['loading_region'].contains(aabb))
    # Why did I have the below? I cannot plan to some of the spots in entire region
    # inside_region = self.problem_env.regions['entire_region'].contains(self.robot.ComputeAABB())
    two_arm_place_object(pick_action)
    return collision_free and in_allowed_region
def apply_operator_instance_and_get_reward(self, operator_instance, is_op_feasible): if not is_op_feasible: print "Infeasible parameters" return self.infeasible_reward else: # apply the action objects_in_collision = self.problem_env.get_objs_in_collision( operator_instance.low_level_motion, 'entire_region') if operator_instance.type == 'two_arm_pick': two_arm_pick_object( operator_instance.discrete_parameters['object'], self.robot, operator_instance.continuous_parameters) elif operator_instance.type == 'two_arm_place': object_held = self.problem_env.robot.GetGrabbed()[0] two_arm_place_object(object_held, self.robot, operator_instance.continuous_parameters) return -len(objects_in_collision) / 8.0
def compute_clf_rates_from_diff_pinstances(problem_env, key_configs, net, action_type):
    """Evaluate reachability-classifier accuracy over 400 randomized problem
    instances, printing the running sum of per-instance rates.

    For action_type == 'place' the robot is made to hold a feasible pick
    before each evaluation, and releases it afterwards.
    """
    clf_rates = []
    for problem_seed in range(400):
        np.random.seed(problem_seed)
        random.seed(problem_seed)
        # randomize the scene for this seed
        [
            utils.randomly_place_region(obj,
                                        problem_env.regions['loading_region'])
            for obj in problem_env.objects
        ]
        utils.randomly_place_region(problem_env.robot,
                                    problem_env.regions['loading_region'])
        if action_type == 'place':
            sampler = UniformSampler(problem_env.regions['home_region'])
            checker = TwoArmPaPFeasibilityCheckerWithoutSavingFeasiblePick(
                problem_env)
            status = "NoSolution"
            # rejection-sample until a feasible pick is found
            while status != "HasSolution":
                pick_param = sampler.sample()
                target_obj = problem_env.objects[np.random.randint(8)]
                abstract_action = Operator(
                    operator_type='two_arm_pick_two_arm_place',
                    discrete_parameters={
                        'object': target_obj,
                        'place_region': 'home_region'
                    })
                op_parameters, status = checker.check_feasibility(
                    abstract_action, pick_param)
            utils.two_arm_pick_object(target_obj, op_parameters['pick'])
            region_name = 'loading_region'
        else:
            region_name = 'loading_region'
        clf_rate = get_clf_accuracy(problem_env, key_configs, net, region_name)
        if action_type == 'place':
            # release the object picked above
            utils.two_arm_place_object(op_parameters['pick'])
        clf_rates.append(clf_rate)
        # print "pidx %d Clf rate %.2f " % (problem_seed, clf_rate)
        print np.array(clf_rates).sum(axis=0), problem_seed
def apply_operator_instance_and_get_reward(self, state, operator_instance, is_op_feasible):
    """Apply operator_instance in simulation and return its reward.

    Reward scheme:
      * infeasible op  -> self.infeasible_reward
      * two_arm_pick   -> 0
      * two_arm_place  -> +1 for placing into home_region from elsewhere,
                          -1.5 for moving an object home -> loading,
                          0 otherwise
      * one_arm_pick   -> 1
      * one_arm_place  -> 0  (BUGFIX: previously no reward was assigned on
                          this branch, so `return reward` raised
                          UnboundLocalError)
    """
    if not is_op_feasible:
        reward = self.infeasible_reward
    else:
        if operator_instance.type == 'two_arm_pick':
            two_arm_pick_object(operator_instance.discrete_parameters['object'],
                                self.robot,
                                operator_instance.continuous_parameters)
            reward = 0
        elif operator_instance.type == 'two_arm_place':
            object_held = self.robot.GetGrabbed()[0]
            previous_region = self.problem_env.get_region_containing(object_held)
            two_arm_place_object(object_held, self.robot,
                                 operator_instance.continuous_parameters)
            current_region = self.problem_env.get_region_containing(object_held)
            # NOTE(review): this mixes object-identity comparison
            # (previous_region != current_region) with name comparison below;
            # assumes regions are singleton objects — confirm.
            if current_region.name == 'home_region' and previous_region != current_region:
                task_reward = 1
            elif current_region.name == 'loading_region' and previous_region.name == 'home_region':
                task_reward = -1.5
            else:
                task_reward = 0  # placing it in the same region
            reward = task_reward
        elif operator_instance.type == 'one_arm_pick':
            one_arm_pick_object(operator_instance.discrete_parameters['object'],
                                operator_instance.continuous_parameters)
            reward = 1
        elif operator_instance.type == 'one_arm_place':
            one_arm_place_object(operator_instance.discrete_parameters['object'],
                                 operator_instance.continuous_parameters)
            # conservative fix: treat a bare one-arm place as neutral reward
            reward = 0
        else:
            raise NotImplementedError
    return reward
def get_pap_param_with_feasible_motion_plan(self, operator_skeleton, feasible_op_parameters,
                                            cached_collisions, cached_holding_collisions):
    """Choose pick-and-place parameters that admit base motion plans.

    First finds a pick with a feasible motion plan (caching it per object),
    then — holding the object in simulation — finds a place with a feasible
    motion plan. Returns {'is_feasible': False} if either fails.
    """
    # getting pick motion - I can still use the cached collisions from state computation
    pick_op_params = [op['pick'] for op in feasible_op_parameters]
    chosen_pick_param = self.get_op_param_with_feasible_motion_plan(
        pick_op_params, cached_collisions)
    if not chosen_pick_param['is_feasible']:
        return {'is_feasible': False}

    # cache the feasible pick for this object
    target_obj = operator_skeleton.discrete_parameters['object']
    if target_obj in self.feasible_pick_params:
        self.feasible_pick_params[target_obj].append(chosen_pick_param)
    else:
        self.feasible_pick_params[target_obj] = [chosen_pick_param]

    # self.feasible_pick_params[target_obj].append(pick_op_params)

    # getting place motion: simulate the pick, plan the place, then undo
    original_config = utils.get_body_xytheta(self.problem_env.robot).squeeze()
    utils.two_arm_pick_object(operator_skeleton.discrete_parameters['object'],
                              chosen_pick_param)
    place_op_params = [op['place'] for op in feasible_op_parameters]
    chosen_place_param = self.get_op_param_with_feasible_motion_plan(
        place_op_params, cached_holding_collisions)
    utils.two_arm_place_object(chosen_pick_param)
    utils.set_robot_config(original_config)

    if not chosen_place_param['is_feasible']:
        return {'is_feasible': False}

    chosen_pap_param = {
        'pick': chosen_pick_param,
        'place': chosen_place_param,
        'is_feasible': True
    }
    return chosen_pap_param
def predict(self, op_parameters, abstract_state, abstract_action):
    """Predict whether the pick and place configurations in op_parameters are
    both reachable, using the learned pick/place reachability networks.

    Returns False immediately if the pick is predicted unreachable; otherwise
    recomputes collisions while (simulated) holding the object and queries the
    place network.
    """
    collisions = self.process_abstract_state_collisions_into_key_config_obstacles(
        abstract_state)
    key_configs = abstract_state.prm_vertices
    pick_qg = op_parameters['pick']['q_goal']
    v = self.make_vertices(pick_qg, key_configs, collisions)
    # network output > 0.5 is interpreted as "reachable"
    is_pick_reachable = ((self.pick_net(v) > 0.5).cpu().numpy() == True)[0, 0]
    if is_pick_reachable:
        # I have to hold the object and check collisions
        orig_config = utils.get_robot_xytheta()
        target_obj = abstract_action.discrete_parameters['object']
        utils.two_arm_pick_object(target_obj, {'q_goal': pick_qg})
        collisions = utils.compute_occ_vec(key_configs)
        collisions = utils.convert_binary_vec_to_one_hot(collisions)
        # undo the simulated pick and restore the base pose
        utils.two_arm_place_object({'q_goal': pick_qg})
        utils.set_robot_config(orig_config)
        place_qg = op_parameters['place']['q_goal']
        v = self.make_vertices(place_qg, key_configs, collisions)
        pred = self.place_net(v)
        is_place_reachable = ((pred > 0.5).cpu().numpy() == True)[0, 0]
        return is_place_reachable
    else:
        return False
def execute(self):
    """Execute this operator in the current OpenRAVE environment, dispatching
    on self.type.

    Raises NotImplementedError for unknown operator types.
    """
    env = openravepy.RaveGetEnvironments()[0]
    if self.type == 'two_arm_pick':
        obj_to_pick = utils.convert_to_kin_body(
            self.discrete_parameters['object'])
        # some parameterizations store a list of candidate q_goals;
        # in that case use the first one
        if 'q_goal' in self.continuous_parameters and type(self.continuous_parameters['q_goal']) == list and\
                len(self.continuous_parameters['q_goal']) > 1:
            try:
                two_arm_pick_object(
                    obj_to_pick,
                    {'q_goal': self.continuous_parameters['q_goal'][0]})
            except:
                # debugging hook left in place intentionally
                import pdb
                pdb.set_trace()
        else:
            two_arm_pick_object(obj_to_pick, self.continuous_parameters)
    elif self.type == 'two_arm_place':
        two_arm_place_object(self.continuous_parameters)
    elif self.type == 'two_arm_pick_two_arm_place':
        obj_to_pick = utils.convert_to_kin_body(
            self.discrete_parameters['object'])
        two_arm_pick_object(obj_to_pick, self.continuous_parameters['pick'])
        two_arm_place_object(self.continuous_parameters['place'])
    elif self.type == 'one_arm_pick':
        obj_to_pick = utils.convert_to_kin_body(
            self.discrete_parameters['object'])
        one_arm_pick_object(obj_to_pick, self.continuous_parameters)
    elif self.type == 'one_arm_place':
        one_arm_place_object(self.continuous_parameters)
    elif self.type == 'one_arm_pick_one_arm_place':
        obj_to_pick = utils.convert_to_kin_body(
            self.discrete_parameters['object'])
        one_arm_pick_object(obj_to_pick, self.continuous_parameters['pick'])
        one_arm_place_object(self.continuous_parameters['place'])
    else:
        raise NotImplementedError
def get_pap_param_with_feasible_motion_plan(self, operator_skeleton, feasible_op_parameters,
                                            cached_collisions, cached_holding_collisions):
    """Try each feasible pick-and-place parameter pair until one admits both a
    pick and a place base motion plan, logging every motion-planning attempt
    to a uniquely-named pickle file for training data.

    Returns {'is_feasible': False} if the last-tried pick or place had no
    motion plan; otherwise returns the chosen pick/place pair.
    """
    # getting pick motion - I can still use the cached collisions from state computation
    n_feasible = len(feasible_op_parameters)
    n_mp_tried = 0
    # snapshot the scene so each logged attempt records the same object poses
    obj_poses = {
        o.GetName(): utils.get_body_xytheta(o)
        for o in self.problem_env.objects
    }
    prepick_q0 = utils.get_body_xytheta(self.problem_env.robot)
    all_mp_data = []
    for op in feasible_op_parameters:
        print "n_mp_tried / n_feasible_params = %d / %d" % (n_mp_tried,
                                                            n_feasible)
        chosen_pick_param = self.get_op_param_with_feasible_motion_plan(
            [op['pick']], cached_collisions)
        n_mp_tried += 1
        mp_data = {
            'q0': prepick_q0,
            'qg': op['pick']['q_goal'],
            'object_poses': obj_poses,
            'held_obj': None
        }
        if not chosen_pick_param['is_feasible']:
            print "Pick motion does not exist"
            mp_data['label'] = False
            all_mp_data.append(mp_data)
            continue
        else:
            mp_data['label'] = True
            mp_data['motion'] = chosen_pick_param['motion']
            all_mp_data.append(mp_data)
        # simulate the pick, plan the place while holding, then undo
        original_config = utils.get_body_xytheta(
            self.problem_env.robot).squeeze()
        utils.two_arm_pick_object(
            operator_skeleton.discrete_parameters['object'],
            chosen_pick_param)
        mp_data = {
            'q0': op['pick']['q_goal'],
            'qg': op['place']['q_goal'],
            'object_poses': obj_poses,
            'held_obj': operator_skeleton.discrete_parameters['object'],
            'region': operator_skeleton.discrete_parameters['place_region']
        }
        chosen_place_param = self.get_op_param_with_feasible_motion_plan(
            [op['place']], cached_holding_collisions)  # calls MP
        utils.two_arm_place_object(chosen_pick_param)
        utils.set_robot_config(original_config)
        if chosen_place_param['is_feasible']:
            mp_data['label'] = True
            mp_data['motion'] = chosen_place_param['motion']
            all_mp_data.append(mp_data)
            print 'Motion plan exists'
            break
        else:
            mp_data['label'] = False
            all_mp_data.append(mp_data)
            print "Place motion does not exist"
    # persist all attempts (positive and negative) as training data
    pickle.dump(
        all_mp_data,
        open(
            './planning_experience/motion_planning_experience/' +
            str(uuid.uuid4()) + '.pkl', 'wb'))
    # NOTE(review): if feasible_op_parameters is empty, chosen_pick_param and
    # chosen_place_param below are unbound — callers appear to guarantee a
    # non-empty list; confirm.
    if not chosen_pick_param['is_feasible']:
        print "Motion plan does not exist"
        return {'is_feasible': False}
    if not chosen_place_param['is_feasible']:
        print "Motion plan does not exist"
        return {'is_feasible': False}
    chosen_pap_param = {
        'pick': chosen_pick_param,
        'place': chosen_place_param,
        'is_feasible': True
    }
    return chosen_pap_param
def main():
    """Load a trained reachability GNN, optionally compute test/classifier
    accuracy, then interactively visualize the most-activated key configs for
    a sampled place query in a viewer.
    """
    device = torch.device("cpu")
    edges = pickle.load(open('prm_edges_for_reachability_gnn.pkl', 'r'))
    n_msg_passing = 0
    net = ReachabilityNet(edges,
                          n_key_configs=618,
                          device=device,
                          n_msg_passing=n_msg_passing)
    action_type = 'place'
    load_weights(net, 35, action_type, 1, n_msg_passing, device)

    get_data_test_acc = False
    if get_data_test_acc:
        seed = 0
        np.random.seed(seed)
        torch.cuda.manual_seed_all(seed)
        dataset = GNNReachabilityDataset(action_type)
        n_train = int(len(dataset) * 0.9)
        trainset, testset = torch.utils.data.random_split(
            dataset, [n_train, len(dataset) - n_train])
        # NOTE(review): the loader iterates the FULL dataset, not testset,
        # even though n_test is reported as len(testset) — confirm intended.
        testloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=32,
                                                 shuffle=False,
                                                 num_workers=20,
                                                 pin_memory=True)
        print "Getting test accuracy for n_test %d..." % len(testset)
        test_acc = get_test_acc(testloader, net, device, len(testset))
        print test_acc
        import pdb
        pdb.set_trace()

    problem_seed = 1
    problem_env, openrave_env = create_environment(problem_seed)

    # encoded variants expect poses with sin/cos angle features (618 x 4)
    if 'Encoded' in net.__class__.__name__:
        tmp, _ = pickle.load(open('prm.pkl', 'r'))
        key_configs = np.zeros((618, 4))
        for pidx, p in enumerate(tmp):
            key_configs[pidx] = utils.encode_pose_with_sin_and_cos_angle(p)
    else:
        key_configs, _ = pickle.load(open('prm.pkl', 'r'))

    compute_clf_acc = False
    if compute_clf_acc:
        compute_clf_rates_from_diff_pinstances(problem_env, key_configs, net,
                                               action_type)

    utils.viewer()

    if action_type == 'place':
        # rejection-sample until a feasible pick is found, then hold the object
        status = "NoSolution"
        while status != "HasSolution":
            sampler = UniformSampler(problem_env.regions['home_region'])
            checker = TwoArmPaPFeasibilityCheckerWithoutSavingFeasiblePick(
                problem_env)
            pick_param = sampler.sample()
            target_obj = problem_env.objects[np.random.randint(8)]
            abstract_action = Operator(
                operator_type='two_arm_pick_two_arm_place',
                discrete_parameters={
                    'object': target_obj,
                    'place_region': 'home_region'
                })
            op_parameters, status = checker.check_feasibility(
                abstract_action, pick_param)
        utils.two_arm_pick_object(target_obj, op_parameters['pick'])
        collisions = utils.compute_occ_vec(key_configs)
        col = utils.convert_binary_vec_to_one_hot(collisions.squeeze())
        qg = sample_qg(problem_env, 'home_region')
        utils.visualize_path([qg])
        vertex = make_vertices(qg, key_configs, col, net)
        # per-vertex activations; visualize the top-30 by magnitude
        vertex_outputs = net.get_vertex_activations(
            vertex).data.numpy().squeeze()
        """
        top_activations = np.max(vertex_outputs, axis=0)
        top_k_args = np.argsort(abs(top_activations))[-10:]
        """
        top_k_args = np.argsort(abs(vertex_outputs))[-30:]
        top_k_key_configs = key_configs[top_k_args, :]
        import pdb
        pdb.set_trace()
        utils.visualize_path(top_k_key_configs)
        # todo visualize the activations

    import pdb
    pdb.set_trace()
def check_existence_of_feasible_motion_plan(self, candidate_parameters):
    """Check whether any candidate pick-and-place parameter pair admits both a
    pick and a place base motion plan, updating motion-planning statistics.

    Returns {'is_feasible': False} if the last-tried pick or place failed,
    otherwise the chosen pick/place pair.
    """
    n_feasible = len(candidate_parameters)
    n_mp_tried = 0
    for op in candidate_parameters:
        stime = time.time()
        self.n_mp_checks += 1
        # NOTE(review): `param` is computed but never used below
        param = np.hstack([
            op['pick']['action_parameters'], op['place']['action_parameters']
        ])
        # todo why is there a mismatch betwen pick and place samples?
        print "n_mp_tried / n_feasible_params = %d / %d" % (n_mp_tried,
                                                            n_feasible)
        chosen_pick_param = self.get_motion_plan([op['pick']])
        n_mp_tried += 1
        self.n_pick_mp_checks += 1
        print "Pick motion planning time {:.5f}".format(time.time() - stime)
        if not chosen_pick_param['is_feasible']:
            print "Pick motion does not exist"
            self.n_mp_infeasible += 1
            self.n_pick_mp_infeasible += 1
            continue
        # simulate the pick, plan the place while holding, then undo
        original_config = utils.get_body_xytheta(
            self.problem_env.robot).squeeze()
        utils.two_arm_pick_object(
            self.abstract_action.discrete_parameters['object'],
            chosen_pick_param)
        stime = time.time()
        chosen_place_param = self.get_motion_plan([op['place']])  # calls MP
        self.n_place_mp_checks += 1
        utils.two_arm_place_object(chosen_pick_param)
        utils.set_robot_config(original_config)
        print "Place motion planning time {:.5f}".format(time.time() - stime)
        if chosen_place_param['is_feasible']:
            print 'Motion plan exists'
            break
        else:
            self.n_mp_infeasible += 1
            self.n_place_mp_infeasible += 1
            print "Place motion does not exist"
    # NOTE(review): if candidate_parameters is empty, the names below are
    # unbound — callers appear to guarantee a non-empty list; confirm.
    if not chosen_pick_param['is_feasible']:
        print "Motion plan does not exist"
        return {'is_feasible': False}
    if not chosen_place_param['is_feasible']:
        print "Motion plan does not exist"
        return {'is_feasible': False}
    chosen_pap_param = {
        'pick': chosen_pick_param,
        'place': chosen_place_param,
        'is_feasible': True
    }
    return chosen_pap_param
def main():
    """Interactive visualization of ground-predicate state for the mover
    domains: sets up a problem, loads or builds its state, and steps through
    camera/path visualizations with pdb breakpoints.
    """
    problem_idx = 0

    # hard-coded domain switch; 'one_arm_mover' path is kept for manual use
    env_name = 'two_arm_mover'
    if env_name == 'one_arm_mover':
        problem_env = PaPOneArmMoverEnv(problem_idx)
        goal = ['rectangular_packing_box1_region'
                ] + [obj.GetName() for obj in problem_env.objects[:3]]
        state_fname = 'one_arm_state_gpredicate.pkl'
    else:
        problem_env = PaPMoverEnv(problem_idx)
        goal_objs = [
            'square_packing_box1', 'square_packing_box2',
            'rectangular_packing_box3', 'rectangular_packing_box4'
        ]
        goal_region = 'home_region'
        goal = [goal_region] + goal_objs
        state_fname = 'two_arm_state_gpredicate.pkl'

    problem_env.set_goal(goal)
    # reuse a cached state when available; otherwise compute it fresh
    if os.path.isfile(state_fname):
        state = pickle.load(open(state_fname, 'r'))
    else:
        statecls = helper.get_state_class(env_name)
        state = statecls(problem_env, problem_env.goal)

    utils.viewer()

    if env_name == 'one_arm_mover':
        obj_name = 'c_obst9'
        pick_op = Operator(
            operator_type='one_arm_pick',
            discrete_parameters={
                'object': problem_env.env.GetKinBody(obj_name)
            },
            continuous_parameters=state.pick_params[obj_name][0])
        pick_op.execute()
        problem_env.env.Remove(problem_env.env.GetKinBody('computer_table'))
        utils.set_color(obj_name, [0.9, 0.8, 0.0])
        utils.set_color('c_obst0', [0, 0, 0.8])
        utils.set_obj_xytheta(
            np.array([[4.47789478, -0.01477591, 4.76236795]]), 'c_obst0')
        # hand-tuned camera transform for screenshots
        T_viewer = np.array(
            [[-0.69618481, -0.41674492, 0.58450867, 3.62774134],
             [-0.71319601, 0.30884202, -0.62925993, 0.39102399],
             [0.08172004, -0.85495045, -0.51223194, 1.70261502],
             [0., 0., 0., 1.]])
        viewer = problem_env.env.GetViewer()
        viewer.SetCamera(T_viewer)

        import pdb
        pdb.set_trace()
        utils.release_obj()
    else:
        # hand-tuned camera transform for screenshots
        T_viewer = np.array(
            [[0.99964468, -0.00577897, 0.02602139, 1.66357124],
             [-0.01521307, -0.92529857, 0.37893419, -7.65383244],
             [0.02188771, -0.37919541, -0.92505771, 6.7393589],
             [0., 0., 0., 1.]])
        viewer = problem_env.env.GetViewer()
        viewer.SetCamera(T_viewer)

        import pdb
        pdb.set_trace()
        # prefree and occludespre
        target = 'rectangular_packing_box4'
        utils.set_obj_xytheta(
            np.array([[0.1098148, -6.33305931, 0.22135689]]), target)
        utils.get_body_xytheta(target)
        utils.visualize_path(
            state.cached_pick_paths['rectangular_packing_box4'])
        import pdb
        pdb.set_trace()

        # manipfree and occludesmanip
        pick_obj = 'square_packing_box2'
        pick_used = state.pick_used[pick_obj]
        utils.two_arm_pick_object(pick_obj, pick_used.continuous_parameters)
        utils.visualize_path(state.cached_place_paths[(u'square_packing_box2',
                                                       'home_region')])
        import pdb
        pdb.set_trace()
def search(self, object_to_move, parent_swept_volumes, obstacles_to_remove,
           objects_moved_before, plan, parent_pick=None, parent_obj=None,
           ultimate_plan_stime=None, timelimit=None):
    """Recursive HPN-style search: plan a pick-and-place for object_to_move,
    then recursively clear any obstacles colliding with the planned swept
    volumes until a fully collision-free plan is found.

    Returns (plan, 'HasSolution') on success and (False, 'NoSolution') on
    failure or timeout.

    NOTE(review): the defaults ultimate_plan_stime=None / timelimit=None would
    make the time check below raise TypeError; callers presumably always pass
    both — confirm.
    """
    print time.time() - ultimate_plan_stime
    if time.time() - ultimate_plan_stime > timelimit:
        return False, 'NoSolution'
    swept_volumes = PickAndPlaceSweptVolume(self.problem_env,
                                            parent_swept_volumes)
    # copy mutable arguments so recursive calls do not alias the parent's lists
    objects_moved_before = [o for o in objects_moved_before]
    plan = [p for p in plan]
    self.problem_env.set_exception_objs_when_disabling_objects_in_region(
        objects_moved_before)
    self.number_of_nodes += 1
    # objects may arrive as names; resolve to KinBody
    object_to_move = self.problem_env.env.GetKinBody(
        object_to_move) if isinstance(object_to_move,
                                      unicode) else object_to_move
    target_region = self.get_target_region_for_obj(object_to_move)

    # Debugging purpose
    color_before = get_color(object_to_move)
    set_color(object_to_move, [1, 0, 0])
    # End of debugging purpose

    # PlanGrasp
    saver = utils.CustomStateSaver(self.problem_env.env)
    stime = time.time()
    # this contains mc-path from initial config to the target obj
    _, _, pick_operator_instance_for_curr_object = self.get_pick_from_initial_config(
        object_to_move)
    #print 'Time pick', time.time() - stime
    if pick_operator_instance_for_curr_object is None:
        saver.Restore()
        self.reset()
        #print "Infeasible branch"
        return False, 'NoSolution'
    utils.two_arm_pick_object(
        object_to_move,
        pick_operator_instance_for_curr_object.continuous_parameters)

    # FindPlacements
    _, _, place_operator_instance = self.plan_place(target_region,
                                                    swept_volumes)
    if place_operator_instance is None:
        saver.Restore()
        self.reset()
        print "Infeasible branch"
        return False, 'NoSolution'

    # O_{FUC} update
    objects_moved_before.append(object_to_move)
    self.problem_env.set_exception_objs_when_disabling_objects_in_region(
        objects_moved_before)

    if parent_pick is not None:
        # place the current object, then plan the pick motion for the parent
        utils.two_arm_place_object(
            place_operator_instance.continuous_parameters)
        _, _, pick_operator_instance_for_parent_object = self.plan_pick_motion_for(
            parent_obj, parent_pick)  # PlanNavigation
        if pick_operator_instance_for_parent_object is None:
            print "Infeasible branch"
            saver.Restore()
            return False, 'NoSolution'
        swept_volumes.add_pick_swept_volume(
            pick_operator_instance_for_parent_object)
        swept_volumes.add_place_swept_volume(
            place_operator_instance, pick_operator_instance_for_curr_object)
        plan.insert(0, pick_operator_instance_for_parent_object)
        plan.insert(0, place_operator_instance)
    else:
        pick_operator_instance_for_parent_object = None
        swept_volumes.add_place_swept_volume(
            place_operator_instance, pick_operator_instance_for_curr_object)
        plan.insert(0, place_operator_instance)
    saver.Restore()  # O_{PAST}

    self.problem_env.enable_objects_in_region('entire_region')
    objs_in_collision_for_pap \
        = swept_volumes.get_objects_in_collision_with_pick_and_place(pick_operator_instance_for_parent_object,
                                                                     place_operator_instance)
    obstacles_to_remove = objs_in_collision_for_pap + obstacles_to_remove

    # Note:
    # For this code to be precisely HPN, I need to keep all objects that have not been moved so far in obstacles
    # to remove. I am making the assumption that, because we are in a continuous domain, we always keep the
    # tried-actions in the queue, and because the swept-volume heuristic tells us to move the ones in collision
    # first, we will always try to move the first-colliding object.
    if len(obstacles_to_remove) == 0:
        objs_in_collision_for_picking_curr_obj \
            = swept_volumes.pick_swept_volume.get_objects_in_collision_with_given_op_inst(
                pick_operator_instance_for_curr_object)
        if len(objs_in_collision_for_picking_curr_obj) == 0:
            plan.insert(0, pick_operator_instance_for_curr_object)
            return plan, 'HasSolution'
        else:
            obstacles_to_remove += objs_in_collision_for_picking_curr_obj

    # enumerate through all object orderings
    print "Obstacles to remove", obstacles_to_remove
    """
    cbefore = []
    for oidx, o in enumerate(obstacles_to_remove):
        cbefore.append(get_color(o))
        set_color(o, [0, 0, float(oidx) / len(obstacles_to_remove)])
    [set_color(o, c) for c, o in zip(cbefore, obstacles_to_remove)]
    """
    for new_obj_to_move in obstacles_to_remove:
        set_color(object_to_move, color_before)
        tmp_obstacles_to_remove = set(obstacles_to_remove).difference(
            set([new_obj_to_move]))
        tmp_obstacles_to_remove = list(tmp_obstacles_to_remove)
        #print "tmp obstacles to remove:", tmp_obstacles_to_remove
        branch_plan, status = self.search(
            new_obj_to_move,
            swept_volumes,
            tmp_obstacles_to_remove,
            objects_moved_before,
            plan,
            pick_operator_instance_for_curr_object,
            parent_obj=object_to_move,
            ultimate_plan_stime=ultimate_plan_stime,
            timelimit=timelimit)
        is_branch_success = status == 'HasSolution'
        if is_branch_success:
            return branch_plan, status

    # It should never come down here, as long as the number of nodes have not exceeded the limit
    # but to which level do I back track? To the root node. If this is a root node and
    # the number of nodes have not reached the maximum, keep searching.
    return False, 'NoSolution'
def generate_training_data_single():
    """Solve (or load a cached solution for) one mover problem instance, print
    its metrics, and optionally replay the plan in simulation while checking
    for collisions along every trajectory segment.
    """
    np.random.seed(config.pidx)
    random.seed(config.pidx)
    if config.domain == 'two_arm_mover':
        mover = Mover(config.pidx, config.problem_type)
    elif config.domain == 'one_arm_mover':
        mover = OneArmMover(config.pidx)
    else:
        raise NotImplementedError

    # reseed for the planner so planning is independent of scene generation
    np.random.seed(config.planner_seed)
    random.seed(config.planner_seed)
    mover.set_motion_planner(BaseMotionPlanner(mover, 'prm'))
    mover.seed = config.pidx

    # todo change the root node
    mover.init_saver = DynamicEnvironmentStateSaver(mover.env)

    # local dev machines write next to the repo; cluster nodes to shared storage
    hostname = socket.gethostname()
    if hostname in {'dell-XPS-15-9560', 'phaedra', 'shakey', 'lab', 'glaucus', 'luke-laptop-1'}:
        root_dir = './'
        #root_dir = '/home/beomjoon/Dropbox (MIT)/cloud_results/'
    else:
        root_dir = '/data/public/rw/pass.port/tamp_q_results/'

    solution_file_dir = root_dir + '/test_results/greedy_results_on_mover_domain/domain_%s/n_objs_pack_%d'\
                        % (config.domain, config.n_objs_pack)
    if config.dont_use_gnn:
        solution_file_dir += '/no_gnn/'
    elif config.dont_use_h:
        solution_file_dir += '/gnn_no_h/loss_' + str(config.loss) + '/num_train_' + str(config.num_train) + '/'
    elif config.hcount:
        solution_file_dir += '/hcount/'
    elif config.hadd:
        solution_file_dir += '/gnn_hadd/loss_' + str(config.loss) + '/num_train_' + str(config.num_train) + '/'
    else:
        solution_file_dir += '/gnn/loss_' + str(config.loss) + '/num_train_' + str(config.num_train) + '/'

    solution_file_name = 'pidx_' + str(config.pidx) + \
                         '_planner_seed_' + str(config.planner_seed) + \
                         '_train_seed_' + str(config.train_seed) + \
                         '_domain_' + str(config.domain) + '.pkl'
    if not os.path.isdir(solution_file_dir):
        os.makedirs(solution_file_dir)
    solution_file_name = solution_file_dir + solution_file_name

    is_problem_solved_before = os.path.isfile(solution_file_name)
    if is_problem_solved_before and not config.plan:
        # reuse the cached trajectory instead of re-planning
        with open(solution_file_name, 'rb') as f:
            trajectory = pickle.load(f)
            success = trajectory.metrics['success']
            tottime = trajectory.metrics['tottime']
    else:
        t = time.time()
        trajectory, num_nodes = get_problem(mover)
        tottime = time.time() - t
        success = trajectory is not None
        plan_length = len(trajectory.actions) if success else 0
        if not success:
            trajectory = Trajectory(mover.seed, mover.seed)
        trajectory.states = [
            s.get_predicate_evaluations() for s in trajectory.states
        ]
        trajectory.state_prime = None
        trajectory.metrics = {
            'n_objs_pack': config.n_objs_pack,
            'tottime': tottime,
            'success': success,
            'plan_length': plan_length,
            'num_nodes': num_nodes,
        }
        #with open(solution_file_name, 'wb') as f:
        #    pickle.dump(trajectory, f)
    print 'Time: %.2f Success: %d Plan length: %d Num nodes: %d' % (tottime, success, trajectory.metrics['plan_length'], trajectory.metrics['num_nodes'])
    import pdb;pdb.set_trace()
    """
    print("time: {}".format(','.join(str(trajectory.metrics[m]) for m in [
        'n_objs_pack',
        'tottime',
        'success',
        'plan_length',
        'num_nodes',
    ])))
    """
    print('\n'.join(str(a.discrete_parameters.values()) for a in trajectory.actions))

    def draw_robot_line(env, q1, q2):
        # draw a black line segment between two base configs at z = 0.5
        return draw_line(env, list(q1)[:2] + [.5], list(q2)[:2] + [.5], color=(0,0,0,1), width=3.)

    mover.reset_to_init_state_stripstream()
    if not config.dontsimulate:
        if config.visualize_sim:
            mover.env.SetViewer('qtcoin')
            set_viewer_options(mover.env)
            raw_input('Start?')
        handles = []
        for step_idx, action in enumerate(trajectory.actions):
            if action.type == 'two_arm_pick_two_arm_place':
                def check_collisions(q=None):
                    # teleport the base to q (if given) and report any
                    # robot/object collision
                    if q is not None:
                        set_robot_config(q, mover.robot)
                    collision = False
                    if mover.env.CheckCollision(mover.robot):
                        collision = True
                    for obj in mover.objects:
                        if mover.env.CheckCollision(obj):
                            collision = True
                    if collision:
                        print('collision')
                        if config.visualize_sim:
                            raw_input('Continue after collision?')
                check_collisions()
                o = action.discrete_parameters['two_arm_place_object']
                pick_params = action.continuous_parameters['pick']
                place_params = action.continuous_parameters['place']
                # sanity-check trajectory continuity: flag any jump > 1m
                full_path = [get_body_xytheta(mover.robot)[0]] + pick_params['motion'] + [pick_params['q_goal']] + \
                            place_params['motion'] + [place_params['q_goal']]
                for i, (q1, q2) in enumerate(zip(full_path[:-1], full_path[1:])):
                    if np.linalg.norm(np.squeeze(q1)[:2] - np.squeeze(q2)[:2]) > 1:
                        print(i, q1, q2)
                        import pdb; pdb.set_trace()
                pickq = pick_params['q_goal']
                pickt = pick_params['motion']
                check_collisions(pickq)
                for q in pickt:
                    check_collisions(q)
                obj = mover.env.GetKinBody(o)
                if len(pickt) > 0:
                    for q1, q2 in zip(pickt[:-1], pickt[1:]):
                        handles.append(
                            draw_robot_line(mover.env, q1.squeeze(), q2.squeeze()))
                # set_robot_config(pickq, mover.robot)
                # if config.visualize_sim:
                #     raw_input('Continue?')
                # set_obj_xytheta([1000,1000,0], obj)
                two_arm_pick_object(obj, pick_params)
                if config.visualize_sim:
                    raw_input('Place?')
                o = action.discrete_parameters['two_arm_place_object']
                r = action.discrete_parameters['two_arm_place_region']
                placeq = place_params['q_goal']
                placep = place_params['object_pose']
                placet = place_params['motion']
                check_collisions(placeq)
                for q in placet:
                    check_collisions(q)
                obj = mover.env.GetKinBody(o)
                if len(placet) > 0:
                    for q1, q2 in zip(placet[:-1], placet[1:]):
                        handles.append(
                            draw_robot_line(mover.env, q1.squeeze(), q2.squeeze()))
                # set_robot_config(placeq, mover.robot)
                # if config.visualize_sim:
                #     raw_input('Continue?')
                # set_obj_xytheta(placep, obj)
                two_arm_place_object(place_params)
                check_collisions()
                if config.visualize_sim:
                    raw_input('Continue?')
            elif action.type == 'one_arm_pick_one_arm_place':
                def check_collisions(q=None):
                    # teleport the base to q (if given) and report any
                    # robot/object collision
                    if q is not None:
                        set_robot_config(q, mover.robot)
                    collision = False
                    if mover.env.CheckCollision(mover.robot):
                        collision = True
                    for obj in mover.objects:
                        if mover.env.CheckCollision(obj):
                            collision = True
                    if collision:
                        print('collision')
                        if config.visualize_sim:
                            raw_input('Continue after collision?')
                check_collisions()
                pick_params = action.continuous_parameters['pick']
                place_params = action.continuous_parameters['place']
                one_arm_pick_object(
                    mover.env.GetKinBody(action.discrete_parameters['object']),
                    pick_params)
                check_collisions()
                if config.visualize_sim:
                    raw_input('Place?')
                one_arm_place_object(place_params)
                check_collisions()
                if config.visualize_sim:
                    raw_input('Continue?')
            else:
                raise NotImplementedError
        # verify the goal: the first n_objs_pack objects must be in the goal region
        n_objs_pack = config.n_objs_pack
        if config.domain == 'two_arm_mover':
            goal_region = 'home_region'
        elif config.domain == 'one_arm_mover':
            goal_region = 'rectangular_packing_box1_region'
        else:
            raise NotImplementedError
        if all(mover.regions[goal_region].contains(o.ComputeAABB())
               for o in mover.objects[:n_objs_pack]):
            print("successful plan length: {}".format(len(trajectory.actions)))
        else:
            print('failed to find plan')
        if config.visualize_sim:
            raw_input('Finish?')
    return