def single_run(self, name):
    """Run one benchmark pass with each planner type and pickle the results.

    Runs RRT* first, then A* at 0.8 resolution, the cached RRT* variant,
    and finally A* at 0.3 resolution; everything is saved under
    ``<results_dir>/results_<name>.pkl``.
    """
    # Plain RRT* baseline (no caching).
    results_rrtstar = self.learning_loop(self.planner, planner_type="rrtstar")
    # A* at coarse 0.8 grid resolution.
    self.planner.astar_res = 0.8
    results_astar_08 = self.learning_loop(self.planner, planner_type="astar")
    # Trajectory-cached RRT* variant.
    results_cached_rrt = self.learning_loop(self.planner, planner_type="cached_rrt")
    # A* at finer 0.3 grid resolution.
    self.planner.astar_res = 0.3
    results_astar_03 = self.learning_loop(self.planner, planner_type="astar")
    results = {
        "rrtstar": results_rrtstar,
        "astar_0.8": results_astar_08,
        "astar_0.3": results_astar_03,
        "cached_rrt": results_cached_rrt,
    }
    fn.pickle_saver(results, self.results_dir + "results_" + name + ".pkl")
def sevilla_test_run(self, name):
    """Benchmark RRT* with and without trajectory caching (5 s hard cap)."""
    # Configure a 3 s planning budget with a 5 s hard cap and a 2500-entry cache.
    self.planner.planning_time = 3
    self.planner.max_planning_time = 5
    self.cache_size = 2500
    results = {
        "RLT_NC_5": self.learning_loop(self.planner, planner_type="rrtstar"),
        "RLT_5": self.learning_loop(self.planner, planner_type="cached_rrt"),
    }
    fn.pickle_saver(results, self.results_dir + "results_" + name + ".pkl")
def point_recieved(self, data): if self.done == False: self.goals.append([data.point.x, data.point.y]) print "data_Save" print self.number_of_goals, len(self.goals) if len(self.goals) == self.number_of_goals: #save to directory fn.pickle_saver(self.goals, self.save_name + "/goals.pkl") print "DATA SAVED. WILL ADDING MORE GOALS WILL NOT HAVE AN EFFECT" self.done = True
def sevilla_test_run(self, name):
    """Run the Sevilla benchmark: RRT* vs cached RRT* at a 5 s cap."""
    # 3 s soft / 5 s hard planning budget; cache capped at 2500 entries.
    self.planner.planning_time = 3
    self.planner.max_planning_time = 5
    self.cache_size = 2500
    results = {}
    for label, ptype in (("RLT_NC_5", "rrtstar"), ("RLT_5", "cached_rrt")):
        results[label] = self.learning_loop(self.planner, planner_type=ptype)
    fn.pickle_saver(results, self.results_dir + "results_" + name + ".pkl")
def point_recieved(self,data): if self.done == False: self.goals.append([data.point.x,data.point.y]) print "data_Save" print self.number_of_goals,len(self.goals) if len(self.goals) == self.number_of_goals: #save to directory fn.pickle_saver(self.goals,self.save_name+"/goals.pkl") print "DATA SAVED. WILL ADDING MORE GOALS WILL NOT HAVE AN EFFECT" self.done = True
def pareto_run(self, name):
    """Sweep planning budgets (RRT*) and A* resolutions for a Pareto front.

    Runs RRT* with and without caching at 2/5/8/10 s budgets, then A*
    (MMP) at 0.8/0.5/0.3/0.2 grid resolution, and pickles all results
    as ``<results_dir>/results_<name>.pkl``.
    """
    results = {}
    # (planning_time, max_planning_time, cache_size) per time budget.
    for secs, max_secs, cache in ((2, 3, 1300), (5, 6, 2300),
                                  (8, 9, 2900), (10, 11, 3300)):
        self.planner.planning_time = secs
        self.planner.max_planning_time = max_secs
        self.cache_size = cache
        results["RLT-NC_%d" % secs] = self.learning_loop(self.planner, planner_type="rrtstar")
        results["RLT_%d" % secs] = self.learning_loop(self.planner, planner_type="cached_rrt")
    # A* (MMP) at decreasing grid resolutions.
    for res in (0.8, 0.5, 0.3, 0.2):
        self.planner.astar_res = res
        results["MMP_%s" % res] = self.learning_loop(self.planner, planner_type="astar")
    fn.pickle_saver(results, self.results_dir + "results_" + name + ".pkl")
def __init__(self): self.filepath = os.path.dirname(os.path.abspath(__file__)) self.directory = self.filepath + "/sevilla_test/" print self.directory self.point_sub = rospy.Subscriber("clicked_point",PointStamped,self.point_cb) self.expert_path_pub = rospy.Publisher("expert_path",Path,queue_size = 1) self.ppl_pub = rospy.Publisher("person_poses",PersonArray,queue_size = 10) self.experiment_data = experiment_load_sevilla(self.directory) ex_dat = [] good_traj = [0,1,2,4,6,7,9,10,11,16,17,20,22,23,24,25,26,27,28,30,31,34,36,37,38,39,43] for i in good_traj: ex_dat.append(self.experiment_data[i]) self.experiment_data = ex_dat fn.pickle_saver(ex_dat,self.directory+"filtered.pkl") print "DATA LOADED--------------------------------" self.visualization_counter=0
def single_run(self, name):
    """Benchmark every planner variant once and pickle the results.

    Covers RRT*, A* at 0.8 resolution, cached RRT*, and A* at 0.3
    resolution; output goes to ``<results_dir>/results_<name>.pkl``.
    """
    results = {}
    # Plain RRT* baseline.
    results["rrtstar"] = self.learning_loop(self.planner, planner_type="rrtstar")
    # A* at coarse resolution.
    self.planner.astar_res = 0.8
    results["astar_0.8"] = self.learning_loop(self.planner, planner_type="astar")
    # Trajectory-cached RRT*.
    results["cached_rrt"] = self.learning_loop(self.planner, planner_type="cached_rrt")
    # A* at finer resolution.
    self.planner.astar_res = 0.3
    results["astar_0.3"] = self.learning_loop(self.planner, planner_type="astar")
    fn.pickle_saver(results, self.results_dir + "results_" + name + ".pkl")
def point_recieved(self, data):
    # Clicked-point callback for recording people's routes.  Each click
    # appends a pose (with a random yaw) to the current person's
    # checkpoint list; when the last person's list is full, all routes
    # are pickled and the node shuts down.
    # NOTE(review): indentation reconstructed from a flattened source --
    # confirm the branch grouping against the original file.
    if len(self.people[self.current_person].persons) != self.checkpoints_per_person:
        # Current person still has room: record this checkpoint for them.
        p = Person()
        p.pose.position.x = data.point.x ; p.pose.position.y = data.point.y
        # Random heading so the simulated people are not all aligned.
        quatern = tf.transformations.quaternion_from_euler(0, 0, np.random.uniform(-np.pi, np.pi, 1))
        p.pose.orientation.z = quatern[-2]; p.pose.orientation.w = quatern[-1]
        p.id = self.current_person ; p.vel = 0 ; p.header.stamp = rospy.get_rostime(); p.header.frame_id = "map"
        self.people[self.current_person].persons.append(p)
        print "Registered point", data.point.x, data.point.y
        print "For Person", self.current_person
        statement = len(self.people[self.current_person].persons) == self.checkpoints_per_person
        # Last checkpoint of the last person: persist everything and stop.
        if statement == True and self.current_person == self.no_of_people - 1:
            fn.pickle_saver(self.people, self.save_name + "/ppl_paths.pkl")
            print "DATA SAVED.Adding more routes for people will not have an effect"
            rospy.signal_shutdown("Done collecting people. On to collecting goals")
        if len(self.people[self.current_person].persons) == self.checkpoints_per_person:
            print "DONE WITH THIS PERSON. \nNew points will be registered for others or none"
    else:
        # Current person is full: advance to the next person and record
        # this click as their first checkpoint.
        self.current_person += 1
        p = Person()
        p.pose.position.x = data.point.x ; p.pose.position.y = data.point.y
        quatern = tf.transformations.quaternion_from_euler(0, 0, np.random.uniform(-np.pi, np.pi, 1))
        p.pose.orientation.z = quatern[-2]; p.pose.orientation.w = quatern[-1]
        p.id = self.current_person ; p.vel = 0 ; p.header.stamp = rospy.get_rostime(); p.header.frame_id = "map"
        self.people[self.current_person].persons.append(p)
        print "NEW PERSON"
        print "Registered point", data.point.x, data.point.y
        print "For Person", self.current_person
    # Publish a green cube marker at the clicked point for RViz feedback.
    # NOTE(review): assumed to run on every callback -- confirm placement.
    marker = Marker()
    marker.type = Marker.CUBE
    marker.header.frame_id = "map"
    marker.header.stamp = rospy.get_rostime()
    marker.pose.position.x = data.point.x; marker.pose.position.y = data.point.y; marker.pose.position.z = 2.5
    marker.scale.x = 0.3; marker.scale.y = 0.3; marker.scale.z = 0.3
    marker.color.a = 1.0; marker.color.g = 1.0
    marker.id = len(self.all_points.markers)
    self.all_points.markers.append(marker)
    self.point_pub.publish(self.all_points)
def __init__(self, mode, goal_directory, people_static=True):
    """Set up the goal-sending experiment node and enter its main loop.

    Loads previously collected goals, snapshots the planner's current
    weights, prepares rosbag recording, wires up all publishers and
    subscribers, then calls random_goal_loop() (which blocks).

    NOTE(review): `mode` is accepted but never used in this
    constructor -- confirm whether callers rely on it.
    """
    # load the goals from the correct file.
    self.goals = fn.pickle_loader(goal_directory + "/goals.pkl")
    self.current_goal = None
    # Number of datapoints to collect (ROS private param, default 5).
    self.max_goals = rospy.get_param("~datapoints_to_collect", 5)
    self.goals_sent = 0
    self.succeded = 0
    self.goal_index = 100
    self.goal_directory = goal_directory
    self.weights = None
    self.planner = MotionPlanner()
    # Snapshot the feature set and cost weights used for this run.
    weight_info = {"feature_set": self.planner.planning_featureset, "weights": self.planner.cost_manager.weights}
    fn.pickle_saver(weight_info, self.goal_directory + "/weights.pkl")
    fn.make_dir(goal_directory + '/traj_data/')
    rospy.on_shutdown(self.shutdown)
    # first bag file to record
    self.bag = rosbag.Bag(goal_directory + '/traj_data/test1.bag', 'w')
    self.tf_listener = tf.TransformListener()
    self.people_static = people_static
    #self.set_initial_position() # initial position set based on the first place in goals
    # action client so send goals
    self.goal_pub = rospy.Publisher('move_base_simple/goal', PoseStamped, queue_size=1)
    # Dealing with cases when people are not detected.
    self.new_people_message = False; self.people_message_timeout = 0;
    self.people_latest = PersonArray()
    self.new_bag = False
    # initialise extra markers and publisers for experiment
    self.initialise_marker()
    self.people_publisher = People_Publisher(self.goal_directory, static=True)
    self.pub1 = rospy.Publisher('visualise_goal', Marker, queue_size=200)
    self.pub2 = rospy.Publisher('visualise_orient', Marker, queue_size=200)
    # Robot position and people subscribers
    self.people_sub = rospy.Subscriber("/person_poses", PersonArray, self.people_callback)
    #self.weights_sub = rospy.Subscriber("weights",Float32MultiArray,self.weights_cb)
    rospy.sleep(4.)
    print "ABOUT TO ENTER THE LOOOOOOOPP__________________________________________________"
    # Blocks here: runs the goal-sending experiment loop.
    self.random_goal_loop()
def pareto_run(self, name):
    """Generate Pareto-front data across planner budgets and resolutions.

    RRT* (with and without caching) is run at 2/5/8/10 s time budgets,
    then A* (MMP) at 0.8/0.5/0.3/0.2 grid resolution; all results are
    pickled together as ``results_<name>.pkl``.
    """
    results = {}
    # One (planning_time, max_planning_time, cache_size) tuple per budget.
    budgets = ((2, 3, 1300), (5, 6, 2300), (8, 9, 2900), (10, 11, 3300))
    for plan_t, max_t, cache in budgets:
        self.planner.planning_time = plan_t
        self.planner.max_planning_time = max_t
        self.cache_size = cache
        results["RLT-NC_%d" % plan_t] = self.learning_loop(self.planner, planner_type="rrtstar")
        results["RLT_%d" % plan_t] = self.learning_loop(self.planner, planner_type="cached_rrt")
    # A* sweep over grid resolutions, coarse to fine.
    for res in (0.8, 0.5, 0.3, 0.2):
        self.planner.astar_res = res
        results["MMP_%s" % res] = self.learning_loop(self.planner, planner_type="astar")
    fn.pickle_saver(results, self.results_dir + "results_" + name + ".pkl")
def __init__(self): self.filepath = os.path.dirname(os.path.abspath(__file__)) self.directory = self.filepath + "/sevilla_test/" print self.directory self.point_sub = rospy.Subscriber("clicked_point", PointStamped, self.point_cb) self.expert_path_pub = rospy.Publisher("expert_path", Path, queue_size=1) self.ppl_pub = rospy.Publisher("person_poses", PersonArray, queue_size=10) self.experiment_data = experiment_load_sevilla(self.directory) ex_dat = [] good_traj = [ 0, 1, 2, 4, 6, 7, 9, 10, 11, 16, 17, 20, 22, 23, 24, 25, 26, 27, 28, 30, 31, 34, 36, 37, 38, 39, 43 ] for i in good_traj: ex_dat.append(self.experiment_data[i]) self.experiment_data = ex_dat fn.pickle_saver(ex_dat, self.directory + "filtered.pkl") print "DATA LOADED--------------------------------" self.visualization_counter = 0
def point_recieved(self, data):
    # Clicked-point callback for recording people's routes.  Each click
    # adds a pose (with random yaw) to the current person's checkpoint
    # list; when the last person's list is full, all routes are pickled
    # and the node shuts down.
    # NOTE(review): indentation reconstructed from a flattened source --
    # confirm the branch grouping against the original file.
    if len(self.people[self.current_person].persons) != self.checkpoints_per_person:
        # Current person still needs checkpoints: add this one.
        p = Person()
        p.pose.position.x = data.point.x
        p.pose.position.y = data.point.y
        # Random heading so the simulated people are not all aligned.
        quatern = tf.transformations.quaternion_from_euler(0, 0, np.random.uniform(-np.pi, np.pi, 1))
        p.pose.orientation.z = quatern[-2]
        p.pose.orientation.w = quatern[-1]
        p.id = self.current_person
        p.vel = 0
        p.header.stamp = rospy.get_rostime()
        p.header.frame_id = "map"
        self.people[self.current_person].persons.append(p)
        print "Registered point", data.point.x, data.point.y
        print "For Person", self.current_person
        statement = len(self.people[self.current_person].persons) == self.checkpoints_per_person
        # All people complete: save everything and shut the node down.
        if statement == True and self.current_person == self.no_of_people - 1:
            fn.pickle_saver(self.people, self.save_name + "/ppl_paths.pkl")
            print "DATA SAVED.Adding more routes for people will not have an effect"
            rospy.signal_shutdown("Done collecting people. On to collecting goals")
        if len(self.people[self.current_person].persons) == self.checkpoints_per_person:
            print "DONE WITH THIS PERSON. \nNew points will be registered for others or none"
    else:
        # Current person is full: move on and start the next person's route.
        self.current_person += 1
        p = Person()
        p.pose.position.x = data.point.x
        p.pose.position.y = data.point.y
        quatern = tf.transformations.quaternion_from_euler(0, 0, np.random.uniform(-np.pi, np.pi, 1))
        p.pose.orientation.z = quatern[-2]
        p.pose.orientation.w = quatern[-1]
        p.id = self.current_person
        p.vel = 0
        p.header.stamp = rospy.get_rostime()
        p.header.frame_id = "map"
        self.people[self.current_person].persons.append(p)
        print "NEW PERSON"
        print "Registered point", data.point.x, data.point.y
        print "For Person", self.current_person
    # Publish a green cube marker for the clicked point (RViz feedback).
    # NOTE(review): assumed to run on every callback -- confirm placement.
    marker = Marker()
    marker.type = Marker.CUBE
    marker.header.frame_id = "map"
    marker.header.stamp = rospy.get_rostime()
    marker.pose.position.x = data.point.x
    marker.pose.position.y = data.point.y
    marker.pose.position.z = 2.5
    marker.scale.x = 0.3
    marker.scale.y = 0.3
    marker.scale.z = 0.3
    marker.color.a = 1.0
    marker.color.g = 1.0
    marker.id = len(self.all_points.markers)
    self.all_points.markers.append(marker)
    self.point_pub.publish(self.all_points)