Example #1
    def __init__(self):
        self.iterations = rospy.get_param("~iterations", 10)
        self.learning_rate = rospy.get_param("~learning_rate", 0.5)
        self.time_margin_factor = rospy.get_param("~time_margin_factor", 1.0)
        self.cache_size = rospy.get_param("~point_cache_size", 2500)
        self.momentum = 0.2
        self.exp_name = rospy.get_param("~experiment_name", "sevilla_test")
        self.session_name = rospy.get_param("~session_name", "sevilla_learn1")
        self.baseline_eval = rospy.get_param("~baseline", False)
        self.path = PARENT
        self.directory = self.path + "/data/" + self.exp_name
        self.results_dir = self.path + "/results/" + self.session_name + "/"
        fn.make_dir(self.results_dir)
        self.experiment_data = experiment_load_sevilla(self.directory)
        self.planner = MotionPlanner()
        self.validation_proportion = rospy.get_param("~validation_proportion",
                                                     0.8)
        self.costlib = self.planner.cost_manager
        self.ppl_pub = rospy.Publisher("person_poses",
                                       PersonArray,
                                       queue_size=10)
        if self.baseline_eval:
            try:
                loaded = fn.pickle_loader(self.directory + "/weights.pkl")
                self.gt_weights = loaded["weights"]
                self.gt_featureset = loaded["feature_set"]
            except Exception:
                # No ground-truth weights available: disable the baseline evaluation.
                self.gt_weights = None
                self.baseline_eval = False

        self.write_learning_params(self.results_dir)
        self.sevilla_test_run("1")
Example #2
    def __init__(self, maxCSpaceJump, timeout):
        '''Constructor for view planner; creates base class instance.'''

        MotionPlanner.__init__(self, maxCSpaceJump, timeout)

        # possible sensor up choices
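        # 20 unit vectors evenly spaced around the x-z plane (the y component is fixed at 0).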
        theta = linspace(0, 2 * pi, 20)
        x = cos(theta)
        y = sin(theta)
        self.upChoices = []
        for i in xrange(len(theta)):
            self.upChoices.append(array([x[i], 0, y[i]]))
Example #3
    def real_data_eval(self):
        self.results = []
        self.experiment_data = experiment_load_sevilla(self.directory)
        for i in range(self.no_of_runs):
            self.results.append(
                fn.pickle_loader(self.results_dir + "results_" + str(i + 1) +
                                 ".pkl"))
        self.planner = MotionPlanner()
        self.costlib = self.planner.cost_manager
        self.ppl_pub = rospy.Publisher("person_poses",
                                       PersonArray,
                                       queue_size=1)
        self.expert_path_pub = rospy.Publisher("expert_path",
                                               Path,
                                               queue_size=1)
        self.initial_paths_pub = rospy.Publisher("initial_weights_path",
                                                 Path,
                                                 queue_size=1)
        self.final_paths_pub = rospy.Publisher("final_weights_path",
                                               Path,
                                               queue_size=1)
        self.point_sub = rospy.Subscriber("clicked_point", PointStamped,
                                          self.point_cb_real)
Example #4
from hpp.corbaserver.practicals.ur5 import Robot
from hpp.corbaserver import ProblemSolver
from hpp.gepetto import ViewerFactory, PathPlayer

robot = Robot('ur5')
ps = ProblemSolver(robot)

vf = ViewerFactory(ps)

vf.loadObstacleModel("hpp_practicals", "ur_benchmark/obstacles", "obstacles")
vf.loadObstacleModel("hpp_practicals", "ur_benchmark/table", "table")
vf.loadObstacleModel("hpp_practicals", "ur_benchmark/wall", "wall")

v = vf.createViewer()

q1 = [0, -1.57, 1.57, 0, 0, 0]
q2 = [0.2, -1.57, -1.8, 0, 0.8, 0]
q3 = [1.57, -1.57, -1.8, 0, 0.8, 0]

v(q2)
v(q3)

ps.setInitialConfig(q2)
ps.addGoalConfig(q3)

from motion_planner import MotionPlanner

m = MotionPlanner(robot, ps)
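# solveBiRRT runs the bidirectional RRT implemented in motion_planner and returns the id
# of the computed path (assumed from its use with PathPlayer below).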
pathId = m.solveBiRRT(maxIter=1000)

pp = PathPlayer(v)
#pp (pathId)
Example #5
    grid, n_offset, e_offset = load_grid_from_csv("colliders.csv",
                                                  FLIGHT_ALTITUDE)
    start_grid = (int(drone.local_position[0] - n_offset),
                  int(drone.local_position[1] - e_offset))
    goal_grid = get_random_point_from_grid(grid)

    # Plot grid and goal
    plt.matshow(grid, origin='lower')
    plt.plot(start_grid[1], start_grid[0], 'go')
    plt.plot(goal_grid[1], goal_grid[0], 'rx')
    plt.show()

    # Path planner
    print(f"Start: {start_grid}, Goal: {goal_grid}")

    mp = MotionPlanner(grid)
    success, path, cost = mp.run_planner(start_grid, goal_grid)
    if success:
        pruned_path = np.array(mp.prune_path(path))
        plt.matshow(grid, origin='lower')
        plt.plot(pruned_path[:, 1], pruned_path[:, 0], 'b')
        plt.show()
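        # Convert grid cells back to local coordinates; each waypoint is [north, east, altitude, heading].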
        waypoints = [[p[0] + n_offset, p[1] + e_offset, FLIGHT_ALTITUDE, 0]
                     for p in pruned_path]
        print(waypoints)
        drone.waypoints = waypoints

        # Draw position
        plt.matshow(grid, origin='lower')
        plt.plot(pruned_path[:, 1], pruned_path[:, 0], 'b')
        while drone.in_mission:
Example #6
class Learner(object):
    def __init__(self):
        self.iterations = rospy.get_param("~iterations", 10)
        self.learning_rate = rospy.get_param("~learning_rate", 0.5)
        self.time_margin_factor = rospy.get_param("~time_margin_factor", 1.0)
        self.cache_size = rospy.get_param("~point_cache_size", 2500)
        self.momentum = 0.2
        self.exp_name = rospy.get_param("~experiment_name", "sevilla_test")
        self.session_name = rospy.get_param("~session_name", "sevilla_learn1")
        self.baseline_eval = rospy.get_param("~baseline", False)
        self.path = PARENT
        self.directory = self.path + "/data/" + self.exp_name
        self.results_dir = self.path + "/results/" + self.session_name + "/"
        fn.make_dir(self.results_dir)
        self.experiment_data = experiment_load_sevilla(self.directory)
        self.planner = MotionPlanner()
        self.validation_proportion = rospy.get_param("~validation_proportion",
                                                     0.8)
        self.costlib = self.planner.cost_manager
        self.ppl_pub = rospy.Publisher("person_poses",
                                       PersonArray,
                                       queue_size=10)
        if self.baseline_eval:
            try:
                loaded = fn.pickle_loader(self.directory + "/weights.pkl")
                self.gt_weights = loaded["weights"]
                self.gt_featureset = loaded["feature_set"]
            except Exception:
                # No ground-truth weights available: disable the baseline evaluation.
                self.gt_weights = None
                self.baseline_eval = False

        self.write_learning_params(self.results_dir)
        self.sevilla_test_run("1")
        # shuffle(self.experiment_data)
        # self.pareto_run("10")
        # shuffle(self.experiment_data)
        # self.pareto_run("11")
        # shuffle(self.experiment_data)
        # self.pareto_run("12")
        # shuffle(self.experiment_data)
        # self.pareto_run("13")
        # shuffle(self.experiment_data)
        # self.pareto_run("14")
        # shuffle(self.experiment_data)
        # self.pareto_run("15")
        # shuffle(self.experiment_data)
        # self.pareto_run("16")
        # shuffle(self.experiment_data)
        # self.pareto_run("17")
        # self.time_margin_run("tm_11")
        # shuffle(self.experiment_data)
        # self.time_margin_run("tm_22")
        # shuffle(self.experiment_data)
        # self.time_margin_run("tm_33")
        # shuffle(self.experiment_data)
        # self.time_margin_run("tm_44")
        # shuffle(self.experiment_data)
        # self.time_margin_run("tm_55")
        #shuffle(self.experiment_data)
        #shuffle(self.experiment_data)
        #self.pareto_run("2")
        #shuffle(self.experiment_data)
        #self.pareto_run("3")
        #shuffle(self.experiment_data)
        #self.pareto_run("4")
        #shuffle(self.experiment_data)
        #self.pareto_run("5")
        #self.time_margin_run("1")
        #shuffle(self.experiment_data)
        #self.time_margin_run("2")
        #shuffle(self.experiment_data)
        #self.time_margin_run("3")
        #shuffle(self.experiment_data)
        #self.time_margin_run("4")
        #shuffle(self.experiment_data)
        #self.time_margin_run("5")
    def write_learning_params(self, directory):
        f = open(directory + "readme", "w")
        f.write("Learning parameters used: \n------ \n \n")
        f.write("Experiment name:  " + self.exp_name + "\n")
        f.write("Iteration:  " + str(self.iterations) + "\n")
        f.write("learning_rate:  " + str(self.learning_rate) + "\n")
        f.write("validation_proportion:  " + str(self.validation_proportion) +
                "\n")
        f.close()

    def sevilla_test_run(self, name):
        results = {}
        self.planner.planning_time = 3
        self.planner.max_planning_time = 5
        self.cache_size = 2500
        results["RLT_NC_5"] = self.learning_loop(self.planner,
                                                 planner_type="rrtstar")
        results["RLT_5"] = self.learning_loop(self.planner,
                                              planner_type="cached_rrt")
        fn.pickle_saver(results, self.results_dir + "results_" + name + ".pkl")

    def pareto_run(self, name):
        # Pareto front run involves RRT at 2,5,8,10 seconds
        results = {}
        self.planner.planning_time = 2
        self.planner.max_planning_time = 3
        self.cache_size = 1300
        results["RLT-NC_2"] = self.learning_loop(self.planner,
                                                 planner_type="rrtstar")
        results["RLT_2"] = self.learning_loop(self.planner,
                                              planner_type="cached_rrt")

        self.planner.planning_time = 5
        self.planner.max_planning_time = 6
        self.cache_size = 2300
        results["RLT-NC_5"] = self.learning_loop(self.planner,
                                                 planner_type="rrtstar")
        results["RLT_5"] = self.learning_loop(self.planner,
                                              planner_type="cached_rrt")

        self.planner.planning_time = 8
        self.planner.max_planning_time = 9
        self.cache_size = 2900
        results["RLT-NC_8"] = self.learning_loop(self.planner,
                                                 planner_type="rrtstar")
        results["RLT_8"] = self.learning_loop(self.planner,
                                              planner_type="cached_rrt")

        self.planner.planning_time = 10
        self.planner.max_planning_time = 11
        self.cache_size = 3300
        results["RLT-NC_10"] = self.learning_loop(self.planner,
                                                  planner_type="rrtstar")
        results["RLT_10"] = self.learning_loop(self.planner,
                                               planner_type="cached_rrt")

        # Then astar for 0.8
        self.planner.astar_res = 0.8
        results["MMP_0.8"] = self.learning_loop(self.planner,
                                                planner_type="astar")
        # astar for 0.5
        self.planner.astar_res = 0.5
        results["MMP_0.5"] = self.learning_loop(self.planner,
                                                planner_type="astar")

        self.planner.astar_res = 0.3
        results["MMP_0.3"] = self.learning_loop(self.planner,
                                                planner_type="astar")

        self.planner.astar_res = 0.2
        results["MMP_0.2"] = self.learning_loop(self.planner,
                                                planner_type="astar")
        #results = {"star_0.8":results_astar_08}
        fn.pickle_saver(results, self.results_dir + "results_" + name + ".pkl")

    def time_margin_run(self, name):
        # Time-margin run: RRT at 5 seconds, comparing a reduced learning-time budget (factor 0.7) against the full budget
        results = {}
        self.planner.planning_time = 5
        self.planner.max_planning_time = 5

        self.time_margin_factor = 0.7
        results["RLT-NC-TM"] = self.learning_loop(self.planner,
                                                  planner_type="rrtstar")

        self.time_margin_factor = 1
        self.cache_size = 1800
        results["RLT-TM"] = self.learning_loop(self.planner,
                                               planner_type="cached_rrt")

        self.cache_size = 2300
        self.time_margin_factor = 1.
        results["RLT-NC"] = self.learning_loop(self.planner,
                                               planner_type="rrtstar")
        results["RLT"] = self.learning_loop(self.planner,
                                            planner_type="cached_rrt")

    def single_run(self, name):

        results_rrtstar = self.learning_loop(self.planner,
                                             planner_type="rrtstar")
        # Then astar for 0.8
        self.planner.astar_res = 0.8
        results_astar_08 = self.learning_loop(self.planner,
                                              planner_type="astar")
        # astar for 0.4

        # astar for 0.2
        #self.planner.astar_res = 0.2
        #results_astar_02 = self.learning_loop(self.planner,planner_type="astar")
        # cached RRT star
        results_cached_rrt = self.learning_loop(self.planner,
                                                planner_type="cached_rrt")

        self.planner.astar_res = 0.3
        results_astar_03 = self.learning_loop(self.planner,
                                              planner_type="astar")
        results = {
            "rrtstar": results_rrtstar,
            "astar_0.8": results_astar_08,
            "astar_0.3": results_astar_03,
            "cached_rrt": results_cached_rrt
        }
        #results = {"star_0.8":results_astar_08}
        fn.pickle_saver(results, self.results_dir + "results_" + name + ".pkl")

        #self.robot_pose_srv = rospy.ServiceProxy('change_robot_position', positionChange)
        #self.ppl_pose_srv = rospy.ServiceProxy('change_people_position', ppl_positionChange, self.handle_ppl_position_change)

    def learning_loop(self, motion_planner, planner_type="rrtstar"):
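        """Run the weight-learning loop for one planner configuration and return per-iteration similarity, cost-difference, timing and weight data."""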

        # some time bookkeeping
        if planner_type == "rrtstar" or planner_type == "astar":
            motion_planner.planner = planner_type
            time_to_cache = 0
        elif planner_type == "cached_rrt":
            motion_planner.planner = "rrtstar"
            cached_trees = []
            time_to_cache = []

        # Initialise weights.
        self.learner_weights = np.zeros(self.costlib.weights.shape[0])
        self.learner_weights[1] = 1
        self.learner_weights[0] = 1  # some cost for distance
        validating = False
        similarity = []
        cost_diff = []
        time_taken = []
        initial_paths = []
        final_paths = []
        all_feature_sums = []
        gradient_store = []
        for n, i in enumerate(self.experiment_data):
            self.ppl_pub.publish(i.people)
            rospy.sleep(0.5)
            i.feature_sum = self.feature_sums(i.path_array, i.goal_xy)

            print "FEATURE SUM", i.feature_sum
            all_feature_sums.append(i.feature_sum / len(i.path_array))
            if self.baseline_eval == True:
                self.costlib.set_featureset(self.gt_featureset)
                i.gt_feature_sum = self.feature_sums(i.path_array, i.goal_xy)
                i.path_cost = np.dot(self.gt_weights, i.gt_feature_sum)
                self.costlib.set_featureset(self.planner.planning_featureset)
        feature_sum_variance = np.var(np.array(all_feature_sums), axis=0)
        print "FEATURE SUM VARIANCE", feature_sum_variance

        prev_grad = 0  # carried across iterations by the momentum term
        for iteration in range(self.iterations):
            print "####################ITERATION", iteration
            iter_similarity = []
            iter_cost_diff = []
            iter_time = []

            iter_grad = np.zeros(self.learner_weights.shape[0])
            self.costlib.weights = self.learner_weights
            self.initial_weights = np.copy(self.learner_weights)
            for n, i in enumerate(self.experiment_data):
                print "DATA POINT", n
                print "ITERATION", iteration
                if n >= len(self.experiment_data) * (
                        1 - self.validation_proportion):
                    validating = True
                else:
                    validating = False
                print "CHANGING POSITION"
                self.planner.publish_empty_path()

                config_change(i.path.poses[0], i.people)
                rospy.sleep(1.)
                config_change(i.path.poses[0], i.people)
                rospy.sleep(1.)
                self.planner._goal = i.goal

                if validating == False and planner_type == "cached_rrt" and iteration == 0:
                    tic = time.time()
                    cached_trees.append(
                        motion_planner.make_cached_rrt(
                            motion_planner.sample_goal_bias,
                            points_to_cache=self.cache_size))
                    toc = time.time()
                    time_to_cache.append(toc - tic)

                if validating == False:
                    self.costlib.ref_path_nn = i.nbrs
                    tic = time.time()
                    if planner_type != "cached_rrt":
                        # if there is a time margin factor it will be used during the learning planning step
                        motion_planner.planning_time *= self.time_margin_factor
                        pose_path_la, array_path_la = motion_planner.plan()
                    else:
                        pose_path_la, array_path_la = motion_planner.plan_cached_rrt(
                            cached_trees[n])
                    toc = time.time()
                    iter_time.append(toc - tic)

                self.costlib.ref_path_nn = None
                # recover time margin factor to normal planning time.
                motion_planner.planning_time *= 1 / self.time_margin_factor
                pose_path, array_path = motion_planner.plan()

                if iteration == 0:
                    initial_paths.append(array_path)
                elif iteration == self.iterations - 1:
                    final_paths.append(array_path)

                rospy.sleep(0.5)

                if validating == False:
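                    # Gradient contribution: expert feature counts minus the learner path's
                    # feature counts (the * 0.00 factor leaves the weight-regularisation term disabled).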
                    path_feature_sum_la = self.feature_sums(
                        array_path_la, i.goal_xy)
                    iter_grad += self.learner_weights * 0.00 + (
                        i.feature_sum - path_feature_sum_la
                    )  #/feature_sum_variance
                    print "GRADIENT", (i.feature_sum - path_feature_sum_la
                                       )  #/feature_sum_variance#

                path_feature_sum = self.feature_sums(array_path, i.goal_xy)
                # Baseline evaluation if possible.
                if self.baseline_eval == True:
                    # Compute the feature sums under the ground-truth feature set.
                    self.costlib.set_featureset(self.gt_featureset)
                    gt_feature_sum = self.feature_sums(array_path, i.goal_xy)
                    path_base_cost = np.dot(self.gt_weights, gt_feature_sum)
                    iter_cost_diff.append(path_base_cost - i.path_cost)
                    self.costlib.set_featureset(
                        self.planner.planning_featureset)
                print "COST DIFFERENCE", iter_cost_diff

                print "PATH FEATURE SUM", path_feature_sum
                iter_similarity.append(
                    self.get_similarity(i.path_array, array_path))
            gradient_store.append(iter_grad /
                                  (len(self.experiment_data) *
                                   (1 - self.validation_proportion)))
            grad = (1 - self.momentum) * iter_grad / (
                len(self.experiment_data) *
                (1 - self.validation_proportion)) + self.momentum * prev_grad
            self.learner_weights = self.learner_weights - self.learning_rate * grad
            prev_grad = grad
            idx = np.where(self.learner_weights < 0)
            self.learner_weights[idx] = 0
            print "WEIGHTS", self.learner_weights
            print "GRADIENT ", grad
            similarity.append(np.array(iter_similarity))
            cost_diff.append(iter_cost_diff)
            time_taken.append(iter_time)

        print cost_diff
        results = {
            "similarity": similarity,
            "cost_diff": cost_diff,
            "weights_final": self.learner_weights,
            "weights_initial": self.initial_weights,
            "gradients": gradient_store,
            "initial_paths": initial_paths,
            "final_paths": final_paths,
            "validation_proportion": self.validation_proportion,
            "time_to_cache": time_to_cache,
            "time_per_iter": time_taken
        }

        return results

    def feature_sums(self, xy_path, goal_xy, interpolation=True):
        # calculates path feature sums.
        if interpolation:
            xy_path = interpolate_path(xy_path, resolution=0.005)
        feature_sum = 0
        for i in range(len(xy_path) - 1):
            f1 = self.costlib.featureset(xy_path[i], goal_xy)
            f2 = self.costlib.featureset(xy_path[i + 1], goal_xy)
            feature_sum += self.costlib.edge_feature(
                f1, f2, xy_path[i], xy_path[i + 1])
        return feature_sum

    def get_similarity(self, example_path, learner_path):
        # inputs are the example and learner path in xy.
        nbrs = NearestNeighbors(n_neighbors=1,
                                algorithm="kd_tree",
                                leaf_size=30)
        nbrs.fit(np.array(learner_path))
        (dist, idx) = nbrs.kneighbors(example_path)
        return np.sum(dist)

    def test_similarity(self):
        ex1 = self.experiment_data[0].path_array
        ex2 = self.experiment_data[1].path_array
        sim = self.get_similarity(ex1, ex2)
        return sim

    def test_feature_sum(self):
        self.ppl_pub.publish(self.experiment_data[0].people[0])
        rospy.sleep(0.1)
        fs = self.feature_sums(self.experiment_data[0].path_array,
                               self.experiment_data[0].goal_xy)
        return fs
Example #7
class Evaluator(object):
    def __init__(self):
        self.exp_name = rospy.get_param("~experiment_name", "posq_test_exp3")
        self.session_name = rospy.get_param("~session_name",
                                            "posq_learn_small2")
        self.real_data = rospy.get_param("~real_data", True)
        self.no_of_runs = rospy.get_param("number_of_runs", 2)
        self.filepath = PARENT
        print PARENT, CURRENT
        self.directory = self.filepath + "/data/" + self.exp_name
        self.results_dir = self.filepath + "/results/" + self.session_name + "/"
        #self.real_data_eval()
        self.experiment_data = experiment_load2(self.directory)
        self.gt_weights = fn.pickle_loader(self.directory + "/weights.pkl")
        self.results = []
        # print self.results_dir
        # print "gothere"
        for i in range(self.no_of_runs):
            self.results.append(
                fn.pickle_loader(self.results_dir + "results_" + str(i + 1) +
                                 ".pkl"))
        # self.results = self.results[:3]
        # self.no_of_runs=2
        self.plots()
        # self.planner = MotionPlanner()
        # self.costlib = self.planner.cost_manager
        # self.ppl_pub =  rospy.Publisher("person_poses",PersonArray,queue_size = 10)
        # self.expert_path_pub = rospy.Publisher("expert_path",Path,queue_size = 1)
        # self.initial_paths_pub = rospy.Publisher("initial_weights_path",Path,queue_size = 1)
        # self.final_paths_pub = rospy.Publisher("final_weights_path",Path,queue_size = 1)
        # self.point_sub = rospy.Subscriber("clicked_point",PointStamped,self.point_cb)
        # self.astar_03_path_pub = rospy.Publisher("astar03_path",Path,queue_size = 1)
        # self.astar_08_path_pub = rospy.Publisher("astar08_path",Path,queue_size = 1)
        # self.crlt_path_pub = rospy.Publisher("crlt_path",Path,queue_size = 1)
        self.visualization_counter = 0

    def real_data_eval(self):
        self.results = []
        self.experiment_data = experiment_load_sevilla(self.directory)
        for i in range(self.no_of_runs):
            self.results.append(
                fn.pickle_loader(self.results_dir + "results_" + str(i + 1) +
                                 ".pkl"))
        self.planner = MotionPlanner()
        self.costlib = self.planner.cost_manager
        self.ppl_pub = rospy.Publisher("person_poses",
                                       PersonArray,
                                       queue_size=1)
        self.expert_path_pub = rospy.Publisher("expert_path",
                                               Path,
                                               queue_size=1)
        self.initial_paths_pub = rospy.Publisher("initial_weights_path",
                                                 Path,
                                                 queue_size=1)
        self.final_paths_pub = rospy.Publisher("final_weights_path",
                                               Path,
                                               queue_size=1)
        self.point_sub = rospy.Subscriber("clicked_point", PointStamped,
                                          self.point_cb_real)
        # self.crlt_path_pub = rospy.Publisher("crlt_path",Path,queue_size = 1)
    def get_multiple_runs(self, method):
        time_taken = []
        for i in range(self.no_of_runs):
            time_taken.append(
                np.sum(self.results[i][method]["time_per_iter"]) +
                self.results[i][method]["time_to_cache"])
            # Number of training examples; the remainder of each cost_diff row is validation data.
            train_size = int(
                np.array(self.results[i][method]["cost_diff"]).shape[1] *
                (1 - self.results[i][method]["validation_proportion"]))
            if i == 0:
                cost_diff_val = np.mean(np.array(
                    self.results[i][method]["cost_diff"])[:, train_size:],
                                        axis=1)
                cost_diff_train = np.mean(np.array(
                    self.results[i][method]["cost_diff"])[:, :train_size],
                                          axis=1)

            else:
                cost_diff_val = np.vstack([
                    cost_diff_val,
                    np.mean(np.array(
                        self.results[i][method]["cost_diff"])[:, train_size:],
                            axis=1)
                ])
                cost_diff_train = np.vstack([
                    cost_diff_train,
                    np.mean(np.array(
                        self.results[i][method]["cost_diff"])[:, :train_size],
                            axis=1)
                ])
        # return the mean and standard error across runs
        val_mean = np.mean(cost_diff_val, axis=0)
        val_std_err = np.std(cost_diff_val, axis=0) / np.sqrt(self.no_of_runs)
        train_mean = np.mean(cost_diff_train, axis=0)
        train_std_err = np.std(cost_diff_train, axis=0) / np.sqrt(
            self.no_of_runs)
        time_mean = np.mean(time_taken)
        time_std = np.std(time_taken)
        return train_mean, train_std_err, val_mean, val_std_err, time_mean, time_std

    def plots(self):
        results_for_plots = []
        for r in self.results:
            print r.keys()
        for method in self.results[0].keys():
            results_for_plots.append(self.get_multiple_runs(method))

        f = plt.figure()
        ax = f.add_subplot(111)
        for n, method in enumerate(self.results[0].keys()):
            res = results_for_plots[n]
            ax.errorbar(range(len(res[0])), res[0], label=method, yerr=res[1])
        plt.legend(bbox_to_anchor=(1., 1, 0., -0.06), loc=1)
        ax.set_ylabel("Average Cost difference",
                      fontweight='bold',
                      fontsize=14)
        ax.set_xlabel("Iterations", fontweight='bold', fontsize=14)
        f.savefig(self.results_dir + "/cost_diff_train.png")

        f = plt.figure()
        ax = f.add_subplot(111)
        for n, method in enumerate(self.results[0].keys()):
            res = results_for_plots[n]
            ax.errorbar(range(len(res[2])), res[2], label=method, yerr=res[3])
        plt.legend(bbox_to_anchor=(1., 1, 0., -0.06), loc=1)
        ax.set_ylabel("Average Cost difference",
                      fontweight='bold',
                      fontsize=14)
        ax.set_xlabel("Iterations", fontweight='bold', fontsize=14)
        f.savefig(self.results_dir + "/cost_diff_val.png")

        f = plt.figure()
        ax = f.add_subplot(111)
        bar_vals = []
        labl = []
        for n, method in enumerate(self.results[0].keys()):
            labl.append(method)
            res = results_for_plots[n]

            bar_vals.append(np.var(np.concatenate((res[0], res[1]))))
        N = len(self.results[0].keys())
        ind = np.arange(N)
        width = 0.35
        rects1 = ax.bar(ind, bar_vals, width, color='r')
        ax.set_xticks(ind)
        ax.set_xticklabels(labl)
        plt.legend(bbox_to_anchor=(1., 1, 0., -0.06), loc=1)
        ax.set_ylabel("Variance across iterations",
                      fontweight='bold',
                      fontsize=14)
        f.savefig(self.results_dir + "/var.png")

        f = plt.figure()
        ax = f.add_subplot(111)
        for n, method in enumerate(self.results[0].keys()):
            if method[:4] == "RLT_":
                col = "r"
            elif method[:4] == "RLT-":
                col = "b"
            else:
                col = "g"
            if method != "p":
                res = results_for_plots[n]
                print res
                plt.errorbar(res[0][-1],
                             res[-2],
                             label=method,
                             fmt="o",
                             c=col,
                             xerr=res[1][-1])
                plt.annotate(method, xy=(res[0][-1] + 0.1, res[-2] + 0.1))
        #plt.legend(bbox_to_anchor=(1., 1,0.,-0.06),loc=1)
        ax.set_ylabel("Time Taken (seconds)", fontweight='bold', fontsize=14)
        ax.set_xlabel("Average Cost Difference",
                      fontweight='bold',
                      fontsize=14)
        f.savefig(self.results_dir + "/pareto_front.png")

        f = plt.figure()
        ax = f.add_subplot(111)
        for n, method in enumerate(self.results[0].keys()):
            if method[:4] == "RLT_":
                col = "r"
            elif method[:4] == "RLT-":
                col = "b"
            else:
                col = "g"
            if method != "p":
                res = results_for_plots[n]
                plt.errorbar(res[2][-1],
                             res[-2],
                             label=method,
                             fmt="o",
                             c=col,
                             xerr=res[3][-1])
                plt.annotate(method, xy=(res[2][-1] + 0.1, res[-2] + 0.1))
        #plt.legend(bbox_to_anchor=(1., 1,0.,-0.06),loc=1)
        ax.set_ylabel("Time Taken (seconds)", fontweight='bold', fontsize=14)
        ax.set_xlabel("Average Cost Difference",
                      fontweight='bold',
                      fontsize=14)
        f.savefig(self.results_dir + "/pareto_front_val.png")

        # f = plt.figure()
        # ax = f.add_subplot(111)

        # ax.errorbar(range(len(rrt_res[0])),rrt_res[0],label="RLT*-NC",yerr=rrt_res[1])
        # ax.errorbar(range(len(a08_res[0])),a08_res[0],label="MMP 0.8m",c="g",yerr=a08_res[1])
        # ax.errorbar(range(len(a03_res[0])),a03_res[0],label="MMP 0.3m",c="m",yerr=a03_res[1])
        # #ax.plot(avg_astar02,label="A* 0.2m",c="g")
        # ax.errorbar(range(len(crrt_res[0])),crrt_res[0],label="RLT*",c="r",yerr=crrt_res[1])
        # plt.legend(bbox_to_anchor=(1., 1,0.,-0.06),loc=1)
        # ax.set_ylabel("Average Cost difference",fontweight = 'bold',fontsize = 14)
        # ax.set_xlabel("Iterations",fontweight='bold',fontsize = 14)
        # f.savefig(self.results_dir+"/cost_diff_train.png")

        # f = plt.figure()
        # ax = f.add_subplot(111)
        # ax.errorbar(range(len(rrt_res[2])),rrt_res[2],label="RLT*-NC",yerr=rrt_res[3])
        # ax.errorbar(range(len(a08_res[2])),a08_res[2],label="MMP 0.8m",c="g",yerr=a08_res[3])
        # ax.errorbar(range(len(a03_res[2])),a03_res[2],label="MMP 0.3m",c="m",yerr=a03_res[3])
        # #ax.plot(avg_astar02,label="A* 0.2m",c="g")
        # ax.errorbar(range(len(crrt_res[2])),crrt_res[2],label="RLT*",c="r",yerr=crrt_res[3])
        # plt.legend(bbox_to_anchor=(1., 1,0.,-0.06),loc=1)
        # ax.set_ylabel("Average Cost difference",fontweight = 'bold',fontsize = 14)
        # ax.set_xlabel("Iterations",fontweight='bold',fontsize = 14)
        # f.savefig(self.results_dir+"/cost_diff_val.png")

    def point_cb(self, msg):
        current_datapoint = self.experiment_data[self.visualization_counter]
        self.ppl_pub.publish(current_datapoint.people)
        self.config_change(current_datapoint.path.poses[0],
                           current_datapoint.people)
        # Publish expert datapoint
        for i in current_datapoint.path.poses:
            i.header.frame_id = "map"
            i.header.stamp = rospy.get_rostime()

        self.costlib.weights = self.results[0]["RLT_10"]["weights_final"]
        #self.costlib.weights = self.gt_weights
        self.expert_path_pub.publish(path_to_pose(
            current_datapoint.path_array))
        # Plan using initial weights
        self.initial_paths_pub.publish(
            path_to_pose(self.results[0]["RLT_10"]["initial_paths"][
                self.visualization_counter]))
        self.crlt_path_pub.publish(
            path_to_pose(self.results[0]["RLT_10"]["final_paths"][
                self.visualization_counter]))
        self.astar_08_path_pub.publish(
            path_to_pose(self.results[0]["MMP_0.8"]["final_paths"][
                self.visualization_counter]))
        self.astar_03_path_pub.publish(
            path_to_pose(self.results[0]["MMP_0.3"]["final_paths"][
                self.visualization_counter]))
        if self.visualization_counter < len(self.experiment_data) - 2:
            self.visualization_counter += 1
        else:
            self.visualization_counter = 0

    def point_cb_real(self, msg):
        current_datapoint = self.experiment_data[self.visualization_counter]
        self.ppl_pub.publish(current_datapoint.people)
        self.config_change(current_datapoint.path.poses[0],
                           current_datapoint.people)
        # Publish expert datapoint
        for i in current_datapoint.path.poses:
            i.header.frame_id = "map"
            i.header.stamp = rospy.get_rostime()
        self.costlib.weights = self.results[0]["RLT_5"]["weights_final"]
        self.planner._goal = current_datapoint.goal
        self.planner.update_costmap()
        #self.costlib.weights = self.gt_weights
        self.expert_path_pub.publish(path_to_pose(
            current_datapoint.path_array))
        # Plan using initial weights
        self.initial_paths_pub.publish(
            path_to_pose(self.results[0]["RLT_NC_5"]["initial_paths"][
                self.visualization_counter]))
        self.final_paths_pub.publish(
            path_to_pose(self.results[0]["RLT_NC_5"]["final_paths"][
                self.visualization_counter]))
        if self.visualization_counter < len(self.experiment_data) - 2:
            self.visualization_counter += 1
        else:
            self.visualization_counter = 0

    def config_change(self, robotPose, personPoses):
        rospy.wait_for_service('change_robot_position')
        try:
            robot_pos = rospy.ServiceProxy('change_robot_position',
                                           positionChange)
            print "HERE"
            robot_pos(robotPose.pose)
        except rospy.ServiceException, e:
            print "Service call failed: %s" % e

        rospy.wait_for_service('change_people_position')
        try:
            ppl_pos = rospy.ServiceProxy('change_people_position',
                                         ppl_positionChange)
            ppl_pos(personPoses)
        except rospy.ServiceException, e:
            print "Service call failed: %s" % e
Example #8
        board = get_new_board()
        draw = ImageDraw.Draw(board)
        obstacle_list = create_obstacles(num_obstacle)
        draw_obstacles(draw, obstacle_list)
        image = np.asarray(board)
        empty_coords = get_empty_coords(image, img_size)
        robot_coord = choose_random_empty_coords(empty_coords)
        previous_robot_coord = np.copy(robot_coord)
        robot_rect = generate_robot_rect(robot_coord)
        draw = draw_robot(draw, robot_rect)
        robot_angle = choose_random_angle()
        angle_list = np.linspace(0, num_angles - 1, num_angles)
        # video_str = get_time_str()
        video_str = f'seed_{seed_val}_angles_{direction_val}'
        out = init_video_writer(video_str)
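        # Planner parameters: max_random and min_dist bound the random step size and the
        # allowed clearance (inferred from the argument names).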
        mp = MotionPlanner(max_random, min_dist)
        previous_robot_coords = []
        for i in range(num_step):
            print(i)
            board = get_new_board()
            draw = ImageDraw.Draw(board)
            draw_obstacles(draw, obstacle_list)
            image = np.asarray(board)
            dist_map, end_points = measure_dists(image, robot_coord,
                                                 angle_vectors)
            idx = get_idx_around_angle(robot_angle)
            visible_dists = dist_map[idx]

            available_angle_vectors = angle_vectors[idx]
            available_angle_list = angle_list[idx]
            draw = draw_dists(draw, robot_coord, end_points, idx)
Example #9
#!/usr/bin/env python

from motion_planner import MotionPlanner

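# The constructor arguments look like a speed setting plus the robot controller's IP address
# and TCP port; the exact signature depends on the local motion_planner module.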
mp = MotionPlanner(20, "169.254.74.58", 30000)
mp.move_to_home()
mp.move_to_grabbing_points(80, -500, 100, -500, 50)