Example #1
import random
import time

import numpy
import numpy as np  # the code below uses both the "numpy" and "np" names

# Assumed import: RRTTree is the course-provided tree data structure; the
# module name used here is a guess.
from rrt_tree import RRTTree


class RRTPlanner(object):
    def __init__(self, planning_env, bias=0.05, eta=1.0, max_iter=100000):
        self.env = planning_env  # Map Environment
        self.tree = RRTTree(self.env)
        self.bias = bias  # Goal Bias
        self.max_iter = max_iter  # Max Iterations
        self.eta = eta  # Distance to extend

    def Plan(self, start_config, goal_config):
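        # Basic RRT loop: sample a configuration (biased toward the goal),
        # find the nearest tree vertex, steer toward the sample, and add the
        # new vertex and edge if collision-free; stop once the goal is reached.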

        plan_time = time.time()

        # Start with adding the start configuration to the tree.
        self.tree.AddVertex(start_config)

        plan = []
        plan.append(start_config)

        for i in range(self.max_iter):
            x_rand = self.sample(goal_config)
            x_near_id, x_near = self.tree.GetNearestVertex(x_rand)

            x_new = np.array(self.extend(x_near, x_rand))
            # Skip samples that could not be extended or that are in collision.
            if x_new.size == 0 or not self.env.state_validity_checker(x_new):
                continue

            x_new_id = self.tree.AddVertex(x_new)
            self.tree.AddEdge(x_near_id, x_new_id)
            if self.env.compute_distance(x_new, goal_config) < 0.0001:
                # Reached the goal: walk parent pointers back to the root to
                # recover the path, then reverse it so it runs start -> goal.
                end_id = x_new_id
                traj = []
                while end_id != self.tree.GetRootID():
                    traj.append(self.tree.vertices[end_id])
                    end_id = self.tree.edges[end_id]

                plan += traj[::-1]
                break

        plan.append(goal_config)

        cost = 0
        for i in range(len(plan) - 1):
            cost += ((plan[i + 1][0] - plan[i][0])**2 +
                     (plan[i + 1][1] - plan[i][1])**2)**0.5
        plan_time = time.time() - plan_time

        print("Cost: %f" % cost)
        print("Planning Time: %ds" % plan_time)

        return np.concatenate(plan, axis=1)

    def extend(self, x_near, x_rand):
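        # Note: eta scales the displacement toward x_rand (a fractional step),
        # so the default eta = 1.0 attempts to connect all the way to the
        # sampled configuration; the whole segment is collision-checked first.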
        dist = self.env.compute_distance(x_near, x_rand)
        if dist == 0:
            return []
        if not self.env.edge_validity_checker(x_near, x_rand):
            return []

        d_new = x_rand - x_near
        d_x = d_new[0, 0] * self.eta
        d_y = d_new[1, 0] * self.eta

        if self.eta <= dist:
            x_new = np.zeros(x_near.shape)
            x_new[0, 0] = x_near[0, 0] + d_x
            x_new[1, 0] = x_near[1, 0] + d_y
            return x_new
        else:
            return x_rand

    def sample(self, goal):
        # Sample random point from map
        if np.random.uniform() < self.bias:
            return goal

        return self.env.sample()
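
# A minimal usage sketch for RRTPlanner (assumptions: `MapEnvironment` and the
# map file name are hypothetical placeholders for the planning environment
# passed to the constructor; configurations are 2x1 numpy column vectors, as
# implied by the indexing in RRTPlanner.extend):
#
#   env = MapEnvironment("map1.txt")
#   planner = RRTPlanner(env, bias=0.05, eta=1.0)
#   path = planner.Plan(np.array([[1.0], [1.0]]), np.array([[9.0], [9.0]]))
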
class RRTStarPlanner(object):
    def __init__(self, planning_env):
        self.planning_env = planning_env
        self.tree = RRTTree(self.planning_env)
        self.step_size = 5.0  # Maximum extension length when one_step is False
        self.one_step = True  # If True, try to connect all the way to the sample
        self.cost = {}  # Cost-to-come for each vertex id
        self.neighbors = 3  # Number of nearest neighbors for choose-parent / rewire
        self.p_threshold = 0.05  # Goal bias probability

    def Plan(self, start_config, goal_config, epsilon=0.001):
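        # RRT* loop: sample (with goal bias), extend from the nearest vertex,
        # connect the new vertex to the lowest-cost parent among its k nearest
        # neighbors, and rewire those neighbors through it whenever that lowers
        # their cost-to-come.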

        # Initialize an empty plan.
        plan = []

        # Start with adding the start configuration to the tree.
        start = time.time()
        self.tree.AddVertex(start_config)

        plan.append(start_config)
        self.cost[0] = 0

        for i in range(8000):
            while True:
                if random.random() <= self.p_threshold:
                    x_rand = goal_config
                else:
                    x_rand = [
                        random.randint(0, self.planning_env.xlimit[1]),
                        random.randint(0, self.planning_env.ylimit[1])
                    ]
                if self.planning_env.state_validity_checker(x_rand):
                    break

            x_near_id, x_near_vertex, vdist = self.tree.GetNearestVertex(
                x_rand)
            x_new = self.extend(x_near_vertex, x_rand)

            if (not len(x_new)) or (
                    not self.planning_env.state_validity_checker(x_new)):
                continue

            if len(self.tree.vertices) > self.neighbors:
                # Choose parent: connect x_new to the neighbor that minimizes
                # cost-to-come (neighbor cost plus distance to x_new).
                knnIDs, knn_vertices, knnDists = self.tree.GetKNN(
                    x_new, self.neighbors)
                total_dis = []
                for k in range(self.neighbors):
                    total_dis.append(self.cost[knnIDs[k]] + knnDists[k])
                new_father_id = knnIDs[total_dis.index(min(total_dis))]
                x_new_id = self.tree.AddVertex(x_new)
                self.cost[x_new_id] = round(min(total_dis), 2)
                # Rewire: route neighbors through x_new when that lowers their cost.
                for k in range(self.neighbors):
                    if self.cost[x_new_id] + knnDists[k] < self.cost[knnIDs[k]]:
                        self.tree.AddEdge(x_new_id, knnIDs[k])
                        self.cost[knnIDs[k]] = self.cost[x_new_id] + round(
                            knnDists[k], 2)
            else:
                new_father_id = x_near_id
                x_new_id = self.tree.AddVertex(x_new)
                self.cost[x_new_id] = self.cost[new_father_id] + round(
                    vdist, 2)
            self.tree.AddEdge(new_father_id, x_new_id)
            if self.planning_env.compute_distance(x_new,
                                                  goal_config) < epsilon:
                # Walk parent pointers back to the root to recover the path.
                s_id = x_new_id
                temp_list = []
                while s_id != self.tree.GetRootID():
                    temp_list.append(self.tree.vertices[s_id])
                    s_id = self.tree.edges[s_id]
                plan += temp_list[::-1]
                break
        plan.append(goal_config)
        end = time.time()
        print('The time is', str(end - start))
        print('The cost is', self.cost[len(self.tree.vertices) - 1])
        return numpy.array(plan)

    def extend(self, x_near, x_rand):
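        # Two modes: if one_step is set, collision-check the straight segment
        # to x_rand at unit intervals and connect all the way to it; otherwise
        # take a single step of at most self.step_size toward x_rand.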
        x_new = []
        dis = self.planning_env.compute_distance(x_near, x_rand)
        if dis == 0:
            return []
        x_near = numpy.array(x_near, dtype=numpy.float32)
        x_rand = numpy.array(x_rand, dtype=numpy.float32)
        delta_x_new = x_rand - x_near
        if self.one_step:
            step_size = 1.0
            delta_x = delta_x_new[0] * (step_size / dis)
            delta_y = delta_x_new[1] * (step_size / dis)
            for i in range(1, int(dis / step_size)):
                temp = [x_near[0] + delta_x * i, x_near[1] + delta_y * i]
                if not self.planning_env.state_validity_checker(temp):
                    return []
            x_new = x_rand
        else:
            if dis >= self.step_size:
                x_new.append(x_near[0] + delta_x_new[0] *
                             (self.step_size / dis))
                x_new.append(x_near[1] + delta_x_new[1] *
                             (self.step_size / dis))
            else:
                x_new = x_rand
        return x_new

    def ShortenPath(self, path):
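        # Greedy shortcutting: keep extending a straight shortcut from the
        # current anchor waypoint until it collides, then commit the farthest
        # collision-free waypoint and continue the shortcut from there.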
        start_point = 0
        end_point = 1
        new_path = [path[start_point]]
        while end_point < len(path):
            if self.cut(start_point, end_point, path):
                end_point += 1
            else:
                # The shortcut start -> end collides, so commit the farthest
                # waypoint that was still reachable and restart from it.
                new_path.append(path[end_point - 1])
                start_point = end_point - 1
                end_point += 1
        # Keep the final waypoint, which the loop itself never appends.
        new_path.append(path[-1])
        return new_path

    def cut(self, start_point, end_point, path):
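        # Returns True when the straight segment from path[start_point] to
        # path[end_point] is collision-free, checked by sampling intermediate
        # points every 0.1 units along the segment.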
        dis = self.planning_env.compute_distance(path[start_point],
                                                 path[end_point])
        if dis == 0:
            return True
        x_start = numpy.array(path[start_point], dtype=numpy.float32)
        x_end = numpy.array(path[end_point], dtype=numpy.float32)
        delta_x_new = x_end - x_start

        step_size = 0.1
        delta_x = delta_x_new[0] * (step_size / dis)
        delta_y = delta_x_new[1] * (step_size / dis)
        for i in range(1, int(dis / step_size)):
            temp = [x_start[0] + delta_x * i, x_start[1] + delta_y * i]
            if not self.planning_env.state_validity_checker(temp):
                return False
        return True


class RRTPlannerNonholonomic(object):
    def __init__(self,
                 planning_env,
                 bias=0.05,
                 max_iter=10000,
                 num_control_samples=25):
        self.env = planning_env  # Car Environment
        self.tree = RRTTree(self.env)
        self.bias = bias  # Goal Bias
        self.max_iter = max_iter  # Max Iterations
        self.num_control_samples = num_control_samples  # Number of controls to sample

    def Plan(self, start_config, goal_config):
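        # Kinodynamic RRT loop: sample a state (biased toward the goal), find
        # the nearest tree vertex, sample controls and simulate the car forward,
        # keep the resulting state that ends up closest to the sample, and stop
        # once the goal criterion is met.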

        plan_time = time.time()

        # Start with adding the start configuration to the tree.
        self.tree.AddVertex(start_config)

        plan = []
        plan.append(start_config)

        for i in range(self.max_iter):
            x_rand = self.sample(goal_config)
            if not self.env.state_validity_checker(x_rand):
                continue

            x_near_id, x_near = self.tree.GetNearestVertex(x_rand)
            x_new, delta_t, action_t = self.extend(x_near=x_near,
                                                   x_rand=x_rand)

            if x_new is None:
                continue

            x_new_id = self.tree.AddVertex(x_new)
            self.tree.AddEdge(x_near_id, x_new_id)

            if self.env.goal_criterion(x_new, goal_config):
                # Reached the goal region: walk parent pointers back to the
                # root to recover the path, then reverse it.
                end_id = x_new_id
                traj = []
                while end_id != self.tree.GetRootID():
                    traj.append(self.tree.vertices[end_id])
                    end_id = self.tree.edges[end_id]
                plan += traj[::-1]
                break

        plan.append(goal_config)
        cost = 0
        for i in range(len(plan) - 1):
            cost += self.env.compute_distance(plan[i], plan[i + 1])
        plan_time = time.time() - plan_time

        print("Cost: %f" % cost)
        print("Planning Time: %ds" % plan_time)

        return np.concatenate(plan, axis=1)

    def extend(self, x_near, x_rand):
        """ Extend method for non-holonomic RRT

            Generate n control samples, with n = self.num_control_samples
            Simulate trajectories with these control samples
            Compute the closest closest trajectory and return the resulting state (and cost)
        """
        # TODO: YOUR IMPLEMENTATION HERE
        dist = self.env.compute_distance(x_near, x_rand)
        for i in range(self.num_control_samples):
            linear_vel, steer_angle = self.env.sample_action()
            x_new, delta_t = self.env.simulate_car(x_near, x_rand, linear_vel,
                                                   steer_angle)
            if x_new is None:
                continue
            d = self.env.compute_distance(x_new, x_rand)
            if d < dist:
                action_t = np.array([linear_vel, steer_angle])
                return x_new, delta_t, action_t
        return None, None, None

    def sample(self, goal):
        # Sample random point from map
        if np.random.uniform() < self.bias:
            return goal

        return self.env.sample()
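

# A minimal usage sketch for RRTPlannerNonholonomic (assumptions:
# `CarEnvironment` and the map file name are hypothetical placeholders for the
# car environment passed to the constructor; configurations are numpy column
# vectors [x, y, theta]^T):
#
#   env = CarEnvironment("map2.txt")
#   planner = RRTPlannerNonholonomic(env, bias=0.05)
#   path = planner.Plan(start_config, goal_config)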