class LatticeGenerator:
    """Builds and searches a discrete state lattice for lane-change planning.

    States are expanded with a fixed action set (action index 3 is the lane
    change), pruned by kinematic constraints and collision checks against a
    predicted subject-vehicle path, and searched Dijkstra-style.  Lattice
    nodes are keyed by the tuple ``(x, y, speed, time, lane_change_flag)``.
    """

    def __init__(
        self,
        road: "Road",
        actions: "Actions",
        constraints: "Constraints",
        termination_conditions: "TerminationConditions",
        cost_calculator: "CostCalculator",
        start_state: "State",
        ego,
        subject,
    ):
        # NOTE: annotations are strings (forward references) so the class can
        # be defined even when the collaborator types are imported lazily.
        self.road = road
        self.actions = actions
        self.start_state = start_state
        self.constraints = constraints
        self.termination_conditions = termination_conditions
        self.cost_calculator = cost_calculator
        # Fixed half-extents for the overlap test -- TODO confirm units (m).
        self.collision_checker = CollisionChecker(1.75, 1)
        self.ego_vehicle = ego
        self.subject_vehicle = subject

    def sample_random_trajectory(self):
        """Roll out one random trajectory from ``start_state``.

        At most one lane change is allowed per rollout.  Returns the list of
        ``State`` snapshots, beginning with ``start_state`` itself.
        """
        tmp_state = copy.deepcopy(self.start_state)
        trajectory = [self.start_state]
        allow_lane_change = True
        while True:
            # 1. Randomly select an action (lane change offered only once).
            is_lane_change, params = self.actions.random_action(
                allow_lane_change)
            if is_lane_change:
                allow_lane_change = False
            # 2. Apply the action, clamping with constraints after each stage.
            # 2.a Time
            tmp_state.time += params["deltaT"]
            # 2.b Displacement (longitudinal term includes current speed).
            tmp_state.position[0] += tmp_state.speed + params["deltaDDiff"][0]
            tmp_state.position[1] += params["deltaDDiff"][1]
            tmp_state.apply_constraints(self.constraints)
            # 2.c Velocity
            tmp_state.speed += params["deltaV"]
            tmp_state.apply_constraints(self.constraints)
            # 3. Snapshot the new state.
            trajectory.append(copy.deepcopy(tmp_state))
            # 4. Stop once a termination condition fires.
            if tmp_state.check_termination(self.termination_conditions):
                break
        return trajectory

    def sample_and_plot_random_trajectory(self):
        """Sample one random trajectory and draw it over the road plot."""
        random_trajectory = self.sample_random_trajectory()
        self.road.plot()
        x_vals = [state.position[0] for state in random_trajectory]
        y_vals = [state.position[1] for state in random_trajectory]
        # Markers for each visited state, then the connecting polyline.
        for x, y in zip(x_vals, y_vals):
            plt.plot(x, y, "o")
        plt.plot(x_vals, y_vals)
        return random_trajectory

    def check_dense_collision_for_lane_change(
        self,
        start_state,
        end_state,
        subject_path,
        action_params,
        ego_size,
        subject_size,
        num_points=10,
    ):
        """Collision-check a lane change by interpolating intermediate poses.

        Linearly interpolates ``num_points`` positions (and times) between
        the start and end of the maneuver and runs the collision predictor on
        each one.  Returns True as soon as any interpolated pose collides.
        ``action_params`` is accepted for interface compatibility but unused.
        """
        xs = np.linspace(start_state.position[0], end_state.position[0],
                         num_points)
        ys = np.linspace(start_state.position[1], end_state.position[1],
                         num_points)
        ts = np.linspace(start_state.time, end_state.time, num_points)
        for x, y, t in zip(xs, ys, ts):
            # Speed is held constant over the maneuver -- TODO confirm.
            intermediate_state = State(position=[x, y],
                                       speed=start_state.speed,
                                       time=t)
            if self.collision_checker.predict_collision(
                    ego_size, subject_size, intermediate_state,
                    subject_path, True):
                return True
        return False

    def generate_full_lattice(self, start_state, subject_path,
                              lane_change_init_flag):
        """Dijkstra-expand the lattice of states reachable from ``start_state``.

        Returns ``(lattice, state_2_cost, reverse_lattice, goal_state)``:

        * ``lattice``         -- node tuple -> {action index -> child tuple}
        * ``state_2_cost``    -- node tuple -> best settled cost
        * ``reverse_lattice`` -- child tuple -> {action index -> parent tuple}
        * ``goal_state``      -- first settled node with x > 100 after a lane
          change, or None if the search exhausted the queue first.
        """
        lattice = {}
        state_2_cost = {}
        goal_cost = 2**31
        goal_state = None

        # Queue entries are (cost, lane_change_flag, tie, state).  The
        # monotonically increasing tie counter is required: State objects are
        # not orderable, so without it two entries tying on both cost and
        # flag would make the PriorityQueue compare States and raise
        # TypeError.
        tie = 0
        queue = PriorityQueue()
        queue.put((0, lane_change_init_flag, tie, start_state))

        # Bounding-box half-extents are loop-invariant (assumed constant
        # during the expansion), so fetch them once up front.
        ego_size = [
            self.ego_vehicle.bounding_box.extent.x,
            self.ego_vehicle.bounding_box.extent.y,
        ]
        subject_size = [
            self.subject_vehicle.bounding_box.extent.x,
            self.subject_vehicle.bounding_box.extent.y,
        ]

        while not queue.empty():
            cost, has_lane_change_happened, _, curr_state = queue.get()
            curr_state_tuple = (
                curr_state.position[0],
                curr_state.position[1],
                curr_state.speed,
                curr_state.time,
                has_lane_change_happened,
            )
            # Skip stale queue entries already settled at a cheaper cost.
            if (curr_state_tuple in lattice
                    and state_2_cost[curr_state_tuple] <= cost):
                continue
            lattice[curr_state_tuple] = {}
            state_2_cost[curr_state_tuple] = cost

            # Goal test: first settled post-lane-change state past x = 100.
            if has_lane_change_happened and curr_state.position[0] > 100:
                goal_state = curr_state_tuple
                goal_cost = cost
                break

            for i, next_action_params in enumerate(
                    self.actions.action_params_list):
                # Action 3 is the lane change: at most one per trajectory.
                if i == 3 and has_lane_change_happened:
                    continue
                tmp_state = copy.deepcopy(curr_state)
                # Action 2 needs a minimum speed of 8 -- TODO confirm unit.
                if i == 2 and tmp_state.speed < 8:
                    continue

                # Apply action, clamping with constraints after each stage.
                tmp_state.time += next_action_params["deltaT"]
                if i != 3:
                    tmp_state.position[0] += (
                        tmp_state.speed + next_action_params["deltaDDiff"][0])
                else:
                    # Lane change covers speed * 3.6 longitudinally.
                    tmp_state.position[0] += tmp_state.speed * 3.6
                tmp_state.position[1] += next_action_params["deltaDDiff"][1]
                tmp_state.apply_constraints(self.constraints)
                tmp_state.speed += next_action_params["deltaV"]
                tmp_state.apply_constraints(self.constraints)

                # Collision pruning: dense interpolation for the lane change,
                # single end-state check for every other action.
                if i == 3:
                    if self.check_dense_collision_for_lane_change(
                            curr_state,
                            tmp_state,
                            subject_path,
                            next_action_params,
                            ego_size,
                            subject_size,
                    ):
                        print("Dense Collision detected....................")
                        continue
                elif self.collision_checker.predict_collision(
                        ego_size, subject_size, tmp_state, subject_path,
                        i == 3):
                    print("Lane Collision detected....................")
                    continue

                # Children keep the flag once a lane change has happened.
                lane_flag = 1 if (i == 3 or has_lane_change_happened) else 0
                tmp_state_tuple = (
                    tmp_state.position[0],
                    tmp_state.position[1],
                    tmp_state.speed,
                    tmp_state.time,
                    lane_flag,
                )
                lattice[curr_state_tuple][i] = tmp_state_tuple
                cost_of_transition = (
                    self.cost_calculator.compute_transition_cost(
                        next_action_params, curr_state, tmp_state,
                        subject_path))
                # Terminal states (except fresh lane changes) are recorded as
                # edges but never expanded further.
                if (tmp_state.check_termination(self.termination_conditions)
                        and i != 3):
                    continue
                tie += 1
                queue.put((cost + cost_of_transition, lane_flag, tie,
                           tmp_state))

        # Reverse edges: child tuple -> {action -> parent tuple}.  If two
        # parents reach the same child via the same action index, the later
        # parent wins (matches the forward pass's insertion order).
        reverse_lattice = {}
        for parent_state, edges in lattice.items():
            for action, next_state in edges.items():
                reverse_lattice.setdefault(next_state, {})[action] = (
                    parent_state)

        return lattice, state_2_cost, reverse_lattice, goal_state

    def backtrack_from_state(self, reverse_lattice, state_2_cost,
                             end_state_tuple, start_state_tuple):
        """Walk reverse edges from ``end_state_tuple`` back to the start.

        At each step follows the parent with the lowest settled cost.
        Returns ``(path, action_sequence)`` in forward (start-to-end) order.
        Raises KeyError if ``end_state_tuple`` is unreachable from the start
        in ``reverse_lattice``.
        """
        curr_state_tuple = end_state_tuple
        path = [curr_state_tuple]
        action_sequence = []
        while curr_state_tuple != start_state_tuple:
            min_cost_action = -1
            # float("inf") (not a fixed 1e9) so any real parent cost wins and
            # min_cost_action can never stay at its -1 sentinel.
            min_cost = float("inf")
            for action, parent in reverse_lattice[curr_state_tuple].items():
                if state_2_cost[parent] < min_cost:
                    min_cost = state_2_cost[parent]
                    min_cost_action = action
            curr_state_tuple = reverse_lattice[curr_state_tuple][
                min_cost_action]
            path.append(curr_state_tuple)
            action_sequence.append(min_cost_action)
        path.reverse()
        action_sequence.reverse()
        return path, action_sequence

    def plot_some_trajectories(self, lattice, start_state_tuple, k=10):
        """Plot ``k`` random walks through ``lattice`` from the start node."""
        self.road.plot()
        for _ in range(k):
            curr_state_tuple = start_state_tuple
            traj = [curr_state_tuple]
            while curr_state_tuple in lattice:
                # Choose among the actions actually recorded for this node.
                # (Keys may be sparse, e.g. {0, 1, 3}; picking a raw index in
                # range(len(...)) would KeyError on missing indices.)
                available_actions = list(lattice[curr_state_tuple].keys())
                if not available_actions:
                    break  # dead end: node settled with no outgoing edges
                next_action = random.choice(available_actions)
                next_state_tuple = lattice[curr_state_tuple][next_action]
                traj.append(next_state_tuple)
                curr_state_tuple = next_state_tuple
            x_vals = [p[0] for p in traj]
            y_vals = [p[1] for p in traj]
            plt.plot(x_vals, y_vals)
            for x, y in zip(x_vals, y_vals):
                plt.plot(x, y, "o")
        return 0