Example #1
def defending(agent):
    """"Method that gives output for the defending strategy"""
    target = defending_target(agent)
    agent.drive.target = target
    distance = distance_2d(agent.info.my_car.location, target)
    vf = velocity_forward(agent.info.my_car)
    dodge_overshoot = distance < (abs(vf) + 500) * 1.5  # target too close to dodge without flying past it
    agent.drive.speed = get_speed(agent, target)
    agent.drive.step(agent.info.time_delta)
    agent.controls = agent.drive.controls
    if can_dodge(agent, target):
        agent.step = Step.Dodge
        agent.dodge = Dodge(agent.info.my_car)
        agent.dodge.duration = 0.1
        agent.dodge.target = target
    if not agent.defending:
        agent.step = (Step.Catching if agent.ball_bouncing else Step.Shooting)
    elif vf < -900 and (not dodge_overshoot or distance < 600):
        agent.step = Step.HalfFlip
        agent.halfflip = HalfFlip(agent.info.my_car)
    elif not dodge_overshoot and agent.info.my_car.location[2] < 80 and\
            (agent.drive.speed > abs(vf) + 300 and 1200 < abs(vf) < 2000 and agent.info.my_car.boost <= 25):
        # Dodge towards the target for speed
        agent.step = Step.Dodge
        agent.dodge = Dodge(agent.info.my_car)
        agent.dodge.duration = 0.1
        agent.dodge.target = target
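
These snippets lean on a couple of geometry helpers that the listing never shows. As a point of reference, here is a minimal sketch of what distance_2d and velocity_forward could look like, assuming an RLUtilities-style car object exposing a velocity vector and a forward() unit vector (both assumptions, not shown above):

import math

def distance_2d(a, b):
    # Straight-line distance in the ground plane; the height (z) component is ignored.
    return math.hypot(a[0] - b[0], a[1] - b[1])

def velocity_forward(car):
    # Signed speed along the car's nose: positive while driving forwards,
    # negative while reversing.
    f = car.forward()  # assumed: unit vector along the car's forward axis
    v = car.velocity
    return v[0] * f[0] + v[1] * f[1] + v[2] * f[2]

Under that reading, dodge_overshoot flags targets so close that a dodge at the current forward speed would carry the car past them, which is why it gates both the half-flip and the speed-dodge branches.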
Example #2
def shooting(agent):
    """"Method that gives the output for the shooting strategy"""
    ball = agent.info.ball
    car = agent.info.my_car
    target = shooting_target(agent)
    agent.drive.target = target
    distance = distance_2d(car.position, target)
    vf = velocity_forward(car)
    dodge_overshoot = distance < (abs(vf) + 500) * 1.5
    agent.drive.speed = get_speed(agent, target)
    agent.drive.step(agent.info.time_delta)
    agent.controls = agent.drive.controls
    if agent.defending:
        agent.step = Step.Defending
    elif should_dodge(agent):
        agent.step = Step.Dodge
        agent.dodge = Dodge(car)
        agent.dodge.duration = 0.1
        agent.dodge.target = ball.position
    elif agent.ball_bouncing and not (abs(ball.velocity[2]) < 100
              and sign(agent.team) * ball.velocity[1] < 0) and get_bounce(agent) is not None:
        agent.step = Step.Catching
        agent.drive.target = ball.position
        agent.drive.speed = 1399
    elif vf < -900 and (not dodge_overshoot or distance < 600):
        agent.step = Step.HalfFlip
        agent.halfflip = HalfFlip(car)
    elif not dodge_overshoot and car.position[2] < 80 and \
            (agent.drive.speed > abs(vf) + 300 and 1200 < abs(vf) < 2000 and car.boost <= 25):
        # Dodge towards the target for speed
        agent.step = Step.Dodge
        agent.dodge = Dodge(car)
        agent.dodge.duration = 0.1
        agent.dodge.target = target
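
The Catching guard above depends on a sign helper that maps a team index to a field direction. A plausible sketch, assuming the usual Rocket League convention that blue (team 0) defends negative y and orange (team 1) defends positive y (the convention and the mapping below are assumptions, not shown in the source):

def sign(team):
    # Hypothetical mapping: blue (0) -> -1, orange (1) -> +1, so that
    # sign(agent.team) * ball.velocity[1] < 0 is true exactly when the
    # ball is travelling toward the opponent's half.
    return 1 if team == 1 else -1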
Example #3
def start_shooting(agent):
    """"Method that is run the frame I choose the shooting strategy"""
    agent.step = Step.Shooting
    target = shooting_target(agent)
    speed = get_speed(agent, target)
    agent.drive.target = target
    agent.drive.speed = speed
Example #4
def defending(agent):
    """"Method that gives output for the defending strategy"""
    target = defending_target(agent)
    agent.drive.target = target
    distance = distance_2d(agent.info.my_car.position, target)
    vf = velocity_forward(agent.info.my_car)
    dodge_overshoot = distance < (abs(vf) + 500) * 1.5
    agent.drive.speed = get_speed(agent, target)
    agent.drive.step(agent.info.time_delta)
    agent.controls = agent.drive.controls
    t = time.time()
    can_dodge, simulated_duration, simulated_target = agent.simulate()
    # print(time.time() - t)
    if can_dodge:
        agent.dodge = Dodge(agent.info.my_car)
        agent.turn = AerialTurn(agent.info.my_car)
        agent.dodge.duration = simulated_duration - 0.1
        agent.dodge.target = simulated_target

        agent.dodge.preorientation = look_at(simulated_target, vec3(0, 0, 1))
        agent.timer = 0
        agent.step = Step.Dodge
    if not agent.should_defend():
        agent.step = Step.Shooting
    elif vf < -900 and (not dodge_overshoot or distance < 600):
        agent.step = Step.HalfFlip
        agent.halfflip = HalfFlip(agent.info.my_car)
    elif not dodge_overshoot and agent.info.my_car.position[2] < 80 and \
            (agent.drive.speed > abs(vf) + 300 and 1200 < abs(vf) < 2000 and agent.info.my_car.boost <= 25):
        # Dodge towards the target for speed
        agent.step = Step.Dodge
        agent.dodge = Dodge(agent.info.my_car)
        agent.dodge.duration = 0.1
        agent.dodge.target = target
Example #5
    def run_step(self, target_speed, debug=False) -> np.ndarray:
        """
        Execute one step of longitudinal control to reach a given target speed.
        :param target_speed: target speed in km/h
        :return: throttle control in the range [0, 1]
        """
        current_speed = get_speed(self._vehicle) * 3.6  # m/s -> km/h

        if debug:
            print("Current speed = {}".format(current_speed))

        return self._pid_control(target_speed, current_speed)
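
run_step defers the actual work to _pid_control, which the example omits. Below is a minimal standalone sketch of a discrete PID speed controller in that shape; the LongitudinalPID class, the gain names, and the error deque are illustrative assumptions, not the source's own definitions:

from collections import deque

import numpy as np

class LongitudinalPID:
    def __init__(self, k_p=1.0, k_d=0.0, k_i=0.0, dt=0.025):
        self._k_p, self._k_d, self._k_i, self._dt = k_p, k_d, k_i, dt
        self._error_buffer = deque(maxlen=10)  # recent speed errors for the D and I terms

    def _pid_control(self, target_speed, current_speed):
        # Proportional term: current speed error in km/h.
        error = target_speed - current_speed
        self._error_buffer.append(error)
        if len(self._error_buffer) >= 2:
            de = (self._error_buffer[-1] - self._error_buffer[-2]) / self._dt  # derivative
            ie = sum(self._error_buffer) * self._dt                            # integral
        else:
            de = ie = 0.0
        # Clamp to the [0, 1] throttle range promised by run_step's docstring.
        return float(np.clip(self._k_p * error + self._k_d * de + self._k_i * ie, 0.0, 1.0))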
Example #6
def defending(agent):
    target = defending_target(agent)
    agent.drive.target_pos = target
    agent.drive.target_speed = get_speed(agent, target)
    agent.drive.step(agent.FPS)
    agent.controls = agent.drive.controls
    powerslide(agent)
    if can_dodge(agent, target):
        agent.step = "Dodge"
        agent.dodge = AirDodge(agent.info.my_car, 0.1, target)
    if not agent.defending:
        agent.step = "Catching"
    if not agent.info.my_car.on_ground:
        agent.step = "Recovery"
Example #7
def shooting(agent):
    agent.drive.step(agent.FPS)
    agent.controls = agent.drive.controls
    powerslide(agent)
    target = shooting_target(agent)
    agent.drive.target_pos = target
    agent.drive.target_speed = get_speed(agent, target)
    if should_dodge(agent):
        agent.step = "Dodge"
        agent.dodge = AirDodge(agent.info.my_car, 0.1, agent.info.ball.pos)
    elif not (abs(agent.info.ball.vel[2]) < 100
              and sign(agent.team) * agent.info.ball.vel[1] < 0):
        agent.step = "Catching"
        agent.drive = Drive(agent.info.my_car, agent.info.ball.pos, 1399)
Example #8
def start_shooting(agent):
    agent.step = "Shooting"
    target = shooting_target(agent)
    speed = get_speed(agent, target)
    agent.drive = Drive(agent.info.my_car, target, speed)
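
Note the API difference from Example #3: here a fresh Drive is constructed with the target and speed as arguments, whereas Examples #1-#4 keep one persistent agent.drive object and mutate its target and speed fields every frame.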
Example #9
    def __init__(self):

        self.client = carla.Client("127.0.0.1", 2000)
        self.client.set_timeout(20.0)

        change_to_Town06(self.client)
        self.world = self.client.get_world()
        self.world_snapshot = self.world.get_snapshot()
        self.time_step_count = 0
        self.time_step = 0.025  # seconds; must divide 1 s evenly
        self.curr_time = 0
        self.subject_path_time_res = 0.5
        self.warmup_time = 3
        # 0. Reset Scene
        initialize(self.world, self.client, self.time_step)

        self.lane_marker_linestring = (
            get_lane_marker_linestring_from_right_lane_road_and_lane_id(
                self.world, 15, -6))

        # 1. Spawn vehicles
        self.subject_behavior = "very_aggressive"
        (
            self.ego_vehicle,
            self.subject_vehicle,
            self.current_lane_waypoints,
            self.subject_agent,
        ) = setup_scenario(
            self.world,
            self.client,
            synchronous_master=True,
            subject_behavior=self.subject_behavior,
        )

        # 2. Get the path follower object
        self.path_follower = PathFollower(self.world, self.ego_vehicle,
                                          self.time_step)

        # 3. Coarse Trajectory Generation
        road = Road()
        actions = Actions()
        constraints = Constraints()
        termination_conditions = TerminationConditions(max_time=15,
                                                       max_position_x=180)
        start_state = State([
            self.ego_vehicle.get_location().x,
            self.ego_vehicle.get_location().y
        ], 0, 0)
        cost_calculator = CostCalculator(
            subject_path_time_res=self.subject_path_time_res)
        self.latticeGenerator = LatticeGenerator(
            road=road,
            actions=actions,
            constraints=constraints,
            cost_calculator=cost_calculator,
            termination_conditions=termination_conditions,
            start_state=start_state,
            ego=self.ego_vehicle,
            subject=self.subject_vehicle,
        )

        # 4. Get the controller object
        args_lateral = {
            "K_P": 1.0,
            "K_D": 0.0,
            "K_I": 0.0,
            "dt": self.time_step
        }
        args_longitudinal = {
            "K_P": 1.0,
            "K_D": 0.00,
            "K_I": 0.00,
            "dt": self.time_step,
        }
        self.controller = VehiclePIDController(self.ego_vehicle, args_lateral,
                                               args_longitudinal)

        # 5. Placeholder for the concatenated trajectory
        self.traj_to_track = []
        self.has_lane_change_happened = 0
        self.is_lane_change_happening = 0
        self.planned_path = None
        self.next_timestep = None
        self.latest_tracked_speed_ego = get_speed(self.ego_vehicle)

        # self.regenerate_traj_flag = False

        if self.subject_behavior == "manual":
            self.path_predictor = PathPredictor(self.world,
                                                self.subject_vehicle,
                                                self.subject_path_time_res)
        else:
            self.path_predictor = PathPredictor(self.world,
                                                self.subject_agent.vehicle,
                                                self.subject_path_time_res)
        self.subject_traj = []
        self.ego_traj = []
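
The constructor relies on an initialize(world, client, time_step) helper that is only called, never defined, in the listing. A plausible sketch using the standard CARLA settings API, assuming its job is to put the world into fixed-step synchronous mode so each world.tick() advances exactly time_step seconds:

def initialize(world, client, time_step):
    # Assumed behaviour: enable synchronous mode with a fixed physics step.
    # client is accepted only to match the call site; this sketch does not use it.
    settings = world.get_settings()
    settings.synchronous_mode = True
    settings.fixed_delta_seconds = time_step
    world.apply_settings(settings)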
Example #10
    def loop(self):

        # 1. Get state estimation
        ego_transform = self.ego_vehicle.get_transform()

        if self.latest_tracked_speed_ego is None:
            ego_speed = get_speed(self.ego_vehicle)
        else:
            ego_speed = self.latest_tracked_speed_ego

        ego_state = State(
            [ego_transform.location.x, ego_transform.location.y],
            ego_speed,
            self.curr_time,
        )
        ego_state_slvt = toSLVT(self.lane_marker_linestring, ego_state)
        ego_state_pose = PoseTemp(
            ego_transform.location.x,
            ego_transform.location.y,
            ego_transform.rotation.yaw * np.pi / 180,
            ego_speed,
        )

        subject_transform = self.subject_vehicle.get_transform()

        subject_state = State(
            [subject_transform.location.x, subject_transform.location.y],
            get_speed(self.subject_vehicle),
            self.curr_time,
        )
        subject_state_slvt = toSLVT(self.lane_marker_linestring, subject_state)

        self.subject_traj.append(subject_state)
        self.ego_traj.append(ego_state)

        print("##############")
        print(subject_state_slvt)
        print(ego_state_slvt)
        print("##############")

        if self.subject_behavior != "manual":
            # 5. Predict the path of the subject agent
            if len(self.subject_agent.get_local_planner().waypoints_queue) != 0:
                self.subject_vehicle.apply_control(
                    self.subject_agent.run_step())
            else:
                control = carla.VehicleControl()
                control.steer = 0.0
                control.throttle = 0.0
                control.brake = 1.0
                control.hand_brake = False
                control.manual_gear_shift = False
                self.subject_vehicle.apply_control(control)

        subject_path_slvt = []
        steps = 100
        subject_path = self.path_predictor.get_predicted_path(
            steps, subject_state_slvt.time)
        subject_path_slvt = [
            toSLVT(self.lane_marker_linestring, elem) for elem in subject_path
        ]
        for i in range(len(subject_path)):
            self.world.debug.draw_string(
                carla.Location(
                    x=subject_path[i].position[0],
                    y=subject_path[i].position[1],
                    z=0,
                ),
                "X",
                draw_shadow=False,
                color=carla.Color(r=0, g=0, b=255),
            )

        # 2. Get next plan to track

        while (self.world.get_snapshot().timestamp.elapsed_seconds < 15
               and self.subject_behavior == "manual"):
            # print("Time: ", self.world_snapshot.timestamp.elapsed_seconds)
            control_signal = self.controller.run_step(ego_speed,
                                                      ego_state_pose)
            self.ego_vehicle.set_transform(
                carla.Transform(
                    location=carla.Location(ego_state_pose.x, ego_state_pose.y,
                                            0),
                    rotation=carla.Rotation(yaw=ego_state_pose.theta * 180 /
                                            np.pi),
                ))
            # print("ego_speed: ", ego_speed)
            self.world.tick()

        if self.time_step_count % 10 == 0 and self.is_lane_change_happening == 0:

            self.time_step_count = 0

            # 6. Send current state to coarse path prediction module
            (
                full_lattice,
                state_2_cost,
                reverse_lattice,
                goal_state,
            ) = self.latticeGenerator.generate_full_lattice(
                ego_state_slvt, subject_path_slvt,
                self.has_lane_change_happened)

            example_start_state_tuple = (
                ego_state_slvt.position[0],
                ego_state_slvt.position[1],
                ego_state_slvt.speed,
                ego_state_slvt.time,
                self.has_lane_change_happened,
            )
            if goal_state is None:
                print(full_lattice)
                print(state_2_cost)

            if goal_state is not None:
                print(
                    "Goal State:",
                    goal_state,
                    "Cost:",
                    state_2_cost[goal_state],
                    "Ego State:",
                    ego_state_slvt.position[0],
                )

            (
                planned_path,
                planned_action_sequence,
            ) = self.latticeGenerator.backtrack_from_state(
                reverse_lattice,
                state_2_cost,
                goal_state,
                example_start_state_tuple,
            )
            self.planned_path = planned_path
            # Overwrite Trajectory for testing
            # planned_action_sequence = [0, 3, 0, 0, 0]

            print(planned_action_sequence)
            print(planned_path)
            print("Subject Path : --------------------")
            for elem in subject_path_slvt:
                print(elem)
            print("-----------------------------------")

            # if self.regenerate_traj_flag == False:
            (
                self.traj_to_track,
                self.lane_change_flags,
            ) = self.path_follower.get_trajectory(
                max(7.33 * 3.6, ego_state.speed * 3.6),
                get_ego_waypoint(self.world, self.ego_vehicle),
                planned_action_sequence,
            )

        # 3. Find next pose to track

        next_index = self.time_step_count
        self.next_timestep = next_index
        pose_to_track = self.traj_to_track[next_index]

        next_flag = self.lane_change_flags[next_index]
        if next_flag == 1:
            self.has_lane_change_happened = 1
            self.is_lane_change_happening = 1
        else:
            self.is_lane_change_happening = 0

        # 3. Get control signal based on requested location + speed
        future_poses = self.traj_to_track[next_index:]

        gamma = 1
        for future_pose in future_poses:
            gamma = gamma * 0.98
            self.world.debug.draw_string(
                Location(x=future_pose.x, y=future_pose.y, z=1),
                "O",
                draw_shadow=False,
                color=carla.Color(r=0, g=int(255 * gamma), b=0),
                life_time=0.25,
            )

        if pose_to_track is not None:

            self.world.debug.draw_string(
                Location(x=pose_to_track.x, y=pose_to_track.y, z=1),
                "O",
                draw_shadow=False,
                color=carla.Color(r=0, g=0, b=255),
                life_time=1,
            )

            control_signal = self.controller.run_step(pose_to_track.speed,
                                                      pose_to_track)

            # 4. Apply control signal on ego-vehicle and subject vehicle(actuation)
            self.ego_vehicle.set_transform(
                carla.Transform(
                    location=carla.Location(pose_to_track.x, pose_to_track.y,
                                            0),
                    rotation=carla.Rotation(yaw=pose_to_track.theta * 180 /
                                            np.pi),
                ))
            self.latest_tracked_speed_ego = pose_to_track.speed / 3.6

        # 8. Tick
        self.time_step_count += 1
        self.curr_time = self.time_step_count * self.time_step
        self.world.tick()
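
Paired with the __init__ from Example #9, the class is driven by calling loop() once per fixed 0.025 s step. A hypothetical driver (the enclosing class name Scenario is assumed; the listing never shows it):

scenario = Scenario()               # runs the setup from Example #9
while scenario.curr_time < 15:      # 15 s matches the planner's max_time
    scenario.loop()                 # predict, plan, track one pose, tick the world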