Example 1
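A rule-based expert autopilot: PID controllers for steering and speed, geometric hazard checks for vehicles, red lights and walkers, and periodic saving of camera frames, masks and driving measurements.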
class AutoPilot(BaseAgent):
    def setup(self, path_to_conf_file):
        super().setup(path_to_conf_file)

        self.save_path = None

        if path_to_conf_file:
            now = datetime.datetime.now()
            string = pathlib.Path(os.environ['ROUTES']).stem + '_'
            string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second)))

            print(string)

            self.save_path = pathlib.Path(path_to_conf_file) / string
            self.save_path.mkdir(exist_ok=False)

            (self.save_path / 'rgb').mkdir()
            (self.save_path / 'mask').mkdir()
            (self.save_path / 'rgb_left').mkdir()
            (self.save_path / 'rgb_right').mkdir()
            (self.save_path / 'measurements').mkdir()

    def _init(self):
        super()._init()

        self._turn_controller = PIDController(K_P=1.25, K_I=0.75, K_D=0.3, n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

    def _get_angle_to(self, pos, theta, target):
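        # R maps ego-frame vectors into the world frame for heading theta, so
        # R.T rotates the world-frame displacement (target - pos) back into
        # the ego frame; the result is the signed heading error in degrees,
        # with 0 meaning the target is dead ahead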
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta),  np.cos(theta)],
            ])

        aim = R.T.dot(target - pos)
        angle = -np.degrees(np.arctan2(-aim[1], aim[0]))
        angle = 0.0 if np.isnan(angle) else angle 

        return angle

    def _get_control(self, target, far_target, tick_data):
        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        # Steering.
        angle_unnorm = self._get_angle_to(pos, theta, target)
        angle = angle_unnorm / 90

        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)
        steer = round(steer, 3)

        # Acceleration.
        angle_far_unnorm = self._get_angle_to(pos, theta, far_target)
        should_slow = abs(angle_far_unnorm) > 45.0 or abs(angle_unnorm) > 5.0
        target_speed = 4.0 if should_slow else 7.0

        brake = self._should_brake()
        target_speed = target_speed if not brake else 0.0

        delta = np.clip(target_speed - speed, 0.0, 0.25)
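        # the clip above limits the speed error fed to the PID to 0.25, so
        # throttle ramps up gradually even when the target speed jumps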
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)

        if brake:
            steer *= 0.5
            throttle = 0.0

        return steer, throttle, brake, target_speed

    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()

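        # cycle deterministically through the WEATHERS presets for visual
        # diversity in the recorded data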
        if self.step % 100 == 0:
            index = (self.step // 100) % len(WEATHERS)
            self._world.set_weather(WEATHERS[index])

        data = self.tick(input_data)

        gps = self._get_position(data)

        near_node, near_command = self._waypoint_planner.run_step(gps)
        far_node, far_command = self._command_planner.run_step(gps)

        steer, throttle, brake, target_speed = self._get_control(near_node, far_node, data)

        control = carla.VehicleControl()
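        # small Gaussian steering noise pushes the recorded demonstrations
        # slightly off the expert trajectory; note that save() below receives
        # the clean steer value, not the noisy one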
        control.steer = steer + 1e-2 * np.random.randn()
        control.throttle = throttle
        control.brake = float(brake)

        if self.step % 10 == 0:
            self.save(far_node, near_command, steer, throttle, float(brake), target_speed, data)

        return control

    def save(self, far_node, near_command, steer, throttle, brake, target_speed, tick_data):
        frame = self.step // 10

        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        data = {
                "x": pos[0],
                "y": pos[1],
                "theta": theta,
                "speed": speed,
                "target_speed": target_speed,
                "x_command": far_node[0],
                "y_command": far_node[1],
                "command": near_command.value,
                "steer": steer,
                "throttle": throttle,
                "brake": brake,
                }

        with open(self.save_path / 'measurements' / ('%04d.json' % frame), 'w') as f:
            json.dump(data, f)

        Image.fromarray(tick_data['rgb']).save(self.save_path / 'rgb' / ('%04d.png' % frame))
        Image.fromarray(tick_data['mask']).save(self.save_path / 'mask' / ('%04d.png' % frame))
        Image.fromarray(tick_data['rgb_left']).save(self.save_path / 'rgb_left' / ('%04d.png' % frame))
        Image.fromarray(tick_data['rgb_right']).save(self.save_path / 'rgb_right' / ('%04d.png' % frame))

    def _should_brake(self):
        actors = self._world.get_actors()

        vehicle = self._is_vehicle_hazard(actors.filter('*vehicle*'))
        light = self._is_light_red(actors.filter('*traffic_light*'))
        walker = self._is_walker_hazard(actors.filter('*walker*'))

        return any(x is not None for x in [vehicle, light, walker])

    def _draw_line(self, p, v, z, color=(255, 0, 0)):
        if not DEBUG:
            return

        p1 = _location(p[0], p[1], z)
        p2 = _location(p[0]+v[0], p[1]+v[1], z)
        color = carla.Color(*color)

        self._world.debug.draw_line(p1, p2, 0.25, color, 0.01)

    def _is_light_red(self, lights_list):
        if self._vehicle.get_traffic_light_state() != carla.libcarla.TrafficLightState.Green:
            affecting = self._vehicle.get_traffic_light()
            if affecting is None:
                return None

            for light in self._traffic_lights:
                if light.id == affecting.id:
                    return affecting

        return None

    def _is_walker_hazard(self, walkers_list):
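        # collision forecast: the ego segment extends 10 m along the current
        # heading; each walker's segment runs from 3 m behind to 5 m ahead of
        # it along its heading (collapsed to a point when nearly stationary),
        # and get_collision checks whether the two segments intersect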
        z = self._vehicle.get_location().z
        p1 = _numpy(self._vehicle.get_location())
        v1 = 10.0 * _orientation(self._vehicle.get_transform().rotation.yaw)

        self._draw_line(p1, v1, z+2.5, (0, 0, 255))

        for walker in walkers_list:
            v2_hat = _orientation(walker.get_transform().rotation.yaw)
            s2 = np.linalg.norm(_numpy(walker.get_velocity()))

            if s2 < 0.05:
                v2_hat *= s2

            p2 = -3.0 * v2_hat + _numpy(walker.get_location())
            v2 = 8.0 * v2_hat

            self._draw_line(p2, v2, z+2.5)

            collides, collision_point = get_collision(p1, v1, p2, v2)

            if collides:
                return walker

        return None

    def _is_vehicle_hazard(self, vehicle_list):
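        # a vehicle counts as a hazard when it is within the speed-scaled
        # lookahead distance s1, within 30 degrees of our heading, and either
        # moving in roughly the same direction (headings within 60 degrees)
        # or sitting almost directly ahead (within 15 degrees)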
        z = self._vehicle.get_location().z

        o1 = _orientation(self._vehicle.get_transform().rotation.yaw)
        p1 = _numpy(self._vehicle.get_location())
        s1 = max(7.5, 2.0 * np.linalg.norm(_numpy(self._vehicle.get_velocity())))
        v1_hat = o1
        v1 = s1 * v1_hat

        self._draw_line(p1, v1, z+2.5, (255, 0, 0))

        for target_vehicle in vehicle_list:
            if target_vehicle.id == self._vehicle.id:
                continue

            o2 = _orientation(target_vehicle.get_transform().rotation.yaw)
            p2 = _numpy(target_vehicle.get_location())
            s2 = max(5.0, 2.0 * np.linalg.norm(_numpy(target_vehicle.get_velocity())))
            v2_hat = o2
            v2 = s2 * v2_hat

            p2_p1 = p2 - p1
            distance = np.linalg.norm(p2_p1)
            p2_p1_hat = p2_p1 / (distance + 1e-4)

            self._draw_line(p2, v2, z+2.5, (255, 0, 0))

            angle_to_car = np.degrees(np.arccos(v1_hat.dot(p2_p1_hat)))
            angle_between_heading = np.degrees(np.arccos(o1.dot(o2)))

            if angle_between_heading > 60.0 and not (angle_to_car < 15 and distance < s1):
                continue
            elif angle_to_car > 30.0:
                continue
            elif distance > s1:
                continue

            return target_vehicle

        return None
Example 2
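An image-based agent (ImageAgent) that predicts waypoints from three concatenated camera views and converts them to controls with PID controllers, followed by a data-collecting AutoPilot variant; the "addition:"/"modification" comments mark changes relative to the original leaderboard/team_code files.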
class ImageAgent(BaseAgent):
    def setup(self, path_to_conf_file):
        super().setup(path_to_conf_file)

        self.converter = Converter()
        self.net = ImageModel.load_from_checkpoint(path_to_conf_file)
        self.net.cuda()
        self.net.eval()

        # addition: modified from leaderboard/team_code/auto_pilot.py
        self.save_path = None

        parent_folder = os.environ['SAVE_FOLDER']
        string = pathlib.Path(os.environ['ROUTES']).stem
        self.save_path = pathlib.Path(parent_folder) / string

    # addition: modified from leaderboard/team_code/auto_pilot.py
    def save(self, steer, throttle, brake, tick_data):
        # frame = self.step // 10
        frame = self.step

        pos = self._get_position(tick_data)
        far_command = tick_data['far_command']
        speed = tick_data['speed']

        string = os.environ['SAVE_FOLDER'] + '/' + pathlib.Path(
            os.environ['ROUTES']).stem

        center_str = string + '/' + 'rgb' + '/' + ('%04d.png' % frame)
        left_str = string + '/' + 'rgb_left' + '/' + ('%04d.png' % frame)
        right_str = string + '/' + 'rgb_right' + '/' + ('%04d.png' % frame)
        # topdown_str = string + '/' + 'topdown' + '/' + ('%04d.png' % frame)
        # rgb_with_car_str = string + '/' + 'rgb_with_car' + '/' + ('%04d.png' % frame)

        center = self.save_path / 'rgb' / ('%04d.png' % frame)
        left = self.save_path / 'rgb_left' / ('%04d.png' % frame)
        right = self.save_path / 'rgb_right' / ('%04d.png' % frame)
        topdown = self.save_path / 'topdown' / ('%04d.png' % frame)
        rgb_with_car = self.save_path / 'rgb_with_car' / ('%04d.png' % frame)

        data_row = ','.join([
            str(i) for i in [
                frame, far_command, speed, steer, throttle, brake, center_str,
                left_str, right_str
            ]
        ])
        with (self.save_path / 'measurements.csv').open("a") as f_out:
            f_out.write(data_row + '\n')

        Image.fromarray(tick_data['rgb']).save(center)
        Image.fromarray(tick_data['rgb_left']).save(left)
        Image.fromarray(tick_data['rgb_right']).save(right)

        # addition
        Image.fromarray(COLOR[CONVERTER[tick_data['topdown']]]).save(topdown)
        Image.fromarray(tick_data['rgb_with_car']).save(rgb_with_car)

    def _init(self):
        super()._init()

        self._turn_controller = PIDController(K_P=1.25,
                                              K_I=0.75,
                                              K_D=0.3,
                                              n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

        # addition:
        self._vehicle = CarlaDataProvider.get_hero_actor()
        self._world = self._vehicle.get_world()

    def tick(self, input_data):

        result = super().tick(input_data)
        result['image'] = np.concatenate(
            tuple(result[x] for x in ['rgb', 'rgb_left', 'rgb_right']), -1)

        rgb_with_car = cv2.cvtColor(input_data['rgb_with_car'][1][:, :, :3],
                                    cv2.COLOR_BGR2RGB)
        result['rgb_with_car'] = rgb_with_car

        theta = result['compass']
        theta = 0.0 if np.isnan(theta) else theta
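        # the compass reports heading in radians; the extra pi/2 rotates it
        # into the frame of the overhead map used for the target below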
        theta = theta + np.pi / 2
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta), np.cos(theta)],
        ])

        gps = self._get_position(result)
        # modification
        far_node, far_command = self._command_planner.run_step(gps)
        target = R.T.dot(far_node - gps)
        target *= 5.5
        target += [128, 256]
        target = np.clip(target, 0, 256)

        result['target'] = target
        # addition:
        self._actors = self._world.get_actors()
        self._traffic_lights = get_nearby_lights(
            self._vehicle, self._actors.filter('*traffic_light*'))
        result['far_command'] = far_command
        topdown = input_data['map'][1][:, :, 2]
        topdown = draw_traffic_lights(topdown, self._vehicle,
                                      self._traffic_lights)
        result['topdown'] = topdown

        return result

    @torch.no_grad()
    def run_step_using_learned_controller(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        tick_data = self.tick(input_data)

        img = torchvision.transforms.functional.to_tensor(tick_data['image'])
        img = img[None].cuda()

        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        points, (target_cam, _) = self.net.forward(img, target)
        control = self.net.controller(points).cpu().squeeze()

        steer = control[0].item()
        desired_speed = control[1].item()
        speed = tick_data['speed']

        brake = desired_speed < 0.4 or (speed / desired_speed) > 1.1
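        # i.e. brake when the model asks for near-zero speed or when we are
        # already more than 10% above the desired speed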

        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)

        if DEBUG:
            debug_display(tick_data, target_cam.squeeze(),
                          points.cpu().squeeze(), steer, throttle, brake,
                          desired_speed, self.step)

        return control

    @torch.no_grad()
    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        tick_data = self.tick(input_data)

        img = torchvision.transforms.functional.to_tensor(tick_data['image'])
        img = img[None].cuda()

        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        points, (target_cam, _) = self.net.forward(img, target)
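        # the network predicts waypoints in normalized [-1, 1] image
        # coordinates; rescale them to pixels and project into the ego frame
        # with the camera-geometry converter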
        points_cam = points.clone().cpu()
        points_cam[..., 0] = (points_cam[..., 0] + 1) / 2 * img.shape[-1]
        points_cam[..., 1] = (points_cam[..., 1] + 1) / 2 * img.shape[-2]
        points_cam = points_cam.squeeze()
        points_world = self.converter.cam_to_world(points_cam).numpy()

        aim = (points_world[1] + points_world[0]) / 2.0
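        # steer toward the midpoint of the first two predicted waypoints;
        # dividing the heading error (degrees) by 90 matches the PID
        # normalization used by the autopilot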
        angle = np.degrees(np.pi / 2 - np.arctan2(aim[1], aim[0])) / 90
        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)

        desired_speed = np.linalg.norm(points_world[0] - points_world[1]) * 2.0
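        # (assumption: consecutive waypoints are spaced roughly 0.5 s apart,
        # so twice their separation approximates the desired speed in m/s)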
        # desired_speed *= (1 - abs(angle)) ** 2

        speed = tick_data['speed']

        brake = desired_speed < 0.4 or (speed / desired_speed) > 1.1

        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)

        if DEBUG:
            debug_display(tick_data, target_cam.squeeze(),
                          points.cpu().squeeze(), steer, throttle, brake,
                          desired_speed, self.step)

        # addition: from leaderboard/team_code/auto_pilot.py
        if self.step == 0:
            title_row = ','.join([
                'frame_id', 'far_command', 'speed', 'steering', 'throttle',
                'brake', 'center', 'left', 'right'
            ])
            with (self.save_path / 'measurements.csv').open("a") as f_out:
                f_out.write(title_row + '\n')
        if self.step % 2 == 0:
            self.gather_info()

        record_every_n_steps = 3
        if self.step % record_every_n_steps == 0:
            self.save(steer, throttle, brake, tick_data)
        return control


class AutoPilot(MapAgent):
    def setup(self, path_to_conf_file):
        super().setup(path_to_conf_file)

        self.save_path = None

        if path_to_conf_file:
            now = datetime.datetime.now()
            string = pathlib.Path(os.environ['ROUTES']).stem + '_'
            string += '_'.join(
                map(lambda x: '%02d' % x,
                    (now.month, now.day, now.hour, now.minute, now.second)))

            print(string)

            self.save_path = pathlib.Path(path_to_conf_file) / string
            self.save_path.mkdir(exist_ok=False)

            (self.save_path / 'rgb').mkdir()
            (self.save_path / 'rgb_left').mkdir()
            (self.save_path / 'rgb_right').mkdir()
            (self.save_path / 'topdown').mkdir()
            (self.save_path / 'measurements').mkdir()

    def _init(self):
        super()._init()

        self._turn_controller = PIDController(K_P=1.25,
                                              K_I=0.75,
                                              K_D=0.3,
                                              n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

    def _get_angle_to(self, pos, theta, target):
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta), np.cos(theta)],
        ])

        aim = R.T.dot(target - pos)
        angle = -np.degrees(np.arctan2(-aim[1], aim[0]))
        angle = 0.0 if np.isnan(angle) else angle

        return angle

    def _get_control(self, target, far_target, tick_data, _draw):
        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        # Steering.
        angle_unnorm = self._get_angle_to(pos, theta, target)
        angle = angle_unnorm / 90

        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)
        steer = round(steer, 3)

        # Acceleration.
        angle_far_unnorm = self._get_angle_to(pos, theta, far_target)
        should_slow = abs(angle_far_unnorm) > 45.0 or abs(angle_unnorm) > 5.0
        target_speed = 4.0 if should_slow else 7.0

        brake = self._should_brake()
        target_speed = target_speed if not brake else 0.0

        delta = np.clip(target_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)

        if brake:
            steer *= 0.5
            throttle = 0.0

        _draw.text((5, 90), 'Speed: %.3f' % speed)
        _draw.text((5, 110), 'Target: %.3f' % target_speed)
        _draw.text((5, 130), 'Angle: %.3f' % angle_unnorm)
        _draw.text((5, 150), 'Angle Far: %.3f' % angle_far_unnorm)

        return steer, throttle, brake, target_speed

    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        if self.step % 100 == 0:
            index = (self.step // 100) % len(WEATHERS)
            self._world.set_weather(WEATHERS[index])

        data = self.tick(input_data)
        topdown = data['topdown']
        rgb = np.hstack((data['rgb_left'], data['rgb'], data['rgb_right']))

        gps = self._get_position(data)

        near_node, near_command = self._waypoint_planner.run_step(gps)
        far_node, far_command = self._command_planner.run_step(gps)

        _topdown = Image.fromarray(COLOR[CONVERTER[topdown]])
        _rgb = Image.fromarray(rgb)
        _draw = ImageDraw.Draw(_topdown)

        _topdown.thumbnail((256, 256))
        _rgb = _rgb.resize((int(256 / _rgb.size[1] * _rgb.size[0]), 256))

        _combined = Image.fromarray(np.hstack((_rgb, _topdown)))
        _draw = ImageDraw.Draw(_combined)

        steer, throttle, brake, target_speed = self._get_control(
            near_node, far_node, data, _draw)

        _draw.text((5, 10),
                   'FPS: %.3f' % (self.step / (time.time() - self.wall_start)))
        _draw.text((5, 30), 'Steer: %.3f' % steer)
        _draw.text((5, 50), 'Throttle: %.3f' % throttle)
        _draw.text((5, 70), 'Brake: %s' % brake)

        if HAS_DISPLAY:
            cv2.imshow('map',
                       cv2.cvtColor(np.array(_combined), cv2.COLOR_BGR2RGB))
            cv2.waitKey(1)

        control = carla.VehicleControl()
        control.steer = steer + 1e-2 * np.random.randn()
        control.throttle = throttle
        control.brake = float(brake)

        if self.step % 10 == 0:
            self.save(far_node, near_command, steer, throttle, brake,
                      target_speed, data)

        return control

    def save(self, far_node, near_command, steer, throttle, brake,
             target_speed, tick_data):
        frame = self.step // 10

        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        data = {
            'x': pos[0],
            'y': pos[1],
            'theta': theta,
            'speed': speed,
            'target_speed': target_speed,
            'x_command': far_node[0],
            'y_command': far_node[1],
            'command': near_command.value,
            'steer': steer,
            'throttle': throttle,
            'brake': brake,
        }

        (self.save_path / 'measurements' / ('%04d.json' % frame)).write_text(
            str(data))

        Image.fromarray(tick_data['rgb']).save(self.save_path / 'rgb' /
                                               ('%04d.png' % frame))
        Image.fromarray(tick_data['rgb_left']).save(
            self.save_path / 'rgb_left' / ('%04d.png' % frame))
        Image.fromarray(tick_data['rgb_right']).save(
            self.save_path / 'rgb_right' / ('%04d.png' % frame))
        Image.fromarray(tick_data['topdown']).save(self.save_path / 'topdown' /
                                                   ('%04d.png' % frame))

    def _should_brake(self):
        actors = self._world.get_actors()

        vehicle = self._is_vehicle_hazard(actors.filter('*vehicle*'))
        light = self._is_light_red(actors.filter('*traffic_light*'))
        walker = self._is_walker_hazard(actors.filter('*walker*'))

        return any(x is not None for x in [vehicle, light, walker])

    def _draw_line(self, p, v, z, color=(255, 0, 0)):
        if not DEBUG:
            return

        p1 = _location(p[0], p[1], z)
        p2 = _location(p[0] + v[0], p[1] + v[1], z)
        color = carla.Color(*color)

        self._world.debug.draw_line(p1, p2, 0.25, color, 0.01)

    def _is_light_red(self, lights_list):
        if self._vehicle.get_traffic_light_state() != carla.libcarla.TrafficLightState.Green:
            affecting = self._vehicle.get_traffic_light()
            if affecting is None:
                return None

            for light in self._traffic_lights:
                if light.id == affecting.id:
                    return affecting

        return None

    def _is_walker_hazard(self, walkers_list):
        z = self._vehicle.get_location().z
        p1 = _numpy(self._vehicle.get_location())
        v1 = 10.0 * _orientation(self._vehicle.get_transform().rotation.yaw)

        self._draw_line(p1, v1, z + 2.5, (0, 0, 255))

        for walker in walkers_list:
            v2_hat = _orientation(walker.get_transform().rotation.yaw)
            s2 = np.linalg.norm(_numpy(walker.get_velocity()))

            if s2 < 0.05:
                v2_hat *= s2

            p2 = -3.0 * v2_hat + _numpy(walker.get_location())
            v2 = 8.0 * v2_hat

            self._draw_line(p2, v2, z + 2.5)

            collides, collision_point = get_collision(p1, v1, p2, v2)

            if collides:
                return walker

        return None

    def _is_vehicle_hazard(self, vehicle_list):
        z = self._vehicle.get_location().z

        o1 = _orientation(self._vehicle.get_transform().rotation.yaw)
        p1 = _numpy(self._vehicle.get_location())
        s1 = max(7.5,
                 2.0 * np.linalg.norm(_numpy(self._vehicle.get_velocity())))
        v1_hat = o1
        v1 = s1 * v1_hat

        self._draw_line(p1, v1, z + 2.5, (255, 0, 0))

        for target_vehicle in vehicle_list:
            if target_vehicle.id == self._vehicle.id:
                continue

            o2 = _orientation(target_vehicle.get_transform().rotation.yaw)
            p2 = _numpy(target_vehicle.get_location())
            s2 = max(
                5.0,
                2.0 * np.linalg.norm(_numpy(target_vehicle.get_velocity())))
            v2_hat = o2
            v2 = s2 * v2_hat

            p2_p1 = p2 - p1
            distance = np.linalg.norm(p2_p1)
            p2_p1_hat = p2_p1 / (distance + 1e-4)

            self._draw_line(p2, v2, z + 2.5, (255, 0, 0))

            angle_to_car = np.degrees(np.arccos(v1_hat.dot(p2_p1_hat)))
            angle_between_heading = np.degrees(np.arccos(o1.dot(o2)))

            if angle_between_heading > 60.0 and not (angle_to_car < 15
                                                     and distance < s1):
                continue
            elif angle_to_car > 30.0:
                continue
            elif distance > s1:
                continue

            return target_vehicle

        return None
Example 4
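The expert autopilot extended with stop-sign handling: a trigger-volume test decides whether a stop sign affects the ego vehicle, and a small state machine enforces a full stop before driving on.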
class AutoPilot(MapAgent):

    # for stop signs
    PROXIMITY_THRESHOLD = 30.0  # meters
    SPEED_THRESHOLD = 0.1
    WAYPOINT_STEP = 1.0  # meters

    def setup(self, path_to_conf_file):
        super().setup(path_to_conf_file)
            
    def _init(self):
        super()._init()

        self._turn_controller = PIDController(K_P=1.25, K_I=0.75, K_D=0.3, n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

        # for stop signs
        self._target_stop_sign = None # the stop sign affecting the ego vehicle
        self._stop_completed = False # if the ego vehicle has completed the stop sign
        self._affected_by_stop = False # if the ego vehicle is influenced by a stop sign

    def _get_angle_to(self, pos, theta, target):
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta),  np.cos(theta)],
            ])

        aim = R.T.dot(target - pos)
        angle = -np.degrees(np.arctan2(-aim[1], aim[0]))
        angle = 0.0 if np.isnan(angle) else angle 

        return angle

    def _get_control(self, target, far_target, tick_data):
        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        # Steering.
        angle_unnorm = self._get_angle_to(pos, theta, target)
        angle = angle_unnorm / 90

        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)
        steer = round(steer, 3)

        # Acceleration.
        angle_far_unnorm = self._get_angle_to(pos, theta, far_target)
        should_slow = abs(angle_far_unnorm) > 45.0 or abs(angle_unnorm) > 5.0
        target_speed = 4.0 if should_slow else 7.0
        brake = self._should_brake()
        target_speed = target_speed if not brake else 0.0
        
        self.should_slow = int(should_slow)
        self.should_brake = int(brake)
        self.angle = angle
        self.angle_unnorm = angle_unnorm
        self.angle_far_unnorm = angle_far_unnorm
        
        delta = np.clip(target_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)

        if brake:
            steer *= 0.5
            throttle = 0.0

        return steer, throttle, brake, target_speed

    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        # change weather for visual diversity
        if self.step % 10 == 0:
            index = random.choice(range(len(WEATHERS)))
            self.weather_id = WEATHERS_IDS[index]
            weather = WEATHERS[WEATHERS_IDS[index]]
            print(self.weather_id, weather)
            self._world.set_weather(weather)

        data = self.tick(input_data)
        gps = self._get_position(data)

        near_node, near_command = self._waypoint_planner.run_step(gps)
        far_node, far_command = self._command_planner.run_step(gps)

        steer, throttle, brake, target_speed = self._get_control(near_node, far_node, data)

        control = carla.VehicleControl()
        control.steer = steer + 1e-2 * np.random.randn()
        control.throttle = throttle
        control.brake = float(brake)

        if self.step % 10 == 0 and self.save_path is not None:
            self.save(near_node, far_node, near_command, steer, throttle, brake, target_speed, data)

        return control

    def _should_brake(self):
        actors = self._world.get_actors()

        vehicle = self._is_vehicle_hazard(actors.filter('*vehicle*'))
        light = self._is_light_red(actors.filter('*traffic_light*'))
        walker = self._is_walker_hazard(actors.filter('*walker*'))
        stop_sign = self._is_stop_sign_hazard(actors.filter('*stop*'))

        self.is_vehicle_present = 1 if vehicle is not None else 0
        self.is_red_light_present = 1 if light is not None else 0
        self.is_pedestrian_present = 1 if walker is not None else 0
        self.is_stop_sign_present = 1 if stop_sign is not None else 0

        return any(x is not None for x in [vehicle, light, walker, stop_sign])

    def _point_inside_boundingbox(self, point, bb_center, bb_extent):
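        # 2D point-in-rectangle test: M lies inside the box iff its
        # projections onto the edge vectors AB and AD both fall strictly
        # within the edges, i.e. 0 < AM·AB < AB·AB and 0 < AM·AD < AD·AD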
        A = carla.Vector2D(bb_center.x - bb_extent.x, bb_center.y - bb_extent.y)
        B = carla.Vector2D(bb_center.x + bb_extent.x, bb_center.y - bb_extent.y)
        D = carla.Vector2D(bb_center.x - bb_extent.x, bb_center.y + bb_extent.y)
        M = carla.Vector2D(point.x, point.y)

        AB = B - A
        AD = D - A
        AM = M - A
        am_ab = AM.x * AB.x + AM.y * AB.y
        ab_ab = AB.x * AB.x + AB.y * AB.y
        am_ad = AM.x * AD.x + AM.y * AD.y
        ad_ad = AD.x * AD.x + AD.y * AD.y

        return am_ab > 0 and am_ab < ab_ab and am_ad > 0 and am_ad < ad_ad

    def _get_forward_speed(self, transform=None, velocity=None):
        """ Convert the vehicle transform directly to forward speed """
        if not velocity:
            velocity = self._vehicle.get_velocity()
        if not transform:
            transform = self._vehicle.get_transform()

        vel_np = np.array([velocity.x, velocity.y, velocity.z])
        pitch = np.deg2rad(transform.rotation.pitch)
        yaw = np.deg2rad(transform.rotation.yaw)
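        # unit forward vector from pitch and yaw; dotting the velocity onto
        # it gives the signed speed along the vehicle's heading (m/s)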
        orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)])
        speed = np.dot(vel_np, orientation)
        return speed

    def _is_actor_affected_by_stop(self, actor, stop, multi_step=20):
        """
        Check if the given actor is affected by the stop
        """
        affected = False
        # first we run a fast coarse test
        current_location = actor.get_location()
        stop_location = stop.get_transform().location
        if stop_location.distance(current_location) > self.PROXIMITY_THRESHOLD:
            return affected

        stop_t = stop.get_transform()
        transformed_tv = stop_t.transform(stop.trigger_volume.location)

        # slower and accurate test based on waypoint's horizon and geometric test
        list_locations = [current_location]
        waypoint = self._world.get_map().get_waypoint(current_location)
        for _ in range(multi_step):
            if waypoint:
                next_waypoints = waypoint.next(self.WAYPOINT_STEP)
                if not next_waypoints:
                    break
                waypoint = next_waypoints[0]
                list_locations.append(waypoint.transform.location)
            else:
                break

        for actor_location in list_locations:
            if self._point_inside_boundingbox(actor_location, transformed_tv, stop.trigger_volume.extent):
                affected = True

        return affected

    def _is_stop_sign_hazard(self, stop_sign_list):
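        # state machine: once a stop sign affects us, keep returning it until
        # the vehicle has come to a near-stop (speed < SPEED_THRESHOLD); then
        # return None until we leave the sign's trigger volume, and reset so
        # the next stop sign can be detected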
        if self._affected_by_stop:
            if not self._stop_completed:
                current_speed = self._get_forward_speed()
                if current_speed < self.SPEED_THRESHOLD:
                    self._stop_completed = True
                    return None
                else:
                    return self._target_stop_sign
            else:
                # reset if the ego vehicle is outside the influence of the current stop sign
                if not self._is_actor_affected_by_stop(self._vehicle, self._target_stop_sign):
                    self._affected_by_stop = False
                    self._stop_completed = False
                    self._target_stop_sign = None
                return None

        ve_tra = self._vehicle.get_transform()
        ve_dir = ve_tra.get_forward_vector()

        wp = self._world.get_map().get_waypoint(ve_tra.location)
        wp_dir = wp.transform.get_forward_vector()

        dot_ve_wp = ve_dir.x * wp_dir.x + ve_dir.y * wp_dir.y + ve_dir.z * wp_dir.z

        if dot_ve_wp > 0:  # ignore stop signs when driving against the lane direction
            for stop_sign in stop_sign_list:
                if self._is_actor_affected_by_stop(self._vehicle, stop_sign):
                    # this stop sign is affecting the vehicle
                    self._affected_by_stop = True
                    self._target_stop_sign = stop_sign
                    return self._target_stop_sign

        return None

    def _is_light_red(self, lights_list):
        if self._vehicle.get_traffic_light_state() != carla.libcarla.TrafficLightState.Green:
            affecting = self._vehicle.get_traffic_light()
            if affecting is None:
                return None

            for light in self._traffic_lights:
                if light.id == affecting.id:
                    return affecting

        return None

    def _is_walker_hazard(self, walkers_list):
        z = self._vehicle.get_location().z
        p1 = _numpy(self._vehicle.get_location())
        v1 = 10.0 * _orientation(self._vehicle.get_transform().rotation.yaw)

        for walker in walkers_list:
            v2_hat = _orientation(walker.get_transform().rotation.yaw)
            s2 = np.linalg.norm(_numpy(walker.get_velocity()))

            if s2 < 0.05:
                v2_hat *= s2

            p2 = -3.0 * v2_hat + _numpy(walker.get_location())
            v2 = 8.0 * v2_hat

            collides, collision_point = get_collision(p1, v1, p2, v2)

            if collides:
                return walker

        return None

    def _is_vehicle_hazard(self, vehicle_list):
        z = self._vehicle.get_location().z

        o1 = _orientation(self._vehicle.get_transform().rotation.yaw)
        p1 = _numpy(self._vehicle.get_location())
        s1 = max(10, 3.0 * np.linalg.norm(_numpy(self._vehicle.get_velocity()))) # increases the threshold distance
        v1_hat = o1
        v1 = s1 * v1_hat

        for target_vehicle in vehicle_list:
            if target_vehicle.id == self._vehicle.id:
                continue

            o2 = _orientation(target_vehicle.get_transform().rotation.yaw)
            p2 = _numpy(target_vehicle.get_location())
            s2 = max(5.0, 2.0 * np.linalg.norm(_numpy(target_vehicle.get_velocity())))
            v2_hat = o2
            v2 = s2 * v2_hat

            p2_p1 = p2 - p1
            distance = np.linalg.norm(p2_p1)
            p2_p1_hat = p2_p1 / (distance + 1e-4)

            angle_to_car = np.degrees(np.arccos(v1_hat.dot(p2_p1_hat)))
            angle_between_heading = np.degrees(np.arccos(o1.dot(o2)))

            # to consider -ve angles too
            angle_to_car = min(angle_to_car, 360.0 - angle_to_car)
            angle_between_heading = min(angle_between_heading, 360.0 - angle_between_heading)

            if angle_between_heading > 60.0 and not (angle_to_car < 15 and distance < s1):
                continue
            elif angle_to_car > 30.0:
                continue
            elif distance > s1:
                continue

            return target_vehicle

        return None
Example 5
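A data-collection autopilot that randomizes the weather, records frames plus CSV/JSON measurements at a configurable interval, and can optionally log action-based affordance measurements for retraining.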
class AutoPilot(MapAgent):
    def _init(self):
        super()._init()

        self._default_target_speed = 7.0
        self._default_slow_target_speed = 4.0
        self._angle = None
        self._turn_controller = PIDController(K_P=1.25,
                                              K_I=0.75,
                                              K_D=0.3,
                                              n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

    def _get_angle_to(self, pos, theta, target):
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta), np.cos(theta)],
        ])

        aim = R.T.dot(target - pos)
        angle = -np.degrees(np.arctan2(-aim[1], aim[0]))
        angle = 0.0 if np.isnan(angle) else angle

        return angle

    def _get_control(self, target, far_target, tick_data, _draw):
        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        # Steering.
        angle_unnorm = self._get_angle_to(pos, theta, target)
        angle = angle_unnorm / 90
        self._angle = np.radians(angle_unnorm)
        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)
        steer = round(steer, 3)

        # Acceleration.
        angle_far_unnorm = self._get_angle_to(pos, theta, far_target)
        should_slow = abs(angle_far_unnorm) > 45.0 or abs(angle_unnorm) > 5.0
        target_speed = self._default_slow_target_speed if should_slow else self._default_target_speed

        brake = self._should_brake()
        target_speed = target_speed if not brake else 0.0

        self._target_speed = target_speed

        delta = np.clip(target_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)

        if brake:
            steer *= 0.5
            throttle = 0.0

        _draw.text((5, 90), 'Speed: %.3f' % speed)
        _draw.text((5, 110), 'Target: %.3f' % target_speed)
        _draw.text((5, 130), 'Angle: %.3f' % angle_unnorm)
        _draw.text((5, 150), 'Angle Far: %.3f' % angle_far_unnorm)

        return steer, throttle, brake, target_speed

    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()
        #     self._world.set_weather(WEATHERS[int(os.environ['WEATHER_INDEX'])])

        # 100 -> 50 since 20Hz -> 10Hz
        if self.step % 50 == 0 and self.args.changing_weather:
            index = np.random.randint(0, len(WEATHERS))
            self._world.set_weather(WEATHERS[index])

        data = self.tick(input_data)
        rgb_with_car = cv2.cvtColor(input_data['rgb_with_car'][1][:, :, :3],
                                    cv2.COLOR_BGR2RGB)
        data['rgb_with_car'] = rgb_with_car

        topdown = data['topdown']
        rgb = np.hstack((data['rgb_left'], data['rgb'], data['rgb_right']))

        gps = self._get_position(data)

        near_node, near_command = self._waypoint_planner.run_step(gps)
        far_node, far_command = self._command_planner.run_step(gps)

        _topdown = Image.fromarray(COLOR[CONVERTER[topdown]])
        _rgb = Image.fromarray(rgb)
        _draw = ImageDraw.Draw(_topdown)

        _topdown.thumbnail((256, 256))
        _rgb = _rgb.resize((int(256 / _rgb.size[1] * _rgb.size[0]), 256))

        _combined = Image.fromarray(np.hstack((_rgb, _topdown)))
        _draw = ImageDraw.Draw(_combined)

        steer, throttle, brake, target_speed = self._get_control(
            near_node, far_node, data, _draw)

        _draw.text((5, 10),
                   'FPS: %.3f' % (self.step / (time.time() - self.wall_start)))
        _draw.text((5, 30), 'Steer: %.3f' % steer)
        _draw.text((5, 50), 'Throttle: %.3f' % throttle)
        _draw.text((5, 70), 'Brake: %s' % brake)

        if HAS_DISPLAY:
            cv2.imshow('map',
                       cv2.cvtColor(np.array(_combined), cv2.COLOR_BGR2RGB))
            cv2.waitKey(1)

        control = carla.VehicleControl()
        control.steer = steer + 1e-2 * np.random.randn()
        control.throttle = throttle
        control.brake = float(brake)

        # gather info every frame (increase the modulus, e.g. % 2, to trade
        # logging detail for speed)
        if self.step % 1 == 0:
            self.gather_info()

        # if the recording interval is large, we may miss the exact frames of
        # an event (e.g. the moment a collision happens); this is usually fine
        # when the data is only used for retraining
        record_every_n_steps = self.record_every_n_step
        if self.step % record_every_n_steps == 0:
            self.save(record_every_n_steps, far_command, steer, throttle,
                      brake, target_speed, data)
            self.save_json(record_every_n_steps, far_node, near_command, steer,
                           throttle, brake, target_speed, data)

        return control

    def save_json(self, record_every_n_steps, far_node, near_command, steer,
                  throttle, brake, target_speed, tick_data):
        frame = int(self.step // record_every_n_steps)

        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        # pos, , far_node, near_command
        data = {
            'x': pos[0],
            'y': pos[1],
            'theta': theta,
            'speed': speed,
            'target_speed': target_speed,
            'x_command': far_node[0],
            'y_command': far_node[1],
            'command': near_command.value,
            'steer': steer,
            'throttle': throttle,
            'brake': brake,
        }

        pth = self.save_path / 'measurements'
        pth.mkdir(parents=False, exist_ok=True)
        (pth / ('%04d.json' % frame)).write_text(str(data))

    def save(self, record_every_n_steps, far_command, steer, throttle, brake,
             target_speed, tick_data):
        frame = int(self.step // record_every_n_steps)

        speed = tick_data['speed']

        center = os.path.join('rgb', ('%06d.png' % frame))
        left = os.path.join('rgb_left', ('%06d.png' % frame))
        right = os.path.join('rgb_right', ('%06d.png' % frame))

        topdown = os.path.join('topdown', ('%06d.png' % frame))
        rgb_with_car = os.path.join('rgb_with_car', ('%06d.png' % frame))

        data_row = ','.join([
            str(i) for i in
            [frame, far_command, speed, steer, throttle, brake, center, left, right]
        ])
        with (self.save_path / 'measurements.csv').open("a") as f_out:
            f_out.write(data_row + '\n')

        Image.fromarray(tick_data['rgb']).save(self.save_path / center)
        Image.fromarray(tick_data['rgb_left']).save(self.save_path / left)
        Image.fromarray(tick_data['rgb_right']).save(self.save_path / right)
        # modification
        # Image.fromarray(COLOR[CONVERTER[tick_data['topdown']]]).save(topdown)
        Image.fromarray(tick_data['topdown']).save(self.save_path / topdown)
        Image.fromarray(tick_data['rgb_with_car']).save(self.save_path /
                                                        rgb_with_car)

        ########################################################################
        # log necessary info for action-based
        if self.args.save_action_based_measurements:
            from affordances import get_driving_affordances

            self._pedestrian_forbidden_distance = 10.0
            self._pedestrian_max_detected_distance = 50.0
            self._vehicle_forbidden_distance = 10.0
            self._vehicle_max_detected_distance = 50.0
            self._tl_forbidden_distance = 10.0
            self._tl_max_detected_distance = 50.0
            self._speed_detected_distance = 10.0

            current_affordances = get_driving_affordances(
                self,
                self._pedestrian_forbidden_distance,
                self._pedestrian_max_detected_distance,
                self._vehicle_forbidden_distance,
                self._vehicle_max_detected_distance,
                self._tl_forbidden_distance,
                self._tl_max_detected_distance,
                self._angle,
                self._default_target_speed,
                self._target_speed,
                self._speed_detected_distance,
                angle=True)

            is_vehicle_hazard = current_affordances['is_vehicle_hazard']
            is_red_tl_hazard = current_affordances['is_red_tl_hazard']
            is_pedestrian_hazard = current_affordances['is_pedestrian_hazard']
            forward_speed = current_affordances['forward_speed']
            relative_angle = current_affordances['relative_angle']
            target_speed = current_affordances['target_speed']
            closest_pedestrian_distance = current_affordances[
                'closest_pedestrian_distance']
            closest_vehicle_distance = current_affordances[
                'closest_vehicle_distance']
            closest_red_tl_distance = current_affordances[
                'closest_red_tl_distance']

            log_folder = str(self.save_path / 'affordance_measurements')
            if not os.path.exists(log_folder):
                os.mkdir(log_folder)
            log_path = os.path.join(log_folder, f'{self.step:06}.json')

            ego_transform = self._vehicle.get_transform()
            ego_location = ego_transform.location
            ego_rotation = ego_transform.rotation
            ego_velocity = self._vehicle.get_velocity()

            # relative_angle
            # lane_center_waypoint = self._map.get_waypoint(ego_location, lane_type=carla.LaneType.Any)
            # lane_center_transform = lane_center_waypoint.transform
            #
            #
            # relative_angle = np.abs(lane_center_transform.rotation.yaw - ego_rotation.yaw)
            # if lane_center_transform.rotation.yaw - ego_rotation.yaw > 180:
            #     relative_angle = np.abs(lane_center_transform.rotation.yaw - ego_rotation.yaw - 360)
            # elif ego_rotation.yaw - lane_center_transform.rotation.yaw > 180:
            #     relative_angle = np.abs(lane_center_transform.rotation.yaw - ego_rotation.yaw + 360)
            #
            # relative_angle = np.radians(relative_angle)

            # vehicle, pedestrian, traffic light
            # actors = self._world.get_actors()
            # vehicle_list = actors.filter('*vehicle*')
            # pedestrian_list = actors.filter('walker*')
            # tls = actors.filter('*traffic_light*')
            #
            # ego_bbox = get_bbox(self._vehicle)
            #
            # closest_vehicle_distance = 50
            # for i, vehicle in enumerate(vehicle_list):
            #     if vehicle.id == self._vehicle.id:
            #         continue
            #     other_bbox = get_bbox(vehicle)
            #     for other_b in other_bbox:
            #         for ego_b in ego_bbox:
            #             d = norm_2d(other_b, ego_b)
            #             # print('vehicle', i, 'd', d)
            #             closest_vehicle_distance = np.min([closest_vehicle_distance, d])
            #
            # closest_pedestrian_distance = 50
            # for i, pedestrian in enumerate(pedestrian_list):
            #     other_bbox = get_bbox(pedestrian)
            #     for other_b in other_bbox:
            #         for ego_b in ego_bbox:
            #             d = norm_2d(other_b, ego_b)
            #             # print('pedestrian', i, 'd', d)
            #             closest_pedestrian_distance = np.min([closest_pedestrian_distance, d])
            #
            # closest_red_tl_distance = 50
            # is_red_tl_hazard = False
            # for tl in tls:
            #     if tl.state == carla.TrafficLightState.Red:
            #         tl_location = tl.get_transform().location
            #         d = norm_2d(ego_location, tl_location)
            #         closest_red_tl_distance = np.min([closest_red_tl_distance, d])
            #
            #         if d < 10:
            #             affecting = self._vehicle.get_traffic_light()
            #             if affecting and tl.id == affecting.id:
            #                 is_red_tl_hazard = True
            #
            # is_pedestrian_hazard = bool(closest_pedestrian_distance < 10)
            # is_vehicle_hazard = bool(closest_vehicle_distance < 10)

            brake_noise = 0.0
            throttle_noise = 0.0  # 1.0 -> 0.0
            steer_noise = 0.0  # NaN -> 0.0

            # class RoadOption
            # VOID = -1
            # LEFT = 1
            # RIGHT = 2
            # STRAIGHT = 3
            # LANEFOLLOW = 4
            # CHANGELANELEFT = 5
            # CHANGELANERIGHT = 6
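            # map CARLA RoadOption values (listed above) onto the command ids
            # used by action-based/CoIL-style datasets; presumably 2 = follow
            # lane, 3 = left, 4 = right, 5 = straight, with VOID and lane
            # changes treated as lane-follow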
            map_roadoption_to_action_based_roadoption = {
                -1: 2,
                1: 3,
                2: 4,
                3: 5,
                4: 2,
                5: 2,
                6: 2
            }
            # print('\n far_command', far_command)
            # save info for action-based rep
            json_log_data = {
                "brake": float(brake),
                "closest_red_tl_distance": closest_red_tl_distance,
                "throttle": throttle,
                "directions": float(
                    map_roadoption_to_action_based_roadoption[far_command.value]),
                "brake_noise": brake_noise,
                "is_red_tl_hazard": is_red_tl_hazard,
                "opponents": {},
                "closest_pedestrian_distance": closest_pedestrian_distance,
                "is_pedestrian_hazard": is_pedestrian_hazard,
                "lane": {},
                "is_vehicle_hazard": is_vehicle_hazard,
                "throttle_noise": throttle_noise,
                "ego_actor": {
                    "velocity": [ego_velocity.x, ego_velocity.y, ego_velocity.z],
                    "position": [ego_location.x, ego_location.y, ego_location.z],
                    "orientation": [ego_rotation.roll, ego_rotation.pitch,
                                    ego_rotation.yaw],
                },
                "hand_brake": False,
                "steer_noise": steer_noise,
                "reverse": False,
                "relative_angle": relative_angle,
                "closest_vehicle_distance": closest_vehicle_distance,
                "walkers": {},
                "forward_speed": forward_speed,
                "steer": steer,
                "target_speed": target_speed,
            }

            with open(log_path, 'w') as f_out:
                json.dump(json_log_data, f_out, indent=4)

    def _should_brake(self):
        actors = self._world.get_actors()

        vehicle = self._is_vehicle_hazard(actors.filter('*vehicle*'))
        light = self._is_light_red(actors.filter('*traffic_light*'))
        walker = self._is_walker_hazard(actors.filter('*walker*'))

        return any(x is not None for x in [vehicle, light, walker])

    def _draw_line(self, p, v, z, color=(255, 0, 0)):
        if not DEBUG:
            return

        p1 = _location(p[0], p[1], z)
        p2 = _location(p[0] + v[0], p[1] + v[1], z)
        color = carla.Color(*color)

        self._world.debug.draw_line(p1, p2, 0.25, color, 0.01)

    def _is_light_red(self, lights_list):
        if self._vehicle.get_traffic_light_state() != carla.libcarla.TrafficLightState.Green:
            affecting = self._vehicle.get_traffic_light()
            if affecting is None:
                return None

            for light in self._traffic_lights:
                if light.id == affecting.id:
                    return affecting

        return None

    def _is_walker_hazard(self, walkers_list):
        z = self._vehicle.get_location().z
        p1 = _numpy(self._vehicle.get_location())
        v1 = 10.0 * _orientation(self._vehicle.get_transform().rotation.yaw)

        # self._draw_line(p1, v1, z+2.5, (0, 0, 255))

        for walker in walkers_list:
            v2_hat = _orientation(walker.get_transform().rotation.yaw)
            s2 = np.linalg.norm(_numpy(walker.get_velocity()))

            if s2 < 0.05:
                v2_hat *= s2

            p2 = -3.0 * v2_hat + _numpy(walker.get_location())
            v2 = 8.0 * v2_hat

            # self._draw_line(p2, v2, z+2.5)

            collides, collision_point = get_collision(p1, v1, p2, v2)

            if collides:
                return walker

        return None

    def _is_vehicle_hazard(self, vehicle_list):
        z = self._vehicle.get_location().z

        o1 = _orientation(self._vehicle.get_transform().rotation.yaw)
        p1 = _numpy(self._vehicle.get_location())
        s1 = max(7.5,
                 2.0 * np.linalg.norm(_numpy(self._vehicle.get_velocity())))
        v1_hat = o1
        v1 = s1 * v1_hat

        # self._draw_line(p1, v1, z+2.5, (255, 0, 0))

        for target_vehicle in vehicle_list:
            if target_vehicle.id == self._vehicle.id:
                continue

            o2 = _orientation(target_vehicle.get_transform().rotation.yaw)
            p2 = _numpy(target_vehicle.get_location())
            s2 = max(
                5.0,
                2.0 * np.linalg.norm(_numpy(target_vehicle.get_velocity())))
            v2_hat = o2
            v2 = s2 * v2_hat

            p2_p1 = p2 - p1
            distance = np.linalg.norm(p2_p1)
            p2_p1_hat = p2_p1 / (distance + 1e-4)

            # self._draw_line(p2, v2, z+2.5, (255, 0, 0))

            angle_to_car = np.degrees(np.arccos(v1_hat.dot(p2_p1_hat)))
            angle_between_heading = np.degrees(np.arccos(o1.dot(o2)))

            if angle_between_heading > 60.0 and not (angle_to_car < 15
                                                     and distance < s1):
                continue
            elif angle_to_car > 30.0:
                continue
            elif distance > s1:
                continue

            return target_vehicle

        return None


class ImageAgent(BaseAgent):
    def setup(self, path_to_conf_file):
        super().setup(path_to_conf_file)

        self.converter = Converter()
        self.net = ImageModel.load_from_checkpoint(path_to_conf_file)
        self.net.cuda()
        self.net.eval()

    def _init(self):
        super()._init()

        self._turn_controller = PIDController(K_P=1.25,
                                              K_I=0.75,
                                              K_D=0.3,
                                              n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

    def tick(self, input_data):
        result = super().tick(input_data)
        result['image'] = np.concatenate(
            tuple(result[x] for x in ['rgb', 'rgb_left', 'rgb_right']), -1)

        theta = result['compass']
        theta = 0.0 if np.isnan(theta) else theta
        theta = theta + np.pi / 2
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta), np.cos(theta)],
        ])

        gps = self._get_position(result)
        far_node, _ = self._command_planner.run_step(gps)
        target = R.T.dot(far_node - gps)
        target *= 5.5
        target += [128, 256]
        target = np.clip(target, 0, 256)

        result['target'] = target

        return result

    @torch.no_grad()
    def run_step_using_learned_controller(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        tick_data = self.tick(input_data)

        img = torchvision.transforms.functional.to_tensor(tick_data['image'])
        img = img[None].cuda()

        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        points, (target_cam, _) = self.net.forward(img, target)
        control = self.net.controller(points).cpu().squeeze()

        steer = control[0].item()
        desired_speed = control[1].item()
        speed = tick_data['speed']

        brake = desired_speed < 0.4 or (speed / desired_speed) > 1.1

        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)

        if DEBUG:
            debug_display(tick_data, target_cam.squeeze(),
                          points.cpu().squeeze(), steer, throttle, brake,
                          desired_speed, self.step)

        return control

    @torch.no_grad()
    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        tick_data = self.tick(input_data)

        img = torchvision.transforms.functional.to_tensor(tick_data['image'])
        img = img[None].cuda()

        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        points, (target_cam, _) = self.net.forward(img, target)
        points_cam = points.clone().cpu()
        points_cam[..., 0] = (points_cam[..., 0] + 1) / 2 * img.shape[-1]
        points_cam[..., 1] = (points_cam[..., 1] + 1) / 2 * img.shape[-2]
        points_cam = points_cam.squeeze()
        points_world = self.converter.cam_to_world(points_cam).numpy()

        aim = (points_world[1] + points_world[0]) / 2.0
        angle = np.degrees(np.pi / 2 - np.arctan2(aim[1], aim[0])) / 90
        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)

        desired_speed = np.linalg.norm(points_world[0] - points_world[1]) * 2.0
        # desired_speed *= (1 - abs(angle)) ** 2

        speed = tick_data['speed']

        brake = desired_speed < 0.4 or (speed / desired_speed) > 1.1

        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)

        if DEBUG:
            debug_display(tick_data, target_cam.squeeze(),
                          points.cpu().squeeze(), steer, throttle, brake,
                          desired_speed, self.step)

        return control
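
The target computed in tick() above is the next planner waypoint expressed in the 256x256 overhead frame the model expects: rotate the world-frame offset into the ego frame, scale by 5.5 pixels per meter, then shift so the ego sits at pixel (128, 256). A standalone sketch of that transform; the function name and sample values are illustrative, and the direction of the result depends on the simulator's compass convention:

import numpy as np

def waypoint_to_map_pixels(far_node, gps, compass):
    theta = compass + np.pi / 2
    R = np.array([
        [np.cos(theta), -np.sin(theta)],
        [np.sin(theta),  np.cos(theta)],
        ])
    target = R.T.dot(np.asarray(far_node) - np.asarray(gps))  # ego frame, meters
    target *= 5.5                                             # 5.5 pixels per meter
    target += [128, 256]                                      # ego at bottom-center
    return np.clip(target, 0, 256)                            # stay inside the map

# a waypoint 10 m away lands 55 px from the ego pixel (128, 256)
print(waypoint_to_map_pixels([10.0, 0.0], [0.0, 0.0], 0.0))  # -> [128. 201.]
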
Example no. 7
0
class ImageAgent(BaseAgent):
    def setup(self, path_to_conf_file):
        super().setup(path_to_conf_file)

        self.converter = Converter()
        self.net = ImageModel.load_from_checkpoint(path_to_conf_file)
        self.net.cuda()
        self.net.eval()

    def _init(self):
        super()._init()

        self._turn_controller = PIDController(K_P=1.25, K_I=0.75, K_D=0.3, n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

        self.save_images_path = Path(f'{SAVE_PATH_BASE}/images/{ROUTE_NAME}')
        #self.save_path.mkdir()


    def tick(self, input_data):
        result = super().tick(input_data)
        result['image'] = np.concatenate(tuple(result[x] for x in ['rgb', 'rgb_left', 'rgb_right']), -1)

        theta = result['compass']
        theta = 0.0 if np.isnan(theta) else theta
        theta = theta + np.pi / 2
        result['theta'] = theta
        #print((theta * 180 / np.pi)%360)
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta),  np.cos(theta)],
            ])
        result['R'] = R
        gps = self._get_position(result) # method returns position in meters
        
        # transform route waypoints to overhead map view
        route = self._command_planner.run_step(gps) # oriented in world frame
        nodes = np.array([node for node, _ in route]) # (N,2)
        nodes = nodes - gps # center at agent position and rotate
        nodes = R.T.dot(nodes.T) # (2,2) x (2,N) = (2,N)
        nodes = nodes.T * 5.5 # (N,2) # to map frame (5.5 pixels per meter)
        nodes += [128,256]
        nodes = np.clip(nodes, 0, 256)
        commands = [command for _, command in route]

        # populate results
        result['num_waypoints'] = len(route)
        result['route_map'] = nodes
        result['commands'] = commands
        result['target'] = nodes[1]

        return result

    @torch.no_grad()
    def run_step_using_learned_controller(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        tick_data = self.tick(input_data)

        img = torchvision.transforms.functional.to_tensor(tick_data['image'])
        img = img[None].cuda()

        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        points, (target_cam, _) = self.net.forward(img, target)
        control = self.net.controller(points).cpu().squeeze()

        steer = control[0].item()
        desired_speed = control[1].item()
        speed = tick_data['speed']

        brake = desired_speed < 0.4 or (speed / desired_speed) > 1.1

        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)

        if DEBUG:
            debug_display(
                    tick_data, target_cam.squeeze(), points.cpu().squeeze(),
                    steer, throttle, brake, desired_speed,
                    self.step)

        return control

    @torch.no_grad()
    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        tick_data = self.tick(input_data)

        # prepare image model inputs
        img = torchvision.transforms.functional.to_tensor(tick_data['image'])
        img = img[None].cuda()
        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        # forward through NN and process output
        points, (target_cam, _) = self.net.forward(img, target)
        #heatmap = _.cpu().numpy()
        #heatmap = cv2.flip(heatmap[0][0], 0)
        #cv2.imshow('heatmap', heatmap)
        points_cam = points.clone().cpu()
        points_cam[..., 0] = (points_cam[..., 0] + 1) / 2 * img.shape[-1]
        points_cam[..., 1] = (points_cam[..., 1] + 1) / 2 * img.shape[-2]
        points_cam = points_cam.squeeze()
        points_world = self.converter.cam_to_world(points_cam).numpy()
        tick_data['points_world'] = points_world

        # decide on control waypoint
        aim = (points_world[1] + points_world[0]) / 2.0
        tick_data['aim_world'] = aim

        # compute steer
        angle = np.degrees(np.pi / 2 - np.arctan2(aim[1], aim[0])) / 90
        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)

        # compute throttle
        speed = tick_data['speed']
        desired_speed = np.linalg.norm(points_world[0] - points_world[1]) * 2.0
        brake = desired_speed < 0.4 or (speed / desired_speed) > 1.1
        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        # create control object
        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)

        if DEBUG or SAVE_IMAGES:

            # make display image
            tick_data['points_cam'] = points.cpu().squeeze()
            tick_data['points_map'] = self.converter.cam_to_map(points_cam).numpy()
            self.debug_display(
                    tick_data, target_cam.squeeze(), 
                    steer, throttle, brake, desired_speed)

        return control

    def debug_display(self, tick_data, target_cam, steer, throttle, brake, desired_speed):

        # make BEV image

        # transform aim from world to map
        aim_world = np.array(tick_data['aim_world'])
        aim_map = self.converter.world_to_map(torch.Tensor(aim_world)).numpy()

        # append to image model points and plot
        points_plot = np.vstack([tick_data['points_map'], aim_map])
        points_plot = points_plot - [128,256] # center at origin
        points_plot = tick_data['R'].dot(points_plot.T).T
        points_plot = points_plot * -1 # why is this required?
        points_plot = points_plot + 256/2 # recenter origin in middle of plot
        _waypoint_img = self._command_planner.debug.img
        for x, y in points_plot:
            ImageDraw.Draw(_waypoint_img).ellipse((x-2, y-2, x+2, y+2), (0, 191, 255))
        x, y = points_plot[-1]
        ImageDraw.Draw(_waypoint_img).ellipse((x-2, y-2, x+2, y+2), (255, 105, 147))

        # make RGB images

        # draw center RGB image
        _rgb = Image.fromarray(tick_data['rgb'])
        _draw_rgb = ImageDraw.Draw(_rgb)
        for x, y in tick_data['points_cam']: # image model waypoints
            x = (x + 1)/2 * 256
            y = (y + 1)/2 * 144
            _draw_rgb.ellipse((x-2, y-2, x+2, y+2), (0, 191, 255))

        # transform aim from world to cam
        aim_world = np.array(tick_data['aim_world'])
        aim_cam = self.converter.world_to_cam(torch.Tensor(aim_world)).numpy()
        x, y = aim_cam
        _draw_rgb.ellipse((x-2, y-2, x+2, y+2), (255, 105, 147))

        # draw route waypoints in RGB image
        route_map = np.array(tick_data['route_map'])
        route_map = route_map[:3].squeeze()
        route_cam = self.converter.map_to_cam(torch.Tensor(route_map)).numpy()
        for i, (x, y) in enumerate(route_cam):
            if i == 0: # waypoint we just passed
                if y >= 139 or x <= 2 or x >= 254: # bottom or edge of frame (behind or outside the view)
                    continue
                color = (0, 255, 0) # green 
            elif i == 1: # target
                color = (255, 0, 0) # red
            elif i == 2: # beyond target
                color = (139, 0, 139) # darkmagenta
            else:
                continue
            _draw_rgb.ellipse((x-2, y-2, x+2, y+2), color)

        _combined = Image.fromarray(np.hstack([tick_data['rgb_left'], _rgb, tick_data['rgb_right']]))
        _draw = ImageDraw.Draw(_combined)

        # draw debug text
        text_color = (139, 0, 139) #darkmagenta
        _draw.text((5, 10), 'Steer: %.3f' % steer, text_color)
        _draw.text((5, 30), 'Throttle: %.3f' % throttle, text_color)
        _draw.text((5, 50), 'Brake: %s' % brake, text_color)
        _draw.text((5, 70), 'Speed: %.3f' % tick_data['speed'], text_color)
        _draw.text((5, 90), 'Desired: %.3f' % desired_speed, text_color)
        cur_command, next_command = tick_data['commands'][:2]
        _draw.text((5, 110), f'Current: {cur_command}', text_color)
        _draw.text((5, 130), f'Next: {next_command}', text_color)

        # compose image to display/save
        _rgb_img = cv2.resize(np.array(_combined), DIM, interpolation=cv2.INTER_AREA)
        _save_img = Image.fromarray(np.hstack([_rgb_img, _waypoint_img]))
        _save_img = cv2.cvtColor(np.array(_save_img), cv2.COLOR_BGR2RGB)

        if self.step % 10 == 0 and SAVE_IMAGES:
            frame_number = self.step // 10
            rep_number = int(os.environ.get('REP',0))
            save_path = self.save_images_path / f'repetition_{rep_number:02d}' / f'{frame_number:06d}.png'
            cv2.imwrite(str(save_path), _save_img)

        if DEBUG:
            cv2.imshow('debug', _save_img)
            cv2.waitKey(1)
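
The steering error fed to the turn controller in run_step() is the angle between the ego's forward axis and the aim point, normalized by 90 degrees. A small worked example, assuming (as the zero case suggests) that +y points straight ahead in the converter's world frame:

import numpy as np

def steering_error(aim):
    return np.degrees(np.pi / 2 - np.arctan2(aim[1], aim[0])) / 90

print(steering_error((0.0, 5.0)))   # 0.0     -> aim point dead ahead, no correction
print(steering_error((1.0, 5.0)))   # ~0.126  -> aim point to the right, steer right
print(steering_error((-1.0, 5.0)))  # ~-0.126 -> aim point to the left, steer left
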
Example no. 8
0
class AutoPilot(MapAgent):
    def setup(self, path_to_conf_file):
        super().setup(path_to_conf_file)

        self.save_path = None
        self.converter = Converter()
        self.save_images_path = pathlib.Path(f'{SAVE_PATH_BASE}/images/{ROUTE_NAME}')

        if path_to_conf_file and path_to_conf_file.split('/')[-1] != 'none':
            now = datetime.datetime.now()
            string = pathlib.Path(os.environ['ROUTES']).stem + '_'
            string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second)))

            print(string)

            self.save_path = pathlib.Path(path_to_conf_file) / string
            self.save_path.mkdir(exist_ok=False)

            (self.save_path / 'rgb').mkdir()
            (self.save_path / 'rgb_left').mkdir()
            (self.save_path / 'rgb_right').mkdir()
            (self.save_path / 'topdown').mkdir()
            (self.save_path / 'measurements').mkdir()

    def _init(self):
        super()._init()

        self._turn_controller = PIDController(K_P=1.25, K_I=0.75, K_D=0.3, n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

    def _get_angle_to(self, pos, theta, target):
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta),  np.cos(theta)],
            ])

        aim = R.T.dot(target - pos)
        angle = -np.degrees(np.arctan2(-aim[1], aim[0]))
        angle = 0.0 if np.isnan(angle) else angle 

        return angle

    def _get_control(self, target, far_target, tick_data):
        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        # Steering.
        angle_unnorm = self._get_angle_to(pos, theta, target)
        angle = angle_unnorm / 90

        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)
        steer = round(steer, 3)

        # Acceleration.
        angle_far_unnorm = self._get_angle_to(pos, theta, far_target)
        should_slow = abs(angle_far_unnorm) > 45.0 or abs(angle_unnorm) > 5.0
        target_speed = 4 if should_slow else 7.0

        brake = self._should_brake()
        target_speed = target_speed if not brake else 0.0

        delta = np.clip(target_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)

        if brake:
            steer *= 0.5
            throttle = 0.0

        return steer, throttle, brake, target_speed

    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        if self.step % 100 == 0:
            index = (self.step // 100) % len(WEATHERS)
            self._world.set_weather(WEATHERS[index])

        data = self.tick(input_data)
        gps = self._get_position(data)


        wpt_route = self._waypoint_planner.run_step(gps)
        cmd_route = self._command_planner.run_step(gps)
        cmd_nodes = np.array([node for node, _ in cmd_route])
        cmd_cmds = [cmd for _, cmd in cmd_route]

        near_node, near_command = wpt_route[0]
        far_node, far_command = cmd_route[0]
        steer, throttle, brake, target_speed = self._get_control(near_node, far_node, data)
        
        topdown = data['topdown']
        _topdown = Image.fromarray(COLOR[CONVERTER[topdown]])
        _topdown_draw = ImageDraw.Draw(_topdown)
        _rgb = Image.fromarray(data['rgb'])
        _rgb_draw = ImageDraw.Draw(_rgb)

        if (SAVE_IMAGES or HAS_DISPLAY) and not self.save_path:
            r = 2
            theta = data['compass']
            theta = 0.0 if np.isnan(theta) else theta
            theta = theta + np.pi / 2
            R = np.array([
                [np.cos(theta), -np.sin(theta)],
                [np.sin(theta),  np.cos(theta)]])
            
            nodes = cmd_nodes - gps
            nodes = R.T.dot(nodes.T).T
            nodes = nodes * 5.5
            nodes_bev = nodes.copy() + [256, 256]
            nodes_bev = nodes_bev[:3]
            for i, (x,y) in enumerate(nodes_bev):
                if i == 0:
                    color = (0, 255, 0)
                elif i == 1: # target
                    color = (255, 0, 0)
                elif i == 2:
                    color = (139, 0, 139)
                _topdown_draw.ellipse((x-2*r, y-2*r, x+2*r, y+2*r), color)

            nodes_cam = nodes.copy()[:3].squeeze()
            nodes_cam = nodes_cam + [128, 256]
            nodes_cam = np.clip(nodes_cam, 0, 256)
            nodes_cam = self.converter.map_to_cam(torch.Tensor(nodes_cam)).numpy()
            for i, (x,y) in enumerate(nodes_cam[:3]):
                if i == 0:
                    if not (0 < y < 143 and 0 < x < 255):
                        continue
                    color = (0, 255, 0)
                elif i == 1: # target
                    color = (255, 0, 0)
                elif i == 2:
                    color = (139, 0, 139)
                _rgb_draw.ellipse((x-r, y-r, x+r, y+r), color)


        #rgb = np.hstack((data['rgb_left'], data['rgb'], data['rgb_right']))
        _rgb = Image.fromarray(np.hstack((data['rgb_left'], _rgb, data['rgb_right'])))
        _draw = ImageDraw.Draw(_rgb)
        text_color = (139, 0, 139) #darkmagenta
        _draw.text((5, 10), 'Steer: %.3f' % steer, text_color)
        _draw.text((5, 30), 'Throttle: %.3f' % throttle, text_color)
        _draw.text((5, 50), 'Brake: %s' % brake, text_color)
        _draw.text((5, 70), 'Speed: %.3f' % data['speed'], text_color)
        _draw.text((5, 90), 'Target: %.3f' % target_speed, text_color)
        cur_command, next_command = cmd_cmds[:2]
        _draw.text((5, 110), f'Current: {cur_command}', text_color)
        _draw.text((5, 130), f'Next: {next_command}', text_color)
        

        # (256, 144) -> (256/144*256, 256)
        _rgb = _rgb.resize((int(256 / _rgb.size[1] * _rgb.size[0]), 256))
        _topdown = _topdown.resize((256,256))
        _combined = Image.fromarray(np.hstack((_rgb, _topdown)))

        if self.step % 10 == 0 and SAVE_IMAGES:
            _save_img = cv2.cvtColor(np.array(_combined), cv2.COLOR_BGR2RGB)
            frame_number = self.step//10
            rep_number = int(os.environ.get('REP', 0))
            save_path = self.save_images_path / f'repetition_{rep_number:02d}' / f'{frame_number:06d}.png'
            cv2.imwrite(str(save_path), _save_img)


        control = carla.VehicleControl()
        control.steer = steer + 1e-2 * np.random.randn()
        control.throttle = throttle
        control.brake = float(brake)

        if self.step % 10 == 0 and self.save_path:
            self.save(far_node, near_command, steer, throttle, brake, target_speed, data)

                    
        if HAS_DISPLAY:
            cv2.imshow('map', cv2.cvtColor(np.array(_combined), cv2.COLOR_BGR2RGB))
            cv2.waitKey(1)


        return control

    def save(self, far_node, near_command, steer, throttle, brake, target_speed, tick_data):
        frame = self.step // 10

        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        data = {
                'x': pos[0],
                'y': pos[1],
                'theta': theta,
                'speed': speed,
                'target_speed': target_speed,
                'x_command': far_node[0],
                'y_command': far_node[1],
                'command': near_command.value,
                'steer': steer,
                'throttle': throttle,
                'brake': brake,
                }

        (self.save_path / 'measurements' / ('%04d.json' % frame)).write_text(str(data))
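        # note: str(data) writes a Python dict repr (single quotes), not strict JSON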

        Image.fromarray(tick_data['rgb']).save(self.save_path / 'rgb' / ('%04d.png' % frame))
        Image.fromarray(tick_data['rgb_left']).save(self.save_path / 'rgb_left' / ('%04d.png' % frame))
        Image.fromarray(tick_data['rgb_right']).save(self.save_path / 'rgb_right' / ('%04d.png' % frame))
        Image.fromarray(tick_data['topdown']).save(self.save_path / 'topdown' / ('%04d.png' % frame))

    def _should_brake(self):
        actors = self._world.get_actors()

        vehicle = self._is_vehicle_hazard(actors.filter('*vehicle*'))
        light = self._is_light_red(actors.filter('*traffic_light*'))
        walker = self._is_walker_hazard(actors.filter('*walker*'))

        return any(x is not None for x in [vehicle, light, walker])

    def _draw_line(self, p, v, z, color=(255, 0, 0)):
        if not DEBUG:
            return

        p1 = _location(p[0], p[1], z)
        p2 = _location(p[0]+v[0], p[1]+v[1], z)
        color = carla.Color(*color)

        self._world.debug.draw_line(p1, p2, 0.25, color, 0.01)

    def _is_light_red(self, lights_list):
        if self._vehicle.get_traffic_light_state() != carla.libcarla.TrafficLightState.Green:
            affecting = self._vehicle.get_traffic_light()

            for light in self._traffic_lights:
                if light.id == affecting.id:
                    return affecting

        return None

    def _is_walker_hazard(self, walkers_list):
        z = self._vehicle.get_location().z
        p1 = _numpy(self._vehicle.get_location())
        v1 = 10.0 * _orientation(self._vehicle.get_transform().rotation.yaw)

        self._draw_line(p1, v1, z+2.5, (0, 0, 255))

        for walker in walkers_list:
            v2_hat = _orientation(walker.get_transform().rotation.yaw)
            s2 = np.linalg.norm(_numpy(walker.get_velocity()))

            if s2 < 0.05:
                v2_hat *= s2

            p2 = -3.0 * v2_hat + _numpy(walker.get_location())
            v2 = 8.0 * v2_hat

            self._draw_line(p2, v2, z+2.5)

            collides, collision_point = get_collision(p1, v1, p2, v2)

            if collides:
                return walker

        return None

    def _is_vehicle_hazard(self, vehicle_list):
        z = self._vehicle.get_location().z

        o1 = _orientation(self._vehicle.get_transform().rotation.yaw)
        p1 = _numpy(self._vehicle.get_location())
        s1 = max(7.5, 2.0 * np.linalg.norm(_numpy(self._vehicle.get_velocity())))
        v1_hat = o1
        v1 = s1 * v1_hat

        self._draw_line(p1, v1, z+2.5, (255, 0, 0))

        for target_vehicle in vehicle_list:
            if target_vehicle.id == self._vehicle.id:
                continue

            o2 = _orientation(target_vehicle.get_transform().rotation.yaw)
            p2 = _numpy(target_vehicle.get_location())
            s2 = max(5.0, 2.0 * np.linalg.norm(_numpy(target_vehicle.get_velocity())))
            v2_hat = o2
            v2 = s2 * v2_hat

            p2_p1 = p2 - p1
            distance = np.linalg.norm(p2_p1)
            p2_p1_hat = p2_p1 / (distance + 1e-4)

            self._draw_line(p2, v2, z+2.5, (255, 0, 0))

            angle_to_car = np.degrees(np.arccos(v1_hat.dot(p2_p1_hat)))
            angle_between_heading = np.degrees(np.arccos(o1.dot(o2)))

            if angle_between_heading > 60.0 and not (angle_to_car < 15 and distance < s1):
                continue
            elif angle_to_car > 30.0:
                continue
            elif distance > s1:
                continue

            return target_vehicle

        return None
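
Both hazard checks above forecast straight-line motion and delegate to get_collision(p1, v1, p2, v2), whose body is not part of this snippet. A plausible minimal sketch, assuming it intersects the two forecast segments p1 + t*v1 and p2 + s*v2 for t, s in [0, 1]:

import numpy as np

def get_collision(p1, v1, p2, v2):
    # solve p1 + t*v1 == p2 + s*v2 as the 2x2 system [v1 | -v2] @ [t, s] = p2 - p1
    A = np.stack([v1, -v2], axis=1)
    if abs(np.linalg.det(A)) < 1e-6:
        return False, None  # (near-)parallel forecasts never cross
    t, s = np.linalg.solve(A, p2 - p1)
    if 0.0 <= t <= 1.0 and 0.0 <= s <= 1.0:
        return True, p1 + t * v1  # collision point inside both forecasts
    return False, None
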
Example no. 9
0
class ImageAgent(BaseAgent):
    def setup(self, path_to_conf_file, data_folder, route_folder, k,
              model_path):
        super().setup(path_to_conf_file, data_folder, route_folder, k,
                      model_path)
        self.converter = Converter()
        self.net = ImageModel.load_from_checkpoint(path_to_conf_file)
        self.data_folder = data_folder
        self.route_folder = route_folder
        self.scene_number = k
        self.weather_file = self.route_folder + "/weather_data.csv"
        self.model_path = model_path
        self.model_vae = None
        self.net.cuda()
        self.net.eval()
        self.run = 0
        self.risk = 0
        self.state = []
        self.monitors = []
        self.blur_queue = Queue(maxsize=1)
        self.occlusion_queue = Queue(maxsize=1)
        self.am_queue = Queue(maxsize=1)
        self.pval_queue = Queue(maxsize=1)
        self.sval_queue = Queue(maxsize=1)
        self.mval_queue = Queue(maxsize=1)
        self.avg_risk_queue = Queue(maxsize=1)
        self.calib_set = []
        self.result = []
        self.blur = []
        self.occlusion = []
        self.detector_file = None
        K.clear_session()
        config = tf.ConfigProto()
        sess = tf.Session(config=config)
        set_session(sess)

        with open(self.model_path + 'auto_model.json', 'r') as jfile:
            self.model_vae = model_from_json(jfile.read())
        self.model_vae.load_weights(self.model_path + 'auto_model.h5')
        self.model_vae._make_predict_function()
        self.fields = [
            'step',
            'monitor_result',
            'risk_score',
            'rgb_blur',
            'rgb_left_blur',
            'rgb_right_blur',
            'rgb_occluded',
            'rgb_left_occluded',
            'rgb_right_occluded',
        ]

        self.weather, self.run_number = process_weather_data(
            self.weather_file, self.scene_number)
        print(self.weather)

        with open(self.model_path + 'calibration.csv', 'r') as file:
            reader = csv.reader(file)
            for row in reader:
                self.calib_set.append(row)

    def _init(self):
        super()._init()
        self._turn_controller = PIDController(K_P=1.25,
                                              K_I=0.75,
                                              K_D=0.3,
                                              n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

    def blur_detection(self, result):
        self.blur = []
        fm1, rgb_blur = blur_detector(result['rgb'], threshold=20)
        fm2, rgb_left_blur = blur_detector(result['rgb_left'], threshold=20)
        fm3, rgb_right_blur = blur_detector(result['rgb_right'], threshold=20)
        self.blur.append(rgb_blur)
        self.blur.append(rgb_left_blur)
        self.blur.append(rgb_right_blur)
        self.blur_queue.put(self.blur)

    def occlusion_detection(self, result):
        self.occlusion = []
        percent1, rgb_occluded = occlusion_detector(result['rgb'],
                                                    threshold=25)
        percent2, rgb_left_occluded = occlusion_detector(result['rgb_left'],
                                                         threshold=25)
        percent3, rgb_right_occluded = occlusion_detector(result['rgb_right'],
                                                          threshold=25)
        self.occlusion.append(rgb_occluded)
        self.occlusion.append(rgb_left_occluded)
        self.occlusion.append(rgb_right_occluded)
        self.occlusion_queue.put(self.occlusion)

    def integrand1(self, p_anomaly):
        result = 0.0
        for i in range(len(p_anomaly)):
            result += p_anomaly[i]
        result = result / len(p_anomaly)
        return result

    def integrand(self, k, p_anomaly):
        result = 1.0
        for i in range(len(p_anomaly)):
            result *= k * (p_anomaly[i]**(k - 1.0))
        return result
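
    # integrand() is the power martingale from conformal anomaly detection:
    # M(k) = prod_i k * p_i^(k - 1) over a window of p-values; a run of small
    # p-values (rare inputs) drives M(k) up for k < 1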

    def mse(self, imageA, imageB):
        err = np.mean(np.power(imageA - imageB, 2), axis=1)
        return err

    def assurance_monitor(self, dist):
        if (self.step == 0):
            p_anomaly = []
            prev_value = []
        else:
            p_anomaly = self.pval_queue.get()
            prev_value = self.sval_queue.get()
        anomaly = 0
        m = 0
        delta = 10
        sliding_window = 15
        for i in range(len(self.calib_set)):
            if (float(dist) <= float(self.calib_set[i][0])):
                anomaly += 1
        p_value = anomaly / len(self.calib_set)
        if (p_value < 0.005):
            p_anomaly.append(0.005)
        else:
            p_anomaly.append(p_value)
        if (len(p_anomaly)) >= sliding_window:
            p_anomaly = p_anomaly[-1 * sliding_window:]
        m = integrate.quad(self.integrand, 0.0, 1.0, args=(p_anomaly,))
        m_val = round(math.log(m[0]), 2)
        if (self.step == 0):
            S = 0
            S_prev = 0
        else:
            S = max(0, prev_value[0] + prev_value[1] - delta)
        prev_value = []
        S_prev = S
        m_prev = m[0]
        prev_value.append(S_prev)
        prev_value.append(m_prev)
        self.pval_queue.put(p_anomaly)
        self.sval_queue.put(prev_value)
        self.mval_queue.put(m_val)
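
    # assurance_monitor() turns the VAE reconstruction distance into a conformal
    # p-value against the calibration set, integrates the power martingale over
    # k in [0, 1], and feeds it into a CUSUM-style statistic
    # S = max(0, S_prev + M_prev - delta) for drift detection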

    def risk_computation(self, weather, blur_queue, am_queue, occlusion_queue,
                         fault_scenario, fault_type, fault_time, fault_step):
        monitors = []
        faults = []
        faults.append(fault_type)
        blur = self.blur_queue.get()
        occlusion = self.occlusion_queue.get()
        mval = self.mval_queue.get()
        if (self.step == 0):
            avg_risk = []
        else:
            avg_risk = self.avg_risk_queue.get()
        monitors = blur + occlusion
        state = {"enviornment": {}, "fault_modes": None, "monitor_values": {}}
        for i in range(len(weather)):
            label = ENV_LABELS[i]
            state["enviornment"][label] = weather[i]
        state["fault_modes"] = fault_type
        for j in range(len(monitors)):
            label = MONITOR_LABELS[j]
            state["monitor_values"][label] = monitors[j]
        state["monitor_values"]["lec_martingale"] = mval

        fault_modes = state["fault_modes"][0]
        environment = state["enviornment"]
        monitor_values = state["monitor_values"]

        if (fault_scenario == 0):
            fault_modes = state["fault_modes"][0]

        if (fault_scenario == 1):
            if (self.step >= fault_step[0]
                    and self.step < fault_step[0] + fault_time[0]):
                fault_modes = state["fault_modes"][0]

        if (fault_scenario > 1):
            if (self.step >= fault_step[0]
                    and self.step < fault_step[0] + fault_time[0]):
                fault_modes = state["fault_modes"][0]

            elif (self.step >= fault_step[1]
                  and self.step < fault_step[1] + fault_time[1]):
                fault_modes = state["fault_modes"][1]

        bowtie = BowTie()
        r_t1_top = bowtie.rate_t1(state) * (1 -
                                            bowtie.prob_b1(state, fault_modes))
        r_t2_top = bowtie.rate_t2(state) * (1 -
                                            bowtie.prob_b2(state, fault_modes))
        r_top = r_t1_top + r_t2_top
        r_c1 = r_top * (1 - bowtie.prob_b3(state))

        print("Dynamic Risk Score:%f" % r_c1)

        avg_risk.append(r_c1)
        if (len(avg_risk)) >= 20:
            avg_risk = avg_risk[-1 * 20:]
        #m = integrate.cumtrapz(avg_risk)
        m = self.integrand1(avg_risk)

        self.avg_risk_queue.put(avg_risk)

        rows = [{
            'step': self.step,
            'monitor_result': mval,
            'risk_score': r_c1,
            'rgb_blur': blur[0],
            'rgb_left_blur': blur[1],
            'rgb_right_blur': blur[2],
            'rgb_occluded': occlusion[0],
            'rgb_left_occluded': occlusion[1],
            'rgb_right_occluded': occlusion[2]
        }]

        if (self.step == 0):
            self.detector_file = self.data_folder + "/run%d.csv" % (
                self.scene_number)

        file_exists = os.path.isfile(self.detector_file)
        with open(self.detector_file, 'a') as csvfile:
            # creating a csv dict writer object
            writer = csv.DictWriter(csvfile, fieldnames=self.fields)
            if not file_exists:
                writer.writeheader()
            writer.writerows(rows)

        return m, mval, blur[0], blur[1], blur[2], occlusion[0], occlusion[
            1], occlusion[2]

    def tick(self, input_data):
        self.time = time.time()
        result = super().tick(input_data)
        result['rgb_detector'] = cv2.resize(result['rgb'], (224, 224))
        result['rgb_detector_left'] = cv2.resize(result['rgb_left'],
                                                 (224, 224))
        result['rgb_detector_right'] = cv2.resize(result['rgb_right'],
                                                  (224, 224))
        result['rgb'] = cv2.resize(result['rgb'], (256, 144))
        result['rgb_left'] = cv2.resize(result['rgb_left'], (256, 144))
        result['rgb_right'] = cv2.resize(result['rgb_right'], (256, 144))
        detection_image = cv2.cvtColor(result['rgb_detector_right'],
                                       cv2.COLOR_BGR2RGB)
        detection_image = detection_image / 255.
        detection_image = np.reshape(detection_image, [
            -1, detection_image.shape[0], detection_image.shape[1],
            detection_image.shape[2]
        ])

        #B-VAE reconstruction based assurance monitor
        # TODO: move this prediction into a thread; using the Keras model from inside a thread caused problems
        predicted_reps = self.model_vae.predict_on_batch(detection_image)
        dist = np.square(np.subtract(np.array(predicted_reps),
                                     detection_image)).mean()

        #start other detectors
        BlurDetectorThread = Thread(target=self.blur_detection,
                                    args=(result, ))
        BlurDetectorThread.daemon = True
        OcclusionDetectorThread = Thread(target=self.occlusion_detection,
                                         args=(result, ))
        OcclusionDetectorThread.daemon = True
        AssuranceMonitorThread = Thread(
            target=self.assurance_monitor,
            args=(dist, ))  #image,model,calibration_set,pval_queue,sval_queue
        AssuranceMonitorThread.daemon = True
        AssuranceMonitorThread.start()
        BlurDetectorThread.start()
        OcclusionDetectorThread.start()

        result['image'] = np.concatenate(
            tuple(result[x] for x in ['rgb', 'rgb_left', 'rgb_right']), -1)

        theta = result['compass']
        theta = 0.0 if np.isnan(theta) else theta
        theta = theta + np.pi / 2
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta), np.cos(theta)],
        ])

        gps = self._get_position(result)
        result['gps_y'] = gps[0]
        result['gps_x'] = gps[1]
        far_node, _ = self._command_planner.run_step(gps)
        result['actual_y'] = far_node[0]
        result['actual_x'] = far_node[1]
        result['far_node'] = far_node
        target = R.T.dot(far_node - gps)
        target *= 5.5
        target += [128, 256]
        target = np.clip(target, 0, 256)

        #waypoints_left = self._command_planner.get_waypoints_remaining(gps)
        #print("step:%d waypoints:%d"%(self.step,(9-waypoints_left)))

        #Synthetically add noise to the radar data based on the weather.
        #The weather values are read from a CSV as strings, so cast to float
        #before comparing (string comparison would order "100.0" < "20.0")
        w = [float(v) for v in self.weather[:3]]
        if w[0] <= 20.0 and w[1] <= 20.0 and w[2] <= 20.0:
            pass  # clear weather: leave the radar point clouds untouched
        elif all(20.0 < v < 50.0 for v in w):
            noise = np.random.normal(0, 0.5, result['cloud'][0].shape)
            result['cloud'][0] += noise
            result['cloud_right'][0] += noise
        elif all(50.0 < v < 70.0 for v in w):
            noise = np.random.normal(0, 1.5, result['cloud'][0].shape)
            result['cloud'][0] += noise
            result['cloud_right'][0] += noise
        elif all(70.0 < v < 100.0 for v in w):
            noise = np.random.normal(0, 2, result['cloud'][0].shape)
            result['cloud'][0] += noise
            result['cloud_right'][0] += noise
        else:
            noise = np.random.normal(0, 0.5, result['cloud'][0].shape)
            result['cloud'][0] += noise
            result['cloud_right'][0] += noise
        result['target'] = target

        #Extract the minimum distance from the two RADARs (center and right)
        object_depth = []
        object_vel = []
        for i in range(len(result['cloud'])):
            object_depth.append(result['cloud'][i][0])
            object_vel.append(result['cloud'][i][3])

        index = np.argmin(object_depth)
        result['object_velocity'] = object_vel[index]
        result['object_distance'] = abs(min(object_depth))

        object_depth_right = []
        object_vel_right = []
        for i in range(len(result['cloud_right'])):
            object_depth_right.append(result['cloud_right'][i][0])
            object_vel_right.append(result['cloud_right'][i][3])

        index = np.argmin(object_depth_right)
        result['object_velocity_right'] = object_vel_right[index]
        result['object_distance_right'] = abs(min(object_depth_right))

        #Join the threads
        BlurDetectorThread.join()
        OcclusionDetectorThread.join()
        AssuranceMonitorThread.join()

        #Compute risk
        # TODO: We could do it in a thread in the future when risk is computed only once every few cycles. Now risk is computed every cycle
        t = time.time()
        risk, mval, blur0, blur1, blur2, Occlusion0, Occlusion1, Occlusion2 = self.risk_computation(
            self.weather, self.blur_queue, self.am_queue, self.occlusion_queue,
            result["fault_scenario"], result["fault_type"],
            result["fault_duration"], result["fault_step"])
        like = 1 - math.exp(-risk)  # probability form of the risk score (currently unused)

        result['time'] = time.time() - t
        result['risk'] = risk
        del predicted_reps
        gc.collect()

        return result

    #Step function that runs the Primary LEC and the secondary AEBS controller
    @torch.no_grad()
    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        time_val = 0.0
        tick_data = self.tick(input_data)

        img = torchvision.transforms.functional.to_tensor(tick_data['image'])
        img = img[None].cuda()

        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        #Primary LEC controller (run_step is already wrapped in @torch.no_grad(),
        #so an inner no_grad context would be redundant)
        points, (target_cam, _) = self.net.forward(img, target)
        points_cam = points.clone().cpu()
        points_cam[..., 0] = (points_cam[..., 0] + 1) / 2 * img.shape[-1]
        points_cam[..., 1] = (points_cam[..., 1] + 1) / 2 * img.shape[-2]
        points_cam = points_cam.squeeze()
        points_world = self.converter.cam_to_world(points_cam).numpy()

        aim = (points_world[1] + points_world[0]) / 2.0
        angle = np.degrees(np.pi / 2 - np.arctan2(aim[1], aim[0])) / 90
        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)

        desired_speed = np.linalg.norm(points_world[0] - points_world[1]) * 2.0

        speed = tick_data['speed']
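        # kinematic braking distance v^2 / (2a), with deceleration a ~ 6.86 m/s^2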
        stopping_distance = ((speed * speed) / 13.72)

        #Supervisory AEBS controller: brake when an object sits inside the
        #1.5 s reaction headway plus the braking distance. The trailing else
        #branch of the original was unreachable, since the two speed tests
        #already cover every value.
        if speed >= 0.5:
            safety_distance = 1.5 * speed + (speed * speed) / 13.72
            if (tick_data['object_distance'] < safety_distance
                    or tick_data['object_distance_right'] < safety_distance):
                brake = True
                print("emergency")
            else:
                brake = False
        else:
            if (tick_data['object_distance'] < 3
                    or tick_data['object_distance_right'] < 3):
                brake = True
                print("emergency")
            else:
                brake = False

        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)

        #Log AV stats
        fields = [
            'step', 'stopping_distance', 'Radar_distance', 'speed', 'steer',
            'gps_x', 'gps_y', 'actual_x', 'actual_y', 'time', 'risk_time'
        ]

        rows = [{
            'step': self.step,
            'stopping_distance': abs(stopping_distance),
            'Radar_distance': tick_data['object_distance'],
            'speed': speed,
            'steer': steer,
            'gps_x': tick_data['gps_x'],
            'gps_y': tick_data['gps_y'],
            'actual_x': tick_data['actual_x'],
            'actual_y': tick_data['actual_y'],
            'time': float(time.time() - self.time),
            'risk_time': tick_data['time']
        }]

        if (self.step == 0):
            self.braking_file = self.data_folder + "/emergency_braking%d.csv" % (
                self.scene_number)

        file_exists = os.path.isfile(self.braking_file)
        with open(self.braking_file, 'a') as csvfile:
            # creating a csv dict writer object
            writer = csv.DictWriter(csvfile, fieldnames=fields)
            if not file_exists:
                writer.writeheader()
            writer.writerows(rows)

        if DEBUG:
            debug_display(tick_data, target_cam.squeeze(),
                          points.cpu().squeeze(), steer, throttle, brake,
                          desired_speed, self.step)

        return control
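
For reference, the assurance monitor's martingale from this example can be reproduced in isolation: integrate the power martingale over k for a window of conformal p-values and inspect its logarithm. The p-values below are made up for illustration:

import numpy as np
from scipy import integrate

def integrand(k, p_anomaly):
    result = 1.0
    for p in p_anomaly:
        result *= k * (p ** (k - 1.0))
    return result

nominal = [0.6, 0.4, 0.7, 0.5]        # unremarkable inputs
anomalous = [0.05, 0.01, 0.02, 0.05]  # a run of rare inputs

for window in (nominal, anomalous):
    m, _ = integrate.quad(integrand, 0.0, 1.0, args=(window,))
    print(round(np.log(m), 2))  # the log-martingale grows on the anomalous window
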
Example no. 10
0
class PrivilegedAgent(MapAgent):
    def setup(self, path_to_conf_file):
        # make conf file a json file?
        # store hparams for map model
        # can extend to store all of the save paths so we don't rely on os environ stuff
        # can copy this to the corresponding save path

        super().setup(path_to_conf_file)
        self.converter = Converter()
        self.net = MapModel.load_from_checkpoint(path_to_conf_file)
        self.net.cuda()
        self.net.eval()

    def _init(self):
        super()._init()

        self._turn_controller = PIDController(K_P=1.25,
                                              K_I=0.75,
                                              K_D=0.3,
                                              n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)
        self.save_images_path = Path(f'{SAVE_PATH_BASE}/images/{ROUTE_NAME}')
        #self.save_path.mkdir()

    def tick(self, input_data):
        result = super().tick(input_data)
        result['image'] = np.concatenate(
            tuple(result[x] for x in ['rgb', 'rgb_left', 'rgb_right']), -1)

        theta = result['compass']
        theta = 0.0 if np.isnan(theta) else theta
        theta = theta + np.pi / 2
        result['theta'] = theta
        #print((theta * 180 / np.pi)%360)
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta), np.cos(theta)],
        ])
        result['R'] = R
        gps = self._get_position(result)  # method returns position in meters

        # transform route waypoints to overhead map view
        route = self._command_planner.run_step(gps)  # oriented in world frame
        nodes = np.array([node for node, _ in route])  # (N,2)
        nodes = nodes - gps  # center at agent position and rotate
        nodes = R.T.dot(nodes.T)  # (2,2) x (2,N) = (2,N)
        nodes = nodes.T * 5.5  # (N,2) # to map frame (5.5 pixels per meter)
        nodes += [128, 256]
        #nodes = np.clip(nodes, 0, 256)
        commands = [command for _, command in route]
        target = np.clip(nodes[1], 0, 256)

        # populate results
        result['num_waypoints'] = len(route)
        result['route_map'] = nodes
        result['commands'] = commands
        result['target'] = target

        return result

    @torch.no_grad()
    def run_step_using_learned_controller(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        tick_data = self.tick(input_data)

        img = torchvision.transforms.functional.to_tensor(tick_data['image'])
        img = img[None].cuda()

        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        points, (target_cam, _) = self.net.forward(img, target)
        control = self.net.controller(points).cpu().squeeze()

        steer = control[0].item()
        desired_speed = control[1].item()
        speed = tick_data['speed']

        brake = desired_speed < 0.4 or (speed / desired_speed) > 1.1

        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)

        if DEBUG:
            debug_display(tick_data, target_cam.squeeze(),
                          points.cpu().squeeze(), steer, throttle, brake,
                          desired_speed, self.step)

        return control

    @torch.no_grad()
    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        tick_data = self.tick(input_data)
        topdown = Image.fromarray(tick_data['topdown'])
        topdown = topdown.crop((128, 0, 128 + 256, 256))
        topdown = np.array(topdown)
        topdown = preprocess_semantic(topdown)
        topdown = topdown[None].cuda()

        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        #points, (target_cam, _) = self.net.forward(topdown, target)
        points = self.net.forward(topdown, target)  # normalized map-frame coordinates in [-1, 1]
        points_map = points.clone().cpu().squeeze()
        # the network emits coordinates normalized to [-1, 1]; rescale them
        # to the 256 x 256 map frame before converting between frames
        points_map = (points_map + 1) / 2 * 256
        points_map = np.clip(points_map, 0, 256)
        points_cam = self.converter.map_to_cam(points_map).numpy()
        points_world = self.converter.map_to_world(points_map).numpy()
        points_map = points_map.numpy()

        tick_data['points_map'] = points_map
        tick_data['points_cam'] = points_cam
        tick_data['points_world'] = points_world

        #img = tick_data['image']

        aim = (points_world[1] + points_world[0]) / 2.0
        angle = np.degrees(np.pi / 2 - np.arctan2(aim[1], aim[0])) / 90
        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)
        desired_speed = np.linalg.norm(points_world[0] - points_world[1]) * 2.0
        tick_data['aim_world'] = aim

        speed = tick_data['speed']
        brake = desired_speed < 0.4 or (speed / desired_speed) > 1.1

        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)
        #print(timestamp) # GAMETIME

        if DEBUG or SAVE_IMAGES:

            # transform image model cam points to overhead BEV image (spectator frame?)
            self.debug_display(tick_data, steer, throttle, brake,
                               desired_speed)

        return control

    def debug_display(self,
                      tick_data,
                      steer,
                      throttle,
                      brake,
                      desired_speed,
                      r=2):

        topdown = tick_data['topdown']
        _topdown = Image.fromarray(COLOR[CONVERTER[topdown]])
        _topdown_draw = ImageDraw.Draw(_topdown)

        # model points
        points_map = tick_data['points_map']
        points_td = points_map + [128, 0]
        for i, (x, y) in enumerate(points_td):
            _topdown_draw.ellipse((x - 2 * r, y - 2 * r, x + 2 * r, y + 2 * r),
                                  (0, 191, 255))
        route_map = tick_data['route_map']
        route_td = route_map + [128, 0]

        # control point
        aim_world = np.array(tick_data['aim_world'])
        aim_map = self.converter.world_to_map(torch.Tensor(aim_world)).numpy()
        aim_map = aim_map + [128, 0]
        x, y = aim_map
        _topdown_draw.ellipse((x - 2, y - 2, x + 2, y + 2), (255, 105, 147))

        # route waypoints
        for i, (x, y) in enumerate(route_td[:3]):
            if i == 0:
                color = (0, 255, 0)
            elif i == 1:
                color = (255, 0, 0)
            elif i == 2:
                color = (139, 0, 139)
            _topdown_draw.ellipse((x - 2 * r, y - 2 * r, x + 2 * r, y + 2 * r),
                                  color)

        # make RGB images

        # draw center RGB image
        _rgb = Image.fromarray(tick_data['rgb'])
        _draw_rgb = ImageDraw.Draw(_rgb)
        for x, y in tick_data['points_cam']:  # image model waypoints
            #x = (x + 1)/2 * 256
            #y = (y + 1)/2 * 144
            _draw_rgb.ellipse((x - 2, y - 2, x + 2, y + 2), (0, 191, 255))

        # transform aim from world to cam
        aim_cam = self.converter.world_to_cam(torch.Tensor(aim_world)).numpy()
        x, y = aim_cam
        _draw_rgb.ellipse((x - 2, y - 2, x + 2, y + 2), (255, 105, 147))

        # draw route waypoints in RGB image
        route_map = np.array(tick_data['route_map'])
        route_map = np.clip(route_map, 0, 256)
        route_map = route_map[:3].squeeze()  # just the next couple
        route_cam = self.converter.map_to_cam(torch.Tensor(route_map)).numpy()
        for i, (x, y) in enumerate(route_cam):
            if i == 0:  # waypoint we just passed
                if not (0 < y < 143 and 0 < x < 255):
                    continue
                color = (0, 255, 0)  # green
            elif i == 1:  # target
                color = (255, 0, 0)  # red
            elif i == 2:  # beyond target
                color = (139, 0, 139)  # darkmagenta
            else:
                continue
            _draw_rgb.ellipse((x - 2, y - 2, x + 2, y + 2), color)

        _combined = Image.fromarray(
            np.hstack([tick_data['rgb_left'], _rgb, tick_data['rgb_right']]))
        _draw = ImageDraw.Draw(_combined)

        # draw debug text
        text_color = (139, 0, 139)  #darkmagenta
        _draw.text((5, 10), 'Steer: %.3f' % steer, text_color)
        _draw.text((5, 30), 'Throttle: %.3f' % throttle, text_color)
        _draw.text((5, 50), 'Brake: %s' % brake, text_color)
        _draw.text((5, 70), 'Speed: %.3f' % tick_data['speed'], text_color)
        _draw.text((5, 90), 'Desired: %.3f' % desired_speed, text_color)
        cur_command, next_command = tick_data['commands'][:2]
        _draw.text((5, 110), f'Current: {cur_command}', text_color)
        _draw.text((5, 130), f'Next: {next_command}', text_color)

        _rgb_img = _combined.resize(
            (int(256 / _combined.size[1] * _combined.size[0]), 256))
        _topdown = _topdown.resize((256, 256))
        _save_img = Image.fromarray(np.hstack([_rgb_img, _topdown]))
        _save_img = cv2.cvtColor(np.array(_save_img), cv2.COLOR_BGR2RGB)
        if self.step % 10 == 0 and SAVE_IMAGES:
            frame_number = self.step // 10
            rep_number = int(os.environ.get('REP', 0))
            save_path = self.save_images_path / f'repetition_{rep_number:02d}' / f'{frame_number:06d}.png'
            cv2.imwrite(str(save_path), _save_img)
        if DEBUG:
            cv2.imshow('debug', _save_img)
            cv2.waitKey(1)
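
All of these agents derive the speed setpoint as desired_speed = ||p0 - p1|| * 2.0 from the first two predicted waypoints. If consecutive waypoints are spaced half a second apart (an assumption about the training labels, not stated in these snippets), the factor of 2 simply converts meters-per-half-second into m/s:

import numpy as np

p0 = np.array([0.0, 2.0])  # first predicted waypoint (ego frame, meters)
p1 = np.array([0.0, 4.5])  # second predicted waypoint
desired_speed = np.linalg.norm(p0 - p1) * 2.0
print(desired_speed)  # 5.0 m/s for waypoints 2.5 m apart
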
Example no. 11
0
class ImageAgent(BaseAgent):
    def setup(self, path_to_conf_file):
        super().setup(path_to_conf_file)

        self.converter = Converter()
        self.net = ImageModel.load_from_checkpoint(path_to_conf_file)
        self.net.cuda()
        self.net.eval()




    # addition: modified from leaderboard/team_code/auto_pilot.py
    def save(self, steer, throttle, brake, tick_data):
        # frame = self.step // 10
        frame = self.step

        pos = self._get_position(tick_data)
        far_command = tick_data['far_command']
        speed = tick_data['speed']



        center = os.path.join('rgb', ('%04d.png' % frame))
        left = os.path.join('rgb_left', ('%04d.png' % frame))
        right = os.path.join('rgb_right', ('%04d.png' % frame))
        topdown = os.path.join('topdown', ('%04d.png' % frame))
        rgb_with_car = os.path.join('rgb_with_car', ('%04d.png' % frame))

        data_row = ','.join([str(i) for i in [frame, far_command, speed, steer, throttle, brake, str(center), str(left), str(right)]])
        with (self.save_path / 'measurements.csv' ).open("a") as f_out:
            f_out.write(data_row+'\n')


        Image.fromarray(tick_data['rgb']).save(self.save_path / center)
        Image.fromarray(tick_data['rgb_left']).save(self.save_path / left)
        Image.fromarray(tick_data['rgb_right']).save(self.save_path / right)

        # addition
        Image.fromarray(COLOR[CONVERTER[tick_data['topdown']]]).save(self.save_path / topdown)
        Image.fromarray(tick_data['rgb_with_car']).save(self.save_path / rgb_with_car)



        ########################################################################
        # log necessary info for action-based
        if self.args.save_action_based_measurements:
            from affordances import get_driving_affordances

            self._pedestrian_forbidden_distance = 10.0
            self._pedestrian_max_detected_distance = 50.0
            self._vehicle_forbidden_distance = 10.0
            self._vehicle_max_detected_distance = 50.0
            self._tl_forbidden_distance = 10.0
            self._tl_max_detected_distance = 50.0
            self._speed_detected_distance = 10.0

            self._default_target_speed = 10
            self._angle = None

            current_affordances = get_driving_affordances(
                self,
                self._pedestrian_forbidden_distance, self._pedestrian_max_detected_distance,
                self._vehicle_forbidden_distance, self._vehicle_max_detected_distance,
                self._tl_forbidden_distance, self._tl_max_detected_distance,
                self._angle_rad, self._default_target_speed, self._target_speed,
                self._speed_detected_distance, angle=True)

            is_vehicle_hazard = current_affordances['is_vehicle_hazard']
            is_red_tl_hazard = current_affordances['is_red_tl_hazard']
            is_pedestrian_hazard = current_affordances['is_pedestrian_hazard']
            forward_speed = current_affordances['forward_speed']
            relative_angle = current_affordances['relative_angle']
            target_speed = current_affordances['target_speed']
            closest_pedestrian_distance = current_affordances['closest_pedestrian_distance']
            closest_vehicle_distance = current_affordances['closest_vehicle_distance']
            closest_red_tl_distance = current_affordances['closest_red_tl_distance']



            log_folder = str(self.save_path / 'affordance_measurements')
            if not os.path.exists(log_folder):
                os.mkdir(log_folder)
            log_path = os.path.join(log_folder, f'{self.step:06}.json')


            ego_transform = self._vehicle.get_transform()
            ego_location = ego_transform.location
            ego_rotation = ego_transform.rotation
            ego_velocity = self._vehicle.get_velocity()

            brake_noise = 0.0
            throttle_noise = 0.0 # 1.0 -> 0.0
            steer_noise = 0.0 # NaN -> 0.0

            # RoadOption values (VOID=-1, LEFT=1, RIGHT=2, STRAIGHT=3, LANEFOLLOW=4,
            # CHANGELANELEFT=5, CHANGELANERIGHT=6) -> action-based directions
            # (2 = follow lane, 3 = left, 4 = right, 5 = straight)
            map_roadoption_to_action_based_roadoption = {-1:2, 1:3, 2:4, 3:5, 4:2, 5:2, 6:2}

            # save info for action-based rep
            json_log_data = {
                "brake": float(brake),
                "closest_red_tl_distance": closest_red_tl_distance,
                "throttle": throttle,
                "directions": float(map_roadoption_to_action_based_roadoption[far_command.value]),
                "brake_noise": brake_noise,
                "is_red_tl_hazard": is_red_tl_hazard,
                "opponents": {},
                "closest_pedestrian_distance": closest_pedestrian_distance,
                "is_pedestrian_hazard": is_pedestrian_hazard,
                "lane": {},
                "is_vehicle_hazard": is_vehicle_hazard,
                "throttle_noise": throttle_noise,
                "ego_actor": {
                    "velocity": [
                        ego_velocity.x,
                        ego_velocity.y,
                        ego_velocity.z
                    ],
                    "position": [
                        ego_location.x,
                        ego_location.y,
                        ego_location.z
                    ],
                    "orientation": [
                        ego_rotation.roll,
                        ego_rotation.pitch,
                        ego_rotation.yaw
                    ]
                },
                "hand_brake": False,
                "steer_noise": steer_noise,
                "reverse": False,
                "relative_angle": relative_angle,
                "closest_vehicle_distance": closest_vehicle_distance,
                "walkers": {},
                "forward_speed": forward_speed,
                "steer": steer,
                "target_speed": target_speed
            }

            with open(log_path, 'w') as f_out:
                json.dump(json_log_data, f_out, indent=4)


    def _init(self):
        super()._init()

        self._turn_controller = PIDController(K_P=1.25, K_I=0.75, K_D=0.3, n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

        # addition:
        self._vehicle = CarlaDataProvider.get_hero_actor()
        self._world = self._vehicle.get_world()


        # -------------------------------------------------------
        # add a local_planner in order to estimate relative angle
        # self.target_speed = 10
        # args_lateral_dict = {
        #     'K_P': 1,
        #     'K_D': 0.4,
        #     'K_I': 0,
        #     'dt': 1.0/10.0}
        # self._local_planner = LocalPlanner(
        #     self._vehicle, opt_dict={'target_speed' : self.target_speed,
        #     'lateral_control_dict':args_lateral_dict})
        # self._hop_resolution = 2.0
        # self._path_seperation_hop = 2
        # self._path_seperation_threshold = 0.5
        # self._grp = None
        #
        # self._map = CarlaDataProvider.get_map()
        # route = [(self._map.get_waypoint(x[0].location), x[1]) for x in self._global_plan_world_coord]
        #
        # self._local_planner.set_global_plan(route)


    def tick(self, input_data):
        result = super().tick(input_data)
        result['image'] = np.concatenate(tuple(result[x] for x in ['rgb', 'rgb_left', 'rgb_right']), -1)

        rgb_with_car = cv2.cvtColor(input_data['rgb_with_car'][1][:, :, :3], cv2.COLOR_BGR2RGB)
        result['rgb_with_car'] = rgb_with_car

        result['radar_central'] = input_data['radar_central']

        # Rotate the far waypoint into the ego frame, then project it into
        # top-down image coordinates: scale by 5.5, anchor the ego vehicle at
        # pixel (128, 256), and clip to the 256 x 256 map image.
        theta = result['compass']
        theta = 0.0 if np.isnan(theta) else theta
        theta = theta + np.pi / 2
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta),  np.cos(theta)],
            ])

        gps = self._get_position(result)
        # modification
        far_node, far_command = self._command_planner.run_step(gps)
        target = R.T.dot(far_node - gps)
        target *= 5.5
        target += [128, 256]
        target = np.clip(target, 0, 256)

        result['target'] = target
        # addition:
        self._actors = self._world.get_actors()
        self._traffic_lights = get_nearby_lights(self._vehicle, self._actors.filter('*traffic_light*'))
        result['far_command'] = far_command
        topdown = input_data['map'][1][:, :, 2]
        topdown = draw_traffic_lights(topdown, self._vehicle, self._traffic_lights)
        result['topdown'] = topdown

        return result
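
    # A quick sanity check on the projection in tick() above: the 5.5 scale
    # means a far node 10 m from the ego vehicle lands about 55 px from the
    # (128, 256) anchor before clipping (a worked example, not part of the
    # original code).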

    @torch.no_grad()
    def run_step_using_learned_controller(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        tick_data = self.tick(input_data)

        img = torchvision.transforms.functional.to_tensor(tick_data['image'])
        img = img[None].cuda()

        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        # Seed all RNGs so the learned-controller rollout is reproducible
        # (the local import avoids touching the module header).
        import random
        torch.manual_seed(2)
        torch.cuda.manual_seed_all(2)
        torch.backends.cudnn.deterministic = True
        np.random.seed(1)
        random.seed(1)
        device = torch.device('cuda')
        torch.backends.cudnn.benchmark = False

        points, (target_cam, _) = self.net.forward(img, target)
        control = self.net.controller(points).cpu().squeeze()

        steer = control[0].item()
        desired_speed = control[1].item()
        speed = tick_data['speed']

        # Brake when nearly stopped is desired or the car is overshooting the
        # target; the short-circuit guards the division when desired_speed ~ 0.
        brake = desired_speed < 0.4 or (speed / desired_speed) > 1.1

        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)

        if DEBUG:
            debug_display(
                    tick_data, target_cam.squeeze(), points.cpu().squeeze(),
                    steer, throttle, brake, desired_speed,
                    self.step)

        return control

    @torch.no_grad()
    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()

        tick_data = self.tick(input_data)
        radar_data = tick_data['radar_central'][1]

        img = torchvision.transforms.functional.to_tensor(tick_data['image'])
        img = img[None].cuda()

        target = torch.from_numpy(tick_data['target'])
        target = target[None].cuda()

        points, (target_cam, _) = self.net.forward(img, target)
        points_cam = points.clone().cpu()

        if self.step == 0:
            print('\n' * 5)
            print('step :', self.step)
            # print('radar')
            # print(radar_data.shape)
            # print(radar_data)
            # print(np.max(radar_data, axis=0))
            print('image', np.sum(tick_data['image']))
            # print('img', torch.sum(img))
            # print('target', target)
            # print('points', points)
            print('\n' * 5)

        # Map the network's normalised [-1, 1] outputs to pixel coordinates,
        # then lift them into world coordinates with the camera converter.
        points_cam[..., 0] = (points_cam[..., 0] + 1) / 2 * img.shape[-1]
        points_cam[..., 1] = (points_cam[..., 1] + 1) / 2 * img.shape[-2]
        points_cam = points_cam.squeeze()
        points_world = self.converter.cam_to_world(points_cam).numpy()

        aim = (points_world[1] + points_world[0]) / 2.0
        # Steering: PID on the angle between the ego heading and the midpoint
        # of the first two predicted waypoints, normalised by 90 degrees.
        angle = np.degrees(np.pi / 2 - np.arctan2(aim[1], aim[0]))
        self._angle_rad = np.radians(angle)
        angle = angle / 90
        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)

        # Target speed is proportional to the spacing between the first two
        # predicted waypoints.
        desired_speed = np.linalg.norm(points_world[0] - points_world[1]) * 2.0
        # desired_speed *= (1 - abs(angle)) ** 2
        self._target_speed = desired_speed
        speed = tick_data['speed']

        brake = desired_speed < 0.4 or (speed / desired_speed) > 1.1

        delta = np.clip(desired_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)
        throttle = throttle if not brake else 0.0

        control = carla.VehicleControl()
        control.steer = steer
        control.throttle = throttle
        control.brake = float(brake)

        if DEBUG:
            debug_display(tick_data, target_cam.squeeze(), points.cpu().squeeze(), steer, throttle, brake, desired_speed, self.step)

        # addition: from leaderboard/team_code/auto_pilot.py
        if self.step == 0:
            title_row = ','.join(['frame_id', 'far_command', 'speed', 'steering', 'throttle', 'brake', 'center', 'left', 'right'])
            with (self.save_path / 'measurements.csv').open('a') as f_out:
                f_out.write(title_row + '\n')
        # The original guard `if self.step % 1 == 0` is always true, i.e. info
        # is gathered on every frame.
        self.gather_info((steer, throttle, float(brake), speed))

        record_every_n_steps = self.record_every_n_step
        if self.step % record_every_n_steps == 0:
            self.save(steer, throttle, brake, tick_data)
        return control
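
A note on the PID interface used throughout these examples: PIDController is not defined in the excerpts. The sketch below is a minimal implementation consistent with the calls PIDController(K_P=..., K_I=..., K_D=..., n=40) and controller.step(error) above (an assumption modelled on the Learning by Cheating codebase, not necessarily the authors' exact class):

from collections import deque

import numpy as np


class PIDController(object):
    def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=30):
        self._K_P = K_P
        self._K_I = K_I
        self._K_D = K_D

        # Sliding window of the last n errors; it backs both the integral
        # (mean of the window) and derivative (last difference) terms.
        self._window = deque([0 for _ in range(n)], maxlen=n)

    def step(self, error):
        self._window.append(error)

        if len(self._window) >= 2:
            integral = np.mean(self._window)
            derivative = self._window[-1] - self._window[-2]
        else:
            integral = 0.0
            derivative = 0.0

        return self._K_P * error + self._K_I * integral + self._K_D * derivative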
Example n. 12
class AutoPilot(MapAgent):
    def setup(self, path_to_conf_file):
        super().setup(path_to_conf_file)

        self.save_path = None

        parent_folder = os.environ['SAVE_FOLDER']
        string = pathlib.Path(os.environ['ROUTES']).stem
        self.save_path = pathlib.Path(parent_folder) / string

    def _init(self):
        super()._init()

        self._turn_controller = PIDController(K_P=1.25, K_I=0.75, K_D=0.3, n=40)
        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)

    def _get_angle_to(self, pos, theta, target):
        R = np.array([
            [np.cos(theta), -np.sin(theta)],
            [np.sin(theta), np.cos(theta)],
        ])

        aim = R.T.dot(target - pos)
        angle = -np.degrees(np.arctan2(-aim[1], aim[0]))
        angle = 0.0 if np.isnan(angle) else angle

        return angle

    def _get_control(self, target, far_target, tick_data, _draw):
        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        # Steering.
        angle_unnorm = self._get_angle_to(pos, theta, target)
        angle = angle_unnorm / 90

        steer = self._turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)
        steer = round(steer, 3)

        # Acceleration.
        angle_far_unnorm = self._get_angle_to(pos, theta, far_target)
        should_slow = abs(angle_far_unnorm) > 45.0 or abs(angle_unnorm) > 5.0
        target_speed = 4 if should_slow else 7.0

        brake = self._should_brake()
        target_speed = target_speed if not brake else 0.0

        delta = np.clip(target_speed - speed, 0.0, 0.25)
        throttle = self._speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, 0.75)

        if brake:
            steer *= 0.5
            throttle = 0.0

        _draw.text((5, 90), 'Speed: %.3f' % speed)
        _draw.text((5, 110), 'Target: %.3f' % target_speed)
        _draw.text((5, 130), 'Angle: %.3f' % angle_unnorm)
        _draw.text((5, 150), 'Angle Far: %.3f' % angle_far_unnorm)

        return steer, throttle, brake, target_speed

    def run_step(self, input_data, timestamp):
        if not self.initialized:
            self._init()
        #     self._world.set_weather(WEATHERS[int(os.environ['WEATHER_INDEX'])])

        # if self.step % 100 == 0:
        #     index = (self.step // 100) % len(WEATHERS)
        #     self._world.set_weather(WEATHERS[index])

        data = self.tick(input_data)
        rgb_with_car = cv2.cvtColor(input_data['rgb_with_car'][1][:, :, :3],
                                    cv2.COLOR_BGR2RGB)
        data['rgb_with_car'] = rgb_with_car

        topdown = data['topdown']
        rgb = np.hstack((data['rgb_left'], data['rgb'], data['rgb_right']))

        gps = self._get_position(data)

        near_node, near_command = self._waypoint_planner.run_step(gps)
        far_node, far_command = self._command_planner.run_step(gps)

        _topdown = Image.fromarray(COLOR[CONVERTER[topdown]])
        _rgb = Image.fromarray(rgb)

        _topdown.thumbnail((256, 256))
        _rgb = _rgb.resize((int(256 / _rgb.size[1] * _rgb.size[0]), 256))

        _combined = Image.fromarray(np.hstack((_rgb, _topdown)))
        _draw = ImageDraw.Draw(_combined)

        steer, throttle, brake, target_speed = self._get_control(
            near_node, far_node, data, _draw)

        _draw.text((5, 10),
                   'FPS: %.3f' % (self.step / (time.time() - self.wall_start)))
        _draw.text((5, 30), 'Steer: %.3f' % steer)
        _draw.text((5, 50), 'Throttle: %.3f' % throttle)
        _draw.text((5, 70), 'Brake: %s' % brake)

        if HAS_DISPLAY:
            cv2.imshow('map',
                       cv2.cvtColor(np.array(_combined), cv2.COLOR_BGR2RGB))
            cv2.waitKey(1)

        control = carla.VehicleControl()
        control.steer = steer + 1e-2 * np.random.randn()
        control.throttle = throttle
        control.brake = float(brake)

        # we only gather info every 2 frames for faster processing
        if self.step % 2 == 0:
            self.gather_info()

        # Because we do not record every frame, we may miss the exact numbers
        # and images for an event (e.g. the frame when a collision happens);
        # this is usually fine when the data is only used for retraining.
        record_every_n_steps = 3
        if self.step % record_every_n_steps == 0:
            self.save(record_every_n_steps, far_command, steer, throttle,
                      brake, target_speed, data)
            self.save_json(record_every_n_steps, far_node, near_command, steer,
                           throttle, brake, target_speed, data)

        return control

    def save_json(self, record_every_n_steps, far_node, near_command, steer,
                  throttle, brake, target_speed, tick_data):
        frame = int(self.step // record_every_n_steps)

        pos = self._get_position(tick_data)
        theta = tick_data['compass']
        speed = tick_data['speed']

        data = {
            'x': pos[0],
            'y': pos[1],
            'theta': theta,
            'speed': speed,
            'target_speed': target_speed,
            'x_command': far_node[0],
            'y_command': far_node[1],
            'command': near_command.value,
            'steer': steer,
            'throttle': throttle,
            'brake': brake,
        }

        pth = self.save_path / 'measurements'
        pth.mkdir(parents=False, exist_ok=True)
        # Write valid JSON rather than the Python repr of the dict.
        (pth / ('%04d.json' % frame)).write_text(json.dumps(data, indent=4))
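
    # The measurement files written above are plain JSON, so a frame can be
    # loaded back with, e.g. (a minimal sketch; '0000.json' is an assumed name
    # following the '%04d.json' pattern above):
    #
    #   frame_data = json.loads((self.save_path / 'measurements' / '0000.json').read_text())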

    def save(self, record_every_n_steps, far_command, steer, throttle, brake,
             target_speed, tick_data):
        frame = int(self.step // record_every_n_steps)

        speed = tick_data['speed']
        string = os.environ['SAVE_FOLDER'] + '/' + pathlib.Path(
            os.environ['ROUTES']).stem

        center_str = string + '/' + 'rgb' + '/' + ('%04d.png' % frame)
        left_str = string + '/' + 'rgb_left' + '/' + ('%04d.png' % frame)
        right_str = string + '/' + 'rgb_right' + '/' + ('%04d.png' % frame)
        # topdown_str = string + '/' + 'topdown' + '/' + ('%04d.png' % frame)

        center = self.save_path / 'rgb' / ('%04d.png' % frame)
        left = self.save_path / 'rgb_left' / ('%04d.png' % frame)
        right = self.save_path / 'rgb_right' / ('%04d.png' % frame)
        topdown = self.save_path / 'topdown' / ('%04d.png' % frame)
        rgb_with_car = self.save_path / 'rgb_with_car' / ('%04d.png' % frame)

        data_row = ','.join([
            str(i) for i in [
                frame, far_command, speed, steer, throttle, brake, center_str,
                left_str, right_str
            ]
        ])
        with (self.save_path / 'measurements.csv').open("a") as f_out:
            f_out.write(data_row + '\n')

        Image.fromarray(tick_data['rgb']).save(center)
        Image.fromarray(tick_data['rgb_left']).save(left)
        Image.fromarray(tick_data['rgb_right']).save(right)
        # modification
        # Image.fromarray(COLOR[CONVERTER[tick_data['topdown']]]).save(topdown)
        Image.fromarray(tick_data['topdown']).save(topdown)
        Image.fromarray(tick_data['rgb_with_car']).save(rgb_with_car)

    def _should_brake(self):
        actors = self._world.get_actors()

        vehicle = self._is_vehicle_hazard(actors.filter('*vehicle*'))
        light = self._is_light_red(actors.filter('*traffic_light*'))
        walker = self._is_walker_hazard(actors.filter('*walker*'))

        return any(x is not None for x in [vehicle, light, walker])

    def _draw_line(self, p, v, z, color=(255, 0, 0)):
        if not DEBUG:
            return

        p1 = _location(p[0], p[1], z)
        p2 = _location(p[0] + v[0], p[1] + v[1], z)
        color = carla.Color(*color)

        self._world.debug.draw_line(p1, p2, 0.25, color, 0.01)

    def _is_light_red(self, lights_list):
        if self._vehicle.get_traffic_light_state() != carla.libcarla.TrafficLightState.Green:
            affecting = self._vehicle.get_traffic_light()

            for light in self._traffic_lights:
                if light.id == affecting.id:
                    return affecting

        return None

    def _is_walker_hazard(self, walkers_list):
        z = self._vehicle.get_location().z
        p1 = _numpy(self._vehicle.get_location())
        v1 = 10.0 * _orientation(self._vehicle.get_transform().rotation.yaw)

        self._draw_line(p1, v1, z + 2.5, (0, 0, 255))

        for walker in walkers_list:
            v2_hat = _orientation(walker.get_transform().rotation.yaw)
            s2 = np.linalg.norm(_numpy(walker.get_velocity()))

            # Near-stationary walkers have their heading vector scaled down,
            # so they are effectively treated as static obstacles.
            if s2 < 0.05:
                v2_hat *= s2

            p2 = -3.0 * v2_hat + _numpy(walker.get_location())
            v2 = 8.0 * v2_hat

            self._draw_line(p2, v2, z + 2.5)

            collides, collision_point = get_collision(p1, v1, p2, v2)

            if collides:
                return walker

        return None

    def _is_vehicle_hazard(self, vehicle_list):
        z = self._vehicle.get_location().z

        o1 = _orientation(self._vehicle.get_transform().rotation.yaw)
        p1 = _numpy(self._vehicle.get_location())
        s1 = max(7.5,
                 2.0 * np.linalg.norm(_numpy(self._vehicle.get_velocity())))
        v1_hat = o1
        v1 = s1 * v1_hat

        self._draw_line(p1, v1, z + 2.5, (255, 0, 0))

        for target_vehicle in vehicle_list:
            if target_vehicle.id == self._vehicle.id:
                continue

            o2 = _orientation(target_vehicle.get_transform().rotation.yaw)
            p2 = _numpy(target_vehicle.get_location())
            s2 = max(
                5.0,
                2.0 * np.linalg.norm(_numpy(target_vehicle.get_velocity())))
            v2_hat = o2
            v2 = s2 * v2_hat

            p2_p1 = p2 - p1
            distance = np.linalg.norm(p2_p1)
            p2_p1_hat = p2_p1 / (distance + 1e-4)

            self._draw_line(p2, v2, z + 2.5, (255, 0, 0))

            # Clip the dot products before arccos to guard against values just
            # outside [-1, 1] from floating-point error.
            angle_to_car = np.degrees(np.arccos(np.clip(v1_hat.dot(p2_p1_hat), -1.0, 1.0)))
            angle_between_heading = np.degrees(np.arccos(np.clip(o1.dot(o2), -1.0, 1.0)))

            # Skip vehicles on a crossing heading unless they are nearly dead
            # ahead and inside the look-ahead distance s1; otherwise require
            # the target to be roughly in front of us and within range.
            if angle_between_heading > 60.0 and not (angle_to_car < 15 and distance < s1):
                continue
            elif angle_to_car > 30.0:
                continue
            elif distance > s1:
                continue

            return target_vehicle

        return None
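
Both hazard checks above call a get_collision(p1, v1, p2, v2) helper that is not shown in these excerpts. The sketch below is one plausible implementation (an assumption modelled on the Learning by Cheating helpers): it intersects the two forecast segments p1 + t1*v1 and p2 + t2*v2 and reports a collision when both parameters fall in [0, 1].

import numpy as np


def get_collision(p1, v1, p2, v2):
    # Solve p1 + x[0] * v1 == p2 + x[1] * v2 for the segment parameters x.
    A = np.stack([v1, -v2], 1)
    b = p2 - p1

    # Near-parallel forecasts give a singular system; report no collision.
    if abs(np.linalg.det(A)) < 1e-3:
        return False, None

    x = np.linalg.solve(A, b)

    # Both parameters must lie in [0, 1] for the segments to actually cross.
    collides = all(x >= 0) and all(x <= 1)

    return collides, p1 + x[0] * v1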