Example 1
    def __init__(self, parent_actor, client, memory_frames, frame_stack, width,
                 height):
        self.frame_stack = frame_stack
        self.bird_eye_window_width = width
        self.bird_eye_window_height = height
        #self.buffer = torch.zeros(3, self.bird_eye_window_width, self.bird_eye_window_height).to(device)
        self.FRONT_FOV = 2 * (math.pi) / 3
        self._parent = parent_actor
        self.surface = None
        self._carla_client = client
        self.state_frames = memory_frames
        self.pedestrian_birdview = np.full(
            (self.state_frames, self.bird_eye_window_width,
             self.bird_eye_window_height),
            0,
            dtype=np.float32)
        self.pedestrian_birdview_stack = np.full(
            (self.frame_stack * self.state_frames, self.bird_eye_window_width,
             self.bird_eye_window_height),
            0,
            dtype=np.float32)
        self.pedestrian_birdview_embeded = np.full(
            (self.bird_eye_window_width, self.bird_eye_window_height),
            0,
            dtype=np.float32)

        self.birdview_producer = BirdViewProducer(
            self._carla_client,  # carla.Client
            target_size=PixelDimensions(width=self.bird_eye_window_width,
                                        height=self.bird_eye_window_height),
            pixels_per_meter=3,
            crop_type=BirdViewCropType.FRONT_AREA_ONLY,
            render_lanes_on_junctions=True,
        )
Example 2
# Imports assumed by this example (carla-birdeye-view package; PixelDimensions
# is assumed to live in carla_birdeye_view.mask)
import random

import carla
import cv2 as cv

from carla_birdeye_view import (BirdView, BirdViewCropType, BirdViewProducer,
                                DEFAULT_HEIGHT, DEFAULT_WIDTH)
from carla_birdeye_view.mask import PixelDimensions


def main():
    client = carla.Client("localhost", 2000)
    client.set_timeout(3.0)
    world = client.get_world()
    carla_map = world.get_map()
    spawn_points = carla_map.get_spawn_points()
    blueprints = world.get_blueprint_library()

    # settings = world.get_settings()
    # settings.synchronous_mode = True
    # settings.no_rendering_mode = True
    # settings.fixed_delta_seconds = 1 / 10.0
    # world.apply_settings(settings)

    hero_bp = random.choice(blueprints.filter("vehicle.audi.a2"))
    hero_bp.set_attribute("role_name", "hero")
    transform = random.choice(spawn_points)
    agent = world.spawn_actor(hero_bp, transform)
    agent.set_autopilot(True)

    birdview_producer = BirdViewProducer(
        client,
        PixelDimensions(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
        render_lanes_on_junctions=False,
    )
    stuck_frames_count = 0

    try:
        while True:
            # world.tick()
            birdview: BirdView = birdview_producer.produce(agent_vehicle=agent)
            # as_rgb() returns RGB, but cv.imshow expects the BGR color model
            bgr = cv.cvtColor(BirdViewProducer.as_rgb(birdview),
                              cv.COLOR_RGB2BGR)
            cv.imshow("BirdView RGB", bgr)

            # # Teleport when stuck for too long
            # if get_speed(agent) < STUCK_SPEED_THRESHOLD_IN_KMH:
            #     stuck_frames_count += 1
            # else:
            #     stuck_frames_count = 0

            # if stuck_frames_count == MAX_STUCK_FRAMES:
            #     agent.set_autopilot(False)
            #     agent.set_transform(random.choice(spawn_points))
            #     agent.set_autopilot(True)

            # Play next frames without having to wait for the key
            key = cv.waitKey(10) & 0xFF
            if key == 27:  # ESC
                break
    finally:
        if agent is not None:
            agent.destroy()
        cv.destroyAllWindows()
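
The commented-out settings block near the top of main() hints at synchronous mode, where the server only advances when the client calls world.tick(). A minimal sketch of that setup (standard CARLA 0.9.x settings API; the 10 Hz step is an arbitrary choice):

def run_synchronous(world, birdview_producer, agent):
    settings = world.get_settings()
    settings.synchronous_mode = True          # server waits for client ticks
    settings.fixed_delta_seconds = 1 / 10.0   # fixed 10 Hz simulation step
    world.apply_settings(settings)
    try:
        while True:
            world.tick()                      # advance simulation by one step
            birdview = birdview_producer.produce(agent_vehicle=agent)
    finally:
        settings.synchronous_mode = False     # restore asynchronous mode
        settings.fixed_delta_seconds = None
        world.apply_settings(settings)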
def process_image(queue, birdview_producer=None, vehicle=None):
    '''Get an image from the sensor queue and process it into the state for vision-based systems.'''
    image = queue.get()
    array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]

    image_rgb = array.copy()

    image = Image.fromarray(array).convert('L')  # grayscale conversion
    image = np.array(image.resize((84, 84)))  # convert to numpy array
    image = np.reshape(image, (84, 84, 1))  # reshape image

    if birdview_producer is not None:
        # Cache the producer and the vehicle on the function object for later calls
        process_image.birdview_producer = birdview_producer
        process_image.vehicle = vehicle
    if getattr(process_image, 'birdview_producer', None) is not None:
        birdview = process_image.birdview_producer.produce(
            agent_vehicle=process_image.vehicle  # carla.Actor (spawned vehicle)
        )
        # produces np.ndarray of shape (height, width, 3)
        rgb = BirdViewProducer.as_rgb(birdview)
        # cv2.imshow('BEV', cv2.resize(rgb, (640, 640)))
        # cv2.imshow('rgb', cv2.resize(image_rgb, (640, 640)))
        # print(rgb.shape)
        # cv2.waitKey(0)

        return rgb

    return image
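
A typical way to feed process_image is to let an RGB camera push frames into a queue.Queue and pass the producer only on the first call, which caches it on the function object. A hypothetical wiring sketch, assuming world, vehicle and birdview_producer exist as in the examples above:

import queue
import carla

frame_queue = queue.Queue()
camera_bp = world.get_blueprint_library().find('sensor.camera.rgb')
camera = world.spawn_actor(camera_bp,
                           carla.Transform(carla.Location(x=0.8, z=1.7)),
                           attach_to=vehicle)
camera.listen(frame_queue.put)  # every carla.Image lands in the queue

# First call caches the producer and vehicle; later calls can omit them
state = process_image(frame_queue, birdview_producer=birdview_producer,
                      vehicle=vehicle)
state = process_image(frame_queue)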
class BirdEye(object):
    def __init__(self, parent_actor, client, memory_frames, frame_stack, width,
                 height):
        self.frame_stack = frame_stack
        self.bird_eye_window_width = width
        self.bird_eye_window_height = height
        #self.buffer = torch.zeros(3, self.bird_eye_window_width, self.bird_eye_window_height).to(device)
        self.FRONT_FOV = 2 * (math.pi) / 3
        self._parent = parent_actor
        self.surface = None
        self.bird_array = None
        # Initialize render surfaces so render() is safe before birdeyeData()
        self.rgb_nyg_surf = None
        self.rgb_obsv_surf = None
        self.tensor = None
        self._carla_client = client
        self.state_frames = memory_frames
        self.pedestrian_birdview = np.full(
            (self.state_frames, self.bird_eye_window_width,
             self.bird_eye_window_height),
            0,
            dtype=np.float32)
        self.pedestrian_birdview_stack = np.full(
            (self.frame_stack * self.state_frames, self.bird_eye_window_width,
             self.bird_eye_window_height),
            0,
            dtype=np.float32)
        self.pedestrian_birdview_embeded = np.full(
            (self.bird_eye_window_width, self.bird_eye_window_height),
            0,
            dtype=np.float32)

        self.birdview_producer = BirdViewProducer(
            self._carla_client,  # carla.Client
            target_size=PixelDimensions(width=self.bird_eye_window_width,
                                        height=self.bird_eye_window_height),
            pixels_per_meter=2,
            crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
            render_lanes_on_junctions=True,
        )
        #self.birdeyeData(parent_actor)
        # BirdView one-hot channel indices:
        #   PEDESTRIANS = 8
        #   RED_LIGHTS = 7
        #   YELLOW_LIGHTS = 6
        #   GREEN_LIGHTS = 5
        #   AGENT = 4
        #   VEHICLES = 3
        #   CENTERLINES = 2
        #   LANES = 1
        #   ROAD = 0

    def birdeyeData(self, player, lidar_object):

        self._parent = player
        self.bird_array = self.birdview_producer.produce(
            agent_vehicle=self._parent)
        rgb = BirdViewProducer.as_rgb(self.bird_array)

        copyrgb = rgb.copy()
        n, m = np.shape(self.bird_array[:, :, 1])
        x, y = np.ogrid[0:n, 0:m]
        agent_pixel_pose_Y = int(self.bird_eye_window_width / 2)
        agent_pixel_pose_X = int(self.bird_eye_window_height / 2)
        agent_pixel_pose = [agent_pixel_pose_X, agent_pixel_pose_Y]
        # print (agent_pixel_pose)

        fov_mask = (
            ((agent_pixel_pose_Y - y) <
             ((agent_pixel_pose_X - x) * math.tan(self.FRONT_FOV / 2))) &
            ((agent_pixel_pose_Y - y) >
             (-(agent_pixel_pose_X - x) * math.tan(self.FRONT_FOV / 2))))

        self.mask_shadow, uncertainty, d = shadow_mask.view_field(
            self.bird_array[:, :, 9] + 2.7 * self.bird_array[:, :, 3], 1.8,
            agent_pixel_pose, 90)
        #(array[self.mask_shadow & mask]) = 255

        #copyrgb = rgb.copy()
        shadow = np.zeros((n, m))
        visibility_matrix = fov_mask & self.mask_shadow
        shadow[visibility_matrix] = 1

        uncertainty = uncertainty * shadow
        uncertainty = uncertainty.astype(np.bool_)
        '''r= copyrgb[:, :, 0]
        g = copyrgb[:, :, 1]
        b = copyrgb[:, :, 2]
        r[uncertainty] = 0
        g[uncertainty] = 0
        b[uncertainty] = 0'''

        n, m = np.shape(self.bird_array[:, :, 1])
        embedded_birdseye = np.zeros((n, m))

        self.bird_array[:, :, 8] = self.bird_array[:, :, 8] * uncertainty

        for i in range(self.state_frames * self.frame_stack - 1, -1, -1):
            if i == 0:
                self.pedestrian_birdview_stack[0, :, :] = np.multiply(
                    shadow, self.bird_array[:, :, 8])
            else:
                self.pedestrian_birdview_stack[
                    i, :, :] = self.pedestrian_birdview_stack[i - 1, :, :]

            #print(i)
            if i % self.frame_stack == 0:
                self.pedestrian_birdview[
                    int(i // self.frame_stack
                        ), :, :] = self.pedestrian_birdview_stack[i, :, :]

                embedded_birdseye = self.pedestrian_birdview[int(i//self.frame_stack), :, :] \
                                    * (0.5**(int(i//self.frame_stack))) + embedded_birdseye
        '''copyrgb = copyrgb.swapaxes(0, 1)
        self.rgb_nyg_surf = pygame.surfarray.make_surface(copyrgb)
        self.rgb_nyg_surf = pygame.transform.scale(self.rgb_nyg_surf, (self.bird_eye_window_width*4, self.bird_eye_window_height*4))
        '''
        visibility_int = np.zeros((1, n, m))
        visibility_int[0, uncertainty] = 1
        cir = np.where(d <= 30, int(1), shadow)
        cir = np.where(d > 30, int(0), cir)

        self.pedestrian_birdview_embeded = np.add(
            255 * self.bird_array[:, :, 8],
            0.5 * self.pedestrian_birdview_embeded)
        obsv = (0.01 * copyrgb)
        obsv[:, :, 0] = obsv[:, :, 0]  #+ 50 * uncertainty
        obsv[:, :,
             2] = obsv[:, :, 2] + 200 * self.pedestrian_birdview_stack[0, :, :]
        obsv[:, :, 1] = obsv[:, :, 1] + 100 * self.bird_array[:, :, 3]
        #obsv[:,:,1]= obsv[:,:,1]+200*self.pedestrian_birdview_stack[0, :, :]+100*(birdview[:, :, 3] * cir)
        obsv = obsv.swapaxes(0, 1)
        '''if lidar_data is not None:
            obsv= obsv + lidar_data
        birdview[:, :, 1] * fov_mask'''

        self.rgb_obsv_surf = pygame.surfarray.make_surface(obsv)
        self.rgb_obsv_surf = pygame.transform.scale(
            self.rgb_obsv_surf,
            (self.bird_eye_window_width * 4, self.bird_eye_window_height * 4))

        copyrgb[:, :, 0] = 255 * embedded_birdseye + copyrgb[:, :, 0]
        copyrgb = copyrgb.swapaxes(0, 1)
        self.rgb_nyg_surf = pygame.surfarray.make_surface(copyrgb)
        self.rgb_nyg_surf = pygame.transform.scale(
            self.rgb_nyg_surf,
            (self.bird_eye_window_width * 4, self.bird_eye_window_height * 4))

        self.tensor = pygame.surfarray.make_surface(embedded_birdseye)
        self.tensor = pygame.transform.flip(self.tensor, True, True)
        self.tensor = pygame.transform.rotozoom(self.tensor, 90, 4)

        return self.pedestrian_birdview, visibility_int

    def render(self, display):
        if self.rgb_nyg_surf is not None:
            display.blit(self.rgb_nyg_surf, (350, 0))
            display.blit(self.rgb_obsv_surf, (900, 0))

        if self.tensor is not None:
            display.blit(self.tensor, (350, 350))
        white = (255, 255, 255)
        green = (0, 255, 0)
        blue = (0, 0, 128)

        # set the pygame window name
        #pygame.display.set_caption('Show Text')

        font = pygame.font.Font('freesansbold.ttf', 20)

        text2 = font.render('Augmented Partial Observation', True, green, blue)
        text = font.render('Full State', True, green, blue)
        text3 = font.render('CARLA Front Camera', True, green, blue)
        text4 = font.render('CARLA Front Camera', True, green, blue)
        textRect = text.get_rect()

        textRect.center = (500, 360)
        display.blit(text, textRect)

        textRect2 = text2.get_rect()
        textRect2.center = (1100, 360)
        display.blit(text2, textRect2)

        textRect3 = text3.get_rect()
        textRect3.center = (450, 1000)
        display.blit(text3, textRect3)

        textRect4 = text4.get_rect()
        textRect4.center = (1100, 1000)
        display.blit(text4, textRect4)

    def push_to_tensor(self, intensor, x):
        # Shift the frame buffer back one slot and insert the new frame at the end
        tensor = intensor.clone()
        tensor[:-1, :, :] = intensor[1:, :, :]
        tensor[-1, :, :] = x[:, :]
        return tensor
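
The channel list in the BirdEye class comment describes the one-hot layout of the array returned by BirdViewProducer.produce (note that birdeyeData also indexes channel 9, which the list does not name). A small sketch of pulling individual masks out of such an array, assuming that index convention and a producer and player like the ones above:

import numpy as np

PEDESTRIANS, VEHICLES, ROAD = 8, 3, 0  # indices from the comment above

bird_array = birdview_producer.produce(agent_vehicle=player)  # (h, w, channels)
pedestrian_mask = bird_array[:, :, PEDESTRIANS].astype(bool)
vehicle_mask = bird_array[:, :, VEHICLES].astype(bool)
road_mask = bird_array[:, :, ROAD].astype(bool)
print("pedestrian pixels in view:", int(pedestrian_mask.sum()))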
Example 6
    def _get_obs(self):
        """Get the observations."""
        ## Birdeye rendering
        self.birdeye_render.vehicle_polygons = self.vehicle_polygons
        self.birdeye_render.walker_polygons = self.walker_polygons
        self.birdeye_render.waypoints = self.waypoints

        # birdeye view with roadmap and actors
        birdeye_render_types = ['roadmap', 'actors']
        if self.display_route:
            birdeye_render_types.append('waypoints')
        self.birdeye_render.render(self.display, birdeye_render_types)
        birdeye = pygame.surfarray.array3d(self.display)
        birdeye = birdeye[0:self.display_size, :, :]
        birdeye = display_to_rgb(birdeye, self.obs_size)

        # Roadmap
        if self.pixor:
            roadmap_render_types = ['roadmap']
            if self.display_route:
                roadmap_render_types.append('waypoints')
            self.birdeye_render.render(self.display, roadmap_render_types)
            roadmap = pygame.surfarray.array3d(self.display)
            roadmap = roadmap[0:self.display_size, :, :]
            roadmap = display_to_rgb(roadmap, self.obs_size)
            # Add ego vehicle
            for i in range(self.obs_size):
                for j in range(self.obs_size):
                    # Ego vehicle pixels are rendered red (255, 0, 0)
                    if abs(birdeye[i, j, 0] - 255) < 20 and abs(
                            birdeye[i, j, 1] -
                            0) < 20 and abs(birdeye[i, j, 2] - 0) < 20:
                        roadmap[i, j, :] = birdeye[i, j, :]

        # Display birdeye image
        birdeye_surface = rgb_to_display_surface(birdeye, self.display_size)
        self.display.blit(birdeye_surface, (0, 0))

        ## Lidar image generation
        point_cloud = []
        # Get point cloud data
        for location in self.lidar_data:
            point_cloud.append([location.x, location.y, -location.z])
        point_cloud = np.array(point_cloud)
        # Divide the 3D space into bins for the point cloud: x and y are set
        # according to self.lidar_bin, and z is split into two bins.
        y_bins = np.arange(-(self.obs_range - self.d_behind),
                           self.d_behind + self.lidar_bin, self.lidar_bin)
        x_bins = np.arange(-self.obs_range / 2,
                           self.obs_range / 2 + self.lidar_bin, self.lidar_bin)
        z_bins = [-self.lidar_height - 1, -self.lidar_height + 0.25, 1]
        # Get lidar image according to the bins
        lidar, _ = np.histogramdd(point_cloud, bins=(x_bins, y_bins, z_bins))
        lidar[:, :, 0] = np.array(lidar[:, :, 0] > 0, dtype=np.uint8)
        lidar[:, :, 1] = np.array(lidar[:, :, 1] > 0, dtype=np.uint8)
        # Add the waypoints to lidar image
        if self.display_route:
            wayptimg = (birdeye[:, :, 0] <= 10) * (birdeye[:, :, 1] <= 10) * (
                birdeye[:, :, 2] >= 240)
        else:
            wayptimg = birdeye[:, :, 0] < 0  # Equal to a zero matrix
        wayptimg = np.expand_dims(wayptimg, axis=2)
        wayptimg = np.fliplr(np.rot90(wayptimg, 3))

        # Get the final lidar image
        lidar = np.concatenate((lidar, wayptimg), axis=2)
        lidar = np.flip(lidar, axis=1)
        lidar = np.rot90(lidar, 1)
        lidar = lidar * 255

        # Display lidar image
        lidar_surface = rgb_to_display_surface(lidar, self.display_size)
        self.display.blit(lidar_surface, (self.display_size, 0))

        ## Display camera image
        camera = resize(self.camera_img, (self.obs_size, self.obs_size)) * 255
        camera_surface = rgb_to_display_surface(camera, self.display_size)
        self.display.blit(camera_surface, (self.display_size * 2, 0))

        ## Display processed camera image
        original_camera = resize(self.original_camera_image,
                                 (self.image_height, self.image_width)) * 255
        #print("original camera : {}".format(original_camera))

        o_camera_surface = rgb_to_display_surface(original_camera,
                                                  self.display_size)
        self.display.blit(o_camera_surface, (self.display_size * 2, 0))

        # Display on pygame
        pygame.display.flip()

        # State observation
        ego_trans = self.ego.get_transform()
        ego_x = ego_trans.location.x
        ego_y = ego_trans.location.y
        ego_yaw = ego_trans.rotation.yaw / 180 * np.pi
        lateral_dis, w = get_preview_lane_dis(self.waypoints, ego_x, ego_y)
        delta_yaw = np.arcsin(
            np.cross(w, np.array(np.array([np.cos(ego_yaw),
                                           np.sin(ego_yaw)]))))
        v = self.ego.get_velocity()
        speed = np.sqrt(v.x**2 + v.y**2)
        state = np.array([lateral_dis, -delta_yaw, speed, self.vehicle_front])

        if self.pixor:
            ## Vehicle classification and regression maps (requires further normalization)
            vh_clas = np.zeros((self.pixor_size, self.pixor_size))
            vh_regr = np.zeros((self.pixor_size, self.pixor_size, 6))

            # Generate the PIXOR image. Note that CARLA uses a left-handed coordinate system
            # Get the 6-dim geometric parametrization used in PIXOR; here we use pixel coordinates
            for actor in self.world.get_actors().filter('vehicle.*'):
                x, y, yaw, l, w = get_info(actor)
                x_local, y_local, yaw_local = get_local_pose(
                    (x, y, yaw), (ego_x, ego_y, ego_yaw))
                if actor.id != self.ego.id:
                    if abs(
                            y_local
                    ) < self.obs_range / 2 + 1 and x_local < self.obs_range - self.d_behind + 1 and x_local > -self.d_behind - 1:
                        x_pixel, y_pixel, yaw_pixel, l_pixel, w_pixel = get_pixel_info(
                            local_info=(x_local, y_local, yaw_local, l, w),
                            d_behind=self.d_behind,
                            obs_range=self.obs_range,
                            image_size=self.pixor_size)
                        cos_t = np.cos(yaw_pixel)
                        sin_t = np.sin(yaw_pixel)
                        logw = np.log(w_pixel)
                        logl = np.log(l_pixel)
                        pixels = get_pixels_inside_vehicle(
                            pixel_info=(x_pixel, y_pixel, yaw_pixel, l_pixel,
                                        w_pixel),
                            pixel_grid=self.pixel_grid)
                        for pixel in pixels:
                            vh_clas[pixel[0], pixel[1]] = 1
                            dx = x_pixel - pixel[0]
                            dy = y_pixel - pixel[1]
                            vh_regr[pixel[0], pixel[1], :] = np.array(
                                [cos_t, sin_t, dx, dy, logw, logl])

            # Flip the image matrix so that the origin is at the left-bottom
            vh_clas = np.flip(vh_clas, axis=0)
            vh_regr = np.flip(vh_regr, axis=0)

            # Pixor state, [x, y, cos(yaw), sin(yaw), speed]
            pixor_state = [
                ego_x, ego_y,
                np.cos(ego_yaw),
                np.sin(ego_yaw), speed
            ]

        birdview: BirdView = self.birdview_producer.produce(
            agent_vehicle=self.ego)
        bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview),
                           cv2.COLOR_RGB2BGR)

        v = self.ego.get_velocity()
        speed_kmh = 3.6 * np.sqrt(v.x**2 + v.y**2 + v.z**2)
        speed_kmh = np.array([speed_kmh])
        obs = {
            'original_camera': original_camera.astype(np.uint8),
            'speed': speed_kmh.astype(np.uint8),
            'camera': camera.astype(np.uint8),
            'lidar': lidar.astype(np.uint8),
            'birdeye': birdeye.astype(np.uint8),
            'driving_image': bgr.astype(np.uint8),
            'state': state,
        }

        if self.pixor:
            obs.update({
                'roadmap':
                roadmap.astype(np.uint8),
                'vh_clas':
                np.expand_dims(vh_clas, -1).astype(np.float32),
                'vh_regr':
                vh_regr.astype(np.float32),
                'pixor_state':
                pixor_state,
            })

        return obs
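
The lidar image above relies on np.histogramdd: points are bucketed into an (x, y, z) grid whose z axis has only two bins, so each of the first two channels becomes a binary occupancy map for one height band. A self-contained sketch of the same idea with placeholder values (obs_range, lidar_bin and lidar_height here are illustrative, not the ones used above):

import numpy as np

obs_range, lidar_bin, lidar_height = 32.0, 0.25, 2.1   # hypothetical values
point_cloud = np.random.uniform(-10, 10, size=(1000, 3))  # fake (x, y, z) points

x_bins = np.arange(-obs_range / 2, obs_range / 2 + lidar_bin, lidar_bin)
y_bins = np.arange(-obs_range / 2, obs_range / 2 + lidar_bin, lidar_bin)
z_bins = [-lidar_height - 1, -lidar_height + 0.25, 1]  # two height bands

lidar, _ = np.histogramdd(point_cloud, bins=(x_bins, y_bins, z_bins))
occupancy = (lidar > 0).astype(np.uint8)  # binary occupancy per height band
print(occupancy.shape)  # (128, 128, 2) for these bin settings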
Example 8
class CarlaEnv(gym.Env):
    """An OpenAI gym wrapper for CARLA simulator."""
    def __init__(self, params):
        # parameters
        self.display_size = params['display_size']  # rendering screen size
        self.max_past_step = params['max_past_step']
        self.number_of_vehicles = params['number_of_vehicles']
        self.number_of_walkers = params['number_of_walkers']
        self.dt = params['dt']
        self.task_mode = params['task_mode']
        self.max_time_episode = params['max_time_episode']
        self.max_waypt = params['max_waypt']
        self.obs_range = params['obs_range']
        self.lidar_bin = params['lidar_bin']
        self.d_behind = params['d_behind']
        self.obs_size = int(self.obs_range / self.lidar_bin)
        self.image_height = params['image_height']
        self.image_width = params['image_width']
        self.out_lane_thres = params['out_lane_thres']
        self.desired_speed = params['desired_speed']
        self.max_ego_spawn_times = params['max_ego_spawn_times']
        self.display_route = params['display_route']
        if 'pixor' in params.keys():
            self.pixor = params['pixor']
            self.pixor_size = params['pixor_size']
        else:
            self.pixor = False
        if 'image_xform' in params.keys():
            self.xform = True
        else:
            self.xform = False

        self.step_rewards = []
        self.step_actions = []
        self.step_steering = []
        self.max_distance = 2.0

        # Destination
        if params['task_mode'] == 'roundabout':
            self.dests = [[4.46, -61.46, 0], [-49.53, -2.89, 0],
                          [-6.48, 55.47, 0], [35.96, 3.33, 0]]
        else:
            self.dests = None

        # action and observation spaces
        self.discrete = params['discrete']
        self.discrete_act = [params['discrete_acc'],
                             params['discrete_steer']]  # acc, steer
        self.n_acc = len(self.discrete_act[0])
        self.n_steer = len(self.discrete_act[1])
        if self.discrete:
            self.action_space = spaces.Discrete(self.n_acc * self.n_steer)
        else:
            self.action_space = spaces.Box(
                np.array([
                    params['continuous_accel_range'][0],
                    params['continuous_steer_range'][0]
                ]),
                np.array([
                    params['continuous_accel_range'][1],
                    params['continuous_steer_range'][1]
                ]),
                dtype=np.float32)  # acc, steer
        observation_space_dict = {
            'orig_camera':
            spaces.Box(low=0,
                       high=255,
                       shape=(self.image_height, self.image_width, 3),
                       dtype=np.uint8),
            'camera':
            spaces.Box(low=0,
                       high=255,
                       shape=(self.obs_size, self.obs_size, 3),
                       dtype=np.uint8),
            'lidar':
            spaces.Box(low=0,
                       high=255,
                       shape=(self.obs_size, self.obs_size, 3),
                       dtype=np.uint8),
            'birdeye':
            spaces.Box(low=0,
                       high=255,
                       shape=(self.obs_size, self.obs_size, 3),
                       dtype=np.uint8),
            'state':
            spaces.Box(np.array([-2, -1, -5, 0]),
                       np.array([2, 1, 30, 1]),
                       dtype=np.float32)
        }
        if self.pixor:
            observation_space_dict.update({
                'roadmap':
                spaces.Box(low=0,
                           high=255,
                           shape=(self.obs_size, self.obs_size, 3),
                           dtype=np.uint8),
                'vh_clas':
                spaces.Box(low=0,
                           high=1,
                           shape=(self.pixor_size, self.pixor_size, 1),
                           dtype=np.float32),
                'vh_regr':
                spaces.Box(low=-5,
                           high=5,
                           shape=(self.pixor_size, self.pixor_size, 6),
                           dtype=np.float32),
                'pixor_state':
                spaces.Box(np.array([-1000, -1000, -1, -1, -5]),
                           np.array([1000, 1000, 1, 1, 20]),
                           dtype=np.float32)
            })
        self.observation_space = spaces.Dict(observation_space_dict)

        # Connect to carla server and get world object
        print('connecting to Carla server...')
        client = carla.Client('localhost', params['port'])
        client.set_timeout(10.0)
        self.client = client
        self.world = client.load_world(params['town'])
        print('Carla server connected!')

        self.birdview_producer = BirdViewProducer(
            self.client,  # carla.Client
            target_size=PixelDimensions(width=self.image_width,
                                        height=self.image_height),
            pixels_per_meter=4,
            crop_type=BirdViewCropType.FRONT_AND_REAR_AREA)

        # Set weather
        self.world.set_weather(carla.WeatherParameters.ClearNoon)

        # Get spawn points
        self.vehicle_spawn_points = list(
            self.world.get_map().get_spawn_points())
        self.walker_spawn_points = []
        for i in range(self.number_of_walkers):
            spawn_point = carla.Transform()
            loc = self.world.get_random_location_from_navigation()
            if loc is not None:
                spawn_point.location = loc
                self.walker_spawn_points.append(spawn_point)

        # Create the ego vehicle blueprint
        self.ego_bp = self._create_vehicle_blueprint(
            params['ego_vehicle_filter'], color='49,8,8')

        # Collision sensor
        self.collision_hist = []  # The collision history
        self.collision_hist_l = 1  # collision history length
        self.collision_bp = self.world.get_blueprint_library().find(
            'sensor.other.collision')

        # Lidar sensor
        self.lidar_data = None
        self.lidar_height = 2.1
        self.lidar_trans = carla.Transform(
            carla.Location(x=0.0, z=self.lidar_height))
        self.lidar_bp = self.world.get_blueprint_library().find(
            'sensor.lidar.ray_cast')
        self.lidar_bp.set_attribute('channels', '32')
        self.lidar_bp.set_attribute('range', '5000')

        # Camera sensor
        self.camera_img = np.zeros((self.obs_size, self.obs_size, 3),
                                   dtype=np.uint8)
        self.original_camera_image = np.zeros(
            (self.image_height, self.image_width, 3), dtype=np.uint8)
        self.speed = np.zeros((1), dtype=np.uint8)
        self.camera_trans = carla.Transform(carla.Location(x=0.8, z=1.7))
        self.camera_bp = self.world.get_blueprint_library().find(
            'sensor.camera.rgb')
        # Modify the attributes of the blueprint to set image resolution and field of view.
        self.camera_bp.set_attribute('image_size_x', str(self.obs_size))
        self.camera_bp.set_attribute('image_size_y', str(self.obs_size))
        self.camera_bp.set_attribute('fov', '110')
        # Set the time in seconds between sensor captures
        self.camera_bp.set_attribute('sensor_tick', '0.02')

        # Set fixed simulation step for synchronous mode
        self.settings = self.world.get_settings()
        self.settings.fixed_delta_seconds = self.dt

        # Record the time of total steps and resetting steps
        self.reset_step = 0
        self.total_step = 0

        # Initialize the renderer
        self._init_renderer()

        self.current_waypoint_index = 0
        self.auto_pilot_mode = False

        # Get pixel grid points
        if self.pixor:
            x, y = np.meshgrid(
                np.arange(self.pixor_size),
                np.arange(self.pixor_size))  # make a canvas with coordinates
            x, y = x.flatten(), y.flatten()
            self.pixel_grid = np.vstack((x, y)).T

    def get_client(self):
        return self.client

    def get_car(self):
        return self.ego

    def get_speed(self):
        v = self.ego.get_velocity()
        speed_kmh = 3.6 * np.sqrt(v.x**2 + v.y**2 + v.z**2)
        return speed_kmh

    def reset(self):
        # Clear sensor objects
        self.collision_sensor = None
        self.lidar_sensor = None
        self.camera_sensor = None
        self.step_rewards = []
        self.step_actions = []
        self.step_steering = []

        self.birdview_producer = BirdViewProducer(
            self.client,  # carla.Client
            target_size=PixelDimensions(width=self.image_width,
                                        height=self.image_height),
            pixels_per_meter=4,
            crop_type=BirdViewCropType.FRONT_AND_REAR_AREA)

        # Delete sensors, vehicles and walkers
        self._clear_all_actors([
            'sensor.other.collision', 'sensor.lidar.ray_cast',
            'sensor.camera.rgb', 'vehicle.*', 'controller.ai.walker',
            'walker.*'
        ])

        # Disable sync mode
        self._set_synchronous_mode(False)

        # Spawn surrounding vehicles
        random.shuffle(self.vehicle_spawn_points)
        count = self.number_of_vehicles
        if count > 0:
            for spawn_point in self.vehicle_spawn_points:
                if self._try_spawn_random_vehicle_at(spawn_point,
                                                     number_of_wheels=[4]):
                    count -= 1
                if count <= 0:
                    break
        while count > 0:
            if self._try_spawn_random_vehicle_at(random.choice(
                    self.vehicle_spawn_points),
                                                 number_of_wheels=[4]):
                count -= 1

        # Spawn pedestrians
        random.shuffle(self.walker_spawn_points)
        count = self.number_of_walkers
        if count > 0:
            for spawn_point in self.walker_spawn_points:
                if self._try_spawn_random_walker_at(spawn_point):
                    count -= 1
                if count <= 0:
                    break
        while count > 0:
            if self._try_spawn_random_walker_at(
                    random.choice(self.walker_spawn_points)):
                count -= 1

        # Get actors polygon list
        self.vehicle_polygons = []
        vehicle_poly_dict = self._get_actor_polygons('vehicle.*')
        self.vehicle_polygons.append(vehicle_poly_dict)
        self.walker_polygons = []
        walker_poly_dict = self._get_actor_polygons('walker.*')
        self.walker_polygons.append(walker_poly_dict)

        # Spawn the ego vehicle
        ego_spawn_times = 0
        while True:
            if ego_spawn_times > self.max_ego_spawn_times:
                return self.reset()

            if self.task_mode == 'random':
                transform = random.choice(self.vehicle_spawn_points)
            if self.task_mode == 'roundabout':
                self.start = [52.1 + np.random.uniform(-5, 5), -4.2,
                              178.66]  # random
                # self.start=[52.1,-4.2, 178.66] # static
                transform = set_carla_transform(self.start)
            if self._try_spawn_ego_vehicle_at(transform):
                break
            else:
                ego_spawn_times += 1
                time.sleep(0.1)

        # Add collision sensor
        self.collision_sensor = self.world.spawn_actor(self.collision_bp,
                                                       carla.Transform(),
                                                       attach_to=self.ego)
        self.collision_sensor.listen(lambda event: get_collision_hist(event))

        def get_collision_hist(event):
            impulse = event.normal_impulse
            intensity = np.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
            self.collision_hist.append(intensity)
            if len(self.collision_hist) > self.collision_hist_l:
                self.collision_hist.pop(0)

        self.collision_hist = []

        # Add lidar sensor
        self.lidar_sensor = self.world.spawn_actor(self.lidar_bp,
                                                   self.lidar_trans,
                                                   attach_to=self.ego)
        self.lidar_sensor.listen(lambda data: get_lidar_data(data))

        def get_lidar_data(data):
            self.lidar_data = data

        # Add camera sensor
        self.camera_sensor = self.world.spawn_actor(self.camera_bp,
                                                    self.camera_trans,
                                                    attach_to=self.ego)
        self.camera_sensor.listen(lambda data: get_camera_img(data))

        def get_camera_img(data):
            array = np.frombuffer(data.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (data.height, data.width, 4))
            array = array[:, :, :3]
            i3 = (array) / 255.
            array = array[:, :, ::-1]
            self.camera_img = array
            self.original_camera_image = i3

        # Update timesteps
        self.time_step = 0
        self.reset_step += 1

        # Enable sync mode
        self.settings.synchronous_mode = True
        self.world.apply_settings(self.settings)

        self.routeplanner = RoutePlanner(self.ego, self.max_waypt)
        self.waypoints, _, self.vehicle_front = self.routeplanner.run_step()
        # get all the route waypoints
        self.route_waypoints = self.routeplanner._get_waypoints_data()
        #print(" Route waypoints : {} ".format(len(self.route_waypoints)))
        # Set ego information for render
        self.birdeye_render.set_hero(self.ego, self.ego.id)

        self.current_waypoint_index = 0
        self.step_start_location = self.ego.get_location()
        self.step_last_location = self.step_start_location
        return self._get_obs()

    def dump(self):
        print("Step throttle : {} ".format(self.step_actions))
        print("Step steering : {} ".format(self.step_steering))
        print("Step rewards  : {} ".format(self.step_rewards))

    def get_steering_angle(self):
        physics_control = self.ego.get_physics_control()
        for wheel in physics_control.wheels:
            print(wheel.max_steer_angle)

    def auto(self, value):
        self.auto_pilot_mode = value
        self.ego.set_autopilot(value)

    def isauto(self):
        return self.auto_pilot_mode

    def move(self):
        self.world.tick()
        return self._get_obs()

    def get_action_auto(self):
        control = self.ego.get_control()
        return control.throttle, control.steer

    def move_auto(self):
        self.world.tick()
        # Keep track of closest waypoint on the route
        transform = self.ego.get_transform()
        waypoint_index = self.current_waypoint_index
        for _ in range(len(self.waypoints)):
            # Check if we passed the next waypoint along the route
            next_waypoint_index = waypoint_index + 1
            wp = self.route_waypoints[next_waypoint_index %
                                      len(self.waypoints)]
            dot = np.dot(
                vector(wp.transform.get_forward_vector())[:2],
                vector(transform.location - wp.transform.location)[:2])
            if dot > 0.0:  # Did we pass the waypoint?
                waypoint_index += 1  # Go to next waypoint

        self.current_waypoint_index = waypoint_index
        v_transform = self.ego.get_transform()
        current_waypoint = self.route_waypoints[self.current_waypoint_index %
                                                len(self.waypoints)]
        next_waypoint = self.route_waypoints[(self.current_waypoint_index + 1)
                                             % len(self.waypoints)]
        self.distance_from_center = distance_to_line(
            vector(current_waypoint.transform.location),
            vector(next_waypoint.transform.location),
            vector(v_transform.location))
        self.current_waypoint = current_waypoint
        self.next_waypoint = next_waypoint
        control = self.ego.get_control()
        isdone, isout = self._terminal()
        reward = self.get_reward_speed(isdone, control.steer, isout)
        return self._get_obs(), control.throttle, control.steer, reward, isdone

    def step(self, action):
        # Calculate acceleration and steering
        self.throttle = 0
        self.steer = 0
        if self.discrete:
            #acc = self.discrete_act[0][action//self.n_steer]
            #steer = self.discrete_act[1][action%self.n_steer]
            #acc = self.discrete_act[0][action]
            #steer = self.discrete_act[1][action%self.n_steer]
            acc = action[0]
            steer = action[1]
        else:
            acc = action[0]
            steer = action[1]

        # Keep track of closest waypoint on the route
        transform = self.ego.get_transform()
        waypoint_index = self.current_waypoint_index
        for _ in range(len(self.waypoints)):
            # Check if we passed the next waypoint along the route
            next_waypoint_index = waypoint_index + 1
            wp = self.route_waypoints[next_waypoint_index %
                                      len(self.waypoints)]
            dot = np.dot(
                vector(wp.transform.get_forward_vector())[:2],
                vector(transform.location - wp.transform.location)[:2])
            if dot > 0.0:  # Did we pass the waypoint?
                waypoint_index += 1  # Go to next waypoint

        self.current_waypoint_index = waypoint_index
        v_transform = self.ego.get_transform()
        current_waypoint = self.route_waypoints[self.current_waypoint_index %
                                                len(self.waypoints)]
        next_waypoint = self.route_waypoints[(self.current_waypoint_index + 1)
                                             % len(self.waypoints)]
        self.distance_from_center = distance_to_line(
            vector(current_waypoint.transform.location),
            vector(next_waypoint.transform.location),
            vector(v_transform.location))
        self.current_waypoint = current_waypoint
        self.next_waypoint = next_waypoint
        #print("current way point idx {} ".format(self.current_waypoint_index))
        # Convert acceleration to throttle and brake
        if acc > 0:
            #throttle = np.clip(acc/3,0,1)
            #throttle = np.clip(acc,0,1)
            #throttle = acc
            #brake = 0
            throttle = acc
            brake = 0
        else:
            throttle = 0
            brake = 0
            #brake = np.clip(-acc/8,0,1)
            #brake = np.clip(-acc,0,1)

        self.throttle = throttle
        self.steer = steer

        # Apply control
        if self.auto_pilot_mode:
            self.world.tick()
        else:
            act = carla.VehicleControl(throttle=float(throttle),
                                       steer=float(steer),
                                       brake=float(brake))
            self.ego.apply_control(act)
            self.world.tick()

        # Append actors polygon list
        vehicle_poly_dict = self._get_actor_polygons('vehicle.*')
        self.vehicle_polygons.append(vehicle_poly_dict)
        while len(self.vehicle_polygons) > self.max_past_step:
            self.vehicle_polygons.pop(0)
        walker_poly_dict = self._get_actor_polygons('walker.*')
        self.walker_polygons.append(walker_poly_dict)
        while len(self.walker_polygons) > self.max_past_step:
            self.walker_polygons.pop(0)

        # route planner
        self.waypoints, _, self.vehicle_front = self.routeplanner.run_step()
        # get all the route waypoints
        self.route_waypoints = self.routeplanner._get_waypoints_data()

        # state information
        info = {
            'waypoints': self.waypoints,
            'vehicle_front': self.vehicle_front
        }

        # Update timesteps
        self.time_step += 1
        self.total_step += 1

        isdone, isout = self._terminal()
        self.step_end_location = self.ego.get_location()
        #reward , centre , coll, out = self._get_reward_speed_centering(isdone)
        reward = self.get_reward_speed(isdone, steer, isout)
        self.step_actions.append(throttle)
        self.step_steering.append(steer)
        self.step_rewards.append(reward)

        if isdone:
            print("Final Speed reward : {}".format(reward))
        return (self._get_obs(), reward, isdone, copy.deepcopy(info))

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def render(self, mode):
        pass

    def _create_vehicle_blueprint(self,
                                  actor_filter,
                                  color=None,
                                  number_of_wheels=[4]):
        """Create the blueprint for a specific actor type.

    Args:
      actor_filter: a string indicating the actor type, e.g., 'vehicle.lincoln*'.

    Returns:
      bp: the blueprint object of carla.
    """
        blueprints = self.world.get_blueprint_library().filter(actor_filter)
        blueprint_library = []
        for nw in number_of_wheels:
            blueprint_library = blueprint_library + [
                x for x in blueprints
                if int(x.get_attribute('number_of_wheels')) == nw
            ]
        bp = random.choice(blueprint_library)
        if bp.has_attribute('color'):
            if not color:
                color = random.choice(
                    bp.get_attribute('color').recommended_values)
            bp.set_attribute('color', color)
        return bp

    def _init_renderer(self):
        """Initialize the birdeye view renderer.
    """
        pygame.init()
        self.display = pygame.display.set_mode(
            (self.display_size * 3, self.display_size),
            pygame.HWSURFACE | pygame.DOUBLEBUF)

        pixels_per_meter = self.display_size / self.obs_range
        pixels_ahead_vehicle = (self.obs_range / 2 -
                                self.d_behind) * pixels_per_meter
        birdeye_params = {
            'screen_size': [self.display_size, self.display_size],
            'pixels_per_meter': pixels_per_meter,
            'pixels_ahead_vehicle': pixels_ahead_vehicle
        }
        self.birdeye_render = BirdeyeRender(self.world, birdeye_params)

    def _set_synchronous_mode(self, synchronous=True):
        """Set whether to use the synchronous mode.
    """
        self.settings.synchronous_mode = synchronous
        self.world.apply_settings(self.settings)

    def _try_spawn_random_vehicle_at(self, transform, number_of_wheels=[4]):
        """Try to spawn a surrounding vehicle at specific transform with random bluprint.

    Args:
      transform: the carla transform object.

    Returns:
      Bool indicating whether the spawn is successful.
    """
        blueprint = self._create_vehicle_blueprint(
            'vehicle.*', number_of_wheels=number_of_wheels)
        blueprint.set_attribute('role_name', 'autopilot')
        vehicle = self.world.try_spawn_actor(blueprint, transform)
        if vehicle is not None:
            vehicle.set_autopilot()
            return True
        return False

    def _try_spawn_random_walker_at(self, transform):
        """Try to spawn a walker at a specific transform with a random blueprint.

    Args:
      transform: the carla transform object.

    Returns:
      Bool indicating whether the spawn is successful.
    """
        walker_bp = random.choice(
            self.world.get_blueprint_library().filter('walker.*'))
        # make the walker killable (not invincible)
        if walker_bp.has_attribute('is_invincible'):
            walker_bp.set_attribute('is_invincible', 'false')
        walker_actor = self.world.try_spawn_actor(walker_bp, transform)

        if walker_actor is not None:
            walker_controller_bp = self.world.get_blueprint_library().find(
                'controller.ai.walker')
            walker_controller_actor = self.world.spawn_actor(
                walker_controller_bp, carla.Transform(), walker_actor)
            # start walker
            walker_controller_actor.start()
            # set walk to random point
            walker_controller_actor.go_to_location(
                self.world.get_random_location_from_navigation())
            # random max speed
            walker_controller_actor.set_max_speed(
                1 + random.random()
            )  # max speed between 1 and 2 (default is 1.4 m/s)
            return True
        return False

    def _try_spawn_ego_vehicle_at(self, transform):
        """Try to spawn the ego vehicle at a specific transform.
    Args:
      transform: the carla transform object.
    Returns:
      Bool indicating whether the spawn is successful.
    """
        vehicle = None
        # Check if ego position overlaps with surrounding vehicles
        overlap = False
        for idx, poly in self.vehicle_polygons[-1].items():
            poly_center = np.mean(poly, axis=0)
            ego_center = np.array([transform.location.x, transform.location.y])
            dis = np.linalg.norm(poly_center - ego_center)
            if dis > 8:
                continue
            else:
                overlap = True
                break

        if not overlap:
            vehicle = self.world.try_spawn_actor(self.ego_bp, transform)

        if vehicle is not None:
            vehicle.set_simulate_physics(True)
            self.ego = vehicle
            return True

        return False

    def _get_actor_polygons(self, filt):
        """Get the bounding box polygon of actors.

    Args:
      filt: the filter indicating what type of actors we'll look at.

    Returns:
      actor_poly_dict: a dictionary containing the bounding boxes of specific actors.
    """
        actor_poly_dict = {}
        for actor in self.world.get_actors().filter(filt):
            # Get x, y and yaw of the actor
            trans = actor.get_transform()
            x = trans.location.x
            y = trans.location.y
            yaw = trans.rotation.yaw / 180 * np.pi
            # Get length and width
            bb = actor.bounding_box
            l = bb.extent.x
            w = bb.extent.y
            # Get bounding box polygon in the actor's local coordinate
            poly_local = np.array([[l, w], [l, -w], [-l, -w],
                                   [-l, w]]).transpose()
            # Get rotation matrix to transform to global coordinate
            R = np.array([[np.cos(yaw), -np.sin(yaw)],
                          [np.sin(yaw), np.cos(yaw)]])
            # Get global bounding box polygon
            poly = np.matmul(R, poly_local).transpose() + np.repeat(
                [[x, y]], 4, axis=0)
            actor_poly_dict[actor.id] = poly
        return actor_poly_dict
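    # Worked example (illustrative): an actor at (x, y) = (10, 0) with
    # yaw = 90 deg and half-extents l = 2, w = 1 has local corner (2, 1);
    # rotating by R(90 deg) maps it to (-1, 2), so the corresponding global
    # corner is (10 - 1, 0 + 2) = (9, 2).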

    def _get_obs(self):
        """Get the observations."""
        ## Birdeye rendering
        self.birdeye_render.vehicle_polygons = self.vehicle_polygons
        self.birdeye_render.walker_polygons = self.walker_polygons
        self.birdeye_render.waypoints = self.waypoints

        # birdeye view with roadmap and actors
        birdeye_render_types = ['roadmap', 'actors']
        if self.display_route:
            birdeye_render_types.append('waypoints')
        self.birdeye_render.render(self.display, birdeye_render_types)
        birdeye = pygame.surfarray.array3d(self.display)
        birdeye = birdeye[0:self.display_size, :, :]
        birdeye = display_to_rgb(birdeye, self.obs_size)

        # Roadmap
        if self.pixor:
            roadmap_render_types = ['roadmap']
            if self.display_route:
                roadmap_render_types.append('waypoints')
            self.birdeye_render.render(self.display, roadmap_render_types)
            roadmap = pygame.surfarray.array3d(self.display)
            roadmap = roadmap[0:self.display_size, :, :]
            roadmap = display_to_rgb(roadmap, self.obs_size)
            # Add ego vehicle: copy pixels that are close to pure red
            # (255, 0, 0), the color the birdeye renderer uses for the ego
            for i in range(self.obs_size):
                for j in range(self.obs_size):
                    if (abs(birdeye[i, j, 0] - 255) < 20
                            and abs(birdeye[i, j, 1]) < 20
                            and abs(birdeye[i, j, 2]) < 20):
                        roadmap[i, j, :] = birdeye[i, j, :]

        # Display birdeye image
        birdeye_surface = rgb_to_display_surface(birdeye, self.display_size)
        self.display.blit(birdeye_surface, (0, 0))

        ## Lidar image generation
        point_cloud = []
        # Get point cloud data
        for location in self.lidar_data:
            point_cloud.append([location.x, location.y, -location.z])
        point_cloud = np.array(point_cloud)
        # Separate the 3D space into bins for the point cloud: x and y bin
        # sizes follow self.lidar_bin, and z is split into two bins.
        y_bins = np.arange(-(self.obs_range - self.d_behind),
                           self.d_behind + self.lidar_bin, self.lidar_bin)
        x_bins = np.arange(-self.obs_range / 2,
                           self.obs_range / 2 + self.lidar_bin, self.lidar_bin)
        z_bins = [-self.lidar_height - 1, -self.lidar_height + 0.25, 1]
        # Get lidar image according to the bins
        lidar, _ = np.histogramdd(point_cloud, bins=(x_bins, y_bins, z_bins))
        lidar[:, :, 0] = np.array(lidar[:, :, 0] > 0, dtype=np.uint8)
        lidar[:, :, 1] = np.array(lidar[:, :, 1] > 0, dtype=np.uint8)
        # Add the waypoints to lidar image
        if self.display_route:
            wayptimg = (birdeye[:, :, 0] <= 10) * (birdeye[:, :, 1] <= 10) * (
                birdeye[:, :, 2] >= 240)
        else:
            wayptimg = birdeye[:, :, 0] < 0  # Equal to a zero matrix
        wayptimg = np.expand_dims(wayptimg, axis=2)
        wayptimg = np.fliplr(np.rot90(wayptimg, 3))

        # Get the final lidar image
        lidar = np.concatenate((lidar, wayptimg), axis=2)
        lidar = np.flip(lidar, axis=1)
        lidar = np.rot90(lidar, 1)
        lidar = lidar * 255

        # Display lidar image
        lidar_surface = rgb_to_display_surface(lidar, self.display_size)
        self.display.blit(lidar_surface, (self.display_size, 0))

        ## Display camera image
        camera = resize(self.camera_img, (self.obs_size, self.obs_size)) * 255
        camera_surface = rgb_to_display_surface(camera, self.display_size)
        self.display.blit(camera_surface, (self.display_size * 2, 0))

        ## Display processed camera image
        original_camera = resize(self.original_camera_image,
                                 (self.image_height, self.image_width)) * 255
        #print("original camera : {}".format(original_camera))

        # NOTE: this blit targets the same panel as the camera image above
        o_camera_surface = rgb_to_display_surface(original_camera,
                                                  self.display_size)
        self.display.blit(o_camera_surface, (self.display_size * 2, 0))

        # Display on pygame
        pygame.display.flip()

        # State observation
        ego_trans = self.ego.get_transform()
        ego_x = ego_trans.location.x
        ego_y = ego_trans.location.y
        ego_yaw = ego_trans.rotation.yaw / 180 * np.pi
        lateral_dis, w = get_preview_lane_dis(self.waypoints, ego_x, ego_y)
        delta_yaw = np.arcsin(
            np.cross(w, np.array([np.cos(ego_yaw), np.sin(ego_yaw)])))
        v = self.ego.get_velocity()
        speed = np.sqrt(v.x**2 + v.y**2)
        state = np.array([lateral_dis, -delta_yaw, speed, self.vehicle_front])

        if self.pixor:
            ## Vehicle classification and regression maps (requires further normalization)
            vh_clas = np.zeros((self.pixor_size, self.pixor_size))
            vh_regr = np.zeros((self.pixor_size, self.pixor_size, 6))

            # Generate the PIXOR image. Note that CARLA uses a left-handed
            # coordinate system. Get the 6-dim geometry parametrization in
            # PIXOR; here we use pixel coordinates.
            for actor in self.world.get_actors().filter('vehicle.*'):
                x, y, yaw, l, w = get_info(actor)
                x_local, y_local, yaw_local = get_local_pose(
                    (x, y, yaw), (ego_x, ego_y, ego_yaw))
                if actor.id != self.ego.id:
                    if (abs(y_local) < self.obs_range / 2 + 1
                            and -self.d_behind - 1 < x_local <
                            self.obs_range - self.d_behind + 1):
                        x_pixel, y_pixel, yaw_pixel, l_pixel, w_pixel = get_pixel_info(
                            local_info=(x_local, y_local, yaw_local, l, w),
                            d_behind=self.d_behind,
                            obs_range=self.obs_range,
                            image_size=self.pixor_size)
                        cos_t = np.cos(yaw_pixel)
                        sin_t = np.sin(yaw_pixel)
                        logw = np.log(w_pixel)
                        logl = np.log(l_pixel)
                        pixels = get_pixels_inside_vehicle(
                            pixel_info=(x_pixel, y_pixel, yaw_pixel, l_pixel,
                                        w_pixel),
                            pixel_grid=self.pixel_grid)
                        for pixel in pixels:
                            vh_clas[pixel[0], pixel[1]] = 1
                            dx = x_pixel - pixel[0]
                            dy = y_pixel - pixel[1]
                            vh_regr[pixel[0], pixel[1], :] = np.array(
                                [cos_t, sin_t, dx, dy, logw, logl])

            # Flip the image matrix so that the origin is at the left-bottom
            vh_clas = np.flip(vh_clas, axis=0)
            vh_regr = np.flip(vh_regr, axis=0)

            # Pixor state, [x, y, cos(yaw), sin(yaw), speed]
            pixor_state = [
                ego_x, ego_y,
                np.cos(ego_yaw),
                np.sin(ego_yaw), speed
            ]

        birdview: BirdView = self.birdview_producer.produce(
            agent_vehicle=self.ego)
        bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview),
                           cv2.COLOR_BGR2RGB)

        v = self.ego.get_velocity()
        speed_kmh = 3.6 * np.sqrt(v.x**2 + v.y**2 + v.z**2)
        speed_kmh = np.array([speed_kmh])
        obs = {
            'original_camera': original_camera.astype(np.uint8),
            'speed': speed_kmh.astype(np.uint8),
            'camera': camera.astype(np.uint8),
            'lidar': lidar.astype(np.uint8),
            'birdeye': birdeye.astype(np.uint8),
            'driving_image': bgr.astype(np.uint8),
            'state': state,
        }

        if self.pixor:
            obs.update({
                'roadmap':
                roadmap.astype(np.uint8),
                'vh_clas':
                np.expand_dims(vh_clas, -1).astype(np.float32),
                'vh_regr':
                vh_regr.astype(np.float32),
                'pixor_state':
                pixor_state,
            })

        return obs

    def get_reward_speed(self, isdone, steer, isout):
        v = self.ego.get_velocity()
        speed_kmh = 3.6 * np.sqrt(v.x**2 + v.y**2 + v.z**2)

        if speed_kmh <= min_speed:
            r_speed = speed_kmh  # reward grows linearly with speed up to min_speed
        elif speed_kmh > max_speed:
            r_speed = max_speed - speed_kmh  # negative: penalize exceeding max_speed
        else:
            r_speed = speed_kmh  # linear reward inside [min_speed, max_speed]

        r_steer = -(steer * steer)
        r_collision = 0
        if len(self.collision_hist) > 0:
            r_collision = -10.0

        r_out = 0
        if isout:
            r_out = -1

        reward_t = r_speed + r_steer + r_collision + r_out - 0.1
        if math.isnan(reward_t):
            print("Reward is Nan")
            print("r_speed :  {} ".format(r_speed))
            print("r_steer :  {} ".format(r_steer))
            print("steer :  {} ".format(steer))
        if reward_t == 0:
            print("Reward is 0")
        return reward_t
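    # Worked example (assuming the module-level constants min_speed = 10 and
    # max_speed = 50 km/h, which are defined outside this snippet): at 30 km/h
    # with steer = 0.1 and no collision or lane exit,
    # reward_t = 30 - 0.1**2 + 0 + 0 - 0.1 = 29.89.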

    def _get_reward_speed_centering(self, isdone):
        """
        reward = positive speed reward for being close to the target speed
                 (with a quick decline in reward beyond the target speed)
               * centering factor (1 when centered, 0 when not)
               * angle factor (1 when aligned with the road, 0 when more than
                 20 degrees off)
    """
        v = self.ego.get_velocity()
        fwd = vector(v)
        #wp         = self.world.get_map().get_waypoint(self.ego.get_location())
        #wp_fwd     = vector(wp.transform.rotation.get_forward_vector())
        wp_fwd = vector(
            self.current_waypoint.transform.rotation.get_forward_vector())
        angle = angle_diff(fwd, wp_fwd)
        speed_kmh = 3.6 * np.sqrt(v.x**2 + v.y**2 + v.z**2)

        if self.throttle <= 0:
            # if throttle is non-positive, the car cannot accelerate
            speed_reward = -10.0
        else:
            if speed_kmh < min_speed:  # When speed is in [0, min_speed] range
                speed_reward = speed_kmh
            elif speed_kmh >= min_speed and speed_kmh <= target_speed:
                speed_reward = speed_kmh
            elif speed_kmh > target_speed:
                speed_reward = min_speed  # only give min speed reward

        collision_reward = 0
        if len(self.collision_hist) > 0:
            collision_reward = -1

        current_location = self.ego.get_location()
        start = self.step_last_location
        dist = current_location.distance(start)  # in meters
        dist_reward = dist
        print("Distance reward = {} , speed reward = {} , throttle = {} ".format(
            dist_reward, speed_reward, self.throttle))

        # update last location
        self.step_last_location = current_location

        # Interpolated from 1 when centered to 0 when max_distance from center
        centering_factor = max(
            1.0 - self.distance_from_center / self.max_distance, 0.0)
        centering_reward = max(self.distance_from_center / self.max_distance,
                               0.0)  # currently unused

        # Interpolated from 1 when aligned with the road to 0 when +/- 20 degrees off
        angle_factor = max(1.0 - abs(angle / np.deg2rad(20)), 0.0)

        # reward for out of lane
        ego_x, ego_y = get_pos(self.ego)
        dis, w = get_lane_dis(self.waypoints, ego_x, ego_y)
        r_out = 0
        if abs(dis) > self.out_lane_thres:
            r_out = -1

        # Final reward (angle_factor and r_out are computed above but not
        # included in the returned reward)
        reward = dist_reward + centering_factor + 10 * collision_reward
        #print("Dist reward {}  * centering_factor {} ".format(dist_reward, centering_factor))
        return reward, centering_factor, 10 * collision_reward, r_out

    def _get_reward(self):
        """Calculate the step reward."""
        # reward for speed tracking
        v = self.ego.get_velocity()
        speed = np.sqrt(v.x**2 + v.y**2)
        r_speed = -abs(speed - self.desired_speed)

        # reward for collision
        r_collision = 0
        if len(self.collision_hist) > 0:
            r_collision = -1

        # reward for steering:
        r_steer = -self.ego.get_control().steer**2

        # reward for out of lane
        ego_x, ego_y = get_pos(self.ego)
        dis, w = get_lane_dis(self.waypoints, ego_x, ego_y)
        r_out = 0
        if abs(dis) > self.out_lane_thres:
            r_out = -1

        # reward for not being in the centre of the lane
        # get the way point to the centre of the road
        waypoint = self.world.get_map().get_waypoint(self.ego.get_location(),
                                                     project_to_road=True)
        ways = np.array([[
            waypoint.transform.location.x, waypoint.transform.location.y,
            waypoint.transform.rotation.yaw
        ]])
        dis, w = get_preview_lane_dis(ways, ego_x, ego_y, 0)
        if np.isnan(dis):
            r_centre = 2.383e-07  # small non-zero placeholder when distance is NaN
            print("Car centre distance is NaN , r_centre = {}".format(r_centre))
        else:
            r_centre = abs(dis)
        #print("Car centre distance : {}".format(r_centre))

        # longitudinal speed
        lspeed = np.array([v.x, v.y])
        lspeed_lon = np.dot(lspeed, w)

        # cost for too fast
        r_fast = 0
        if lspeed_lon > self.desired_speed:
            r_fast = -1

        # cost for lateral acceleration
        r_lat = -abs(self.ego.get_control().steer) * lspeed_lon**2

        #r = 200*r_collision + 1*lspeed_lon + 10*r_fast + 1*r_out + r_steer*5 + 0.2*r_lat - 0.1
        #r = 300*r_collision + 1*lspeed_lon + 10*r_fast + 200*r_out + r_steer*5 + 0.2*r_lat - 0.1
        r = 200 * r_collision + 1 * lspeed_lon + 10 * r_fast + 70 * r_out + r_steer * 5 + 0.2 * r_lat - 120 * r_centre - 0.1

        return r, (-120 * r_centre), (200 * r_collision), (70 * r_out)
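    # Worked example (illustrative, assuming desired_speed is above 5 m/s):
    # driving centred (r_centre = 0) and straight (steer = 0) at a longitudinal
    # speed of 5 m/s, with no collision and inside the lane, gives
    # r = 1 * 5 - 0.1 = 4.9.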

    def _terminal(self):
        """Calculate whether to terminate the current episode.

    Returns:
      (done, out): done is True when the episode should end; out is True only
      when the cause is leaving the lane or drifting too far from the center.
    """
        # Get ego state
        ego_x, ego_y = get_pos(self.ego)

        # If collides
        if len(self.collision_hist) > 0:
            return True, False

        # If reach maximum timestep
        if self.time_step > self.max_time_episode:
            return True, False

        # If at destination
        if self.dests is not None:
            for dest in self.dests:
                if np.sqrt((ego_x - dest[0])**2 + (ego_y - dest[1])**2) < 4:
                    return True, False

        # Stop if distance from center > max distance
        if self.distance_from_center > self.max_distance:
            print("End : Distance from center more than max distance : {} ".
                  format(self.distance_from_center))
            return True, True

        # If out of lane
        dis, _ = get_lane_dis(self.waypoints, ego_x, ego_y)
        if abs(dis) > self.out_lane_thres:
            print("End : Out of Lane , distance : {} ".format(dis))
            return True, True

        # Speed is too fast
        v = self.ego.get_velocity()
        speed_kmh = 3.6 * np.sqrt(v.x**2 + v.y**2 + v.z**2)
        if max_speed > 0 and speed_kmh > max_speed:
            print("End : Too fast {} ".format(speed_kmh))
            return True, False
        return False, False

    def _clear_all_actors(self, actor_filters):
        """Clear specific actors."""
        for actor_filter in actor_filters:
            for actor in self.world.get_actors().filter(actor_filter):
                if actor.is_alive:
                    if actor.type_id == 'controller.ai.walker':
                        actor.stop()
                    actor.destroy()
                    print("Destroy : {} ".format(actor.type_id))
                else:
                    print("Not Destroyed : {} ".format(actor.type_id))
Esempio n. 9
0
    def reset(self):
        # Clear sensor objects
        self.collision_sensor = None
        self.lidar_sensor = None
        self.camera_sensor = None
        self.step_rewards = []
        self.step_actions = []
        self.step_steering = []

        self.birdview_producer = BirdViewProducer(
            self.client,  # carla.Client
            target_size=PixelDimensions(width=self.image_width,
                                        height=self.image_height),
            pixels_per_meter=4,
            crop_type=BirdViewCropType.FRONT_AND_REAR_AREA)

        # Delete sensors, vehicles and walkers
        self._clear_all_actors([
            'sensor.other.collision', 'sensor.lidar.ray_cast',
            'sensor.camera.rgb', 'vehicle.*', 'controller.ai.walker',
            'walker.*'
        ])

        # Disable sync mode
        self._set_synchronous_mode(False)

        # Spawn surrounding vehicles
        random.shuffle(self.vehicle_spawn_points)
        count = self.number_of_vehicles
        if count > 0:
            for spawn_point in self.vehicle_spawn_points:
                if self._try_spawn_random_vehicle_at(spawn_point,
                                                     number_of_wheels=[4]):
                    count -= 1
                if count <= 0:
                    break
        while count > 0:
            if self._try_spawn_random_vehicle_at(random.choice(
                    self.vehicle_spawn_points),
                                                 number_of_wheels=[4]):
                count -= 1

        # Spawn pedestrians
        random.shuffle(self.walker_spawn_points)
        count = self.number_of_walkers
        if count > 0:
            for spawn_point in self.walker_spawn_points:
                if self._try_spawn_random_walker_at(spawn_point):
                    count -= 1
                if count <= 0:
                    break
        while count > 0:
            if self._try_spawn_random_walker_at(
                    random.choice(self.walker_spawn_points)):
                count -= 1

        # Get actors polygon list
        self.vehicle_polygons = []
        vehicle_poly_dict = self._get_actor_polygons('vehicle.*')
        self.vehicle_polygons.append(vehicle_poly_dict)
        self.walker_polygons = []
        walker_poly_dict = self._get_actor_polygons('walker.*')
        self.walker_polygons.append(walker_poly_dict)

        # Spawn the ego vehicle
        ego_spawn_times = 0
        while True:
            if ego_spawn_times > self.max_ego_spawn_times:
                # too many failed attempts: start the whole reset over
                return self.reset()

            if self.task_mode == 'random':
                transform = random.choice(self.vehicle_spawn_points)
            if self.task_mode == 'roundabout':
                self.start = [52.1 + np.random.uniform(-5, 5), -4.2,
                              178.66]  # random
                # self.start=[52.1,-4.2, 178.66] # static
                transform = set_carla_transform(self.start)
            if self._try_spawn_ego_vehicle_at(transform):
                break
            else:
                ego_spawn_times += 1
                time.sleep(0.1)

        # Add collision sensor
        self.collision_sensor = self.world.spawn_actor(self.collision_bp,
                                                       carla.Transform(),
                                                       attach_to=self.ego)
        self.collision_sensor.listen(lambda event: get_collision_hist(event))

        def get_collision_hist(event):
            impulse = event.normal_impulse
            intensity = np.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
            self.collision_hist.append(intensity)
            if len(self.collision_hist) > self.collision_hist_l:
                self.collision_hist.pop(0)

        self.collision_hist = []

        # Add lidar sensor
        self.lidar_sensor = self.world.spawn_actor(self.lidar_bp,
                                                   self.lidar_trans,
                                                   attach_to=self.ego)
        self.lidar_sensor.listen(lambda data: get_lidar_data(data))

        def get_lidar_data(data):
            self.lidar_data = data

        # Add camera sensor
        self.camera_sensor = self.world.spawn_actor(self.camera_bp,
                                                    self.camera_trans,
                                                    attach_to=self.ego)
        self.camera_sensor.listen(lambda data: get_camera_img(data))

        def get_camera_img(data):
            array = np.frombuffer(data.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (data.height, data.width, 4))
            array = array[:, :, :3]
            i3 = (array) / 255.
            array = array[:, :, ::-1]
            self.camera_img = array
            self.original_camera_image = i3

        # Update timesteps
        self.time_step = 0
        self.reset_step += 1

        # Enable sync mode
        self.settings.synchronous_mode = True
        self.world.apply_settings(self.settings)

        self.routeplanner = RoutePlanner(self.ego, self.max_waypt)
        self.waypoints, _, self.vehicle_front = self.routeplanner.run_step()
        # get all the route waypoints
        self.route_waypoints = self.routeplanner._get_waypoints_data()
        #print(" Route waypoints : {} ".format(len(self.route_waypoints)))
        # Set ego information for render
        self.birdeye_render.set_hero(self.ego, self.ego.id)

        self.current_waypoint_index = 0
        self.step_start_location = self.ego.get_location()
        self.step_last_location = self.step_start_location
        return self._get_obs()
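
For reference, the raw-buffer handling in get_camera_img above can be exercised on its own; below is a self-contained sketch of the same BGRA-to-RGB conversion (illustrative, not part of the snippet):

import numpy as np

def bgra_buffer_to_rgb(raw_data, height, width):
    """Convert CARLA's BGRA byte buffer to an RGB uint8 array."""
    array = np.frombuffer(raw_data, dtype=np.uint8)
    array = np.reshape(array, (height, width, 4))
    bgr = array[:, :, :3]  # drop the alpha channel
    rgb = bgr[:, :, ::-1]  # BGR -> RGB
    return rgb

# Example with a dummy 2x2 BGRA buffer:
dummy = np.zeros((2, 2, 4), dtype=np.uint8).tobytes()
assert bgra_buffer_to_rgb(dummy, 2, 2).shape == (2, 2, 3)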
Esempio n. 10
0
def main(optimalDistance, followDrivenPath, chaseMode, evaluateChasingCar,
         driveName='', record=False, followMode=False, resultsName='results',
         P=None, I=None, D=None, nOfFramesToSkip=0):
    # Imports
    # from cores.lane_detection.lane_detector import LaneDetector
    # from cores.lane_detection.camera_geometry import CameraGeometry
    # from cores.control.pure_pursuit import PurePursuitPlusPID

    # New imports
    from DrivingControl import DrivingControl
    from DrivingControlAdvanced import DrivingControlAdvanced
    from CarDetector import CarDetector
    from SemanticSegmentation import SemanticSegmentation


    # New Variables
    # NOTE: the assignments below override the corresponding function arguments
    extrapolate = True
    optimalDistance = 8
    followDrivenPath = True
    evaluateChasingCar = True
    record = False
    chaseMode = True
    followMode = False
    counter = 1
    sensors = []

    vehicleToFollowSpawned = False
    obsticle_vehicleSpawned = False

    # New objects
    carDetector = CarDetector()
    drivingControl = DrivingControl(optimalDistance=optimalDistance)
    drivingControlAdvanced = DrivingControlAdvanced(optimalDistance=optimalDistance)
    evaluation = Evaluation()
    semantic = SemanticSegmentation()


    actor_list = []
    pygame.init()

    display = pygame.display.set_mode(
        (800, 600),
        pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()

    client = carla.Client('localhost', 2000)
    client.set_timeout(80.0)

    #client.load_world('Town06')
    # client.load_world('Town04')
    world = client.get_world()
    weather_presets = find_weather_presets()
    # print(weather_presets)
    world.set_weather(weather_presets[3][0])
    # world.set_weather(carla.WeatherParameters.HardRainSunset)

    # controller = PurePursuitPlusPID()

    # Set BirdView
    birdview_producer = BirdViewProducer(
        client,
        PixelDimensions(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
        render_lanes_on_junctions=False,
    )


    try:
        m = world.get_map()

        blueprint_library = world.get_blueprint_library()

        veh_bp = random.choice(blueprint_library.filter('vehicle.dodge_charger.police'))
        vehicle = world.spawn_actor(
            veh_bp,
            m.get_spawn_points()[90])
        actor_list.append(vehicle)

        # New vehicle property
        vehicle.set_simulate_physics(True)

        if followDrivenPath:
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(carla.Location(first[0], first[1], first[2]),
                                        carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)

        # New Sensors
        collision_sensor = world.spawn_actor(blueprint_library.find('sensor.other.collision'),
                                                carla.Transform(), attach_to=vehicle)
        collision_sensor.listen(lambda event: evaluation.CollisionHandler(event))
        actor_list.append(collision_sensor)

        
        # front cam for object detection
        camera_rgb_blueprint = world.get_blueprint_library().find('sensor.camera.rgb')
        camera_rgb_blueprint.set_attribute('fov', '90')
        camera_rgb = world.spawn_actor(
           camera_rgb_blueprint,
            carla.Transform(carla.Location(x=1.5, z=1.4,y=0.3), carla.Rotation(pitch=0)),
            attach_to=vehicle)
        actor_list.append(camera_rgb)
        sensors.append(camera_rgb)

            
        # segmentation camera
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4,y=0), carla.Rotation(pitch=0)), #5,3,0 # -0.3
            attach_to=vehicle)
        actor_list.append(camera_segmentation)
        sensors.append(camera_segmentation)

        # cg = CameraGeometry()
        # ld = LaneDetector(model_path=Path("best_model.pth").absolute())
        # #windshield cam
        # cam_windshield_transform = carla.Transform(carla.Location(x=0.5, z=cg.height), carla.Rotation(pitch=-1*cg.pitch_deg))
        # bp = blueprint_library.find('sensor.camera.rgb')
        # bp.set_attribute('image_size_x', str(cg.image_width))
        # bp.set_attribute('image_size_y', str(cg.image_height))
        # bp.set_attribute('fov', str(cg.field_of_view_deg))
        # camera_windshield = world.spawn_actor(
        #     bp,
        #     cam_windshield_transform,
        #     attach_to=vehicle)
        # actor_list.append(camera_windshield)
        # sensors.append(camera_windshield)


        
        # Set up local planner and behavioural planner
        # --------------------------------------------------------------

        # Planning Constants
        NUM_PATHS = 7
        BP_LOOKAHEAD_BASE      = 8.0              # m
        BP_LOOKAHEAD_TIME      = 2.0              # s
        PATH_OFFSET            = 1.5              # m
        CIRCLE_OFFSETS         = [-1.0, 1.0, 3.0] # m
        CIRCLE_RADII           = [1.5, 1.5, 1.5]  # m
        TIME_GAP               = 1.0              # s
        PATH_SELECT_WEIGHT     = 10
        A_MAX                  = 1.5              # m/s^2
        SLOW_SPEED             = 2.0              # m/s
        STOP_LINE_BUFFER       = 3.5              # m
        LEAD_VEHICLE_LOOKAHEAD = 20.0             # m
        LP_FREQUENCY_DIVISOR   = 2                # Frequency divisor to make the
                                                  # local planner operate at a lower
                                                  # frequency than the controller
                                                  # (which operates at the simulation
                                                  # frequency). Must be a natural
                                                  # number.

        PREV_BEST_PATH         = []
        stopsign_fences = [] 


        # --------------------------------------------------------------


        frame = 0
        max_error = 0
        FPS = 30
        speed = 0
        cross_track_error = 0
        start_time = 0.0
        times = 8
        LP_FREQUENCY_DIVISOR = 4

        # Create a synchronous mode context.
        with CarlaSyncMode(world, *sensors, fps=FPS) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick()          
                start_time += clock.get_time()

                # Advance the simulation and wait for the data. 
                # tick_response = sync_mode.tick(timeout=2.0)


                # Display BirdView
                # Input for your model - call it every simulation step
                # returned result is np.ndarray with ones and zeros of shape (8, height, width)
                
                birdview = birdview_producer.produce(agent_vehicle=vehicle)
                bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview), cv2.COLOR_BGR2RGB)
                # NOTE imshow requires BGR color model
                cv2.imshow("BirdView RGB", bgr)
                cv2.waitKey(1)


                # snapshot, image_rgb, image_segmentation = tick_response
                snapshot, img_rgb, image_segmentation = sync_mode.tick(timeout=2.0)

                # detect car in image with the semantic segmentation camera
                carInTheImage = semantic.IsThereACarInThePicture(image_segmentation)

                line = []

                current_speed = np.linalg.norm(carla_vec_to_np_array(vehicle.get_velocity()))

                # Spawn a vehicle to follow
                if not vehicleToFollowSpawned and followDrivenPath:
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(location1.location.x, location1.location.y,
                                                                     location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX*5)
                    newY = location1.location.y - (diffY*5)

                    start_pose.location.x = newX
                    start_pose.location.y = newY

                    vehicle.set_transform(start_pose)

                    start_pose2 = random.choice(m.get_spawn_points())

                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(
                        bp,
                        start_pose2)

                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation

                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z

                    vehicleToFollow.set_transform(start_pose2)

                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    # vehicleToFollow.set_autopilot(False)

                if followDrivenPath:
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(carla.Location(tmp[0] ,tmp[1],tmp[2]),carla.Rotation(tmp[3],tmp[4],tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1


                # Set up obstacle vehicle for testing
                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()

                # if not obsticle_vehicleSpawned and followDrivenPath:
                #     obsticle_vehicleSpawned = True
                #     # Adding new obsticle vehicle 

                #     start_pose3 = random.choice(m.get_spawn_points())

                #     obsticle_vehicle = world.spawn_actor(
                #         random.choice(blueprint_library.filter('jeep')),
                #         start_pose3)

                #     start_pose3 = carla.Transform()
                #     start_pose3.rotation = start_pose2.rotation
                #     start_pose3.location.x = start_pose2.location.x 
                #     start_pose3.location.y =  start_pose2.location.y + 50 
                #     start_pose3.location.z =  start_pose2.location.z

                #     obsticle_vehicle.set_transform(start_pose3)


                #     actor_list.append(obsticle_vehicle)
                #     obsticle_vehicle.set_simulate_physics(True)



                # # Parked car(s) (X(m), Y(m), Z(m), Yaw(deg), RADX(m), RADY(m), RADZ(m))
                # parkedcar_data = None
                # parkedcar_box_pts = []      # [x,y]
                # with open(C4_PARKED_CAR_FILE, 'r') as parkedcar_file:
                #     next(parkedcar_file)  # skip header
                #     parkedcar_reader = csv.reader(parkedcar_file, 
                #                                 delimiter=',', 
                #                                 quoting=csv.QUOTE_NONNUMERIC)
                #     parkedcar_data = list(parkedcar_reader)
                #     # convert to rad
                #     for i in range(len(parkedcar_data)):
                #         parkedcar_data[i][3] = parkedcar_data[i][3] * np.pi / 180.0 

                # # obtain parked car(s) box points for LP
                # for i in range(len(parkedcar_data)):
                #     x = parkedcar_data[i][0]
                #     y = parkedcar_data[i][1]
                #     z = parkedcar_data[i][2]
                #     yaw = parkedcar_data[i][3]
                #     xrad = parkedcar_data[i][4]
                #     yrad = parkedcar_data[i][5]
                #     zrad = parkedcar_data[i][6]
                #     cpos = np.array([
                #             [-xrad, -xrad, -xrad, 0,    xrad, xrad, xrad,  0    ],
                #             [-yrad, 0,     yrad,  yrad, yrad, 0,    -yrad, -yrad]])
                #     rotyaw = np.array([
                #             [np.cos(yaw), np.sin(yaw)],
                #             [-np.sin(yaw), np.cos(yaw)]])
                #     cpos_shift = np.array([
                #             [x, x, x, x, x, x, x, x],
                #             [y, y, y, y, y, y, y, y]])
                #     cpos = np.add(np.matmul(rotyaw, cpos), cpos_shift)
                #     for j in range(cpos.shape[1]):
                #         parkedcar_box_pts.append([cpos[0,j], cpos[1,j]])


                # if frame % LP_FREQUENCY_DIVISOR == 0:
                #     # Update vehicleToFollow transorm with obsticles
                #     # --------------------------------------------------------------
                #     _LOOKAHEAD_INDEX = 5
                #     _BP_LOOKAHEAD_BASE = 8.0              # m 
                #     _BP_LOOKAHEAD_TIME = 2.0              # s 


                #     # unsupported operand type(s) for +: 'float' and 'Vector3D'
                #     lookahead_time = _BP_LOOKAHEAD_BASE +  _BP_LOOKAHEAD_TIME *  vehicleToFollow.get_velocity().z


                #     location3 = obsticle_vehicle.get_transform()
                    
                #     # Calculate the goal state set in the local frame for the local planner.
                #     # Current speed should be open loop for the velocity profile generation.
                #     ego_state = [location2.location.x, location2.location.y, location2.rotation.yaw, vehicleToFollow.get_velocity().z]

                #     # Set lookahead based on current speed.
                #     b_planner.set_lookahead(_BP_LOOKAHEAD_BASE + _BP_LOOKAHEAD_TIME * vehicleToFollow.get_velocity().z)

                    
                #     # Perform a state transition in the behavioural planner.
                #     b_planner.transition_state(evaluation.history, ego_state, current_speed)
                #     # print("The current speed = %f" % current_speed)

                #     # # Find the closest index to the ego vehicle.
                #     # closest_len, closest_index = behavioural_planner.get_closest_index(evaluation.history, ego_state)

                #     # print("closest_len: ", closest_len)
                #     # print("closest_index: ", closest_index)
                    
                #     # # Find the goal index that lies within the lookahead distance
                #     # # along the waypoints.
                #     # goal_index = b_planner.get_goal_index(evaluation.history, ego_state, closest_len, closest_index)

                #     # print("goal_index: ", goal_index)

                #     # # Set goal_state
                #     # goal_state = evaluation.history[goal_index]
            
                #     # Compute the goal state set from the behavioural planner's computed goal state.
                #     goal_state_set = l_planner.get_goal_state_set(b_planner._goal_index, b_planner._goal_state, evaluation.history, ego_state)

                #     # # Calculate planned paths in the local frame.
                #     paths, path_validity = l_planner.plan_paths(goal_state_set)

                #     # # Transform those paths back to the global frame.
                #     paths = local_planner.transform_paths(paths, ego_state)

                #     # Detect obsticle car
                #     obsticle_bbox, obsticle_predicted_distance, obsticle_predicted_angle = carDetector.getDistance(obsticle_vehicle, camera_rgb,carInTheImage,extrapolation=extrapolate,nOfFramesToSkip=nOfFramesToSkip)

                #     obsticle_bbox =[ [bbox[0],bbox[1]] for bbox in obsticle_bbox] 
     
                #     print("paths: ", paths)


                #     if obsticle_bbox:
                #         # # Perform collision checking.
                #         collision_check_array = l_planner._collision_checker.collision_check(paths, [obsticle_bbox])
                #         print("collision_check_array: ", collision_check_array)

                #         # Compute the best local path.
                #         best_index = l_planner._collision_checker.select_best_path_index(paths, collision_check_array, b_planner._goal_state)
                #         print("The best_index :", best_index)


                #     desired_speed = b_planner._goal_state[2]
                #     print("The desired_speed = %f" % desired_speed)

                # newX, newY = carDetector.CreatePointInFrontOFCar(location2.location.x, location2.location.y,location2.rotation.yaw)
                # new_angle = carDetector.getAngle([location2.location.x, location2.location.y], [newX, newY],
                #                              [location3.location.x, location3.location.y])
                
                # tmp = evaluation.history[counter-1]
                # currentPos = carla.Transform(carla.Location(tmp[0] + 0 ,tmp[1],tmp[2]),carla.Rotation(tmp[3],tmp[4],tmp[5]))
                # vehicleToFollow.set_transform(currentPos)


                # --------------------------------------------------------------





                # Update vehicle position by detecting vehicle to follow position
                newX, newY = carDetector.CreatePointInFrontOFCar(location1.location.x, location1.location.y,location1.rotation.yaw)
                angle = carDetector.getAngle([location1.location.x, location1.location.y], [newX, newY],
                                             [location2.location.x, location2.location.y])

                possibleAngle = 0
                drivableIndexes = []
                bbox = []

                
                bbox, predicted_distance,predicted_angle = carDetector.getDistance(vehicleToFollow, camera_rgb, carInTheImage,extrapolation=extrapolate,nOfFramesToSkip=nOfFramesToSkip)

                if frame % LP_FREQUENCY_DIVISOR == 0:
                    # This is the bottleneck and takes time to run, but it is necessary for chasing around turns
                    predicted_angle, drivableIndexes = semantic.FindPossibleAngle(image_segmentation,bbox,predicted_angle) # still necessary; needs optimizing
                    
                steer, throttle = drivingControlAdvanced.PredictSteerAndThrottle(predicted_distance,predicted_angle,None)

                # Apply the computed control to the ego vehicle
                send_control(vehicle, throttle, steer, 0)


                speed = np.linalg.norm(carla_vec_to_np_array(vehicle.get_velocity()))

                real_dist = location1.location.distance(location2.location)
             


                fps = round(1.0 / snapshot.timestamp.delta_seconds)

                # Draw the display.
                # draw_image(display, image_rgb)
                draw_image(display, img_rgb, image_segmentation,location1, location2,record=record,driveName=driveName,smazat=line)
                display.blit(
                    font.render('     FPS (real) % 5d ' % clock.get_fps(), True, (255, 255, 255)),
                    (8, 10))
                display.blit(
                    font.render('     FPS (simulated): % 5d ' % fps, True, (255, 255, 255)),
                    (8, 28))
                display.blit(
                    font.render('     speed: {:.2f} m/s'.format(speed), True, (255, 255, 255)),
                    (8, 46))
                display.blit(
                    font.render('     distance to target: {:.2f} m'.format(real_dist), True, (255, 255, 255)),
                    (8, 66))
                # display.blit(
                #     font.render('     cross track error: {:03d} cm'.format(cross_track_error), True, (255, 255, 255)),
                #     (8, 64))
                # display.blit(
                #     font.render('     max cross track error: {:03d} cm'.format(max_error), True, (255, 255, 255)),
                #     (8, 82))


                # Draw bbox on following vehicle
                if len(bbox) != 0:
                    points = [(int(bbox[i, 0]), int(bbox[i, 1])) for i in range(8)]
                    BB_COLOR = (248, 64, 24)
                    # draw lines
                    # base
                    pygame.draw.line(display, BB_COLOR, points[0], points[1])
                    pygame.draw.line(display, BB_COLOR, points[1], points[2])
                    pygame.draw.line(display, BB_COLOR, points[2], points[3])
                    pygame.draw.line(display, BB_COLOR, points[3], points[0])
                    # top
                    pygame.draw.line(display, BB_COLOR, points[4], points[5])
                    pygame.draw.line(display, BB_COLOR, points[5], points[6])
                    pygame.draw.line(display, BB_COLOR, points[6], points[7])
                    pygame.draw.line(display, BB_COLOR, points[7], points[4])
                    # base-top
                    pygame.draw.line(display, BB_COLOR, points[0], points[4])
                    pygame.draw.line(display, BB_COLOR, points[1], points[5])
                    pygame.draw.line(display, BB_COLOR, points[2], points[6])
                    pygame.draw.line(display, BB_COLOR, points[3], points[7])

                # DrawDrivable(drivableIndexes, image_segmentation.width // 10, image_segmentation.height // 10, display)



                pygame.display.flip()

                frame += 1
                # if save_gif and frame > 1000:
                #     print("frame=",frame)
                #     imgdata = pygame.surfarray.array3d(pygame.display.get_surface())
                #     imgdata = imgdata.swapaxes(0,1)
                #     if frame < 1200:
                #         images.append(imgdata)
                

    finally:
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')
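
The main() functions in these snippets call a send_control helper that is defined elsewhere; a minimal implementation consistent with how it is invoked here (positional throttle, steer, brake) might be:

import carla

def send_control(vehicle, throttle, steer, brake,
                 hand_brake=False, reverse=False):
    """Clamp inputs to the ranges CARLA expects and apply them to the vehicle."""
    control = carla.VehicleControl(
        throttle=max(0.0, min(1.0, float(throttle))),
        steer=max(-1.0, min(1.0, float(steer))),
        brake=max(0.0, min(1.0, float(brake))),
        hand_brake=hand_brake,
        reverse=reverse,
    )
    vehicle.apply_control(control)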
Esempio n. 11
0
def main(optimalDistance,
         followDrivenPath,
         chaseMode,
         evaluateChasingCar,
         driveName='',
         record=False,
         followMode=False,
         resultsName='results',
         P=None,
         I=None,
         D=None,
         nOfFramesToSkip=0):
    # Imports
    # from cores.lane_detection.lane_detector import LaneDetector
    # from cores.lane_detection.camera_geometry import CameraGeometry
    # from cores.control.pure_pursuit import PurePursuitPlusPID

    # New imports
    from DrivingControl import DrivingControl
    from DrivingControlAdvanced import DrivingControlAdvanced
    from CarDetector import CarDetector
    from SemanticSegmentation import SemanticSegmentation

    # New Variables
    # NOTE: the assignments below override the corresponding function arguments
    extrapolate = True
    optimalDistance = 1
    followDrivenPath = True
    evaluateChasingCar = True
    record = False
    chaseMode = True
    followMode = False
    counter = 1
    sensors = []

    vehicleToFollowSpawned = False
    obsticle_vehicleSpawned = False

    # New objects
    carDetector = CarDetector()
    drivingControl = DrivingControl(optimalDistance=optimalDistance)
    drivingControlAdvanced = DrivingControlAdvanced(
        optimalDistance=optimalDistance)
    evaluation = Evaluation()
    semantic = SemanticSegmentation()

    actor_list = []
    pygame.init()

    display = pygame.display.set_mode((800, 600),
                                      pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()

    client = carla.Client('localhost', 2000)
    client.set_timeout(80.0)

    #client.load_world('Town06')
    # client.load_world('Town04')
    world = client.get_world()
    weather_presets = find_weather_presets()
    # print(weather_presets)
    world.set_weather(weather_presets[3][0])
    # world.set_weather(carla.WeatherParameters.HardRainSunset)

    # controller = PurePursuitPlusPID()

    # Set BirdView
    birdview_producer = BirdViewProducer(
        client,
        PixelDimensions(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
        render_lanes_on_junctions=False,
    )

    try:
        m = world.get_map()

        blueprint_library = world.get_blueprint_library()

        veh_bp = random.choice(
            blueprint_library.filter('vehicle.dodge_charger.police'))
        vehicle = world.spawn_actor(veh_bp, m.get_spawn_points()[90])
        actor_list.append(vehicle)

        # New vehicle property
        vehicle.set_simulate_physics(True)

        if followDrivenPath:
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(
                carla.Location(first[0], first[1], first[2]),
                carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)

        # New Sensors
        collision_sensor = world.spawn_actor(
            blueprint_library.find('sensor.other.collision'),
            carla.Transform(),
            attach_to=vehicle)
        collision_sensor.listen(
            lambda event: evaluation.CollisionHandler(event))
        actor_list.append(collision_sensor)

        # front cam for object detection
        camera_rgb_blueprint = world.get_blueprint_library().find(
            'sensor.camera.rgb')
        camera_rgb_blueprint.set_attribute('fov', '90')
        camera_rgb = world.spawn_actor(camera_rgb_blueprint,
                                       carla.Transform(
                                           carla.Location(x=1.5, z=1.4, y=0.3),
                                           carla.Rotation(pitch=0)),
                                       attach_to=vehicle)
        actor_list.append(camera_rgb)
        sensors.append(camera_rgb)

        # segmentation camera
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0),
                            carla.Rotation(pitch=0)),  #5,3,0 # -0.3
            attach_to=vehicle)
        actor_list.append(camera_segmentation)
        sensors.append(camera_segmentation)

        # Set up local planner and behavioural planner
        # --------------------------------------------------------------

        # --------------------------------------------------------------

        frame = 0
        max_error = 0
        FPS = 30
        speed = 0
        cross_track_error = 0
        start_time = 0.0
        times = 8
        LP_FREQUENCY_DIVISOR = 8  # Frequency divisor to make the
        # local planner operate at a lower
        # frequency than the controller
        # (which operates at the simulation
        # frequency). Must be a natural
        # number.

        # Temporary obstacle list
        ob = np.array([
            [233.980630, 130.523910],
            [233.980630, 30.523910],
            [233.980630, 60.523910],
            [233.980630, 80.523910],
            [233.786942, 75.530586],
        ])
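        # Each row of `ob` is an (x, y) obstacle position in world coordinates;
        # the Frenet planner below checks candidate paths against these points.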

        wx = []
        wy = []
        wz = []

        for p in evaluation.history:
            wp = carla.Transform(carla.Location(p[0], p[1], p[2]),
                                 carla.Rotation(p[3], p[4], p[5]))
            wx.append(wp.location.x)
            wy.append(wp.location.y)
            wz.append(wp.location.z)

        tx, ty, tyaw, tc, csp = generate_target_course(wx, wy, wz)

        # initial state
        c_speed = 2.0 / 3.6  # current speed [m/s]
        c_d = 2.0  # current lateral position [m]
        c_d_d = 0.0  # current lateral speed [m/s]
        c_d_dd = 0.0  # current lateral acceleration [m/s^2]
        s0 = 0.0  # current course position [m]

        # Create a synchronous mode context.
        with CarlaSyncMode(world, *sensors, fps=FPS) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick()
                start_time += clock.get_time()

                # Advance the simulation and wait for the data.
                # tick_response = sync_mode.tick(timeout=2.0)

                # Display BirdView
                # Input for your model - call it every simulation step
                # returned result is np.ndarray with ones and zeros of shape (8, height, width)

                birdview = birdview_producer.produce(agent_vehicle=vehicle)
                bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview),
                                   cv2.COLOR_BGR2RGB)
                # NOTE imshow requires BGR color model
                cv2.imshow("BirdView RGB", bgr)
                cv2.waitKey(1)

                # snapshot, image_rgb, image_segmentation = tick_response
                snapshot, img_rgb, image_segmentation = sync_mode.tick(
                    timeout=2.0)

                print("type(image_segmentation): ", type(image_segmentation))

                # detect car in image with the semantic segmentation camera
                carInTheImage = semantic.IsThereACarInThePicture(
                    image_segmentation)

                line = []

                # Spawn a vehicle to follow
                if not vehicleToFollowSpawned and followDrivenPath:
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(
                        location1.location.x, location1.location.y,
                        location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX * 5)
                    newY = location1.location.y - (diffY * 5)

                    start_pose.location.x = newX
                    start_pose.location.y = newY

                    vehicle.set_transform(start_pose)

                    start_pose2 = random.choice(m.get_spawn_points())

                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(bp, start_pose2)

                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation

                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z

                    vehicleToFollow.set_transform(start_pose2)

                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    # vehicleToFollow.set_autopilot(False)

                if followDrivenPath:
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(
                        carla.Location(tmp[0], tmp[1], tmp[2]),
                        carla.Rotation(tmp[3], tmp[4], tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1

                # Set up obstacle vehicle for testing
                # location1 = vehicle.get_transform()
                # location2 = vehicleToFollow.get_transform()

                # if not obsticle_vehicleSpawned and followDrivenPath:
                #     obsticle_vehicleSpawned = True
                #     # Adding new obsticle vehicle

                #     start_pose3 = random.choice(m.get_spawn_points())

                #     obsticle_vehicle = world.spawn_actor(
                #         random.choice(blueprint_library.filter('jeep')),
                #         start_pose3)

                #     start_pose3 = carla.Transform()
                #     start_pose3.rotation = start_pose2.rotation
                #     start_pose3.location.x = start_pose2.location.x
                #     start_pose3.location.y =  start_pose2.location.y + 50
                #     start_pose3.location.z =  start_pose2.location.z

                #     obsticle_vehicle.set_transform(start_pose3)

                #     actor_list.append(obsticle_vehicle)
                #     obsticle_vehicle.set_simulate_physics(True)

                # if frame % LP_FREQUENCY_DIVISOR == 0:
                """
                        **********************************************************************************************************************
                        *********************************************** Motion Planner *******************************************************
                        **********************************************************************************************************************
                """

                # tmp = evaluation.history[counter-1]
                # currentPos = carla.Transform(carla.Location(tmp[0] + 0 ,tmp[1],tmp[2]),carla.Rotation(tmp[3],tmp[4],tmp[5]))
                # vehicleToFollow.set_transform(currentPos)

                # ------------------- Frenet  --------------------------------
                path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,
                                               c_d_dd, ob)

                print("path length: ", len(path.x))

                new_vehicleToFollow_transform = carla.Transform()
                new_vehicleToFollow_transform.rotation = carla.Rotation(
                    pitch=0.0, yaw=math.degrees(path.yaw[1]), roll=0.0)

                new_vehicleToFollow_transform.location.x = path.x[1]
                new_vehicleToFollow_transform.location.y = path.y[1]
                new_vehicleToFollow_transform.location.z = path.z[1]

                vehicleToFollow.set_transform(new_vehicleToFollow_transform)

                # ------------------- Control for ego  --------------------------------

                # Set up new locations
                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()

                possibleAngle = 0
                drivableIndexes = []
                bbox = []

                bbox, predicted_distance, predicted_angle = carDetector.getDistance(
                    vehicleToFollow,
                    camera_rgb,
                    carInTheImage,
                    extrapolation=extrapolate,
                    nOfFramesToSkip=nOfFramesToSkip)

                if frame % LP_FREQUENCY_DIVISOR == 0:
                    # This is the bottleneck and takes time to run, but it is
                    # necessary for chasing around turns.
                    predicted_angle, drivableIndexes = semantic.FindPossibleAngle(
                        image_segmentation, bbox, predicted_angle
                    )  # Still necessary; a candidate for optimization

                steer, throttle = drivingControlAdvanced.PredictSteerAndThrottle(
                    predicted_distance, predicted_angle, None)

                # Apply the predicted control (brake = 0) via the send_control helper
                send_control(vehicle, throttle, steer, 0)
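                # For reference, a minimal sketch of what a helper like
                # send_control is assumed to do (its real definition lives
                # elsewhere in this project):
                #
                #   def send_control(vehicle, throttle, steer, brake,
                #                    hand_brake=False, reverse=False):
                #       vehicle.apply_control(carla.VehicleControl(
                #           throttle=float(throttle), steer=float(steer),
                #           brake=float(brake), hand_brake=hand_brake,
                #           reverse=reverse))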

                speed = np.linalg.norm(
                    carla_vec_to_np_array(vehicle.get_velocity()))

                real_dist = location1.location.distance(location2.location)

                fps = round(1.0 / snapshot.timestamp.delta_seconds)

                # Draw the display.
                # draw_image(display, image_rgb)
                draw_image(display,
                           img_rgb,
                           image_segmentation,
                           location1,
                           location2,
                           record=record,
                           driveName=driveName,
                           smazat=line)
                display.blit(
                    font.render('     FPS (real) % 5d ' % clock.get_fps(),
                                True, (255, 255, 255)), (8, 10))
                display.blit(
                    font.render('     FPS (simulated): % 5d ' % fps, True,
                                (255, 255, 255)), (8, 28))
                display.blit(
                    font.render('     speed: {:.2f} m/s'.format(speed), True,
                                (255, 255, 255)), (8, 46))
                # display.blit(
                #     font.render('     cross track error: {:03d} cm'.format(cross_track_error), True, (255, 255, 255)),
                #     (8, 64))
                # display.blit(
                #     font.render('     max cross track error: {:03d} cm'.format(max_error), True, (255, 255, 255)),
                #     (8, 82))

                # Draw bbox on following vehicle
                if len(bbox) != 0:
                    points = [(int(bbox[i, 0]), int(bbox[i, 1]))
                              for i in range(8)]
                    BB_COLOR = (248, 64, 24)
                    # draw lines
                    # base
                    pygame.draw.line(display, BB_COLOR, points[0], points[1])
                    pygame.draw.line(display, BB_COLOR, points[1], points[2])
                    pygame.draw.line(display, BB_COLOR, points[2], points[3])
                    pygame.draw.line(display, BB_COLOR, points[3], points[0])
                    # top
                    pygame.draw.line(display, BB_COLOR, points[4], points[5])
                    pygame.draw.line(display, BB_COLOR, points[5], points[6])
                    pygame.draw.line(display, BB_COLOR, points[6], points[7])
                    pygame.draw.line(display, BB_COLOR, points[7], points[4])
                    # base-top
                    pygame.draw.line(display, BB_COLOR, points[0], points[4])
                    pygame.draw.line(display, BB_COLOR, points[1], points[5])
                    pygame.draw.line(display, BB_COLOR, points[2], points[6])
                    pygame.draw.line(display, BB_COLOR, points[3], points[7])

                # DrawDrivable(drivableIndexes, image_segmentation.width // 10, image_segmentation.height // 10, display)

                pygame.display.flip()

                frame += 1

                print("vehicle.get_transform()", vehicle.get_transform())

                print("vehicleToFollow.get_transform()",
                      vehicleToFollow.get_transform())

                # print("obsticle_vehicle.get_transform()", obsticle_vehicle.get_transform())

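                # Advance the Frenet state for the next iteration: index 0 of
                # each path array is the current state, index 1 is the next
                # planned sample along the optimal trajectory.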
                s0 = path.s[1]
                c_d = path.d[1]
                c_d_d = path.d_d[1]
                c_d_dd = path.d_dd[1]
                c_speed = path.s_d[1]

                print("path.x[1]: ", path.x[1])
                print("path.y[1]: ", path.y[1])
                print("s: ", s0)

        cv2.destroyAllWindows()

    finally:
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')

def train_loop(rl_config, vehicle, map, sensors):
    file_reward_hist = open('summary/reward_hist_{}.txt'.format(len(os.listdir('summary'))), 'wb')
    birdview_producer = BirdViewProducer(
        client=carla.Client('127.0.0.1', 3000),  # carla.Client
        target_size=PixelDimensions(width=84, height=84),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AREA_ONLY
    )
    process_image(sensors.camera_queue, birdview_producer, vehicle)
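    # (The return value is discarded here; this initial call presumably just
    # primes the camera queue and birdview pipeline before training starts.)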

    configProto = init_tensorflow()
    # instantiate the DQN and target networks
    DQNetwork = DDDQNet(rl_config.state_size, rl_config.action_space, rl_config.learning_rate, name=rl_config.model_name)
    TargetNetwork = DDDQNet(rl_config.state_size, rl_config.action_space, rl_config.learning_rate, name="TargetNetwork")

    # TensorFlow summaries for TensorBoard visualization
    writer = tf.summary.FileWriter("summary")
    # losses
    tf.summary.scalar("Loss", DQNetwork.loss)
    tf.summary.scalar("Hubor_loss", DQNetwork.loss_2)
    tf.summary.histogram("ISWeights", DQNetwork.ISWeights_)
    write_op = tf.summary.merge_all()
    saver = tf.train.Saver()

    # initialize the memory and fill it with examples for prioritized replay
    memory = Memory(rl_config.memory_size, rl_config.pretrain_length, rl_config.action_space)
    if rl_config.load_memory:
        print(os.path.abspath(__file__))
        print(rl_config.memory_load_path)
        memory = memory.load_memory(rl_config.memory_load_path)
        print("Memory Loaded")
    else:
        # this can take a long time
        memory.fill_memory(map, vehicle, sensors.camera_queue, sensors, autopilot=True)
        memory.save_memory(rl_config.memory_save_path, memory)

    # Reinforcement Learning loop
    with tf.Session(config=configProto) as sess:
        sess.run(tf.global_variables_initializer())
        writer.add_graph(sess.graph)

        m = 0
        decay_step = 0
        tau = 0
        # update the target network parameters with the DQN weights
        update_target = update_target_graph()
        sess.run(update_target)

        for episode in range(1, rl_config.total_episodes):
            # move the vehicle to a spawn_point and return state
            reset_environment(map, vehicle, sensors)
            state = process_image(sensors.camera_queue)

            done = False
            start = time.time()
            episode_reward = 0
            # save the model from time to time
            if rl_config.model_save_frequency:
                if episode % rl_config.model_save_frequency == 0:
                    save_path = saver.save(sess, rl_config.model_save_path)

            for step in range(rl_config.max_steps):
                tau += 1
                decay_step += 1

                action_int, action, explore_probability = DQNetwork.predict_action(sess, rl_config.explore_start,
                                                                         rl_config.explore_stop, rl_config.decay_rate,
                                                                         decay_step, state)
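                # predict_action presumably anneals exploration with the usual
                # exponential schedule (an assumption about its internals):
                #   explore_probability = explore_stop + \
                #       (explore_start - explore_stop) * \
                #       np.exp(-decay_rate * decay_step)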
                car_controls = map_action(action_int, rl_config.action_space)
                vehicle.apply_control(car_controls)
                time.sleep(0.25)
                next_state = process_image(sensors.camera_queue)
                reward = compute_reward(vehicle, sensors)
                episode_reward += reward
                done = isDone(reward)
                experience = state, action, reward, next_state, done
                memory.store(experience)

                # Let's learn
                # First we need a mini-batch of experiences (s, a, r, s', done)
                tree_idx, batch, ISWeights_mb = memory.sample(rl_config.batch_size)
                s_mb, a_mb, r_mb, next_s_mb, dones_mb = get_split_batch(batch)

                # Get Q values for next_state from the DQN and TargetNetwork
                q_next_state = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: next_s_mb})
                q_target_next_state = sess.run(TargetNetwork.output,
                                               feed_dict={TargetNetwork.inputs_: next_s_mb})

                # Set Q_target = r if the episode ends at s+1, otherwise Q_target = r + gamma * Qtarget(s',a')
                target_Qs_batch = []
                for i in range(0, len(dones_mb)):
                    terminal = dones_mb[i]
                    # a' = argmax_a Q(s', a) from the online network
                    action = np.argmax(q_next_state[i])
                    # in a terminal state the target is just the reward
                    if terminal:
                        target_Qs_batch.append(r_mb[i])
                    else:
                        # take the target network's Q value for action a'
                        target = r_mb[i] + rl_config.gamma * q_target_next_state[i][action]
                        target_Qs_batch.append(target)

                targets_mb = np.array(target_Qs_batch)
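                # The loop above builds the Double-DQN target: the online
                # network selects a', the target network evaluates it. A
                # vectorized equivalent would be (sketch):
                #   best_a = np.argmax(q_next_state, axis=1)
                #   not_done = 1.0 - np.asarray(dones_mb, dtype=np.float32)
                #   targets_mb = r_mb + rl_config.gamma * not_done * \
                #       q_target_next_state[np.arange(len(r_mb)), best_a]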

                _, _, loss, loss_2, absolute_errors = sess.run(
                    [DQNetwork.optimizer, DQNetwork.optimizer_2, DQNetwork.loss, DQNetwork.loss_2,
                     DQNetwork.absolute_errors],
                    feed_dict={DQNetwork.inputs_: s_mb,
                               DQNetwork.target_Q: targets_mb,
                               DQNetwork.actions_: a_mb,
                               DQNetwork.ISWeights_: ISWeights_mb})

                # update replay memory priorities
                memory.batch_update(tree_idx, absolute_errors)

                if tau > rl_config.max_tau:
                    update_target = update_target_graph()
                    sess.run(update_target)
                    m += 1
                    tau = 0
                    print("model updated")

                state = next_state

                if done:
                    print(episode, 'episode finished. Episode total reward:', episode_reward)
                    print('tau:', tau)
                    pickle.dump(episode_reward, file_reward_hist)
                    break

    file_reward_hist.close()
Example no. 13
def main(optimalDistance,
         followDrivenPath,
         chaseMode,
         evaluateChasingCar,
         driveName='',
         record=False,
         followMode=False,
         resultsName='results',
         P=None,
         I=None,
         D=None,
         nOfFramesToSkip=0):

    # New imports
    from car_chasing.DrivingControl import DrivingControl
    from car_chasing.DrivingControlAdvanced import DrivingControlAdvanced
    from car_chasing.CarDetector import CarDetector
    from car_chasing.SemanticSegmentation import SemanticSegmentation

    # New variables (note: these assignments override the function arguments above)
    extrapolate = True
    optimalDistance = 8
    followDrivenPath = True
    evaluateChasingCar = True
    record = False
    chaseMode = True
    followMode = False
    counter = 1
    sensors = []

    vehicleToFollowSpawned = False
    obsticle_vehicleSpawned = False

    # New objects
    carDetector = CarDetector()
    drivingControl = DrivingControl(optimalDistance=optimalDistance)
    drivingControlAdvanced = DrivingControlAdvanced(
        optimalDistance=optimalDistance)
    evaluation = Evaluation()
    semantic = SemanticSegmentation()

    actor_list = []
    pygame.init()

    display = pygame.display.set_mode((800, 600),
                                      pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()

    client = carla.Client('localhost', 2000)
    client.set_timeout(80.0)

    world = client.get_world()
    weather_presets = find_weather_presets()
    world.set_weather(weather_presets[3][0])

    # Set BirdView
    birdview_producer = BirdViewProducer(
        client,
        PixelDimensions(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
        render_lanes_on_junctions=False,
    )

    try:
        m = world.get_map()

        blueprint_library = world.get_blueprint_library()

        veh_bp = random.choice(
            blueprint_library.filter('vehicle.dodge_charger.police'))
        vehicle = world.spawn_actor(veh_bp, m.get_spawn_points()[90])
        actor_list.append(vehicle)

        # New vehicle property
        vehicle.set_simulate_physics(True)

        if followDrivenPath:
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(
                carla.Location(first[0], first[1], first[2]),
                carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)

        # New Sensors

        # front cam for object detection
        camera_rgb_blueprint = world.get_blueprint_library().find(
            'sensor.camera.rgb')
        camera_rgb_blueprint.set_attribute('fov', '90')
        camera_rgb = world.spawn_actor(camera_rgb_blueprint,
                                       carla.Transform(
                                           carla.Location(x=1.5, z=1.4, y=0.3),
                                           carla.Rotation(pitch=0)),
                                       attach_to=vehicle)
        actor_list.append(camera_rgb)
        sensors.append(camera_rgb)

        # segmentation camera
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0),
                            carla.Rotation(pitch=0)),  #5,3,0 # -0.3
            attach_to=vehicle)
        actor_list.append(camera_segmentation)
        sensors.append(camera_segmentation)

        # Set up the local planner and behavioural planner
        # --------------------------------------------------------------
        # Planning Constants
        NUM_PATHS = 7
        BP_LOOKAHEAD_BASE = 8.0  # m
        BP_LOOKAHEAD_TIME = 2.0  # s
        PATH_OFFSET = 1.5  # m
        CIRCLE_OFFSETS = [-1.0, 1.0, 3.0]  # m
        CIRCLE_RADII = [1.5, 1.5, 1.5]  # m
        TIME_GAP = 1.0  # s
        PATH_SELECT_WEIGHT = 10
        A_MAX = 1.5  # m/s^2
        SLOW_SPEED = 2.0  # m/s
        STOP_LINE_BUFFER = 3.5  # m
        LEAD_VEHICLE_LOOKAHEAD = 20.0  # m
        LP_FREQUENCY_DIVISOR = 2  # Frequency divisor that makes the local
        # planner run at a lower frequency than the controller (which runs
        # at the simulation frequency). Must be a natural number.

        PREV_BEST_PATH = []
        stopsign_fences = []

        # --------------------------------------------------------------
        frame = 0
        max_error = 0
        FPS = 30
        speed = 0
        cross_track_error = 0
        start_time = 0.0
        times = 8
        LP_FREQUENCY_DIVISOR = 4  # overrides the value of 2 set above

        # ==============================================================================
        # -- Frenet related stuffs ---------------------------------------
        # ==============================================================================

        # ----------------- Set Obstacle Positions -----------------------

        # Temporary hard-coded obstacle list (map x/y coordinates)
        ob = np.array([
            [233.980630, 50.523910],
            [232.980630, 80.523910],
            [234.980630, 100.523910],
            [235.786942, 110.530586],
            [234.980630, 120.523910],
        ])
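        # These hard-coded map coordinates only seed the planner; inside the
        # loop below, ob is refreshed from the live positions of the spawned
        # obstacle actors.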

        # -----------------Set way points  -----------------------
        wx = []
        wy = []
        wz = []

        for p in evaluation.history:
            wp = carla.Transform(carla.Location(p[0], p[1], p[2]),
                                 carla.Rotation(p[3], p[4], p[5]))
            wx.append(wp.location.x)
            wy.append(wp.location.y)
            wz.append(wp.location.z)

        tx, ty, tyaw, tc, csp = generate_target_course(wx, wy, wz)
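        # generate_target_course appears to fit a cubic spline through the
        # recorded waypoints, returning sampled positions (tx, ty), headings
        # (tyaw), curvature (tc) and the spline course object (csp) that the
        # Frenet planner consumes.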

        old_tx = deepcopy(tx)
        old_ty = deepcopy(ty)

        # Leading waypoints
        leading_waypoints = []

        # other actors
        other_actor_ids = []

        # Trailing
        trail_path = None
        real_dist = 0

        # initial state
        c_speed = 10.0 / 3.6  # current speed [m/s]
        c_d = 2.0  # current lateral position [m]
        c_d_d = 0.0  # current lateral speed [m/s]
        c_d_dd = 0.0  # current lateral acceleration [m/s^2]
        s0 = 0.0  # current course position

        trail_c_speed = 20.0 / 3.6  # current speed [m/s]
        trail_c_d = 2.0  # current lateral position [m]
        trail_c_d_d = 0.0  # current lateral speed [m/s]
        trail_c_d_dd = 0.0  # current lateral acceleration [m/s^2]
        trail_s0 = 0.0  # current course position

        i = 0

        # Create a synchronous mode context.
        with CarlaSyncMode(world, *sensors, fps=FPS) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick()
                start_time += clock.get_time()

                # snapshot, image_rgb, image_segmentation = tick_response
                snapshot, img_rgb, image_segmentation = sync_mode.tick(
                    timeout=2.0)

                # detect a car in the image with the semantic segmentation camera
                carInTheImage = semantic.IsThereACarInThePicture(
                    image_segmentation)

                line = []

                current_speed = np.linalg.norm(
                    carla_vec_to_np_array(vehicle.get_velocity()))

                # Spawn a vehicle to follow
                if not vehicleToFollowSpawned and followDrivenPath:
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(
                        location1.location.x, location1.location.y,
                        location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX * 5)
                    newY = location1.location.y - (diffY * 5)

                    start_pose.location.x = newX
                    start_pose.location.y = newY

                    vehicle.set_transform(start_pose)

                    start_pose2 = random.choice(m.get_spawn_points())

                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(bp, start_pose2)

                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation

                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z

                    vehicleToFollow.set_transform(start_pose2)

                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    vehicleToFollow.set_autopilot(True)

                if followDrivenPath:
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(
                        carla.Location(tmp[0] + 5, tmp[1], tmp[2]),
                        carla.Rotation(tmp[3], tmp[4], tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1

                # ------------------- Set up obstacle vehicles for testing --------------------------------

                if not obsticle_vehicleSpawned and followDrivenPath:
                    obsticle_vehicleSpawned = True

                    for obsticle_p in ob:

                        start_pose3 = random.choice(m.get_spawn_points())

                        obsticle_vehicle = world.spawn_actor(
                            random.choice(blueprint_library.filter('jeep')),
                            start_pose3)

                        start_pose3 = carla.Transform()
                        start_pose3.rotation = start_pose2.rotation
                        start_pose3.location.x = obsticle_p[0]
                        start_pose3.location.y = obsticle_p[1]
                        start_pose3.location.z = start_pose2.location.z

                        obsticle_vehicle.set_transform(start_pose3)
                        obsticle_vehicle.set_autopilot(True)

                        actor_list.append(obsticle_vehicle)
                        obsticle_vehicle.set_simulate_physics(True)

                        other_actor_ids.append(obsticle_vehicle.id)

                # ---- Leading Frenet -----------------------------
                path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,
                                               c_d_dd, ob)
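                # Refresh the obstacle list from the live actors so the next
                # planning cycle sees their current positions.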
                ob = np.array([[
                    actor.get_transform().location.x,
                    actor.get_transform().location.y
                ] for actor in actor_list if actor.id in other_actor_ids])

                # --------------------------- Working leading Frenet

                # Set new waypoints
                new_vehicleToFollow_transform = carla.Transform()
                new_vehicleToFollow_transform.rotation = carla.Rotation(
                    pitch=0.0, yaw=math.degrees(path.yaw[1]), roll=0.0)

                new_vehicleToFollow_transform.location.x = path.x[1]
                new_vehicleToFollow_transform.location.y = path.y[1]
                new_vehicleToFollow_transform.location.z = path.z[1]

                wp = (new_vehicleToFollow_transform.location.x,
                      new_vehicleToFollow_transform.location.y,
                      new_vehicleToFollow_transform.location.z,
                      new_vehicleToFollow_transform.rotation.pitch,
                      new_vehicleToFollow_transform.rotation.yaw,
                      new_vehicleToFollow_transform.rotation.roll)

                vehicleToFollow.set_transform(new_vehicleToFollow_transform)

                s0 = path.s[1]
                c_d = path.d[1]
                c_d_d = path.d_d[1]
                c_d_dd = path.d_dd[1]
                c_speed = path.s_d[1]

                if i > 2:
                    leading_waypoints.append(wp)

                location2 = None

                # ---- Car chasing activated -----
                # Give the leading car time to accumulate waypoints

                # ------- Trailing Frenet --------------------------------------
                # Re-run the Frenet planner only once in a while
                if (i > 50):
                    #                     trailing_csp = look_ahead_local_planer(waypoints=leading_waypoints, current_idx=0, look_ahead=len(leading_waypoints))

                    #                     speed = get_speed(vehicle)
                    FRENET_FREQUENCY_DIVISOR = 1
                    if (frame % FRENET_FREQUENCY_DIVISOR
                            == 0) and (real_dist > 5):

                        wx = []
                        wy = []
                        wz = []

                        for p in leading_waypoints:
                            wp = carla.Transform(
                                carla.Location(p[0], p[1], p[2]),
                                carla.Rotation(p[3], p[4], p[5]))
                            wx.append(wp.location.x)
                            wy.append(wp.location.y)
                            wz.append(wp.location.z)

                        tx, ty, tyaw, tc, trailing_csp = generate_target_course(
                            wx, wy, wz)
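                        # Note: the trailing spline is re-fitted from scratch
                        # over the leader's entire accumulated waypoint
                        # history on every planning cycle.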

                        trail_path = frenet_optimal_planning(
                            trailing_csp, trail_s0, trail_c_speed, trail_c_d,
                            trail_c_d_d, trail_c_d_dd, ob)

                        if trail_path:

                            trail_s0 = trail_path.s[1]
                            trail_c_d = trail_path.d[1]
                            trail_c_d_d = trail_path.d_d[1]
                            trail_c_d_dd = trail_path.d_dd[1]
                            trail_c_speed = trail_path.s_d[1]

                            new_vehicle_transform = carla.Transform()
                            new_vehicle_transform.rotation = carla.Rotation(
                                pitch=0.0,
                                yaw=math.degrees(trail_path.yaw[1]),
                                roll=0.0)

                            new_vehicle_transform.location.x = trail_path.x[1]
                            new_vehicle_transform.location.y = trail_path.y[1]
                            new_vehicle_transform.location.z = trail_path.z[1]

                            vehicle.set_transform(new_vehicle_transform)

                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()

                speed = np.linalg.norm(
                    carla_vec_to_np_array(vehicle.get_velocity()))

                real_dist = location1.location.distance(location2.location)

                fps = round(1.0 / snapshot.timestamp.delta_seconds)

                # Draw the display.
                # draw_image(display, image_rgb)
                draw_image(display,
                           img_rgb,
                           image_segmentation,
                           location1,
                           location2,
                           record=record,
                           driveName=driveName,
                           smazat=line)
                display.blit(
                    font.render('     FPS (real) % 5d ' % clock.get_fps(),
                                True, (255, 255, 255)), (8, 10))
                display.blit(
                    font.render('     FPS (simulated): % 5d ' % fps, True,
                                (255, 255, 255)), (8, 28))
                display.blit(
                    font.render('     speed: {:.2f} m/s'.format(speed), True,
                                (255, 255, 255)), (8, 46))
                display.blit(
                    font.render(
                        '     distance to target: {:.2f} m'.format(real_dist),
                        True, (255, 255, 255)), (8, 66))

                # Display BirdView
                # Input for your model - call it every simulation step
                # The returned result is an np.ndarray of ones and zeros with shape (8, height, width)

                birdview = birdview_producer.produce(
                    agent_vehicle=vehicleToFollow)
                bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview),
                                   cv2.COLOR_BGR2RGB)
                # NOTE imshow requires BGR color model
                cv2.imshow("BirdView RGB", bgr)
                cv2.waitKey(1)
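                # Note: here the birdview is centred on the lead (chased)
                # vehicle, since agent_vehicle=vehicleToFollow.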

                pygame.display.flip()

                frame += 1
                i += 1

    finally:
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')