# Assumed imports (not shown in this snippet): numpy as np, PIL.Image, carla,
# random, cv2 (aliased as cv below), and carla_birdeye_view (BirdViewProducer,
# BirdView, BirdViewCropType, PixelDimensions, DEFAULT_WIDTH, DEFAULT_HEIGHT).
def process_image(queue, birdview_producer=None, vehicle=None):
    '''Get an image from the sensor queue and process it into the state for vision-based agents.'''
    image = queue.get()
    array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]

    image_rgb = array.copy()

    image = Image.fromarray(array).convert('L')  # grayscale conversion
    image = np.array(image.resize((84, 84)))  # resize and convert back to a numpy array
    image = np.reshape(image, (84, 84, 1))  # add a channel dimension

    if birdview_producer is not None:
        process_image.birdview_producer = birdview_producer
        process_image.vehicle = vehicle
    if getattr(process_image, 'birdview_producer', None) is not None:
        birdview = process_image.birdview_producer.produce(
            agent_vehicle=process_image.vehicle  # carla.Actor (spawned vehicle)
        )
        # produces np.ndarray of shape (height, width, 3)
        rgb = BirdViewProducer.as_rgb(birdview)
        # cv2.imshow('BEV', cv2.resize(rgb, (640, 640)))
        # cv2.imshow('rgb', cv2.resize(image_rgb, (640, 640)))
        # print(rgb.shape)
        # cv2.waitKey(0)

        return rgb

    return image
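
# Usage sketch (hypothetical wiring): feed the queue from a camera callback and
# pass the producer once; later calls can omit it.
#   from queue import Queue
#   image_queue = Queue()
#   camera.listen(image_queue.put)
#   obs = process_image(image_queue, birdview_producer, vehicle)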


def main():
    client = carla.Client("localhost", 2000)
    client.set_timeout(3.0)
    world = client.get_world()
    carla_map = world.get_map()
    spawn_points = carla_map.get_spawn_points()
    blueprints = world.get_blueprint_library()

    # settings = world.get_settings()
    # settings.synchronous_mode = True
    # settings.no_rendering_mode = True
    # settings.fixed_delta_seconds = 1 / 10.0
    # world.apply_settings(settings)

    hero_bp = random.choice(blueprints.filter("vehicle.audi.a2"))
    hero_bp.set_attribute("role_name", "hero")
    transform = random.choice(spawn_points)
    agent = world.spawn_actor(hero_bp, transform)
    agent.set_autopilot(True)

    birdview_producer = BirdViewProducer(
        client,
        PixelDimensions(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
        render_lanes_on_junctions=False,
    )
    stuck_frames_count = 0

    try:
        while True:
            # world.tick()
            birdview: BirdView = birdview_producer.produce(agent_vehicle=agent)
            bgr = cv.cvtColor(BirdViewProducer.as_rgb(birdview),
                              cv.COLOR_BGR2RGB)
            # NOTE imshow requires BGR color model
            cv.imshow("BirdView RGB", bgr)

            # # Teleport when stuck for too long
            # if get_speed(agent) < STUCK_SPEED_THRESHOLD_IN_KMH:
            #     stuck_frames_count += 1
            # else:
            #     stuck_frames_count = 0

            # if stuck_frames_count == MAX_STUCK_FRAMES:
            #     agent.set_autopilot(False)
            #     agent.set_transform(random.choice(spawn_points))
            #     agent.set_autopilot(True)

            # Play next frames without having to wait for the key
            key = cv.waitKey(10) & 0xFF
            if key == 27:  # ESC
                break
    finally:
        if agent is not None:
            agent.destroy()
        cv.destroyAllWindows()
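
    # The method below is defined on a class (not shown in this snippet) that
    # holds a BirdViewProducer (self.birdview_producer), the bird's-eye window
    # size (self.bird_eye_window_width/height), a forward field-of-view angle
    # (self.FRONT_FOV, in radians), and rolling frame-stack buffers
    # (self.pedestrian_birdview_stack and related arrays).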
    def birdeyeData(self, player, lidar_object):
        self._parent = player
        self.bird_array = self.birdview_producer.produce(
            agent_vehicle=self._parent)
        rgb = BirdViewProducer.as_rgb(self.bird_array)

        copyrgb = rgb.copy()
        n, m = np.shape(self.bird_array[:, :, 1])
        x, y = np.ogrid[0:n, 0:m]
        agent_pixel_pose_Y = int(self.bird_eye_window_width / 2)
        agent_pixel_pose_X = int(self.bird_eye_window_height / 2)
        agent_pixel_pose = [agent_pixel_pose_X, agent_pixel_pose_Y]
        # print (agent_pixel_pose)

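        # Wedge-shaped forward field of view: a pixel (x, y) counts as visible
        # when its lateral offset from the agent pixel stays within the
        # longitudinal offset scaled by tan(FRONT_FOV / 2), on either side of
        # the heading axis.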
        fov_mask = (
            ((agent_pixel_pose_Y - y) <
             ((agent_pixel_pose_X - x) * math.tan(self.FRONT_FOV / 2))) &
            ((agent_pixel_pose_Y - y) >
             (-(agent_pixel_pose_X - x) * math.tan(self.FRONT_FOV / 2))))

        self.mask_shadow, uncertainty, d = shadow_mask.view_field(
            self.bird_array[:, :, 9] + 2.7 * self.bird_array[:, :, 3], 1.8,
            agent_pixel_pose, 90)
        #(array[self.mask_shadow & mask]) = 255

        #copyrgb = rgb.copy()
        shadow = np.zeros((n, m))
        visibility_matrix = fov_mask & self.mask_shadow
        shadow[visibility_matrix] = 1

        uncertainty = uncertainty * shadow
        uncertainty = uncertainty.astype(np.bool_)
        '''r= copyrgb[:, :, 0]
        g = copyrgb[:, :, 1]
        b = copyrgb[:, :, 2]
        r[uncertainty] = 0
        g[uncertainty] = 0
        b[uncertainty] = 0'''

        n, m = np.shape(self.bird_array[:, :, 1])
        embedded_birdseye = np.zeros((n, m))

        self.bird_array[:, :, 8] = self.bird_array[:, :, 8] * uncertainty

        for i in range(self.state_frames * self.frame_stack - 1, -1, -1):
            if i == 0:
                self.pedestrian_birdview_stack[0, :, :] = np.multiply(
                    shadow, self.bird_array[:, :, 8])
            else:
                self.pedestrian_birdview_stack[
                    i, :, :] = self.pedestrian_birdview_stack[i - 1, :, :]

            #print(i)
            if i % self.frame_stack == 0:
                self.pedestrian_birdview[
                    int(i // self.frame_stack
                        ), :, :] = self.pedestrian_birdview_stack[i, :, :]

                embedded_birdseye = self.pedestrian_birdview[int(i//self.frame_stack), :, :] \
                                    * (0.5**(int(i//self.frame_stack))) + embedded_birdseye
        '''copyrgb = copyrgb.swapaxes(0, 1)
        self.rgb_nyg_surf = pygame.surfarray.make_surface(copyrgb)
        self.rgb_nyg_surf = pygame.transform.scale(self.rgb_nyg_surf, (self.bird_eye_window_width*4, self.bird_eye_window_height*4))
        '''
        visibility_int = np.zeros((1, n, m))
        visibility_int[0, uncertainty] = 1
        cir = np.where(d <= 30, int(1), shadow)
        cir = np.where(d > 30, int(0), cir)

        self.pedestrian_birdview_embeded = np.add(
            255 * self.bird_array[:, :, 8],
            0.5 * self.pedestrian_birdview_embeded)
        obsv = (0.01 * copyrgb)
        obsv[:, :, 0] = obsv[:, :, 0]  #+ 50 * uncertainty
        obsv[:, :, 2] = obsv[:, :, 2] + 200 * self.pedestrian_birdview_stack[0, :, :]
        obsv[:, :, 1] = obsv[:, :, 1] + 100 * self.bird_array[:, :, 3]
        #obsv[:,:,1]= obsv[:,:,1]+200*self.pedestrian_birdview_stack[0, :, :]+100*(birdview[:, :, 3] * cir)
        obsv = obsv.swapaxes(0, 1)
        '''if lidar_data is not None:
            obsv= obsv + lidar_data
        birdview[:, :, 1] * fov_mask'''

        self.rgb_obsv_surf = pygame.surfarray.make_surface(obsv)
        self.rgb_obsv_surf = pygame.transform.scale(
            self.rgb_obsv_surf,
            (self.bird_eye_window_width * 4, self.bird_eye_window_height * 4))

        copyrgb[:, :, 0] = 255 * embedded_birdseye + copyrgb[:, :, 0]
        copyrgb = copyrgb.swapaxes(0, 1)
        self.rgb_nyg_surf = pygame.surfarray.make_surface(copyrgb)
        self.rgb_nyg_surf = pygame.transform.scale(
            self.rgb_nyg_surf,
            (self.bird_eye_window_width * 4, self.bird_eye_window_height * 4))

        self.tensor = pygame.surfarray.make_surface(embedded_birdseye)
        # pygame.transform.flip returns a new Surface, so keep the result
        self.tensor = pygame.transform.flip(self.tensor, True, True)
        self.tensor = pygame.transform.rotozoom(self.tensor, 90, 4)

        return self.pedestrian_birdview, visibility_int
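
The stacking loop above shifts a rolling buffer and accumulates an exponentially
decayed overlay. A minimal sketch of the same pattern in isolation (toy sizes and
names, not the class attributes used above):

import numpy as np

state_frames, frame_stack = 3, 4  # toy sizes
n = m = 8
stack = np.zeros((state_frames * frame_stack, n, m))
kept = np.zeros((state_frames, n, m))
embedded = np.zeros((n, m))
new_frame = np.random.rand(n, m)

for i in range(state_frames * frame_stack - 1, -1, -1):
    if i == 0:
        stack[0] = new_frame  # newest frame enters at slot 0
    else:
        stack[i] = stack[i - 1]  # shift older frames down the buffer
    if i % frame_stack == 0:  # keep every frame_stack-th slot
        kept[i // frame_stack] = stack[i]
        embedded += kept[i // frame_stack] * (0.5 ** (i // frame_stack))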
Example #4
    def _get_obs(self):
        """Get the observations."""
        ## Birdeye rendering
        self.birdeye_render.vehicle_polygons = self.vehicle_polygons
        self.birdeye_render.walker_polygons = self.walker_polygons
        self.birdeye_render.waypoints = self.waypoints

        # birdeye view with roadmap and actors
        birdeye_render_types = ['roadmap', 'actors']
        if self.display_route:
            birdeye_render_types.append('waypoints')
        self.birdeye_render.render(self.display, birdeye_render_types)
        birdeye = pygame.surfarray.array3d(self.display)
        birdeye = birdeye[0:self.display_size, :, :]
        birdeye = display_to_rgb(birdeye, self.obs_size)

        # Roadmap
        if self.pixor:
            roadmap_render_types = ['roadmap']
            if self.display_route:
                roadmap_render_types.append('waypoints')
            self.birdeye_render.render(self.display, roadmap_render_types)
            roadmap = pygame.surfarray.array3d(self.display)
            roadmap = roadmap[0:self.display_size, :, :]
            roadmap = display_to_rgb(roadmap, self.obs_size)
            # Add ego vehicle
            for i in range(self.obs_size):
                for j in range(self.obs_size):
                    # the ego vehicle is rendered red, i.e. close to (255, 0, 0)
                    if (abs(birdeye[i, j, 0] - 255) < 20
                            and abs(birdeye[i, j, 1] - 0) < 20
                            and abs(birdeye[i, j, 2] - 0) < 20):
                        roadmap[i, j, :] = birdeye[i, j, :]

        # Display birdeye image
        birdeye_surface = rgb_to_display_surface(birdeye, self.display_size)
        self.display.blit(birdeye_surface, (0, 0))

        ## Lidar image generation
        point_cloud = []
        # Get point cloud data
        for location in self.lidar_data:
            point_cloud.append([location.x, location.y, -location.z])
        point_cloud = np.array(point_cloud)
        # Separate the 3D space into bins for the point cloud: x and y resolution
        # is set by self.lidar_bin, and z is split into two height bins.
        y_bins = np.arange(-(self.obs_range - self.d_behind),
                           self.d_behind + self.lidar_bin, self.lidar_bin)
        x_bins = np.arange(-self.obs_range / 2,
                           self.obs_range / 2 + self.lidar_bin, self.lidar_bin)
        z_bins = [-self.lidar_height - 1, -self.lidar_height + 0.25, 1]
        # Get lidar image according to the bins
        lidar, _ = np.histogramdd(point_cloud, bins=(x_bins, y_bins, z_bins))
        lidar[:, :, 0] = np.array(lidar[:, :, 0] > 0, dtype=np.uint8)
        lidar[:, :, 1] = np.array(lidar[:, :, 1] > 0, dtype=np.uint8)
        # Add the waypoints to lidar image
        if self.display_route:
            wayptimg = (birdeye[:, :, 0] <= 10) * (birdeye[:, :, 1] <= 10) * (
                birdeye[:, :, 2] >= 240)
        else:
            wayptimg = birdeye[:, :, 0] < 0  # Equal to a zero matrix
        wayptimg = np.expand_dims(wayptimg, axis=2)
        wayptimg = np.fliplr(np.rot90(wayptimg, 3))

        # Get the final lidar image
        lidar = np.concatenate((lidar, wayptimg), axis=2)
        lidar = np.flip(lidar, axis=1)
        lidar = np.rot90(lidar, 1)
        lidar = lidar * 255

        # Display lidar image
        lidar_surface = rgb_to_display_surface(lidar, self.display_size)
        self.display.blit(lidar_surface, (self.display_size, 0))

        ## Display camera image
        camera = resize(self.camera_img, (self.obs_size, self.obs_size)) * 255
        camera_surface = rgb_to_display_surface(camera, self.display_size)
        self.display.blit(camera_surface, (self.display_size * 2, 0))

        ## Display processed camera image
        original_camera = resize(self.original_camera_image,
                                 (self.image_height, self.image_width))
        #print("original camera : {}".format(original_camera))

        o_camera_surface = rgb_to_display_surface(original_camera,
                                                  self.display_size)
        self.display.blit(o_camera_surface, (self.display_size * 2, 0))

        # Display on pygame
        pygame.display.flip()

        # State observation
        ego_trans = self.ego.get_transform()
        ego_x = ego_trans.location.x
        ego_y = ego_trans.location.y
        ego_yaw = ego_trans.rotation.yaw / 180 * np.pi
        lateral_dis, w = get_preview_lane_dis(self.waypoints, ego_x, ego_y)
        delta_yaw = np.arcsin(
            np.cross(w, np.array([np.cos(ego_yaw), np.sin(ego_yaw)])))
        v = self.ego.get_velocity()
        speed = np.sqrt(v.x**2 + v.y**2)
        state = np.array([lateral_dis, -delta_yaw, speed, self.vehicle_front])

        if self.pixor:
            ## Vehicle classification and regression maps (requires further normalization)
            vh_clas = np.zeros((self.pixor_size, self.pixor_size))
            vh_regr = np.zeros((self.pixor_size, self.pixor_size, 6))

            # Generate the PIXOR image. Note that CARLA uses a left-handed coordinate system.
            # Get the 6-dim geometric parametrization in PIXOR; here we use pixel coordinates.
            for actor in self.world.get_actors().filter('vehicle.*'):
                x, y, yaw, l, w = get_info(actor)
                x_local, y_local, yaw_local = get_local_pose(
                    (x, y, yaw), (ego_x, ego_y, ego_yaw))
                if actor.id != self.ego.id:
                    if abs(
                            y_local
                    ) < self.obs_range / 2 + 1 and x_local < self.obs_range - self.d_behind + 1 and x_local > -self.d_behind - 1:
                        x_pixel, y_pixel, yaw_pixel, l_pixel, w_pixel = get_pixel_info(
                            local_info=(x_local, y_local, yaw_local, l, w),
                            d_behind=self.d_behind,
                            obs_range=self.obs_range,
                            image_size=self.pixor_size)
                        cos_t = np.cos(yaw_pixel)
                        sin_t = np.sin(yaw_pixel)
                        logw = np.log(w_pixel)
                        logl = np.log(l_pixel)
                        pixels = get_pixels_inside_vehicle(
                            pixel_info=(x_pixel, y_pixel, yaw_pixel, l_pixel,
                                        w_pixel),
                            pixel_grid=self.pixel_grid)
                        for pixel in pixels:
                            vh_clas[pixel[0], pixel[1]] = 1
                            dx = x_pixel - pixel[0]
                            dy = y_pixel - pixel[1]
                            vh_regr[pixel[0], pixel[1], :] = np.array(
                                [cos_t, sin_t, dx, dy, logw, logl])
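                            # 6-dim PIXOR-style regression target per occupied
                            # pixel: heading (cos, sin), offset to the box
                            # center (dx, dy), and log box size (logw, logl).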

            # Flip the image matrix so that the origin is at the left-bottom
            vh_clas = np.flip(vh_clas, axis=0)
            vh_regr = np.flip(vh_regr, axis=0)

            # Pixor state, [x, y, cos(yaw), sin(yaw), speed]
            pixor_state = [
                ego_x, ego_y,
                np.cos(ego_yaw),
                np.sin(ego_yaw), speed
            ]

        birdview: BirdView = self.birdview_producer.produce(
            agent_vehicle=self.ego)
        bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview),
                           cv2.COLOR_BGR2RGB)

        v = self.ego.get_velocity()
        speed_kmh = 3.6 * np.sqrt(v.x**2 + v.y**2 + v.z**2)
        speed_kmh = np.array([speed_kmh])
        obs = {
            'original_camera': original_camera.astype(np.uint8),
            'speed': speed_kmh.astype(np.uint8),
            'camera': camera.astype(np.uint8),
            'lidar': lidar.astype(np.uint8),
            'birdeye': birdeye.astype(np.uint8),
            'driving_image': bgr.astype(np.uint8),
            'state': state,
        }

        if self.pixor:
            obs.update({
                'roadmap':
                roadmap.astype(np.uint8),
                'vh_clas':
                np.expand_dims(vh_clas, -1).astype(np.float32),
                'vh_regr':
                vh_regr.astype(np.float32),
                'pixor_state':
                pixor_state,
            })

        return obs
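
The lidar binning above can be exercised on its own. A minimal sketch of
np.histogramdd producing a two-slice occupancy grid (bin sizes here are
illustrative, not the exact values used above):

import numpy as np

lidar_bin = 0.125  # illustrative cell size in meters
x_bins = np.arange(-16.0, 16.0 + lidar_bin, lidar_bin)
y_bins = np.arange(-20.0, 12.0 + lidar_bin, lidar_bin)
z_bins = [-2.1, -1.85, 1.0]  # two height slices, as in the code above

points = np.array([[1.0, 2.0, -2.0], [1.2, 2.1, 0.4], [5.0, -3.0, 0.2]])
hist, _ = np.histogramdd(points, bins=(x_bins, y_bins, z_bins))
occupancy = (hist > 0).astype(np.uint8)  # binarize, matching lidar[:, :, k] > 0
print(occupancy.shape)  # (len(x_bins) - 1, len(y_bins) - 1, 2)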
Example #5
def main(optimalDistance, followDrivenPath, chaseMode, evaluateChasingCar,
         driveName='', record=False, followMode=False, resultsName='results',
         P=None, I=None, D=None, nOfFramesToSkip=0):
    # Imports
    # from cores.lane_detection.lane_detector import LaneDetector
    # from cores.lane_detection.camera_geometry import CameraGeometry
    # from cores.control.pure_pursuit import PurePursuitPlusPID

    # New imports
    from DrivingControl import DrivingControl
    from DrivingControlAdvanced import DrivingControlAdvanced
    from CarDetector import CarDetector
    from SemanticSegmentation import SemanticSegmentation


    # New Variables
    extrapolate = True
    optimalDistance = 8
    followDrivenPath = True
    evaluateChasingCar = True
    record = False
    chaseMode = True
    followMode = False
    counter = 1
    sensors = []

    vehicleToFollowSpawned = False
    obsticle_vehicleSpawned = False

    # New objects
    carDetector = CarDetector()
    drivingControl = DrivingControl(optimalDistance=optimalDistance)
    drivingControlAdvanced = DrivingControlAdvanced(optimalDistance=optimalDistance)
    evaluation = Evaluation()
    semantic = SemanticSegmentation()


    actor_list = []
    pygame.init()

    display = pygame.display.set_mode(
        (800, 600),
        pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()

    client = carla.Client('localhost', 2000)
    client.set_timeout(80.0)

    #client.load_world('Town06')
    # client.load_world('Town04')
    world = client.get_world()
    weather_presets = find_weather_presets()
    # print(weather_presets)
    world.set_weather(weather_presets[3][0])
    # world.set_weather(carla.WeatherParameters.HardRainSunset)

    # controller = PurePursuitPlusPID()

    # Set BirdView
    birdview_producer = BirdViewProducer(
        client,
        PixelDimensions(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
        render_lanes_on_junctions=False,
    )


    try:
        m = world.get_map()

        blueprint_library = world.get_blueprint_library()

        veh_bp = random.choice(blueprint_library.filter('vehicle.dodge_charger.police'))
        vehicle = world.spawn_actor(
            veh_bp,
            m.get_spawn_points()[90])
        actor_list.append(vehicle)

        # New vehicle property
        vehicle.set_simulate_physics(True)

        if followDrivenPath:
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(carla.Location(first[0], first[1], first[2]),
                                        carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)

        # New Sensors
        collision_sensor = world.spawn_actor(blueprint_library.find('sensor.other.collision'),
                                                carla.Transform(), attach_to=vehicle)
        collision_sensor.listen(lambda event: evaluation.CollisionHandler(event))
        actor_list.append(collision_sensor)

        
        # front cam for object detection
        camera_rgb_blueprint = world.get_blueprint_library().find('sensor.camera.rgb')
        camera_rgb_blueprint.set_attribute('fov', '90')
        camera_rgb = world.spawn_actor(
           camera_rgb_blueprint,
            carla.Transform(carla.Location(x=1.5, z=1.4,y=0.3), carla.Rotation(pitch=0)),
            attach_to=vehicle)
        actor_list.append(camera_rgb)
        sensors.append(camera_rgb)

            
        # segmentation camera
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4,y=0), carla.Rotation(pitch=0)), #5,3,0 # -0.3
            attach_to=vehicle)
        actor_list.append(camera_segmentation)
        sensors.append(camera_segmentation)

        # cg = CameraGeometry()
        # ld = LaneDetector(model_path=Path("best_model.pth").absolute())
        # #windshield cam
        # cam_windshield_transform = carla.Transform(carla.Location(x=0.5, z=cg.height), carla.Rotation(pitch=-1*cg.pitch_deg))
        # bp = blueprint_library.find('sensor.camera.rgb')
        # bp.set_attribute('image_size_x', str(cg.image_width))
        # bp.set_attribute('image_size_y', str(cg.image_height))
        # bp.set_attribute('fov', str(cg.field_of_view_deg))
        # camera_windshield = world.spawn_actor(
        #     bp,
        #     cam_windshield_transform,
        #     attach_to=vehicle)
        # actor_list.append(camera_windshield)
        # sensors.append(camera_windshield)


        
        # Set up local planner and behavioural planner
        # --------------------------------------------------------------

        # Planning Constants
        NUM_PATHS = 7
        BP_LOOKAHEAD_BASE      = 8.0              # m
        BP_LOOKAHEAD_TIME      = 2.0              # s
        PATH_OFFSET            = 1.5              # m
        CIRCLE_OFFSETS         = [-1.0, 1.0, 3.0] # m
        CIRCLE_RADII           = [1.5, 1.5, 1.5]  # m
        TIME_GAP               = 1.0              # s
        PATH_SELECT_WEIGHT     = 10
        A_MAX                  = 1.5              # m/s^2
        SLOW_SPEED             = 2.0              # m/s
        STOP_LINE_BUFFER       = 3.5              # m
        LEAD_VEHICLE_LOOKAHEAD = 20.0             # m
        LP_FREQUENCY_DIVISOR   = 2                # Frequency divisor to make the
                                                  # local planner operate at a lower
                                                  # frequency than the controller
                                                  # (which operates at the simulation
                                                  # frequency). Must be a natural
                                                  # number.

        PREV_BEST_PATH         = []
        stopsign_fences = [] 


        # --------------------------------------------------------------


        frame = 0
        max_error = 0
        FPS = 30
        speed = 0
        cross_track_error = 0
        start_time = 0.0
        times = 8
        LP_FREQUENCY_DIVISOR = 4

        # Create a synchronous mode context.
        with CarlaSyncMode(world, *sensors, fps=FPS) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick()          
                start_time += clock.get_time()

                # Advance the simulation and wait for the data. 
                # tick_response = sync_mode.tick(timeout=2.0)


                # Display BirdView
                # Input for your model - call it every simulation step
                # returned result is np.ndarray with ones and zeros of shape (8, height, width)
                
                birdview = birdview_producer.produce(agent_vehicle=vehicle)
                bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview), cv2.COLOR_BGR2RGB)
                # NOTE imshow requires BGR color model
                cv2.imshow("BirdView RGB", bgr)
                cv2.waitKey(1)


                # snapshot, image_rgb, image_segmentation = tick_response
                snapshot, img_rgb, image_segmentation = sync_mode.tick(timeout=2.0)

                # detect car in image with semantic segmentation camera
                carInTheImage = semantic.IsThereACarInThePicture(image_segmentation)

                line = []

                current_speed = np.linalg.norm(carla_vec_to_np_array(vehicle.get_velocity()))

                # Spawn a vehicle to follow
                if not vehicleToFollowSpawned and followDrivenPath:
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(location1.location.x, location1.location.y,
                                                                     location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX*5)
                    newY = location1.location.y - (diffY*5)

                    start_pose.location.x = newX
                    start_pose.location.y = newY

                    vehicle.set_transform(start_pose)

                    start_pose2 = random.choice(m.get_spawn_points())

                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(
                        bp,
                        start_pose2)

                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation

                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z

                    vehicleToFollow.set_transform(start_pose2)

                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    # vehicleToFollow.set_autopilot(False)

                if followDrivenPath:
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(carla.Location(tmp[0] ,tmp[1],tmp[2]),carla.Rotation(tmp[3],tmp[4],tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1


                # Set up obstacle vehicle for testing
                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()

                # if not obsticle_vehicleSpawned and followDrivenPath:
                #     obsticle_vehicleSpawned = True
                #     # Adding new obsticle vehicle 

                #     start_pose3 = random.choice(m.get_spawn_points())

                #     obsticle_vehicle = world.spawn_actor(
                #         random.choice(blueprint_library.filter('jeep')),
                #         start_pose3)

                #     start_pose3 = carla.Transform()
                #     start_pose3.rotation = start_pose2.rotation
                #     start_pose3.location.x = start_pose2.location.x 
                #     start_pose3.location.y =  start_pose2.location.y + 50 
                #     start_pose3.location.z =  start_pose2.location.z

                #     obsticle_vehicle.set_transform(start_pose3)


                #     actor_list.append(obsticle_vehicle)
                #     obsticle_vehicle.set_simulate_physics(True)



                # # Parked car(s) (X(m), Y(m), Z(m), Yaw(deg), RADX(m), RADY(m), RADZ(m))
                # parkedcar_data = None
                # parkedcar_box_pts = []      # [x,y]
                # with open(C4_PARKED_CAR_FILE, 'r') as parkedcar_file:
                #     next(parkedcar_file)  # skip header
                #     parkedcar_reader = csv.reader(parkedcar_file, 
                #                                 delimiter=',', 
                #                                 quoting=csv.QUOTE_NONNUMERIC)
                #     parkedcar_data = list(parkedcar_reader)
                #     # convert to rad
                #     for i in range(len(parkedcar_data)):
                #         parkedcar_data[i][3] = parkedcar_data[i][3] * np.pi / 180.0 

                # # obtain parked car(s) box points for LP
                # for i in range(len(parkedcar_data)):
                #     x = parkedcar_data[i][0]
                #     y = parkedcar_data[i][1]
                #     z = parkedcar_data[i][2]
                #     yaw = parkedcar_data[i][3]
                #     xrad = parkedcar_data[i][4]
                #     yrad = parkedcar_data[i][5]
                #     zrad = parkedcar_data[i][6]
                #     cpos = np.array([
                #             [-xrad, -xrad, -xrad, 0,    xrad, xrad, xrad,  0    ],
                #             [-yrad, 0,     yrad,  yrad, yrad, 0,    -yrad, -yrad]])
                #     rotyaw = np.array([
                #             [np.cos(yaw), np.sin(yaw)],
                #             [-np.sin(yaw), np.cos(yaw)]])
                #     cpos_shift = np.array([
                #             [x, x, x, x, x, x, x, x],
                #             [y, y, y, y, y, y, y, y]])
                #     cpos = np.add(np.matmul(rotyaw, cpos), cpos_shift)
                #     for j in range(cpos.shape[1]):
                #         parkedcar_box_pts.append([cpos[0,j], cpos[1,j]])


                # if frame % LP_FREQUENCY_DIVISOR == 0:
                #     # Update vehicleToFollow transform with obstacles
                #     # --------------------------------------------------------------
                #     _LOOKAHEAD_INDEX = 5
                #     _BP_LOOKAHEAD_BASE = 8.0              # m 
                #     _BP_LOOKAHEAD_TIME = 2.0              # s 


                #     # unsupported operand type(s) for +: 'float' and 'Vector3D'
                #     lookahead_time = _BP_LOOKAHEAD_BASE +  _BP_LOOKAHEAD_TIME *  vehicleToFollow.get_velocity().z


                #     location3 = obsticle_vehicle.get_transform()
                    
                #     # Calculate the goal state set in the local frame for the local planner.
                #     # Current speed should be open loop for the velocity profile generation.
                #     ego_state = [location2.location.x, location2.location.y, location2.rotation.yaw, vehicleToFollow.get_velocity().z]

                #     # Set lookahead based on current speed.
                #     b_planner.set_lookahead(_BP_LOOKAHEAD_BASE + _BP_LOOKAHEAD_TIME * vehicleToFollow.get_velocity().z)

                    
                #     # Perform a state transition in the behavioural planner.
                #     b_planner.transition_state(evaluation.history, ego_state, current_speed)
                #     # print("The current speed = %f" % current_speed)

                #     # # Find the closest index to the ego vehicle.
                #     # closest_len, closest_index = behavioural_planner.get_closest_index(evaluation.history, ego_state)

                #     # print("closest_len: ", closest_len)
                #     # print("closest_index: ", closest_index)
                    
                #     # # Find the goal index that lies within the lookahead distance
                #     # # along the waypoints.
                #     # goal_index = b_planner.get_goal_index(evaluation.history, ego_state, closest_len, closest_index)

                #     # print("goal_index: ", goal_index)

                #     # # Set goal_state
                #     # goal_state = evaluation.history[goal_index]
            
                #     # Compute the goal state set from the behavioural planner's computed goal state.
                #     goal_state_set = l_planner.get_goal_state_set(b_planner._goal_index, b_planner._goal_state, evaluation.history, ego_state)

                #     # # Calculate planned paths in the local frame.
                #     paths, path_validity = l_planner.plan_paths(goal_state_set)

                #     # # Transform those paths back to the global frame.
                #     paths = local_planner.transform_paths(paths, ego_state)

                #     # Detect obsticle car
                #     obsticle_bbox, obsticle_predicted_distance, obsticle_predicted_angle = carDetector.getDistance(obsticle_vehicle, camera_rgb,carInTheImage,extrapolation=extrapolate,nOfFramesToSkip=nOfFramesToSkip)

                #     obsticle_bbox =[ [bbox[0],bbox[1]] for bbox in obsticle_bbox] 
     
                #     print("paths: ", paths)


                #     if obsticle_bbox:
                #         # # Perform collision checking.
                #         collision_check_array = l_planner._collision_checker.collision_check(paths, [obsticle_bbox])
                #         print("collision_check_array: ", collision_check_array)

                #         # Compute the best local path.
                #         best_index = l_planner._collision_checker.select_best_path_index(paths, collision_check_array, b_planner._goal_state)
                #         print("The best_index :", best_index)


                #     desired_speed = b_planner._goal_state[2]
                #     print("The desired_speed = %f" % desired_speed)

                # newX, newY = carDetector.CreatePointInFrontOFCar(location2.location.x, location2.location.y,location2.rotation.yaw)
                # new_angle = carDetector.getAngle([location2.location.x, location2.location.y], [newX, newY],
                #                              [location3.location.x, location3.location.y])
                
                # tmp = evaluation.history[counter-1]
                # currentPos = carla.Transform(carla.Location(tmp[0] + 0 ,tmp[1],tmp[2]),carla.Rotation(tmp[3],tmp[4],tmp[5]))
                # vehicleToFollow.set_transform(currentPos)


                # --------------------------------------------------------------





                # Update vehicle position by detecting vehicle to follow position
                newX, newY = carDetector.CreatePointInFrontOFCar(location1.location.x, location1.location.y,location1.rotation.yaw)
                angle = carDetector.getAngle([location1.location.x, location1.location.y], [newX, newY],
                                             [location2.location.x, location2.location.y])

                possibleAngle = 0
                drivableIndexes = []
                bbox = []

                
                bbox, predicted_distance,predicted_angle = carDetector.getDistance(vehicleToFollow, camera_rgb, carInTheImage,extrapolation=extrapolate,nOfFramesToSkip=nOfFramesToSkip)

                if frame % LP_FREQUENCY_DIVISOR == 0:
                    # This is the bottleneck and takes time to run, but it is necessary for chasing around turns
                    predicted_angle, drivableIndexes = semantic.FindPossibleAngle(image_segmentation, bbox, predicted_angle)  # still necessary; needs optimization
                    
                steer, throttle = drivingControlAdvanced.PredictSteerAndThrottle(predicted_distance,predicted_angle,None)

                # This is a new method
                send_control(vehicle, throttle, steer, 0)


                speed = np.linalg.norm(carla_vec_to_np_array(vehicle.get_velocity()))

                real_dist = location1.location.distance(location2.location)
             


                fps = round(1.0 / snapshot.timestamp.delta_seconds)

                # Draw the display.
                # draw_image(display, image_rgb)
                draw_image(display, img_rgb, image_segmentation,location1, location2,record=record,driveName=driveName,smazat=line)
                display.blit(
                    font.render('     FPS (real) % 5d ' % clock.get_fps(), True, (255, 255, 255)),
                    (8, 10))
                display.blit(
                    font.render('     FPS (simulated): % 5d ' % fps, True, (255, 255, 255)),
                    (8, 28))
                display.blit(
                    font.render('     speed: {:.2f} m/s'.format(speed), True, (255, 255, 255)),
                    (8, 46))
                display.blit(
                    font.render('     distance to target: {:.2f} m'.format(real_dist), True, (255, 255, 255)),
                    (8, 66))
                # display.blit(
                #     font.render('     cross track error: {:03d} cm'.format(cross_track_error), True, (255, 255, 255)),
                #     (8, 64))
                # display.blit(
                #     font.render('     max cross track error: {:03d} cm'.format(max_error), True, (255, 255, 255)),
                #     (8, 82))


                # Draw bbox on following vehicle
                if len(bbox) != 0:
                    points = [(int(bbox[i, 0]), int(bbox[i, 1])) for i in range(8)]
                    BB_COLOR = (248, 64, 24)
                    # draw lines
                    # base
                    pygame.draw.line(display, BB_COLOR, points[0], points[1])
                    pygame.draw.line(display, BB_COLOR, points[1], points[2])
                    pygame.draw.line(display, BB_COLOR, points[2], points[3])
                    pygame.draw.line(display, BB_COLOR, points[3], points[0])
                    # top
                    pygame.draw.line(display, BB_COLOR, points[4], points[5])
                    pygame.draw.line(display, BB_COLOR, points[5], points[6])
                    pygame.draw.line(display, BB_COLOR, points[6], points[7])
                    pygame.draw.line(display, BB_COLOR, points[7], points[4])
                    # base-top
                    pygame.draw.line(display, BB_COLOR, points[0], points[4])
                    pygame.draw.line(display, BB_COLOR, points[1], points[5])
                    pygame.draw.line(display, BB_COLOR, points[2], points[6])
                    pygame.draw.line(display, BB_COLOR, points[3], points[7])

                # DrawDrivable(drivableIndexes, image_segmentation.width // 10, image_segmentation.height // 10, display)



                pygame.display.flip()

                frame += 1
                # if save_gif and frame > 1000:
                #     print("frame=",frame)
                #     imgdata = pygame.surfarray.array3d(pygame.display.get_surface())
                #     imgdata = imgdata.swapaxes(0,1)
                #     if frame < 1200:
                #         images.append(imgdata)
                

    finally:
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')
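
CarlaSyncMode in the examples above comes from CARLA's synchronous_mode example
script. A simplified sketch of the pattern it implements (enable synchronous
mode, tick, and collect one data item per sensor for the same frame; details may
differ across CARLA versions):

import queue

class CarlaSyncMode:
    def __init__(self, world, *sensors, fps=30):
        self.world = world
        self.sensors = sensors
        self.delta_seconds = 1.0 / fps
        self._queues = []

    def __enter__(self):
        self._settings = self.world.get_settings()
        settings = self.world.get_settings()
        settings.synchronous_mode = True
        settings.fixed_delta_seconds = self.delta_seconds
        self.world.apply_settings(settings)
        # one queue for world snapshots, plus one per sensor
        q = queue.Queue()
        self.world.on_tick(q.put)
        self._queues.append(q)
        for sensor in self.sensors:
            sq = queue.Queue()
            sensor.listen(sq.put)
            self._queues.append(sq)
        return self

    def tick(self, timeout):
        frame = self.world.tick()
        # return the snapshot plus one item per sensor, all from the same frame
        return [self._retrieve(q, frame, timeout) for q in self._queues]

    def _retrieve(self, q, frame, timeout):
        while True:
            data = q.get(timeout=timeout)
            if data.frame == frame:
                return data

    def __exit__(self, *args):
        self.world.apply_settings(self._settings)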
Example #6
def main(optimalDistance,
         followDrivenPath,
         chaseMode,
         evaluateChasingCar,
         driveName='',
         record=False,
         followMode=False,
         resultsName='results',
         P=None,
         I=None,
         D=None,
         nOfFramesToSkip=0):
    # Imports
    # from cores.lane_detection.lane_detector import LaneDetector
    # from cores.lane_detection.camera_geometry import CameraGeometry
    # from cores.control.pure_pursuit import PurePursuitPlusPID

    # New imports
    from DrivingControl import DrivingControl
    from DrivingControlAdvanced import DrivingControlAdvanced
    from CarDetector import CarDetector
    from SemanticSegmentation import SemanticSegmentation

    # New Variables
    extrapolate = True
    optimalDistance = 1
    followDrivenPath = True
    evaluateChasingCar = True
    record = False
    chaseMode = True
    followMode = False
    counter = 1
    sensors = []

    vehicleToFollowSpawned = False
    obsticle_vehicleSpawned = False

    # New objects
    carDetector = CarDetector()
    drivingControl = DrivingControl(optimalDistance=optimalDistance)
    drivingControlAdvanced = DrivingControlAdvanced(
        optimalDistance=optimalDistance)
    evaluation = Evaluation()
    semantic = SemanticSegmentation()

    actor_list = []
    pygame.init()

    display = pygame.display.set_mode((800, 600),
                                      pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()

    client = carla.Client('localhost', 2000)
    client.set_timeout(80.0)

    #client.load_world('Town06')
    # client.load_world('Town04')
    world = client.get_world()
    weather_presets = find_weather_presets()
    # print(weather_presets)
    world.set_weather(weather_presets[3][0])
    # world.set_weather(carla.WeatherParameters.HardRainSunset)

    # controller = PurePursuitPlusPID()

    # Set BirdView
    birdview_producer = BirdViewProducer(
        client,
        PixelDimensions(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
        render_lanes_on_junctions=False,
    )

    try:
        m = world.get_map()

        blueprint_library = world.get_blueprint_library()

        veh_bp = random.choice(
            blueprint_library.filter('vehicle.dodge_charger.police'))
        vehicle = world.spawn_actor(veh_bp, m.get_spawn_points()[90])
        actor_list.append(vehicle)

        # New vehicle property
        vehicle.set_simulate_physics(True)

        if followDrivenPath:
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(
                carla.Location(first[0], first[1], first[2]),
                carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)

        # New Sensors
        collision_sensor = world.spawn_actor(
            blueprint_library.find('sensor.other.collision'),
            carla.Transform(),
            attach_to=vehicle)
        collision_sensor.listen(
            lambda event: evaluation.CollisionHandler(event))
        actor_list.append(collision_sensor)

        # front cam for object detection
        camera_rgb_blueprint = world.get_blueprint_library().find(
            'sensor.camera.rgb')
        camera_rgb_blueprint.set_attribute('fov', '90')
        camera_rgb = world.spawn_actor(camera_rgb_blueprint,
                                       carla.Transform(
                                           carla.Location(x=1.5, z=1.4, y=0.3),
                                           carla.Rotation(pitch=0)),
                                       attach_to=vehicle)
        actor_list.append(camera_rgb)
        sensors.append(camera_rgb)

        # segmentation camera
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0),
                            carla.Rotation(pitch=0)),  #5,3,0 # -0.3
            attach_to=vehicle)
        actor_list.append(camera_segmentation)
        sensors.append(camera_segmentation)

        # Set up local planner and behavioural planner
        # --------------------------------------------------------------

        # --------------------------------------------------------------

        frame = 0
        max_error = 0
        FPS = 30
        speed = 0
        cross_track_error = 0
        start_time = 0.0
        times = 8
        LP_FREQUENCY_DIVISOR = 8  # Frequency divisor to make the
                                  # local planner operate at a lower
                                  # frequency than the controller
                                  # (which operates at the simulation
                                  # frequency). Must be a natural
                                  # number.

        # TMP obstacle lists
        ob = np.array([
            [233.980630, 130.523910],
            [233.980630, 30.523910],
            [233.980630, 60.523910],
            [233.980630, 80.523910],
            [233.786942, 75.530586],
        ])
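        # each row is an obstacle (x, y) in world coordinates, consumed by the
        # Frenet planner's collision check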

        wx = []
        wy = []
        wz = []

        for p in evaluation.history:
            wp = carla.Transform(carla.Location(p[0], p[1], p[2]),
                                 carla.Rotation(p[3], p[4], p[5]))
            wx.append(wp.location.x)
            wy.append(wp.location.y)
            wz.append(wp.location.z)

        tx, ty, tyaw, tc, csp = generate_target_course(wx, wy, wz)
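        # Presumably generate_target_course fits a spline through the recorded
        # waypoints and returns sampled course points (tx, ty), course yaw
        # (tyaw), curvature (tc), and the spline object (csp) used for
        # Frenet/world conversions (a 3D variant of the PythonRobotics helper).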

        # initial state
        c_speed = 2.0 / 3.6  # current speed [m/s]
        c_d = 2.0  # current lateral position [m]
        c_d_d = 0.0  # current lateral speed [m/s]
        c_d_dd = 0.0  # current lateral acceleration [m/s^2]
        s0 = 0.0  # current course position
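        # Receding horizon: each tick re-plans from (s0, c_d, c_d_d, c_d_dd,
        # c_speed) and advances this state to the second point of the returned
        # path (see the hand-off at the bottom of the loop).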

        # Create a synchronous mode context.
        with CarlaSyncMode(world, *sensors, fps=FPS) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick()
                start_time += clock.get_time()

                # Advance the simulation and wait for the data.
                # tick_response = sync_mode.tick(timeout=2.0)

                # Display BirdView
                # Input for your model - call it every simulation step
                # returned result is np.ndarray with ones and zeros of shape (8, height, width)

                birdview = birdview_producer.produce(agent_vehicle=vehicle)
                bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview),
                                   cv2.COLOR_BGR2RGB)
                # NOTE imshow requires BGR color model
                cv2.imshow("BirdView RGB", bgr)
                cv2.waitKey(1)

                # snapshot, image_rgb, image_segmentation = tick_response
                snapshot, img_rgb, image_segmentation = sync_mode.tick(
                    timeout=2.0)

                print("type(image_segmentation): ", type(image_segmentation))

                # detect car in image with semantic segmentation camera
                carInTheImage = semantic.IsThereACarInThePicture(
                    image_segmentation)

                line = []

                # Spawn a vehicle to follow
                if not vehicleToFollowSpawned and followDrivenPath:
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(
                        location1.location.x, location1.location.y,
                        location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX * 5)
                    newY = location1.location.y - (diffY * 5)

                    start_pose.location.x = newX
                    start_pose.location.y = newY

                    vehicle.set_transform(start_pose)

                    start_pose2 = random.choice(m.get_spawn_points())

                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(bp, start_pose2)

                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation

                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z

                    vehicleToFollow.set_transform(start_pose2)

                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    # vehicleToFollow.set_autopilot(False)

                if followDrivenPath:
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(
                        carla.Location(tmp[0], tmp[1], tmp[2]),
                        carla.Rotation(tmp[3], tmp[4], tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1

                # Set up obstacle vehicle for testing
                # location1 = vehicle.get_transform()
                # location2 = vehicleToFollow.get_transform()

                # if not obsticle_vehicleSpawned and followDrivenPath:
                #     obsticle_vehicleSpawned = True
                #     # Adding new obsticle vehicle

                #     start_pose3 = random.choice(m.get_spawn_points())

                #     obsticle_vehicle = world.spawn_actor(
                #         random.choice(blueprint_library.filter('jeep')),
                #         start_pose3)

                #     start_pose3 = carla.Transform()
                #     start_pose3.rotation = start_pose2.rotation
                #     start_pose3.location.x = start_pose2.location.x
                #     start_pose3.location.y =  start_pose2.location.y + 50
                #     start_pose3.location.z =  start_pose2.location.z

                #     obsticle_vehicle.set_transform(start_pose3)

                #     actor_list.append(obsticle_vehicle)
                #     obsticle_vehicle.set_simulate_physics(True)

                # if frame % LP_FREQUENCY_DIVISOR == 0:
                """
                        **********************************************************************************************************************
                        *********************************************** Motion Planner *******************************************************
                        **********************************************************************************************************************
                """

                # tmp = evaluation.history[counter-1]
                # currentPos = carla.Transform(carla.Location(tmp[0] + 0 ,tmp[1],tmp[2]),carla.Rotation(tmp[3],tmp[4],tmp[5]))
                # vehicleToFollow.set_transform(currentPos)

                # ------------------- Frenet  --------------------------------
                path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,
                                               c_d_dd, ob)

                print("path length: ", len(path.x))

                new_vehicleToFollow_transform = carla.Transform()
                new_vehicleToFollow_transform.rotation = carla.Rotation(
                    pitch=0.0, yaw=math.degrees(path.yaw[1]), roll=0.0)

                new_vehicleToFollow_transform.location.x = path.x[1]
                new_vehicleToFollow_transform.location.y = path.y[1]
                new_vehicleToFollow_transform.location.z = path.z[1]

                vehicleToFollow.set_transform(new_vehicleToFollow_transform)

                # ------------------- Control for ego  --------------------------------

                # Set up new locations
                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()

                possibleAngle = 0
                drivableIndexes = []
                bbox = []

                bbox, predicted_distance, predicted_angle = carDetector.getDistance(
                    vehicleToFollow,
                    camera_rgb,
                    carInTheImage,
                    extrapolation=extrapolate,
                    nOfFramesToSkip=nOfFramesToSkip)

                if frame % LP_FREQUENCY_DIVISOR == 0:
                    # This is the bottleneck and takes time to run, but it is necessary for chasing around turns
                    predicted_angle, drivableIndexes = semantic.FindPossibleAngle(
                        image_segmentation, bbox, predicted_angle
                    )  # still necessary; needs optimization

                steer, throttle = drivingControlAdvanced.PredictSteerAndThrottle(
                    predicted_distance, predicted_angle, None)

                # This is a new method
                send_control(vehicle, throttle, steer, 0)

                speed = np.linalg.norm(
                    carla_vec_to_np_array(vehicle.get_velocity()))

                real_dist = location1.location.distance(location2.location)

                fps = round(1.0 / snapshot.timestamp.delta_seconds)

                # Draw the display.
                # draw_image(display, image_rgb)
                draw_image(display,
                           img_rgb,
                           image_segmentation,
                           location1,
                           location2,
                           record=record,
                           driveName=driveName,
                           smazat=line)
                display.blit(
                    font.render('     FPS (real) % 5d ' % clock.get_fps(),
                                True, (255, 255, 255)), (8, 10))
                display.blit(
                    font.render('     FPS (simulated): % 5d ' % fps, True,
                                (255, 255, 255)), (8, 28))
                display.blit(
                    font.render('     speed: {:.2f} m/s'.format(speed), True,
                                (255, 255, 255)), (8, 46))
                # display.blit(
                #     font.render('     cross track error: {:03d} cm'.format(cross_track_error), True, (255, 255, 255)),
                #     (8, 64))
                # display.blit(
                #     font.render('     max cross track error: {:03d} cm'.format(max_error), True, (255, 255, 255)),
                #     (8, 82))

                # Draw bbox on following vehicle
                if len(bbox) != 0:
                    points = [(int(bbox[i, 0]), int(bbox[i, 1]))
                              for i in range(8)]
                    BB_COLOR = (248, 64, 24)
                    # draw lines
                    # base
                    pygame.draw.line(display, BB_COLOR, points[0], points[1])
                    pygame.draw.line(display, BB_COLOR, points[1], points[2])
                    pygame.draw.line(display, BB_COLOR, points[2], points[3])
                    pygame.draw.line(display, BB_COLOR, points[3], points[0])
                    # top
                    pygame.draw.line(display, BB_COLOR, points[4], points[5])
                    pygame.draw.line(display, BB_COLOR, points[5], points[6])
                    pygame.draw.line(display, BB_COLOR, points[6], points[7])
                    pygame.draw.line(display, BB_COLOR, points[7], points[4])
                    # base-top
                    pygame.draw.line(display, BB_COLOR, points[0], points[4])
                    pygame.draw.line(display, BB_COLOR, points[1], points[5])
                    pygame.draw.line(display, BB_COLOR, points[2], points[6])
                    pygame.draw.line(display, BB_COLOR, points[3], points[7])

                # DrawDrivable(drivableIndexes, image_segmentation.width // 10, image_segmentation.height // 10, display)

                pygame.display.flip()

                frame += 1

                print("vehicle.get_transform()", vehicle.get_transform())

                print("vehicleToFollow.get_transform()",
                      vehicleToFollow.get_transform())

                # print("obsticle_vehicle.get_transform()", obsticle_vehicle.get_transform())

                s0 = path.s[1]
                c_d = path.d[1]
                c_d_d = path.d_d[1]
                c_d_dd = path.d_dd[1]
                c_speed = path.s_d[1]

                print("path.x[1]: ", path.x[1])
                print("path.y[1]: ", path.y[1])
                print("s: ", s0)

        cv2.destroyAllWindows()

    finally:
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')
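
send_control is a small helper used throughout these examples. A minimal sketch
of what it presumably wraps (names and defaults here are assumptions):

import carla

def send_control(vehicle, throttle, steer, brake,
                 hand_brake=False, reverse=False):
    # build a carla.VehicleControl and apply it to the vehicle
    control = carla.VehicleControl(throttle=float(throttle),
                                   steer=float(steer),
                                   brake=float(brake),
                                   hand_brake=hand_brake,
                                   reverse=reverse)
    vehicle.apply_control(control)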
Example #7
def main(optimalDistance,
         followDrivenPath,
         chaseMode,
         evaluateChasingCar,
         driveName='',
         record=False,
         followMode=False,
         resultsName='results',
         P=None,
         I=None,
         D=None,
         nOfFramesToSkip=0):

    # New imports
    from car_chasing.DrivingControl import DrivingControl
    from car_chasing.DrivingControlAdvanced import DrivingControlAdvanced
    from car_chasing.CarDetector import CarDetector
    from car_chasing.SemanticSegmentation import SemanticSegmentation

    # New Variables
    extrapolate = True
    optimalDistance = 8
    followDrivenPath = True
    evaluateChasingCar = True
    record = False
    chaseMode = True
    followMode = False
    counter = 1
    sensors = []

    vehicleToFollowSpawned = False
    obsticle_vehicleSpawned = False

    # New objects
    carDetector = CarDetector()
    drivingControl = DrivingControl(optimalDistance=optimalDistance)
    drivingControlAdvanced = DrivingControlAdvanced(
        optimalDistance=optimalDistance)
    evaluation = Evaluation()
    semantic = SemanticSegmentation()

    actor_list = []
    pygame.init()

    display = pygame.display.set_mode((800, 600),
                                      pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()

    client = carla.Client('localhost', 2000)
    client.set_timeout(80.0)

    world = client.get_world()
    weather_presets = find_weather_presets()
    world.set_weather(weather_presets[3][0])

    # Set BirdView
    birdview_producer = BirdViewProducer(
        client,
        PixelDimensions(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
        render_lanes_on_junctions=False,
    )

    try:
        m = world.get_map()

        blueprint_library = world.get_blueprint_library()

        veh_bp = random.choice(
            blueprint_library.filter('vehicle.dodge_charger.police'))
        vehicle = world.spawn_actor(veh_bp, m.get_spawn_points()[90])
        actor_list.append(vehicle)

        # New vehicle property
        vehicle.set_simulate_physics(True)

        if followDrivenPath:
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(
                carla.Location(first[0], first[1], first[2]),
                carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)

        # New Sensors

        # front cam for object detection
        camera_rgb_blueprint = world.get_blueprint_library().find(
            'sensor.camera.rgb')
        camera_rgb_blueprint.set_attribute('fov', '90')
        camera_rgb = world.spawn_actor(camera_rgb_blueprint,
                                       carla.Transform(
                                           carla.Location(x=1.5, z=1.4, y=0.3),
                                           carla.Rotation(pitch=0)),
                                       attach_to=vehicle)
        actor_list.append(camera_rgb)
        sensors.append(camera_rgb)

        # segmentation camera
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0),
                            carla.Rotation(pitch=0)),  #5,3,0 # -0.3
            attach_to=vehicle)
        actor_list.append(camera_segmentation)
        sensors.append(camera_segmentation)

        # Set up local planner and behavioural planner
        # --------------------------------------------------------------
        # Planning Constants
        NUM_PATHS = 7
        BP_LOOKAHEAD_BASE = 8.0  # m
        BP_LOOKAHEAD_TIME = 2.0  # s
        PATH_OFFSET = 1.5  # m
        CIRCLE_OFFSETS = [-1.0, 1.0, 3.0]  # m
        CIRCLE_RADII = [1.5, 1.5, 1.5]  # m
        TIME_GAP = 1.0  # s
        PATH_SELECT_WEIGHT = 10
        A_MAX = 1.5  # m/s^2
        SLOW_SPEED = 2.0  # m/s
        STOP_LINE_BUFFER = 3.5  # m
        LEAD_VEHICLE_LOOKAHEAD = 20.0  # m
        # Frequency divisor to make the local planner operate at a lower
        # frequency than the controller (which operates at the simulation
        # frequency). Must be a natural number.
        LP_FREQUENCY_DIVISOR = 2

        PREV_BEST_PATH = []
        stopsign_fences = []

        # --------------------------------------------------------------
        frame = 0
        max_error = 0
        FPS = 30
        speed = 0
        cross_track_error = 0
        start_time = 0.0
        times = 8
        LP_FREQUENCY_DIVISOR = 4  # overrides the value of 2 set above

        # ==============================================================================
        # -- Frenet-related setup ----------------------------------------
        # ==============================================================================

        # ----------------- Set Obstacle Positions -----------------------

        # Temporary obstacle list: each row is an initial (x, y) position in
        # map coordinates, refreshed from the spawned actors every frame.
        ob = np.array([
            [233.980630, 50.523910],
            [232.980630, 80.523910],
            [234.980630, 100.523910],
            [235.786942, 110.530586],
            [234.980630, 120.523910],
        ])

        # -----------------Set way points  -----------------------
        wx = []
        wy = []
        wz = []

        for p in evaluation.history:
            wp = carla.Transform(carla.Location(p[0], p[1], p[2]),
                                 carla.Rotation(p[3], p[4], p[5]))
            wx.append(wp.location.x)
            wy.append(wp.location.y)
            wz.append(wp.location.z)

        tx, ty, tyaw, tc, csp = generate_target_course(wx, wy, wz)
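        # generate_target_course presumably fits a cubic spline through the
        # recorded waypoints and returns sampled positions (tx, ty), heading
        # (tyaw), curvature (tc) and the spline object (csp) used as the
        # Frenet reference line.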

        old_tx = deepcopy(tx)
        old_ty = deepcopy(ty)

        # Leading waypoints
        leading_waypoints = []

        # other actors
        other_actor_ids = []

        # Trailing
        trail_path = None
        real_dist = 0

        # initial state
        c_speed = 10.0 / 3.6  # current speed [m/s]
        c_d = 2.0  # current lateral position [m]
        c_d_d = 0.0  # current lateral speed [m/s]
        c_d_dd = 0.0  # current lateral acceleration [m/s^2]
        s0 = 0.0  # current course position

        trail_c_speed = 20.0 / 3.6  # current speed [m/s]
        trail_c_d = 2.0  # current lateral position [m]
        trail_c_d_d = 0.0  # current lateral speed [m/s]
        trail_c_d_dd = 0.0  # current lateral acceleration [m/s^2]
        trail_s0 = 0.0  # current course position
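
        # The (s0, c_d, c_d_d, c_d_dd, c_speed) tuple is the Frenet state fed
        # to frenet_optimal_planning: s0 is arc length along the reference
        # spline, c_d the lateral offset with its first two derivatives, and
        # c_speed the longitudinal speed (standard Frenet-frame convention,
        # assuming a PythonRobotics-style planner).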

        i = 0

        # Create a synchronous mode context.
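        # CarlaSyncMode appears to be the helper from CARLA's
        # synchronous_mode.py example: it ticks the world and all attached
        # sensors in lockstep at the requested fps and returns their frames
        # together.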
        with CarlaSyncMode(world, *sensors, fps=FPS) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick()
                start_time += clock.get_time()

                # snapshot, image_rgb, image_segmentation = tick_response
                snapshot, img_rgb, image_segmentation = sync_mode.tick(
                    timeout=2.0)

                # detect car in image with semantic segmentation camera
                carInTheImage = semantic.IsThereACarInThePicture(
                    image_segmentation)

                line = []

                current_speed = np.linalg.norm(
                    carla_vec_to_np_array(vehicle.get_velocity()))

                # Spawn a vehicle to follow
                if not vehicleToFollowSpawned and followDrivenPath:
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(
                        location1.location.x, location1.location.y,
                        location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX * 5)
                    newY = location1.location.y - (diffY * 5)

                    start_pose.location.x = newX
                    start_pose.location.y = newY

                    vehicle.set_transform(start_pose)

                    start_pose2 = random.choice(m.get_spawn_points())

                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(bp, start_pose2)

                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation

                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z

                    vehicleToFollow.set_transform(start_pose2)

                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    vehicleToFollow.set_autopilot(True)
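
                    # Spawning at a random valid spawn point and then
                    # teleporting into place (rather than spawning at the
                    # target pose directly) likely avoids spawn-collision
                    # failures from world.spawn_actor.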

                if followDrivenPath:
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(
                        carla.Location(tmp[0] + 5, tmp[1], tmp[2]),
                        carla.Rotation(tmp[3], tmp[4], tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1

                # ------------------- Set up obstacle vehicles for testing --------------------

                if not obstacle_vehicleSpawned and followDrivenPath:
                    obstacle_vehicleSpawned = True

                    for obstacle_p in ob:

                        start_pose3 = random.choice(m.get_spawn_points())

                        obstacle_vehicle = world.spawn_actor(
                            random.choice(blueprint_library.filter('jeep')),
                            start_pose3)

                        start_pose3 = carla.Transform()
                        start_pose3.rotation = start_pose2.rotation
                        start_pose3.location.x = obstacle_p[0]
                        start_pose3.location.y = obstacle_p[1]
                        start_pose3.location.z = start_pose2.location.z

                        obstacle_vehicle.set_transform(start_pose3)
                        obstacle_vehicle.set_autopilot(True)

                        actor_list.append(obstacle_vehicle)
                        obstacle_vehicle.set_simulate_physics(True)

                        other_actor_ids.append(obstacle_vehicle.id)

                #---- Leading Frenet -----------------------------
                path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,
                                               c_d_dd, ob)
                ob = np.array([[
                    actor.get_transform().location.x,
                    actor.get_transform().location.y
                ] for actor in actor_list if actor.id in other_actor_ids])
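
                # Note that ob is refreshed *after* the planning call above,
                # so each plan uses the obstacle positions from the previous
                # frame (the hardcoded list on the first iteration).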

                # --------------------------- Apply the leading Frenet plan

                # Set new way points
                new_vehicleToFollow_transform = carla.Transform()
                new_vehicleToFollow_transform.rotation = carla.Rotation(
                    pitch=0.0, yaw=math.degrees(path.yaw[1]), roll=0.0)

                new_vehicleToFollow_transform.location.x = path.x[1]
                new_vehicleToFollow_transform.location.y = path.y[1]
                new_vehicleToFollow_transform.location.z = path.z[1]

                wp = (new_vehicleToFollow_transform.location.x,
                      new_vehicleToFollow_transform.location.y,
                      new_vehicleToFollow_transform.location.z,
                      new_vehicleToFollow_transform.rotation.pitch,
                      new_vehicleToFollow_transform.rotation.yaw,
                      new_vehicleToFollow_transform.rotation.roll)

                vehicleToFollow.set_transform(new_vehicleToFollow_transform)

                s0 = path.s[1]
                c_d = path.d[1]
                c_d_d = path.d_d[1]
                c_d_dd = path.d_dd[1]
                c_speed = path.s_d[1]

                if i > 2:
                    leading_waypoints.append(wp)

                location2 = None

                # ---- Car chasing -----
                # Give the leading car time to accumulate waypoints first

                # ------- Trailing Frenet --------------------------------------
                # Re-plan the trailing path periodically once enough
                # waypoints exist
                if i > 50:
                    #                     trailing_csp = look_ahead_local_planer(waypoints=leading_waypoints, current_idx=0, look_ahead=len(leading_waypoints))

                    #                     speed = get_speed(vehicle)
                    FRENET_FREQUENCY_DIVISOR = 1
                    if (frame % FRENET_FREQUENCY_DIVISOR
                            == 0) and (real_dist > 5):

                        wx = []
                        wy = []
                        wz = []

                        for p in leading_waypoints:
                            wp = carla.Transform(
                                carla.Location(p[0], p[1], p[2]),
                                carla.Rotation(p[3], p[4], p[5]))
                            wx.append(wp.location.x)
                            wy.append(wp.location.y)
                            wz.append(wp.location.z)

                        tx, ty, tyaw, tc, trailing_csp = generate_target_course(
                            wx, wy, wz)

                        trail_path = frenet_optimal_planning(
                            trailing_csp, trail_s0, trail_c_speed, trail_c_d,
                            trail_c_d_d, trail_c_d_dd, ob)

                        if trail_path:

                            trail_s0 = trail_path.s[1]
                            trail_c_d = trail_path.d[1]
                            trail_c_d_d = trail_path.d_d[1]
                            trail_c_d_dd = trail_path.d_dd[1]
                            trail_c_speed = trail_path.s_d[1]

                            new_vehicle_transform = carla.Transform()
                            new_vehicle_transform.rotation = carla.Rotation(
                                pitch=0.0,
                                yaw=math.degrees(trail_path.yaw[1]),
                                roll=0.0)

                            new_vehicle_transform.location.x = trail_path.x[1]
                            new_vehicle_transform.location.y = trail_path.y[1]
                            new_vehicle_transform.location.z = trail_path.z[1]

                            vehicle.set_transform(new_vehicle_transform)
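
                            # As with the leader, the chase vehicle is moved
                            # by teleporting along its planned path rather
                            # than by throttle/steer control.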

                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()

                speed = np.linalg.norm(
                    carla_vec_to_np_array(vehicle.get_velocity()))

                real_dist = location1.location.distance(location2.location)

                fps = round(1.0 / snapshot.timestamp.delta_seconds)

                # Draw the display.
                # draw_image(display, image_rgb)
                draw_image(display,
                           img_rgb,
                           image_segmentation,
                           location1,
                           location2,
                           record=record,
                           driveName=driveName,
                           smazat=line)
                display.blit(
                    font.render('     FPS (real) % 5d ' % clock.get_fps(),
                                True, (255, 255, 255)), (8, 10))
                display.blit(
                    font.render('     FPS (simulated): % 5d ' % fps, True,
                                (255, 255, 255)), (8, 28))
                display.blit(
                    font.render('     speed: {:.2f} m/s'.format(speed), True,
                                (255, 255, 255)), (8, 46))
                display.blit(
                    font.render(
                        '     distance to target: {:.2f} m'.format(real_dist),
                        True, (255, 255, 255)), (8, 66))

                # Display BirdView
                # Input for your model - call it every simulation step.
                # The raw result is an np.ndarray of ones and zeros with
                # shape (8, height, width); as_rgb() converts it into a
                # viewable image.

                birdview = birdview_producer.produce(
                    agent_vehicle=vehicleToFollow)
                bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview),
                                   cv2.COLOR_BGR2RGB)
                # NOTE imshow requires BGR color model
                cv2.imshow("BirdView RGB", bgr)
                cv2.waitKey(1)
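                # waitKey(1) lets OpenCV's HighGUI event loop repaint the
                # imshow window each simulation step.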

                pygame.display.flip()

                frame += 1
                i += 1

    finally:
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')