def __init__(self):
    """Initialize detector state: model checkpoint path, input sizing, and
    smoothing buffers for distance/angle predictions."""
    # Path to the trained detection checkpoint restored into the TF session.
    self.MODEL_NAME = './model/best_model_Epoch_18_step_8245_mAP_0.2501_loss_1.9889_lr_0.0001'
    # Network input resolution frames are resized to.
    self.new_size = [320, 320]
    # Original frame dimensions; 0 until the first frame is seen.
    self.height_ori, self.width_ori = 0, 0
    # Preserve aspect ratio when resizing (pad rather than stretch).
    self.letterbox_resize = True
    # Number of recent measurements retained for smoothing.
    self.lastN = 5
    self.lastNDistances = []
    self.lastNAngles = []
    # Exponential moving averages of predicted distance/angle;
    # alpha is the EMA smoothing factor.
    self.exponentialMovingAverageDist = 0
    self.exponentialMovingAverageAngle = 0
    self.alpha = 0.5
    # TF session placeholder — presumably created lazily elsewhere; confirm.
    self.sess = None
    self.segmentation = SemanticSegmentation()
def main(optimalDistance, followDrivenPath, chaseMode, evaluateChasingCar,
         driveName='', record=False, followMode=False, resultsName='results',
         P=None, I=None, D=None, nOfFramesToSkip=0):
    """Baseline chase scenario: spawn an ego jeep and a lead car, replay a
    recorded drive for the lead car, and render the chase while applying a
    constant placeholder control command.

    NOTE(review): this module defines `main` multiple times; later
    definitions shadow this one at import time.

    Args:
        optimalDistance: desired following distance used by the error metric.
        followDrivenPath: if True, replay a recorded trajectory for the lead
            car (also repositions the ego car to the recording's first pose).
        chaseMode, followMode: unused in this variant.
        evaluateChasingCar: accumulate distance errors and write them to res/.
        driveName: path of the recorded drive; its second path component names
            the CarPosition recording.
        record: forwarded to draw_image for frame recording.
        resultsName: basename of the results file written under res/.
        P, I, D, nOfFramesToSkip: unused in this variant.
    """
    counter = 1        # index into the replayed trajectory
    actor_list = []    # every spawned actor; destroyed in the finally block
    pygame.init()
    carDetector = CarDetector()
    # Recorder for the ego car's own trajectory.
    position = CarPosition(driveName.split('/')[1])
    position.startRecording = True
    evaluation = Evaluation()
    semantic = SemanticSegmentation()
    display = pygame.display.set_mode((800, 600),
                                      pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()
    client = carla.Client('localhost', 2000)
    client.set_timeout(2.0)
    world = client.get_world()
    vehicleToFollowSpawned = False
    try:
        m = world.get_map()
        start_pose = random.choice(m.get_spawn_points())
        blueprint_library = world.get_blueprint_library()
        # Ego vehicle.
        vehicle = world.spawn_actor(
            random.choice(blueprint_library.filter('jeep')), start_pose)
        actor_list.append(vehicle)
        vehicle.set_simulate_physics(True)
        if followDrivenPath:
            # Start the ego car at the first pose of the recorded drive.
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(
                carla.Location(first[0], first[1], first[2]),
                carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)
        collision_sensor = world.spawn_actor(
            blueprint_library.find('sensor.other.collision'),
            carla.Transform(), attach_to=vehicle)
        collision_sensor.listen(
            lambda event: evaluation.CollisionHandler(event))
        actor_list.append(collision_sensor)
        blueprint = world.get_blueprint_library().find('sensor.camera.rgb')
        # NOTE(review): these attributes are set on `blueprint`, but every
        # camera below is spawned from a fresh blueprint_library.find()
        # result, so they appear to have no effect — confirm intent.
        blueprint.set_attribute('image_size_x', '800')
        blueprint.set_attribute('image_size_y', '600')
        blueprint.set_attribute('fov', '90')
        # Pair of RGB cameras offset left/right of the windshield center.
        camera_rgb = world.spawn_actor(
            blueprint_library.find('sensor.camera.rgb'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0.3),
                            carla.Rotation(pitch=0)),
            attach_to=vehicle)
        actor_list.append(camera_rgb)
        camera_rgb2 = world.spawn_actor(
            blueprint_library.find('sensor.camera.rgb'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=-0.3),
                            carla.Rotation(pitch=0)),
            attach_to=vehicle)
        actor_list.append(camera_rgb2)
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0),
                            carla.Rotation(pitch=0)),
            attach_to=vehicle)
        actor_list.append(camera_segmentation)
        # Create a synchronous mode context so sensors stay in lock-step.
        with CarlaSyncMode(world, camera_rgb, camera_rgb2,
                           camera_segmentation, fps=30) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick(30)
                # Advance the simulation and wait for the sensor data.
                snapshot, img_rgb, image_rgb2, image_segmentation = sync_mode.tick(
                    timeout=2.0)
                line = []
                if not vehicleToFollowSpawned and followDrivenPath:
                    # One-time spawn: pull the ego car back five heading-steps
                    # and put the lead car at the ego car's original pose.
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(
                        location1.location.x, location1.location.y,
                        location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX * 5)
                    newY = location1.location.y - (diffY * 5)
                    start_pose.location.x = newX
                    start_pose.location.y = newY
                    vehicle.set_transform(start_pose)
                    # Spawn the lead car at a random legal point first, then
                    # teleport it onto the path (avoids spawn collisions).
                    start_pose2 = random.choice(m.get_spawn_points())
                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(bp, start_pose2)
                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation
                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z
                    vehicleToFollow.set_transform(start_pose2)
                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    vehicleToFollow.set_autopilot(False)
                if followDrivenPath:
                    # Replay the recording pose-by-pose; stop when exhausted.
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(
                        carla.Location(tmp[0], tmp[1], tmp[2]),
                        carla.Rotation(tmp[3], tmp[4], tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1
                fps = round(1.0 / snapshot.timestamp.delta_seconds)
                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()
                position.SaveCarPosition(location1)
                newX, newY = carDetector.CreatePointInFrontOFCar(
                    location1.location.x, location1.location.y,
                    location1.rotation.yaw)
                # Angle between the ego heading and the lead car's position.
                angle = carDetector.getAngle(
                    [location1.location.x, location1.location.y],
                    [newX, newY],
                    [location2.location.x, location2.location.y])
                possibleAngle = 0
                carInTheImage = semantic.IsThereACarInThePicture(
                    image_segmentation)
                bbox = carDetector.get3DboundingBox(vehicleToFollow,
                                                    camera_rgb, carInTheImage)
                # Choose appropriate steer and throttle here.
                # NOTE(review): placeholder control — full throttle, no steer.
                steer, throttle = 0, 1
                vehicle.apply_control(
                    carla.VehicleControl(throttle=throttle, steer=steer))
                if evaluateChasingCar:
                    evaluation.AddError(
                        location1.location.distance(location2.location),
                        optimalDistance)
                velocity1 = vehicle.get_velocity()
                velocity2 = vehicleToFollow.get_velocity()
                draw_image(display, image_rgb2, image_segmentation, location1,
                           location2, record=record, driveName=driveName,
                           smazat=line)
                display.blit(
                    font.render('% 5d FPS (real)' % clock.get_fps(), True,
                                (255, 255, 255)), (8, 10))
                display.blit(
                    font.render('% 5d FPS (simulated)' % fps, True,
                                (255, 255, 255)), (8, 28))
                if len(bbox) != 0:
                    # Draw the projected 3D bounding box of the lead car.
                    points = [(int(bbox[i, 0]), int(bbox[i, 1]))
                              for i in range(8)]
                    BB_COLOR = (248, 64, 24)
                    # base
                    pygame.draw.line(display, BB_COLOR, points[0], points[1])
                    pygame.draw.line(display, BB_COLOR, points[1], points[2])
                    pygame.draw.line(display, BB_COLOR, points[2], points[3])
                    pygame.draw.line(display, BB_COLOR, points[3], points[0])
                    # top
                    pygame.draw.line(display, BB_COLOR, points[4], points[5])
                    pygame.draw.line(display, BB_COLOR, points[5], points[6])
                    pygame.draw.line(display, BB_COLOR, points[6], points[7])
                    pygame.draw.line(display, BB_COLOR, points[7], points[4])
                    # base-top
                    pygame.draw.line(display, BB_COLOR, points[0], points[4])
                    pygame.draw.line(display, BB_COLOR, points[1], points[5])
                    pygame.draw.line(display, BB_COLOR, points[2], points[6])
                    pygame.draw.line(display, BB_COLOR, points[3], points[7])
                real_dist = location1.location.distance(location2.location)
                pygame.display.flip()
    except Exception as ex:
        print(ex)
    finally:
        print('Ending')
        if evaluateChasingCar:
            # Persist the accumulated distance-error metrics.
            if not os.path.exists('res'):
                os.mkdir('res')
            evaluation.WriteIntoFileFinal(
                os.path.join('res', resultsName + '.txt'),
                driveName=driveName)
        position.SaveHistoryToFile()
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')
def main(optimalDistance, followDrivenPath, chaseMode, evaluateChasingCar,
         driveName='', record=False, followMode=False, resultsName='results',
         P=None, I=None, D=None, nOfFramesToSkip=0):
    """Interactive chase scenario.

    Either chases the lead car using detector-based distance/angle control
    (chaseMode) or trails it with a pure-pursuit path follower (followMode);
    when the lead car is not on a recorded path, the ego car is driven
    manually via the keyboard.

    NOTE(review): this module defines `main` multiple times; later
    definitions shadow this one at import time.

    Args:
        optimalDistance: desired following distance fed to the controllers.
        followDrivenPath: replay a recorded trajectory for the lead car;
            otherwise the lead car runs on autopilot and the ego car is
            keyboard-controlled.
        chaseMode: use the detector + DrivingControl(Advanced) controller.
        evaluateChasingCar: accumulate distance errors and write to res/.
        driveName: recorded-drive path (also passed to ManualControl).
        record: forwarded to draw_image for frame recording.
        followMode: use the pure-pursuit follower instead of chase control.
        resultsName: basename of the results file under res/.
        P, I, D: optional PID gains for DrivingControlAdvanced.
        nOfFramesToSkip: detection-extrapolation parameter.
    """
    counter = 1        # index into the replayed trajectory
    actor_list = []    # every spawned actor; destroyed in the finally block
    pygame.init()
    carDetector = CarDetector()
    drivingControl = DrivingControl(optimalDistance=optimalDistance)
    # Advanced (PID) controller; use caller-supplied gains when given.
    if P != None:
        drivingControlAdvanced = DrivingControlAdvanced(
            optimalDistance=optimalDistance, P=P, I=I, D=D)
    else:
        drivingControlAdvanced = DrivingControlAdvanced(
            optimalDistance=optimalDistance)
    visualisation = VizualizeDrivingPath()
    # Keyboard control + trajectory recorder for the ego car.
    myControl = ManualControl(driveName, name=str(nOfFramesToSkip))
    myControl.startRecording = True
    advanced = False      # toggle: use the PID controller in chaseMode
    extrapolate = True    # detector may extrapolate across missed frames
    evaluation = Evaluation()
    semantic = SemanticSegmentation()
    lookAheadDistance = 5
    purePursuit = PurePursuitAlgorithm(lookAheadDistance=lookAheadDistance)
    display = pygame.display.set_mode((800, 600),
                                      pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()
    client = carla.Client('localhost', 2000)
    client.set_timeout(2.0)
    world = client.get_world()
    vehicleToFollowSpawned = False
    try:
        m = world.get_map()
        start_pose = random.choice(m.get_spawn_points())
        blueprint_library = world.get_blueprint_library()
        # Ego vehicle.
        vehicle = world.spawn_actor(
            random.choice(blueprint_library.filter('jeep')), start_pose)
        actor_list.append(vehicle)
        vehicle.set_simulate_physics(True)
        if followDrivenPath:
            # Start the ego car at the first pose of the recorded drive.
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(
                carla.Location(first[0], first[1], first[2]),
                carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)
        collision_sensor = world.spawn_actor(
            blueprint_library.find('sensor.other.collision'),
            carla.Transform(), attach_to=vehicle)
        collision_sensor.listen(
            lambda event: evaluation.CollisionHandler(event))
        actor_list.append(collision_sensor)
        # Find the blueprint of the sensor.
        blueprint = world.get_blueprint_library().find('sensor.camera.rgb')
        # Modify the attributes of the blueprint to set image resolution and
        # field of view.
        # NOTE(review): the cameras below are spawned from fresh
        # blueprint_library.find() results, so these attributes appear
        # unused — confirm intent.
        blueprint.set_attribute('image_size_x', '800')
        blueprint.set_attribute('image_size_y', '600')
        blueprint.set_attribute('fov', '90')
        camera_rgb = world.spawn_actor(
            blueprint_library.find('sensor.camera.rgb'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0.3),
                            carla.Rotation(pitch=0)),
            attach_to=vehicle)
        actor_list.append(camera_rgb)
        # NOTE(review): unlike the other sensors, camera_rgb2 is spawned
        # WITHOUT attach_to (the attachment was commented out), so it stays
        # fixed in the world — confirm this is deliberate.
        camera_rgb2 = world.spawn_actor(
            blueprint_library.find('sensor.camera.rgb'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=-0.3),
                            carla.Rotation(pitch=0)))
        actor_list.append(camera_rgb2)
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0),
                            carla.Rotation(pitch=0)),
            attach_to=vehicle)
        actor_list.append(camera_segmentation)
        # Create a synchronous mode context.
        with CarlaSyncMode(world, camera_rgb, camera_rgb2,
                           camera_segmentation, fps=30) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick(30)
                # Advance the simulation and wait for the data.
                snapshot, img_rgb, image_rgb2, image_segmentation = sync_mode.tick(
                    timeout=2.0)
                line = []
                if not vehicleToFollowSpawned and not followDrivenPath:
                    # One-time spawn (free-driving mode): place the lead car
                    # 5 m ahead of the ego car along the axis closest to the
                    # ego yaw, then let the autopilot drive it.
                    vehicleToFollowSpawned = True
                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation
                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z
                    location1 = vehicle.get_transform()
                    rotation1 = location1.rotation
                    print(rotation1.yaw, abs(rotation1.yaw))
                    if abs(rotation1.yaw - 180.0) < 45.0 or abs(rotation1.yaw + 180.0) < 45.0:
                        print('1')
                        start_pose2.location.x = start_pose.location.x - 5
                    elif abs(rotation1.yaw) < 45.0:
                        print('2')
                        start_pose2.location.x = start_pose.location.x + 5
                    elif abs(rotation1.yaw + 90.0) < 45.0:
                        print('3')
                        start_pose2.location.y = start_pose.location.y - 5
                    elif abs(rotation1.yaw - 90.0) < 45.0:
                        print('4')
                        start_pose2.location.y = start_pose.location.y + 5
                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(bp, start_pose2)
                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    vehicleToFollow.set_autopilot(True)
                elif not vehicleToFollowSpawned and followDrivenPath:
                    # One-time spawn (replay mode): pull the ego car back five
                    # heading-steps and put the lead car at its original pose.
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(
                        location1.location.x, location1.location.y,
                        location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX * 5)
                    newY = location1.location.y - (diffY * 5)
                    start_pose.location.x = newX
                    start_pose.location.y = newY
                    vehicle.set_transform(start_pose)
                    # Spawn at a random legal point, then teleport onto path.
                    start_pose2 = random.choice(m.get_spawn_points())
                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(bp, start_pose2)
                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation
                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z
                    vehicleToFollow.set_transform(start_pose2)
                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    vehicleToFollow.set_autopilot(False)
                if followDrivenPath:
                    # Replay the recording pose-by-pose; stop when exhausted.
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(
                        carla.Location(tmp[0], tmp[1], tmp[2]),
                        carla.Rotation(tmp[3], tmp[4], tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1
                fps = round(1.0 / snapshot.timestamp.delta_seconds)
                # Manual keyboard control of the ego car in free-driving mode.
                if not followDrivenPath:
                    myControl._parse_vehicle_keys(pygame.key.get_pressed(),
                                                  clock.get_time())
                    vehicle.apply_control(myControl._control)
                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()
                myControl.SaveCarPosition(location1)
                newX, newY = carDetector.CreatePointInFrontOFCar(
                    location1.location.x, location1.location.y,
                    location1.rotation.yaw)
                # Ground-truth angle between ego heading and the lead car.
                angle = carDetector.getAngle(
                    [location1.location.x, location1.location.y],
                    [newX, newY],
                    [location2.location.x, location2.location.y])
                possibleAngle = 0
                drivableIndexes = []
                bbox = []
                if chaseMode:
                    # Vision-based chase: estimate distance/angle from the
                    # detector, then convert into steer/throttle.
                    carInTheImage = semantic.IsThereACarInThePicture(
                        image_segmentation)
                    bbox, predicted_distance, predicted_angle = carDetector.getDistance(
                        vehicleToFollow, camera_rgb, carInTheImage,
                        extrapolation=extrapolate,
                        nOfFramesToSkip=nOfFramesToSkip)
                    if advanced:
                        # Refine the angle using drivable area from the
                        # segmentation image, then use the PID controller.
                        possibleAngle, drivableIndexes = semantic.FindPossibleAngle(
                            image_segmentation, bbox, predicted_angle)
                        steer, throttle = drivingControlAdvanced.PredictSteerAndThrottle(
                            predicted_distance, possibleAngle, None)
                    else:
                        steer, throttle = drivingControl.PredictSteerAndThrottle(
                            predicted_distance, predicted_angle, None)
                    vehicle.apply_control(
                        carla.VehicleControl(throttle=throttle, steer=steer))
                    if evaluateChasingCar:
                        evaluation.AddError(
                            location1.location.distance(location2.location),
                            optimalDistance)
                elif followMode:
                    # Pure-pursuit follower: steer towards a look-ahead point
                    # on the lead car's recorded path.
                    angle = 0
                    carInTheImage = semantic.IsThereACarInThePicture(
                        image_segmentation)
                    bbox, predicted_distance, predicted_angle = carDetector.getDistance(
                        vehicleToFollow, camera_rgb, carInTheImage)
                    purePursuit.AddPathPoint(location2.location.x,
                                             location2.location.y)
                    newX, newY = carDetector.CreatePointInFrontOFCar(
                        location1.location.x, location1.location.y,
                        location1.rotation.yaw)
                    targetX, targetY = purePursuit.GetNextPoint(
                        location1.location.x, location1.location.y)
                    predicted_angle = carDetector.getAngle(
                        [location1.location.x, location1.location.y],
                        [newX, newY], [targetX, targetY])
                    possibleAngle = predicted_angle
                    steer, throttle = drivingControl.PredictSteerAndThrottle(
                        predicted_distance, predicted_angle, None)
                    vehicle.apply_control(
                        carla.VehicleControl(throttle=throttle, steer=steer))
                    if evaluateChasingCar:
                        evaluation.AddError(
                            location1.location.distance(location2.location),
                            optimalDistance)
                velocity1 = vehicle.get_velocity()
                velocity2 = vehicleToFollow.get_velocity()
                visualisation.Add(
                    velocity1, velocity2,
                    location1.location.distance(location2.location), angle)
                draw_image(display, image_rgb2, image_segmentation, location1,
                           location2, record=record, driveName=driveName,
                           smazat=line)
                display.blit(
                    font.render('% 5d FPS (real)' % clock.get_fps(), True,
                                (255, 255, 255)), (8, 10))
                display.blit(
                    font.render('% 5d FPS (simulated)' % fps, True,
                                (255, 255, 255)), (8, 28))
                if len(bbox) != 0:
                    # Draw the projected 3D bounding box of the lead car.
                    points = [(int(bbox[i, 0]), int(bbox[i, 1]))
                              for i in range(8)]
                    BB_COLOR = (248, 64, 24)
                    # base
                    pygame.draw.line(display, BB_COLOR, points[0], points[1])
                    pygame.draw.line(display, BB_COLOR, points[1], points[2])
                    pygame.draw.line(display, BB_COLOR, points[2], points[3])
                    pygame.draw.line(display, BB_COLOR, points[3], points[0])
                    # top
                    pygame.draw.line(display, BB_COLOR, points[4], points[5])
                    pygame.draw.line(display, BB_COLOR, points[5], points[6])
                    pygame.draw.line(display, BB_COLOR, points[6], points[7])
                    pygame.draw.line(display, BB_COLOR, points[7], points[4])
                    # base-top
                    pygame.draw.line(display, BB_COLOR, points[0], points[4])
                    pygame.draw.line(display, BB_COLOR, points[1], points[5])
                    pygame.draw.line(display, BB_COLOR, points[2], points[6])
                    pygame.draw.line(display, BB_COLOR, points[3], points[7])
                # Overlay the drivable-area cells found by FindPossibleAngle.
                DrawDrivable(drivableIndexes, image_segmentation.width // 10,
                             image_segmentation.height // 10, display)
                real_dist = location1.location.distance(location2.location)
                if chaseMode or followMode:
                    myPrint(angle, predicted_angle, possibleAngle, real_dist,
                            predicted_distance, chaseMode)
                pygame.display.flip()
    except Exception as ex:
        print(ex)
    finally:
        print('Ending')
        if evaluateChasingCar:
            # NOTE(review): unlike the other variant, this assumes res/
            # already exists — confirm.
            evaluation.WriteIntoFileFinal(
                os.path.join('res', resultsName + '.txt'),
                driveName=driveName)
        myControl.SaveHistoryToFile()
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')
def main(optimalDistance, followDrivenPath, chaseMode, evaluateChasingCar,
         driveName='', record=False, followMode=False, resultsName='results',
         P=None, I=None, D=None, nOfFramesToSkip=0):
    """BirdView-enabled chase scenario with the advanced (PID) controller.

    Spawns a police-charger ego vehicle, replays a recorded drive for the
    lead car, chases it using the detector + segmentation-refined angle, and
    shows both a pygame view and an OpenCV bird's-eye-view window.

    NOTE(review): this variant OVERRIDES several of its own parameters below
    (optimalDistance=8, followDrivenPath=True, evaluateChasingCar=True,
    record=False, chaseMode=True, followMode=False), so the corresponding
    arguments are effectively ignored.  It also defines `main` again,
    shadowing earlier definitions in this module.
    """
    # Local imports so this variant is self-contained.
    from DrivingControl import DrivingControl
    from DrivingControlAdvanced import DrivingControlAdvanced
    from CarDetector import CarDetector
    from SemanticSegmentation import SemanticSegmentation

    # Hard-coded experiment configuration (overrides the arguments).
    extrapolate = True
    optimalDistance = 8
    followDrivenPath = True
    evaluateChasingCar = True
    record = False
    chaseMode = True
    followMode = False

    counter = 1                      # index into the replayed trajectory
    sensors = []                     # sensors passed to CarlaSyncMode
    vehicleToFollowSpawned = False
    obsticle_vehicleSpawned = False  # kept for the disabled obstacle code

    # Perception / control helpers.
    carDetector = CarDetector()
    drivingControl = DrivingControl(optimalDistance=optimalDistance)
    drivingControlAdvanced = DrivingControlAdvanced(
        optimalDistance=optimalDistance)
    evaluation = Evaluation()
    semantic = SemanticSegmentation()

    actor_list = []
    pygame.init()
    display = pygame.display.set_mode(
        (800, 600), pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()
    client = carla.Client('localhost', 2000)
    client.set_timeout(80.0)
    world = client.get_world()
    # Fixed weather preset for reproducible lighting.
    weather_presets = find_weather_presets()
    world.set_weather(weather_presets[3][0])

    # Bird's-eye-view producer; rendered each frame in an OpenCV window.
    birdview_producer = BirdViewProducer(
        client,
        PixelDimensions(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
        render_lanes_on_junctions=False,
    )
    try:
        m = world.get_map()
        blueprint_library = world.get_blueprint_library()
        # Ego vehicle: police charger at a fixed spawn point.
        veh_bp = random.choice(
            blueprint_library.filter('vehicle.dodge_charger.police'))
        vehicle = world.spawn_actor(veh_bp, m.get_spawn_points()[90])
        actor_list.append(vehicle)
        vehicle.set_simulate_physics(True)
        if followDrivenPath:
            # Start the ego car at the first pose of the recorded drive.
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(
                carla.Location(first[0], first[1], first[2]),
                carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)
        collision_sensor = world.spawn_actor(
            blueprint_library.find('sensor.other.collision'),
            carla.Transform(), attach_to=vehicle)
        collision_sensor.listen(
            lambda event: evaluation.CollisionHandler(event))
        actor_list.append(collision_sensor)
        # Front camera for object detection.
        camera_rgb_blueprint = world.get_blueprint_library().find(
            'sensor.camera.rgb')
        camera_rgb_blueprint.set_attribute('fov', '90')
        camera_rgb = world.spawn_actor(
            camera_rgb_blueprint,
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0.3),
                            carla.Rotation(pitch=0)),
            attach_to=vehicle)
        actor_list.append(camera_rgb)
        sensors.append(camera_rgb)
        # Segmentation camera.
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0),
                            carla.Rotation(pitch=0)),
            attach_to=vehicle)
        actor_list.append(camera_segmentation)
        sensors.append(camera_segmentation)
        # (Disabled: lane-detection windshield camera experiment.)

        # ------------------------------------------------------------------
        # Local/behavioural planner constants.
        # NOTE(review): the planner itself is disabled below, so most of
        # these are currently unused.
        NUM_PATHS = 7
        BP_LOOKAHEAD_BASE = 8.0       # m
        BP_LOOKAHEAD_TIME = 2.0       # s
        PATH_OFFSET = 1.5             # m
        CIRCLE_OFFSETS = [-1.0, 1.0, 3.0]  # m
        CIRCLE_RADII = [1.5, 1.5, 1.5]     # m
        TIME_GAP = 1.0                # s
        PATH_SELECT_WEIGHT = 10
        A_MAX = 1.5                   # m/s^2
        SLOW_SPEED = 2.0              # m/s
        STOP_LINE_BUFFER = 3.5        # m
        LEAD_VEHICLE_LOOKAHEAD = 20.0  # m
        # Frequency divisor so the local planner runs at a lower rate than
        # the controller (must be a natural number).
        LP_FREQUENCY_DIVISOR = 2
        PREV_BEST_PATH = []
        stopsign_fences = []
        # ------------------------------------------------------------------
        frame = 0
        max_error = 0
        FPS = 30
        speed = 0
        cross_track_error = 0
        start_time = 0.0
        times = 8
        # NOTE(review): reassigned here — the value 4 is the effective one.
        LP_FREQUENCY_DIVISOR = 4

        # Create a synchronous mode context.
        with CarlaSyncMode(world, *sensors, fps=FPS) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick()
                start_time += clock.get_time()
                # Bird's-eye view: np.ndarray mask stack for the ego vehicle,
                # rendered via OpenCV each simulation step.
                birdview = birdview_producer.produce(agent_vehicle=vehicle)
                bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview),
                                   cv2.COLOR_BGR2RGB)
                cv2.imshow("BirdView RGB", bgr)
                cv2.waitKey(1)
                # Advance the simulation and wait for the sensor data.
                snapshot, img_rgb, image_segmentation = sync_mode.tick(
                    timeout=2.0)
                # Detect a car in the semantic-segmentation image.
                carInTheImage = semantic.IsThereACarInThePicture(
                    image_segmentation)
                line = []
                current_speed = np.linalg.norm(
                    carla_vec_to_np_array(vehicle.get_velocity()))
                # Spawn the vehicle to follow (one-time).
                if not vehicleToFollowSpawned and followDrivenPath:
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(
                        location1.location.x, location1.location.y,
                        location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX * 5)
                    newY = location1.location.y - (diffY * 5)
                    start_pose.location.x = newX
                    start_pose.location.y = newY
                    vehicle.set_transform(start_pose)
                    # Spawn at a random legal point, then teleport onto path.
                    start_pose2 = random.choice(m.get_spawn_points())
                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(bp, start_pose2)
                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation
                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z
                    vehicleToFollow.set_transform(start_pose2)
                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                if followDrivenPath:
                    # Replay the recording pose-by-pose; stop when exhausted.
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(
                        carla.Location(tmp[0], tmp[1], tmp[2]),
                        carla.Rotation(tmp[3], tmp[4], tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1
                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()
                # (Disabled: obstacle-vehicle spawning, parked-car parsing,
                # and the local/behavioural planner collision-avoidance
                # experiment previously lived here as commented-out code.)

                # Update vehicle control by detecting the lead car.
                newX, newY = carDetector.CreatePointInFrontOFCar(
                    location1.location.x, location1.location.y,
                    location1.rotation.yaw)
                angle = carDetector.getAngle(
                    [location1.location.x, location1.location.y],
                    [newX, newY],
                    [location2.location.x, location2.location.y])
                possibleAngle = 0
                drivableIndexes = []
                bbox = []
                bbox, predicted_distance, predicted_angle = carDetector.getDistance(
                    vehicleToFollow, camera_rgb, carInTheImage,
                    extrapolation=extrapolate,
                    nOfFramesToSkip=nOfFramesToSkip)
                if frame % LP_FREQUENCY_DIVISOR == 0:
                    # Bottleneck, but needed for chasing around turns:
                    # refine the angle using drivable area from segmentation.
                    predicted_angle, drivableIndexes = semantic.FindPossibleAngle(
                        image_segmentation, bbox, predicted_angle)
                steer, throttle = drivingControlAdvanced.PredictSteerAndThrottle(
                    predicted_distance, predicted_angle, None)
                send_control(vehicle, throttle, steer, 0)
                speed = np.linalg.norm(
                    carla_vec_to_np_array(vehicle.get_velocity()))
                real_dist = location1.location.distance(location2.location)
                fps = round(1.0 / snapshot.timestamp.delta_seconds)
                # Draw the display.
                draw_image(display, img_rgb, image_segmentation, location1,
                           location2, record=record, driveName=driveName,
                           smazat=line)
                display.blit(
                    font.render(' FPS (real) % 5d ' % clock.get_fps(), True,
                                (255, 255, 255)), (8, 10))
                display.blit(
                    font.render(' FPS (simulated): % 5d ' % fps, True,
                                (255, 255, 255)), (8, 28))
                display.blit(
                    font.render(' speed: {:.2f} m/s'.format(speed), True,
                                (255, 255, 255)), (8, 46))
                display.blit(
                    font.render(
                        ' distance to target: {:.2f} m'.format(real_dist),
                        True, (255, 255, 255)), (8, 66))
                # Draw the bbox of the vehicle being followed.
                if len(bbox) != 0:
                    points = [(int(bbox[i, 0]), int(bbox[i, 1]))
                              for i in range(8)]
                    BB_COLOR = (248, 64, 24)
                    # base
                    pygame.draw.line(display, BB_COLOR, points[0], points[1])
                    pygame.draw.line(display, BB_COLOR, points[1], points[2])
                    pygame.draw.line(display, BB_COLOR, points[2], points[3])
                    pygame.draw.line(display, BB_COLOR, points[3], points[0])
                    # top
                    pygame.draw.line(display, BB_COLOR, points[4], points[5])
                    pygame.draw.line(display, BB_COLOR, points[5], points[6])
                    pygame.draw.line(display, BB_COLOR, points[6], points[7])
                    pygame.draw.line(display, BB_COLOR, points[7], points[4])
                    # base-top
                    pygame.draw.line(display, BB_COLOR, points[0], points[4])
                    pygame.draw.line(display, BB_COLOR, points[1], points[5])
                    pygame.draw.line(display, BB_COLOR, points[2], points[6])
                    pygame.draw.line(display, BB_COLOR, points[3], points[7])
                # (Disabled: DrawDrivable overlay and GIF-frame capture.)
                pygame.display.flip()
                frame += 1
    finally:
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')
def main(optimalDistance, followDrivenPath, chaseMode, evaluateChasingCar,
         driveName='', record=False, followMode=False, resultsName='results',
         P=None, I=None, D=None, nOfFramesToSkip=0):
    """Run the car-chasing demo against a local CARLA server.

    Spawns an ego police car, replays a pre-recorded drive for a lead Tesla
    Model 3 (plus one obstacle jeep), and steers the ego car toward the lead
    car using distance/angle estimates from CarDetector.

    Args:
        optimalDistance: desired following distance (metres) for the controllers.
        followDrivenPath: when True, replay the recorded path in `driveName`.
        chaseMode, followMode: mode flags (only referenced by commented-out debug output here).
        evaluateChasingCar: accumulate distance-error statistics in Evaluation.
        driveName: path of the recorded drive. NOTE(review): must contain a '/',
            because CarPosition(driveName.split('/')[1]) indexes the second part.
        record: forwarded to draw_image to save frames.
        resultsName: base name for the (currently commented-out) results file.
        P, I, D: optional PID gains for DrivingControlAdvanced.
        nOfFramesToSkip: forwarded to the detector's extrapolation logic.

    NOTE(review): this module defines `main` more than once; after import only
    the last definition is visible.
    """
    counter = 1  # index into the replayed history for the lead vehicle
    actor_list = []  # every spawned actor; destroyed in the finally block
    pygame.init()
    carDetector = CarDetector()
    # New
    extrapolate = True
    drivingControl = DrivingControl(optimalDistance=optimalDistance)
    if P != None:  # NOTE(review): PEP 8 prefers `P is not None`
        drivingControlAdvanced = DrivingControlAdvanced(
            optimalDistance=optimalDistance, P=P, I=I, D=D)
    else:
        drivingControlAdvanced = DrivingControlAdvanced(
            optimalDistance=optimalDistance)
    position = CarPosition(driveName.split('/')[1])
    position.startRecording = True
    evaluation = Evaluation()
    semantic = SemanticSegmentation()
    display = pygame.display.set_mode((800, 600),
                                      pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()
    client = carla.Client('localhost', 2000)
    client.set_timeout(100.0)
    world = client.get_world()
    world.set_weather(carla.WeatherParameters.ClearSunset)
    vehicleToFollowSpawned = False
    obsticle_vehicleSpawned = False
    try:
        m = world.get_map()
        start_pose = random.choice(m.get_spawn_points())
        blueprint_library = world.get_blueprint_library()
        # Ego vehicle: a police Dodge Charger at a random spawn point.
        vehicle = world.spawn_actor(
            random.choice(
                blueprint_library.filter('vehicle.dodge_charger.police')),
            start_pose)
        actor_list.append(vehicle)
        vehicle.set_simulate_physics(True)
        if followDrivenPath:
            # Teleport the ego car to the first pose of the recorded drive.
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(
                carla.Location(first[0], first[1], first[2]),
                carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)
        # Collision sensor feeds Evaluation's collision statistics.
        collision_sensor = world.spawn_actor(
            blueprint_library.find('sensor.other.collision'),
            carla.Transform(),
            attach_to=vehicle)
        collision_sensor.listen(
            lambda event: evaluation.CollisionHandler(event))
        actor_list.append(collision_sensor)
        camera_rgb_blueprint = world.get_blueprint_library().find(
            'sensor.camera.rgb')
        # camera_rgb_blueprint.set_attribute('image_size_x', '800')
        # camera_rgb_blueprint.set_attribute('image_size_y', '600')
        camera_rgb_blueprint.set_attribute('fov', '90')
        # camera_rgb_blueprint.set_attribute('sensor_tick', '1')
        # Forward RGB camera, offset slightly to the right of center.
        camera_rgb = world.spawn_actor(
            camera_rgb_blueprint,
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0.3),
                            carla.Rotation(pitch=0)),  #5,3,0 # -0.3
            attach_to=vehicle)
        actor_list.append(camera_rgb)
        # camera_rgb2 = world.spawn_actor(
        #     camera_rgb_blueprint,
        #     carla.Transform(carla.Location(x=1.5, z=1.4,y=-0.3), carla.Rotation(pitch=0)),attach_to=vehicle)
        # actor_list.append(camera_rgb2)
        # Semantic-segmentation camera, centered on the hood.
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0),
                            carla.Rotation(pitch=0)),  #5,3,0 # -0.3
            attach_to=vehicle)
        actor_list.append(camera_segmentation)

        # Set up local planner and behavioural planner
        # --------------------------------------------------------------
        # Planning Constants
        NUM_PATHS = 7
        BP_LOOKAHEAD_BASE = 8.0  # m
        BP_LOOKAHEAD_TIME = 2.0  # s
        PATH_OFFSET = 1.5  # m
        CIRCLE_OFFSETS = [-1.0, 1.0, 3.0]  # m
        CIRCLE_RADII = [1.5, 1.5, 1.5]  # m
        TIME_GAP = 1.0  # s
        PATH_SELECT_WEIGHT = 10
        A_MAX = 1.5  # m/s^2
        SLOW_SPEED = 2.0  # m/s
        STOP_LINE_BUFFER = 3.5  # m
        LEAD_VEHICLE_LOOKAHEAD = 20.0  # m
        LP_FREQUENCY_DIVISOR = 2  # Frequency divisor to make the
        # local planner operate at a lower frequency than the controller
        # (which operates at the simulation frequency). Must be a natural
        # number.
        PREV_BEST_PATH = []
        stopsign_fences = []
        l_planner = local_planner.LocalPlanner(NUM_PATHS, PATH_OFFSET,
                                               CIRCLE_OFFSETS, CIRCLE_RADII,
                                               PATH_SELECT_WEIGHT, TIME_GAP,
                                               A_MAX, SLOW_SPEED,
                                               STOP_LINE_BUFFER,
                                               PREV_BEST_PATH)
        b_planner = behavioural_planner.BehaviouralPlanner(
            BP_LOOKAHEAD_BASE, stopsign_fences, LEAD_VEHICLE_LOOKAHEAD)
        # --------------------------------------------------------------

        FPS = 10
        # Create a synchronous mode context.
        with CarlaSyncMode(world, camera_rgb, camera_segmentation,
                           fps=FPS) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick()
                # Advance the simulation and wait for the data.
                snapshot, img_rgb, image_segmentation = sync_mode.tick(
                    timeout=2.0)
                line = []
                # One-time spawn of the lead vehicle: the ego car is pushed
                # back 5x its forward offset, and the lead car is placed on
                # the ego car's original pose.
                if not vehicleToFollowSpawned and followDrivenPath:
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(
                        location1.location.x, location1.location.y,
                        location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX * 5)
                    newY = location1.location.y - (diffY * 5)
                    start_pose.location.x = newX
                    start_pose.location.y = newY
                    vehicle.set_transform(start_pose)
                    start_pose2 = random.choice(m.get_spawn_points())
                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(bp, start_pose2)
                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation
                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z
                    vehicleToFollow.set_transform(start_pose2)
                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    vehicleToFollow.set_autopilot(False)
                if followDrivenPath:
                    # Replay the recorded drive; stop when it is exhausted.
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(
                        carla.Location(tmp[0], tmp[1], tmp[2]),
                        carla.Rotation(tmp[3], tmp[4], tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1
                fps = round(1.0 / snapshot.timestamp.delta_seconds)
                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()
                # One-time spawn of an obstacle jeep 50 m (+y) from the lead
                # car's initial pose.
                if not obsticle_vehicleSpawned and followDrivenPath:
                    obsticle_vehicleSpawned = True
                    # Adding new obsticle vehicle
                    start_pose3 = random.choice(m.get_spawn_points())
                    obsticle_vehicle = world.spawn_actor(
                        random.choice(blueprint_library.filter('jeep')),
                        start_pose3)
                    start_pose3 = carla.Transform()
                    start_pose3.rotation = start_pose2.rotation
                    start_pose3.location.x = start_pose2.location.x
                    start_pose3.location.y = start_pose2.location.y + 50
                    start_pose3.location.z = start_pose2.location.z
                    obsticle_vehicle.set_transform(start_pose3)
                    actor_list.append(obsticle_vehicle)
                    obsticle_vehicle.set_simulate_physics(True)
                    # obsticle_vehicle.set_autopilot(True)

                # Update vehicleToFollow transform with obstacles
                # --------------------------------------------------------------
                _LOOKAHEAD_INDEX = 5
                _BP_LOOKAHEAD_BASE = 8.0  # m
                _BP_LOOKAHEAD_TIME = 2.0  # s
                # unsupported operand type(s) for +: 'float' and 'Vector3D'
                # NOTE(review): only the velocity's z component is used below,
                # apparently as a workaround for the error noted above; the
                # scalar speed was probably intended. `lookahead_time`,
                # `_LOOKAHEAD_INDEX`, `new_angle`, `paths` are computed but
                # never used afterwards — confirm before relying on them.
                lookahead_time = _BP_LOOKAHEAD_BASE + _BP_LOOKAHEAD_TIME * vehicleToFollow.get_velocity(
                ).z
                location3 = obsticle_vehicle.get_transform()
                ego_state = [
                    location2.location.x, location2.location.y,
                    location2.rotation.yaw,
                    vehicleToFollow.get_velocity().z
                ]
                # Find the closest index to the ego vehicle.
                closest_len, closest_index = behavioural_planner.get_closest_index(
                    evaluation.history, ego_state)
                # Find the goal index that lies within the lookahead distance
                # along the waypoints.
                goal_index = b_planner.get_goal_index(evaluation.history,
                                                      ego_state, closest_len,
                                                      closest_index)
                # Set goal_state
                goal_state = evaluation.history[goal_index]
                # Compute the goal state set from the behavioural planner's computed goal state.
                goal_state_set = l_planner.get_goal_state_set(
                    goal_index, goal_state, evaluation.history, ego_state)
                # print("len(goal_state_set): ", len(goal_state_set))
                # # Calculate planned paths in the local frame.
                paths, path_validity = l_planner.plan_paths(goal_state_set)
                # # Transform those paths back to the global frame.
                # paths = local_planner.transform_paths(paths, ego_state)
                # # Perform collision checking.
                # parkedcar_box_pts
                # collision_check_array = lp._collision_checker.collision_check(paths, [parkedcar_box_pts])
                newX, newY = carDetector.CreatePointInFrontOFCar(
                    location2.location.x, location2.location.y,
                    location2.rotation.yaw)
                new_angle = carDetector.getAngle(
                    [location2.location.x, location2.location.y],
                    [newX, newY],
                    [location3.location.x, location3.location.y])
                # Lead car is re-placed at the previous recorded pose shifted
                # +5 m in x.
                tmp = evaluation.history[counter - 1]
                currentPos = carla.Transform(
                    carla.Location(tmp[0] + 5, tmp[1], tmp[2]),
                    carla.Rotation(tmp[3], tmp[4], tmp[5]))
                vehicleToFollow.set_transform(currentPos)
                # --------------------------------------------------------------

                position.SaveCarPosition(location1)
                # Ground-truth bearing from the ego car's heading to the lead car.
                newX, newY = carDetector.CreatePointInFrontOFCar(
                    location1.location.x, location1.location.y,
                    location1.rotation.yaw)
                angle = carDetector.getAngle(
                    [location1.location.x, location1.location.y],
                    [newX, newY],
                    [location2.location.x, location2.location.y])
                possibleAngle = 0
                carInTheImage = semantic.IsThereACarInThePicture(
                    image_segmentation)
                # bbox = carDetector.get3DboundingBox(vehicleToFollow, camera_rgb,carInTheImage)
                bbox, predicted_distance, predicted_angle = carDetector.getDistance(
                    vehicleToFollow, camera_rgb, carInTheImage,
                    extrapolation=extrapolate,
                    nOfFramesToSkip=nOfFramesToSkip)
                # This is the bottle neck
                # possibleAngle, drivableIndexes = semantic.FindPossibleAngle(image_segmentation,bbox,predicted_angle)
                steer, throttle = drivingControl.PredictSteerAndThrottle(
                    predicted_distance, predicted_angle, None)
                # steer, throttle = drivingControlAdvanced.PredictSteerAndThrottle(predicted_distance, possibleAngle,None)
                # Choose approriate steer and throttle here
                # steer, throttle = 0, 0.7
                vehicle.apply_control(
                    carla.VehicleControl(throttle=throttle, steer=steer))
                if evaluateChasingCar:
                    evaluation.AddError(
                        location1.location.distance(location2.location),
                        optimalDistance)
                velocity1 = vehicle.get_velocity()
                velocity2 = vehicleToFollow.get_velocity()
                # Render camera view, HUD text and the lead car's 3D bbox.
                draw_image(display, img_rgb, image_segmentation, location1,
                           location2, record=record, driveName=driveName,
                           smazat=line)
                display.blit(
                    font.render('% 5d FPS (real)' % clock.get_fps(), True,
                                (255, 255, 255)), (8, 10))
                display.blit(
                    font.render('% 5d FPS (simulated)' % fps, True,
                                (255, 255, 255)), (8, 28))
                if len(bbox) != 0:
                    # Project the 8 corners of the detected 3D box.
                    points = [(int(bbox[i, 0]), int(bbox[i, 1]))
                              for i in range(8)]
                    BB_COLOR = (248, 64, 24)
                    # draw lines
                    # base
                    pygame.draw.line(display, BB_COLOR, points[0], points[1])
                    pygame.draw.line(display, BB_COLOR, points[1], points[2])
                    pygame.draw.line(display, BB_COLOR, points[2], points[3])
                    pygame.draw.line(display, BB_COLOR, points[3], points[0])
                    # top
                    pygame.draw.line(display, BB_COLOR, points[4], points[5])
                    pygame.draw.line(display, BB_COLOR, points[5], points[6])
                    pygame.draw.line(display, BB_COLOR, points[6], points[7])
                    pygame.draw.line(display, BB_COLOR, points[7], points[4])
                    # base-top
                    pygame.draw.line(display, BB_COLOR, points[0], points[4])
                    pygame.draw.line(display, BB_COLOR, points[1], points[5])
                    pygame.draw.line(display, BB_COLOR, points[2], points[6])
                    pygame.draw.line(display, BB_COLOR, points[3], points[7])
                real_dist = location1.location.distance(location2.location)
                # draw trajectory
                debug = world.debug
                # debug.draw_box(carla.BoundingBox(location2.location,carla.Vector3D(0.5,0.5,2)),location2.rotation, 0.05, carla.Color(0,0,255,0),0)
                debug.draw_point(location2.location, size=0.1,
                                 color=carla.Color(0, 0, 255),
                                 life_time=-1.0)
                #if chaseMode or followMode:
                #myPrint(angle,predicted_angle, possibleAngle,real_dist, predicted_distance,chaseMode)
                pygame.display.flip()
    except Exception as ex:
        print(ex)
    finally:
        print('Ending')
        if evaluateChasingCar:
            if not os.path.exists('res'):
                os.mkdir('res')
            # evaluation.WriteIntoFileFinal(os.path.join('res',resultsName+'.txt'),driveName=driveName)
        # position.SaveHistoryToFile()
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')
def main(optimalDistance, followDrivenPath, chaseMode, evaluateChasingCar,
         driveName='', record=False, followMode=False, resultsName='results',
         P=None, I=None, D=None, nOfFramesToSkip=0):
    """Frenet-planner variant of the chase demo.

    Replays a recorded drive, moves the lead vehicle along a Frenet-optimal
    path around a fixed obstacle list, chases it with the ego car, and shows
    an OpenCV BirdView window next to the pygame camera view.

    NOTE(review): every mode argument (optimalDistance, followDrivenPath,
    chaseMode, evaluateChasingCar, record, followMode) is immediately
    overwritten by a hard-coded value below, so only driveName, resultsName
    and nOfFramesToSkip actually influence this version. This definition also
    shadows the earlier `main` in the file.
    """
    # Imports
    # from cores.lane_detection.lane_detector import LaneDetector
    # from cores.lane_detection.camera_geometry import CameraGeometry
    # from cores.control.pure_pursuit import PurePursuitPlusPID
    # New imports
    from DrivingControl import DrivingControl
    from DrivingControlAdvanced import DrivingControlAdvanced
    from CarDetector import CarDetector
    from SemanticSegmentation import SemanticSegmentation
    # New Variables — hard-coded run configuration (see NOTE in docstring).
    extrapolate = True
    optimalDistance = 1
    followDrivenPath = True
    evaluateChasingCar = True
    record = False
    chaseMode = True
    followMode = False
    counter = 1  # index into the replayed history for the lead vehicle
    sensors = []  # cameras passed to CarlaSyncMode
    vehicleToFollowSpawned = False
    obsticle_vehicleSpawned = False
    # New objects
    carDetector = CarDetector()
    drivingControl = DrivingControl(optimalDistance=optimalDistance)
    drivingControlAdvanced = DrivingControlAdvanced(
        optimalDistance=optimalDistance)
    evaluation = Evaluation()
    semantic = SemanticSegmentation()
    actor_list = []  # every spawned actor; destroyed in the finally block
    pygame.init()
    display = pygame.display.set_mode((800, 600),
                                      pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()
    client = carla.Client('localhost', 2000)
    client.set_timeout(80.0)
    #client.load_world('Town06')
    # client.load_world('Town04')
    world = client.get_world()
    weather_presets = find_weather_presets()
    # print(weather_presets)
    world.set_weather(weather_presets[3][0])
    # world.set_weather(carla.WeatherParameters.HardRainSunset)
    # controller = PurePursuitPlusPID()
    # Set BirdView
    birdview_producer = BirdViewProducer(
        client,
        PixelDimensions(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT),
        pixels_per_meter=4,
        crop_type=BirdViewCropType.FRONT_AND_REAR_AREA,
        render_lanes_on_junctions=False,
    )
    try:
        m = world.get_map()
        blueprint_library = world.get_blueprint_library()
        # Ego vehicle at a fixed spawn point (index 90).
        veh_bp = random.choice(
            blueprint_library.filter('vehicle.dodge_charger.police'))
        vehicle = world.spawn_actor(veh_bp, m.get_spawn_points()[90])
        actor_list.append(vehicle)
        # New vehicle property
        vehicle.set_simulate_physics(True)
        if followDrivenPath:
            # Teleport the ego car to the first pose of the recorded drive.
            evaluation.LoadHistoryFromFile(driveName)
            first = evaluation.history[0]
            start_pose = carla.Transform(
                carla.Location(first[0], first[1], first[2]),
                carla.Rotation(first[3], first[4], first[5]))
            vehicle.set_transform(start_pose)
        # New Sensors
        collision_sensor = world.spawn_actor(
            blueprint_library.find('sensor.other.collision'),
            carla.Transform(), attach_to=vehicle)
        collision_sensor.listen(
            lambda event: evaluation.CollisionHandler(event))
        actor_list.append(collision_sensor)
        # front cam for object detection
        camera_rgb_blueprint = world.get_blueprint_library().find(
            'sensor.camera.rgb')
        camera_rgb_blueprint.set_attribute('fov', '90')
        camera_rgb = world.spawn_actor(camera_rgb_blueprint,
                                       carla.Transform(
                                           carla.Location(x=1.5, z=1.4,
                                                          y=0.3),
                                           carla.Rotation(pitch=0)),
                                       attach_to=vehicle)
        actor_list.append(camera_rgb)
        sensors.append(camera_rgb)
        # segmentation camera
        camera_segmentation = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=1.5, z=1.4, y=0),
                            carla.Rotation(pitch=0)),  #5,3,0 # -0.3
            attach_to=vehicle)
        actor_list.append(camera_segmentation)
        sensors.append(camera_segmentation)

        # Set up local planner and behavnioural planner
        # --------------------------------------------------------------
        # --------------------------------------------------------------
        frame = 0
        max_error = 0
        FPS = 30
        speed = 0
        cross_track_error = 0
        start_time = 0.0
        times = 8
        LP_FREQUENCY_DIVISOR = 8  # Frequency divisor to make the
        # local planner operate at a lower frequency than the controller
        # (which operates at the simulation frequency). Must be a natural
        # number.

        # TMP obstacle lists — fixed world-frame (x, y) obstacle positions
        # fed to the Frenet planner.
        ob = np.array([
            [233.980630, 130.523910],
            [233.980630, 30.523910],
            [233.980630, 60.523910],
            [233.980630, 80.523910],
            [233.786942, 75.530586],
        ])
        # Build the target course for the Frenet planner from the recorded
        # drive's waypoints.
        wx = []
        wy = []
        wz = []
        for p in evaluation.history:
            wp = carla.Transform(carla.Location(p[0], p[1], p[2]),
                                 carla.Rotation(p[3], p[4], p[5]))
            wx.append(wp.location.x)
            wy.append(wp.location.y)
            wz.append(wp.location.z)
        tx, ty, tyaw, tc, csp = generate_target_course(wx, wy, wz)
        # initial state
        c_speed = 2.0 / 3.6  # current speed [m/s]
        c_d = 2.0  # current lateral position [m]
        c_d_d = 0.0  # current lateral speed [m/s]
        c_d_dd = 0.0  # current lateral acceleration [m/s^2]
        s0 = 0.0  # current course position

        # Create a synchronous mode context.
        with CarlaSyncMode(world, *sensors, fps=FPS) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick()
                start_time += clock.get_time()
                # Advance the simulation and wait for the data.
                # tick_response = sync_mode.tick(timeout=2.0)
                # Display BirdView
                # Input for your model - call it every simulation step
                # returned result is np.ndarray with ones and zeros of shape (8, height, width)
                birdview = birdview_producer.produce(agent_vehicle=vehicle)
                bgr = cv2.cvtColor(BirdViewProducer.as_rgb(birdview),
                                   cv2.COLOR_BGR2RGB)
                # NOTE imshow requires BGR color model
                cv2.imshow("BirdView RGB", bgr)
                cv2.waitKey(1)
                # snapshot, image_rgb, image_segmentation = tick_response
                snapshot, img_rgb, image_segmentation = sync_mode.tick(
                    timeout=2.0)
                print("type(image_segmentation): ", type(image_segmentation))
                # detect car in image with semantic segnmentation camera
                carInTheImage = semantic.IsThereACarInThePicture(
                    image_segmentation)
                line = []
                # Spawn a vehicle to follow (one time): push the ego car back
                # and place the lead car on the ego car's original pose.
                if not vehicleToFollowSpawned and followDrivenPath:
                    vehicleToFollowSpawned = True
                    location1 = vehicle.get_transform()
                    newX, newY = carDetector.CreatePointInFrontOFCar(
                        location1.location.x, location1.location.y,
                        location1.rotation.yaw)
                    diffX = newX - location1.location.x
                    diffY = newY - location1.location.y
                    newX = location1.location.x - (diffX * 5)
                    newY = location1.location.y - (diffY * 5)
                    start_pose.location.x = newX
                    start_pose.location.y = newY
                    vehicle.set_transform(start_pose)
                    start_pose2 = random.choice(m.get_spawn_points())
                    bp = blueprint_library.filter('model3')[0]
                    bp.set_attribute('color', '0,101,189')
                    vehicleToFollow = world.spawn_actor(bp, start_pose2)
                    start_pose2 = carla.Transform()
                    start_pose2.rotation = start_pose.rotation
                    start_pose2.location.x = start_pose.location.x
                    start_pose2.location.y = start_pose.location.y
                    start_pose2.location.z = start_pose.location.z
                    vehicleToFollow.set_transform(start_pose2)
                    actor_list.append(vehicleToFollow)
                    vehicleToFollow.set_simulate_physics(True)
                    # vehicleToFollow.set_autopilot(False)
                if followDrivenPath:
                    # Replay the recorded drive; stop when it is exhausted.
                    if counter >= len(evaluation.history):
                        break
                    tmp = evaluation.history[counter]
                    currentPos = carla.Transform(
                        carla.Location(tmp[0], tmp[1], tmp[2]),
                        carla.Rotation(tmp[3], tmp[4], tmp[5]))
                    vehicleToFollow.set_transform(currentPos)
                    counter += 1
                # Set up obsticle vehicle for testing
                # location1 = vehicle.get_transform()
                # location2 = vehicleToFollow.get_transform()
                # if not obsticle_vehicleSpawned and followDrivenPath:
                #     obsticle_vehicleSpawned = True
                #     # Adding new obsticle vehicle
                #     start_pose3 = random.choice(m.get_spawn_points())
                #     obsticle_vehicle = world.spawn_actor(
                #         random.choice(blueprint_library.filter('jeep')),
                #         start_pose3)
                #     start_pose3 = carla.Transform()
                #     start_pose3.rotation = start_pose2.rotation
                #     start_pose3.location.x = start_pose2.location.x
                #     start_pose3.location.y = start_pose2.location.y + 50
                #     start_pose3.location.z = start_pose2.location.z
                #     obsticle_vehicle.set_transform(start_pose3)
                #     actor_list.append(obsticle_vehicle)
                #     obsticle_vehicle.set_simulate_physics(True)
                # if frame % LP_FREQUENCY_DIVISOR == 0:
                """
                ***********************************************************************
                ************************** Motion Planner *****************************
                ***********************************************************************
                """
                # tmp = evaluation.history[counter-1]
                # currentPos = carla.Transform(carla.Location(tmp[0] + 0 ,tmp[1],tmp[2]),carla.Rotation(tmp[3],tmp[4],tmp[5]))
                # vehicleToFollow.set_transform(currentPos)

                # ------------------- Frenet --------------------------------
                # Plan the next step for the lead vehicle and teleport it to
                # the first point of the planned path.
                path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,
                                               c_d_dd, ob)
                print("path length: ", len(path.x))
                new_vehicleToFollow_transform = carla.Transform()
                new_vehicleToFollow_transform.rotation = carla.Rotation(
                    pitch=0.0, yaw=math.degrees(path.yaw[1]), roll=0.0)
                new_vehicleToFollow_transform.location.x = path.x[1]
                new_vehicleToFollow_transform.location.y = path.y[1]
                new_vehicleToFollow_transform.location.z = path.z[1]
                vehicleToFollow.set_transform(new_vehicleToFollow_transform)

                # ------------------- Control for ego --------------------------------
                # Set up new locations
                location1 = vehicle.get_transform()
                location2 = vehicleToFollow.get_transform()
                possibleAngle = 0
                drivableIndexes = []
                bbox = []
                bbox, predicted_distance, predicted_angle = carDetector.getDistance(
                    vehicleToFollow, camera_rgb, carInTheImage,
                    extrapolation=extrapolate,
                    nOfFramesToSkip=nOfFramesToSkip)
                if frame % LP_FREQUENCY_DIVISOR == 0:
                    # This is the bottle neck and takes times to run. But it
                    # is necessary for chasing around turns
                    predicted_angle, drivableIndexes = semantic.FindPossibleAngle(
                        image_segmentation, bbox, predicted_angle
                    )  # This is still necessary need to optimize it
                steer, throttle = drivingControlAdvanced.PredictSteerAndThrottle(
                    predicted_distance, predicted_angle,
                    None)  # This is a new method
                send_control(vehicle, throttle, steer, 0)
                speed = np.linalg.norm(
                    carla_vec_to_np_array(vehicle.get_velocity()))
                real_dist = location1.location.distance(location2.location)
                fps = round(1.0 / snapshot.timestamp.delta_seconds)

                # Draw the display.
                # draw_image(display, image_rgb)
                draw_image(display, img_rgb, image_segmentation, location1,
                           location2, record=record, driveName=driveName,
                           smazat=line)
                display.blit(
                    font.render(' FPS (real) % 5d ' % clock.get_fps(), True,
                                (255, 255, 255)), (8, 10))
                display.blit(
                    font.render(' FPS (simulated): % 5d ' % fps, True,
                                (255, 255, 255)), (8, 28))
                display.blit(
                    font.render(' speed: {:.2f} m/s'.format(speed), True,
                                (255, 255, 255)), (8, 46))
                # display.blit(
                #     font.render(' cross track error: {:03d} cm'.format(cross_track_error), True, (255, 255, 255)),
                #     (8, 64))
                # display.blit(
                #     font.render(' max cross track error: {:03d} cm'.format(max_error), True, (255, 255, 255)),
                #     (8, 82))

                # Draw bbox on following vehicle
                if len(bbox) != 0:
                    points = [(int(bbox[i, 0]), int(bbox[i, 1]))
                              for i in range(8)]
                    BB_COLOR = (248, 64, 24)
                    # draw lines
                    # base
                    pygame.draw.line(display, BB_COLOR, points[0], points[1])
                    pygame.draw.line(display, BB_COLOR, points[1], points[2])
                    pygame.draw.line(display, BB_COLOR, points[2], points[3])
                    pygame.draw.line(display, BB_COLOR, points[3], points[0])
                    # top
                    pygame.draw.line(display, BB_COLOR, points[4], points[5])
                    pygame.draw.line(display, BB_COLOR, points[5], points[6])
                    pygame.draw.line(display, BB_COLOR, points[6], points[7])
                    pygame.draw.line(display, BB_COLOR, points[7], points[4])
                    # base-top
                    pygame.draw.line(display, BB_COLOR, points[0], points[4])
                    pygame.draw.line(display, BB_COLOR, points[1], points[5])
                    pygame.draw.line(display, BB_COLOR, points[2], points[6])
                    pygame.draw.line(display, BB_COLOR, points[3], points[7])
                # DrawDrivable(drivableIndexes, image_segmentation.width // 10, image_segmentation.height // 10, display)
                pygame.display.flip()
                frame += 1
                print("vehicle.get_transform()", vehicle.get_transform())
                print("vehicleToFollow.get_transform()",
                      vehicleToFollow.get_transform())
                # print("obsticle_vehicle.get_transform()", obsticle_vehicle.get_transform())
                # Advance the Frenet state to the point we just moved to.
                s0 = path.s[1]
                c_d = path.d[1]
                c_d_d = path.d_d[1]
                c_d_dd = path.d_dd[1]
                c_speed = path.s_d[1]
                print("path.x[1]: ", path.x[1])
                print("path.y[1]: ", path.y[1])
                print("s: ", s0)
        cv2.destroyAllWindows()
    # NOTE(review): no except clause here — any exception propagates after
    # the cleanup below runs.
    finally:
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')
class CarDetector:
    """Detects the chased car in RGB frames with a YOLOv3 model and estimates
    its distance (m) and bearing angle (deg) relative to the ego camera.

    The TensorFlow session and graph are built lazily on the first call to
    Run(). A short history of measurements supports linear extrapolation when
    the detector misses a frame, and an exponential moving average smooths
    the outputs.
    """

    def __init__(self):
        # Checkpoint restored lazily in Run() on first use.
        self.MODEL_NAME = './model/best_model_Epoch_18_step_8245_mAP_0.2501_loss_1.9889_lr_0.0001'
        self.new_size = [320, 320]  # network input size (width, height)
        self.height_ori, self.width_ori = 0, 0  # original frame size, set per frame
        self.letterbox_resize = True  # keep aspect ratio when resizing for the net
        self.lastN = 5  # history window kept for extrapolation
        self.lastNDistances = []
        self.lastNAngles = []
        self.exponentialMovingAverageDist = 0
        self.exponentialMovingAverageAngle = 0
        self.alpha = 0.5  # EMA smoothing factor
        self.sess = None  # TF session, created lazily in Run()
        self.segmentation = SemanticSegmentation()

    def getDistanceAndAngle(self, box, width_ori, height_ori):
        """Estimate distance and bearing of the car in 2D bbox `box`.

        Solves a PnP problem between the bbox corners/center and a fixed
        5-point model of the car's rear (0.26 m x 0.18 m plate — assumption
        inherited from the original constants; TODO confirm), then corrects
        the angle for the camera's lateral offset from the vehicle axis.

        Args:
            box: (x0, y0, x1, y1) bounding box in original-image pixels.
            width_ori, height_ori: original image size used to build the
                pinhole calibration matrix (FOV fixed at 85 degrees).
        Returns:
            (distance, angle): raw PnP distance in metres and the corrected
            bearing in degrees (positive to the right).
        """
        VIEW_WIDTH = width_ori
        VIEW_HEIGHT = height_ori
        VIEW_FOV = 85
        # Pinhole intrinsics from image size and horizontal FOV.
        calibration = np.identity(3)
        calibration[0, 2] = VIEW_WIDTH / 2.0
        calibration[1, 2] = VIEW_HEIGHT / 2.0
        calibration[0, 0] = calibration[
            1, 1] = VIEW_WIDTH / (2.0 * np.tan(VIEW_FOV * np.pi / 360.0))
        distortion = np.zeros((4, 1))
        # Every element is assigned below, so start from zeros (the original
        # used np.random.random, which was immediately overwritten).
        objectPoints = np.zeros((5, 3, 1))
        imagePoints = np.zeros((5, 2, 1))
        x0, y0, x1, y1 = box
        # Image points: bbox center + the four bbox corners.
        imagePoints[0][0][0] = (x0 + x1) / 2
        imagePoints[0][1][0] = (y1 + y0) / 2
        imagePoints[0 + 1][0][0] = x1
        imagePoints[0 + 1][1][0] = y1
        imagePoints[1 + 1][0][0] = x0
        imagePoints[1 + 1][1][0] = y1
        imagePoints[2 + 1][0][0] = x1
        imagePoints[2 + 1][1][0] = y0
        imagePoints[3 + 1][0][0] = x0
        imagePoints[3 + 1][1][0] = y0
        # Object points: matching 3D model of the car's rear, centered at origin.
        objectPoints[0][1][0] = 0
        objectPoints[0][2][0] = 0
        objectPoints[0][0][0] = 0
        objectPoints[0 + 1][1][0] = 0.13
        objectPoints[0 + 1][2][0] = -0.09
        objectPoints[0 + 1][0][0] = 0
        objectPoints[1 + 1][1][0] = -0.13
        objectPoints[1 + 1][2][0] = -0.09
        objectPoints[1 + 1][0][0] = 0
        objectPoints[2 + 1][1][0] = 0.13
        objectPoints[2 + 1][2][0] = 0.09
        objectPoints[2 + 1][0][0] = 0
        objectPoints[3 + 1][1][0] = -0.13
        objectPoints[3 + 1][2][0] = 0.09
        objectPoints[3 + 1][0][0] = 0
        objectPoints = np.array(objectPoints, dtype=float)
        objectPoints = np.reshape(objectPoints, (5, 3, 1))
        imagePoints = np.array(imagePoints, dtype=float)
        imagePoints = np.reshape(imagePoints, (5, 2, 1))
        ret, rvecs, tvecs = cv2.solvePnP(objectPoints, imagePoints,
                                         calibration, distortion)
        # print(tvecs)
        # Bearing and Euclidean distance from the translation vector.
        angle = math.degrees(math.atan2(tvecs[0][0], tvecs[2][0]))
        distance = math.sqrt(tvecs[0][0]**2 + tvecs[1][0]**2 + tvecs[2][0]**2)
        # print(angle)
        prvniStrana = 0.1  # "first side" (Czech): camera's lateral offset in metres
        # Correction: shift the measurement from the camera to the vehicle axis.
        triangleAngle = 0
        if angle >= 0:
            triangleAngle = 90.0 - angle
        elif angle < 0:
            triangleAngle = 90.0 + (-1 * angle)
        # Law of cosines to compute the third side of the triangle.
        predicted_distance = math.sqrt(prvniStrana**2 + distance**2 -
                                       2 * prvniStrana * distance *
                                       math.cos(math.radians(triangleAngle)))
        tmpAngle = math.degrees(
            math.acos((prvniStrana**2 + predicted_distance**2 - distance**2) /
                      (2 * prvniStrana * predicted_distance)))
        if angle >= 0:
            angle = 90.0 - (180 - tmpAngle)
        else:
            angle = -1 * (90.0 - (tmpAngle))
        # print('distance:', distance)
        # print('angle:', angle)
        # NOTE(review): the raw PnP distance is returned, not the corrected
        # predicted_distance — confirm this is intentional.
        return distance, angle

    def GetBoundingBox(self, img_ori, half):
        """Run the YOLO network on `img_ori` and return detections.

        Args:
            img_ori: BGR (or BGRA — alpha is dropped) frame as a numpy array.
            half: when True, only the left half of the frame is processed.
        Returns:
            (boxes, scores, feature_map): boxes in original-image pixel
            coordinates, their confidence scores, and the first auxiliary
            feature map produced by the model.
        """
        from utils.data_aug import letterbox_resize
        if img_ori.shape[2] == 4:
            # Drop the alpha channel from BGRA input.
            img_ori = img_ori[:, :, :3]
        if half:
            # Grab left half of the image
            height, width = img_ori.shape[:2]
            start_row, start_col = int(0), int(0)
            end_row, end_col = int(height), int(width // 2)
            img_ori = img_ori[start_row:end_row, start_col:end_col]
        self.height_ori, self.width_ori = img_ori.shape[:2]
        # Resize to the network input size (letterbox keeps aspect ratio).
        if self.letterbox_resize:
            img, resize_ratio, dw, dh = letterbox_resize(
                img_ori, self.new_size[0], self.new_size[1])
        else:
            height_ori, width_ori = img_ori.shape[:2]
            img = cv2.resize(img_ori, tuple(self.new_size))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = np.asarray(img, np.float32)
        img = img[np.newaxis, :] / 255.
        boxes_, scores_, labels_, map4_ = self.sess.run(
            [self.boxes, self.scores, self.labels, self.map4],
            feed_dict={self.input_data: img})
        # rescale the coordinates to the original image
        if self.letterbox_resize:
            boxes_[:, [0, 2]] = (boxes_[:, [0, 2]] - dw) / resize_ratio
            boxes_[:, [1, 3]] = (boxes_[:, [1, 3]] - dh) / resize_ratio
        else:
            boxes_[:, [0, 2]] *= (width_ori / float(self.new_size[0]))
            boxes_[:, [1, 3]] *= (height_ori / float(self.new_size[1]))
        return boxes_, scores_, map4_[0]

    def Extrapolate(self):
        """Linearly extrapolate (distance, angle) from the last two samples.

        Used when the detector finds no box in the current frame. Appends the
        extrapolated values to the history and returns the EMA-smoothed pair,
        or (0, 0) when fewer than two samples exist.
        """
        if len(self.lastNDistances) >= 2:
            predicted_distance = 2 * self.lastNDistances[
                -1] - self.lastNDistances[-2]  # simple extrapolation
            predicted_angle = 2 * self.lastNAngles[-1] - self.lastNAngles[
                -2]  # simple extrapolation
            predicted_angle = self.LimitAngles(predicted_angle)
            # The >= 2 guard guarantees at least two samples here, so the EMA
            # weight is always self.alpha (the original `... if len > 1 else 1`
            # was dead code, and its complement mistakenly used self.alpha).
            alpha = self.alpha
            self.exponentialMovingAverageDist = alpha * predicted_distance + (
                1 - alpha) * self.exponentialMovingAverageDist
            self.exponentialMovingAverageAngle = alpha * predicted_angle + (
                1 - alpha) * self.exponentialMovingAverageAngle
            self.lastNDistances.append(predicted_distance)
            self.lastNAngles.append(predicted_angle)
            return self.exponentialMovingAverageDist, self.exponentialMovingAverageAngle
        else:
            return 0, 0

    def LimitAngles(self, angle):
        """Clamp an angle to [-175, 175] degrees."""
        return min(max(angle, -175), 175)

    def LimitDistance(self, dist):
        """Clamp a distance to [0, 20] metres."""
        return min(20, max(dist, 0))

    def KeepLastN(self):
        """Drop the oldest history entry when the window exceeds lastN."""
        if len(self.lastNDistances) > self.lastN:
            self.lastNDistances = self.lastNDistances[1:]
            self.lastNAngles = self.lastNAngles[1:]

    def Run(self, img_name, half=True):
        """Detect the car in a frame and return smoothed (distance, angle).

        Lazily builds the TF1 graph and restores the checkpoint on first
        call. Falls back to Extrapolate() when no box is detected. The
        returned values are clamped by LimitDistance/LimitAngles.
        """
        if self.sess is None:  # was `== None`; identity test is the Python idiom
            import tensorflow as tf
            from model import yolov3
            from utils.misc_utils import parse_anchors, read_class_names
            from utils.nms_utils import gpu_nms
            self.anchors = parse_anchors('./model/yolo_anchors.txt')
            self.classes = read_class_names('./model/coco.names')
            self.num_class = len(self.classes)
            self.sess = tf.Session()
            self.input_data = tf.placeholder(
                tf.float32, [1, self.new_size[1], self.new_size[0], 3],
                name='input_data')
            self.yolo_model = yolov3(self.num_class, self.anchors)
            with tf.variable_scope('yolov3'):
                pred_feature_maps = self.yolo_model.forward(
                    self.input_data, False)
            self.pred_boxes, self.pred_confs, self.pred_probs, self.map4 = self.yolo_model.predict(
                pred_feature_maps)
            self.pred_scores = self.pred_confs * self.pred_probs
            self.boxes, self.scores, self.labels = gpu_nms(self.pred_boxes,
                                                           self.pred_scores,
                                                           self.num_class,
                                                           max_boxes=200,
                                                           score_thresh=0.3,
                                                           nms_thresh=0.45)
            self.saver = tf.train.Saver()
            self.saver.restore(self.sess, self.MODEL_NAME)
            # self.sess.run(tf.global_variables_initializer())
            print('Tensorflow intialized')
        boxes, scores, feature_map = self.GetBoundingBox(img_name, half)
        if len(boxes) != 0:
            # Use the highest-scoring detection.
            dist, angle = self.getDistanceAndAngle(boxes[np.argmax(scores)],
                                                   self.width_ori,
                                                   self.height_ori)
            self.lastNDistances.append(dist)
            self.lastNAngles.append(angle)
            # First sample initialises the EMA directly (alpha == 1).
            alpha = self.alpha if len(self.lastNDistances) > 1 else 1
            self.exponentialMovingAverageDist = alpha * dist + (
                1 - alpha) * self.exponentialMovingAverageDist
            self.exponentialMovingAverageAngle = alpha * angle + (
                1 - alpha) * self.exponentialMovingAverageAngle
            angle, _ = self.segmentation.FindPossibleAngle(
                boxes[np.argmax(scores)], angle, feature_map, self.width_ori,
                self.height_ori)
        else:
            print('No box found')
            dist, angle = self.Extrapolate()
            angle, _ = self.segmentation.FindPossibleAngle(
                boxes, angle, feature_map, self.width_ori, self.height_ori)
        self.KeepLastN()
        angle = self.LimitAngles(angle)
        dist = self.LimitDistance(dist)
        return dist, angle