def recover_obstacle(self):
    """Steer back toward the original lane after bypassing an obstacle.

    While the vehicle is still within ``recover_max_dist`` of the point
    where recovery started, counter-steer toward the obstacle's side
    (right if the obstacle was on the left, left otherwise). Once far
    enough, clear all obstacle bookkeeping and return to NORMAL mode.

    Returns:
        VehicleControl for this step; brakes if there is no obstacle state.
    """
    if self.obstacle_is_at is None:
        self.logger.error("No obstacle to recover from")
        return VehicleControl(brake=True)
    curr_coord = self.vehicle.transform.location.copy()
    if curr_coord.distance(self.obstacle_avoid_starting_coord) < self.recover_max_dist:
        if self.obstacle_is_at == ObstacleEnum.LEFT:
            # obstacle was on the left -> swing back to the right
            # (typo fix: original logged "TURING")
            self.logger.info("Recovering --> TURNING RIGHT")
            return VehicleControl(throttle=self.avoid_throttle,
                                  steering=self.avoid_right_steer,
                                  brake=False)
        else:
            self.logger.info("Recovering --> TURNING LEFT")
            return VehicleControl(throttle=self.avoid_throttle,
                                  steering=self.avoid_left_steer,
                                  brake=False)
    # traveled far enough: recovery complete, reset the state machine
    self.logger.info("Recovering --> DONE! Shifting to normal state")
    self.mode = CS249AgentModes.NORMAL
    self.obstacle_is_at = None
    self.obstacle_avoid_starting_coord = None
    return VehicleControl()
def lead_car_step(self):
    """One control step for the lead car.

    Priority order: (1) broadcast state every 10th tick, (2) obstacle
    avoidance state machine, (3) stop for a traffic light, (4) lane
    following via the lateral controller. Coasts while camera feeds
    are not yet available.
    """
    # broadcast current state every 10th tick
    if self.time_counter % 10 == 0:
        self.udp_multicast.send_current_state()
    if self.front_depth_camera.data is not None and self.front_rgb_camera.data is not None:
        left, center, right = self.find_obstacles_via_depth_to_pcd(debug=True)
        # enter (or continue) the obstacle state machine if anything is
        # detected OR a maneuver is already in progress
        if left[0] or center[0] or right[0] or self.mode == CS249AgentModes.OBSTACLE_AVOID or \
                self.mode == CS249AgentModes.OBSTACLE_RECOVER or self.mode == CS249AgentModes.OBSTACLE_BYPASS:
            # self.logger.info(f"Braking due to obstacle: left: {left} | center: {center} | right: {right}")
            # return VehicleControl(brake=True)
            self.logger.info(f"Avoiding obstacle")
            return self.act_on_obstacle(left, center, right)
        if self.is_light_found(rgb_data=self.front_rgb_camera.data.copy(), debug=True):
            self.logger.info("Braking due to traffic light")
            return VehicleControl(brake=True)
        # lane following: PID on the vision-derived lateral error
        error = self.find_error()
        if error is None:
            return self.no_line_seen()
        else:
            self.kwargs["lat_error"] = error
            self.vehicle.control = self.controller.run_in_series(
                next_waypoint=None)
            self.prev_steerings.append(self.vehicle.control.steering)
            return self.vehicle.control
    else:
        # image feed is not available yet
        return VehicleControl()
def act_on_obstacle(self, left, center, right):
    """Dispatch obstacle handling based on detections and current mode.

    Brakes when all three sectors are blocked; starts an avoidance
    maneuver on a fresh detection; otherwise continues whichever
    maneuver (avoid/bypass/recover) is already underway.
    """
    maneuver_modes = (CS249AgentModes.OBSTACLE_AVOID,
                      CS249AgentModes.OBSTACLE_BYPASS,
                      CS249AgentModes.OBSTACLE_RECOVER)
    # fully blocked: nothing to do but stop
    if left[0] and center[0] and right[0]:
        self.logger.info("Can't find feasible path around obstacle")
        return VehicleControl(brake=True)
    anything_detected = left[0] or center[0] or right[0]
    if anything_detected and self.mode not in maneuver_modes:
        # fresh detection: begin the avoidance maneuver and remember
        # which side the obstacle is on and where we started
        self.mode = CS249AgentModes.OBSTACLE_AVOID
        self.obstacle_is_at = ObstacleEnum.LEFT if left[0] else ObstacleEnum.RIGHT
        self.obstacle_avoid_starting_coord = self.vehicle.transform.location.copy()
        return VehicleControl()
    if self.mode in maneuver_modes:
        # continue the in-progress maneuver
        handlers = {
            CS249AgentModes.OBSTACLE_AVOID: self.avoid_obstacle,
            CS249AgentModes.OBSTACLE_BYPASS: self.bypass_obstacle,
            CS249AgentModes.OBSTACLE_RECOVER: self.recover_obstacle,
        }
        handler = handlers.get(self.mode)
        if handler is not None:
            return handler()
        return VehicleControl(brake=True)
    return VehicleControl()
def bypass_obstacle(self):
    """Drive alongside the obstacle until it has been cleared.

    While within ``bypass_dist_dist`` of the point where bypassing began,
    hold the bypass steering; after that distance, transition to
    OBSTACLE_RECOVER and re-anchor the starting coordinate so recovery
    measures from where bypassing ended.

    Returns:
        VehicleControl for this step; brakes if there is no obstacle state.
    """
    if self.obstacle_is_at is None:
        self.logger.error("No obstacle to bypass")
        return VehicleControl(brake=True)
    curr_coord = self.vehicle.transform.location.copy()
    # NOTE(review): attribute is named `bypass_dist_dist` where it is
    # defined — looks like a typo for `bypass_dist`, but renaming would
    # break the defining class; left as-is.
    if curr_coord.distance(self.obstacle_avoid_starting_coord) < self.bypass_dist_dist:
        if self.obstacle_is_at == ObstacleEnum.LEFT:
            # (typo fix: original logged "TURING")
            self.logger.info("BYPASSING --> TURNING LEFT")
            return VehicleControl(throttle=self.avoid_throttle,
                                  steering=self.avoid_left_steer,
                                  brake=False)
        else:
            self.logger.info("BYPASSING --> TURNING RIGHT")
            return VehicleControl(throttle=self.avoid_throttle,
                                  steering=self.avoid_right_steer,
                                  brake=False)
    self.logger.info("BYPASSING --> DONE! Shifting to recover mode")
    self.mode = CS249AgentModes.OBSTACLE_RECOVER
    self.obstacle_avoid_starting_coord = self.vehicle.transform.location.copy()
    return VehicleControl()
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """Bang-bang lane keeping on the floodfilled lane image.

    Samples three probe pixels (left quarter, center, right quarter, all
    at y=350) of the floodfill output; a blue pixel means the lane is
    present there. Steers when a side probe loses the lane, goes straight
    while the center probe holds, otherwise coasts with zero control.

    Args:
        sensors_data: current sensor bundle (forwarded to the base agent).
        vehicle: current vehicle state (forwarded to the base agent).

    Returns:
        VehicleControl for this step; empty control on any failure.
    """
    super().run_step(sensors_data=sensors_data, vehicle=vehicle)
    try:
        img = self.floodfill_lane_detector.run_in_series()
        # left, front, right steering probe pixel locations as (x, y)
        left_dot_coord = (self.front_rgb_camera.image_size_x // 4, 350)
        center_dot_coord = (self.front_rgb_camera.image_size_x // 2, 350)
        right_dot_coord = (self.front_rgb_camera.image_size_x -
                           (self.front_rgb_camera.image_size_x // 4), 350)
        blue = [255, 0, 0]  # BGR order — TODO confirm detector output is BGR
        # images index as [row, col] == (y, x), hence the [::-1]
        left_ok = self._is_equal(img[left_dot_coord[::-1]], blue)
        center_ok = self._is_equal(img[center_dot_coord[::-1]], blue)
        right_ok = self._is_equal(img[right_dot_coord[::-1]], blue)
        # visualize the probe points
        result = cv2.circle(img=img, center=left_dot_coord, radius=10,
                            color=(0, 0, 255), thickness=-1)
        result = cv2.circle(img=result, center=center_dot_coord, radius=10,
                            color=(0, 0, 255), thickness=-1)
        result = cv2.circle(img=result, center=right_dot_coord, radius=10,
                            color=(0, 0, 255), thickness=-1)
        cv2.imshow("rgb image", result)
        cv2.waitKey(1)
        straight_throttle, turning_throttle, left_steering, right_steering = 0.18, 0.15, -0.4, 0.4
        throttle, steering = 0, 0
        if bool(left_ok) is False:
            # left probe lost the lane
            throttle = turning_throttle
            steering = left_steering
        elif bool(right_ok) is False:
            # right probe lost the lane
            throttle = turning_throttle
            steering = right_steering
        elif center_ok:
            throttle, steering = straight_throttle, 0
        return VehicleControl(throttle=throttle, steering=steering)
    except Exception:
        # BUG FIX: the original used a bare `except:`, which also caught
        # SystemExit/KeyboardInterrupt and made the agent uninterruptible.
        return VehicleControl()
def run_in_series(self) -> VehicleControl:
    """
    Run step for the local planner

    Procedure:
        1. Sync data
        2. get the correct look ahead for current speed
        3. get the correct next waypoint
        4. feed waypoint into controller
        5. return result from controller

    Returns:
        next control that the local think the agent should execute.
    """
    # nothing left to follow: neither a mission plan nor queued waypoints
    if (len(self.mission_planner.mission_plan) == 0 and
            len(self.way_points_queue) == 0):
        return VehicleControl()
    # get vehicle's location
    vehicle_transform: Union[Transform, None] = self.agent.vehicle.transform
    if vehicle_transform is None:
        raise AgentException(
            "I do not know where I am, I cannot proceed forward")
    # redefine closeness level based on speed
    self.set_closeness_threhold(self.closeness_threshold_config)
    # get current waypoint: pop waypoints we have already passed until the
    # distance stops improving
    curr_closest_dist = float("inf")
    while True:
        if len(self.way_points_queue) == 0:
            self.logger.info("Destination reached")
            return VehicleControl()
        # waypoint: Transform = self.way_points_queue[0]
        waypoint, speed_factor = self.next_waypoint_smooth_and_speed()
        curr_dist = vehicle_transform.location.distance(waypoint.location)
        if curr_dist < curr_closest_dist:
            # if i find a waypoint that is closer to me than before
            # note that i will always enter here to start the calculation for curr_closest_dist
            curr_closest_dist = curr_dist
        elif curr_dist < self.closeness_threshold:
            # i have moved onto a waypoint, remove that waypoint from the queue
            self.way_points_queue.popleft()
        else:
            break
    # re-query after popping so we steer at the surviving target
    target_waypoint, speed_factor = self.next_waypoint_smooth_and_speed()
    control: VehicleControl = self.controller.run_in_series(
        next_waypoint=target_waypoint, speed_multiplier=speed_factor)
    self.logger.debug(
        f"\n"
        f"Curr Transform: {self.agent.vehicle.transform}\n"
        f"Target Location: {target_waypoint.location}\n"
        f"Control: {control} | Speed: {Vehicle.get_speed(self.agent.vehicle)}\n"
    )
    return control
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """Vision-based line following with a PID on the lateral pixel error.

    Averages the positions of white (255) pixels in the cleaned camera
    image to get the point to follow, treats the bottom-center of the
    image as the robot, and runs a PID on the horizontal offset only.

    Args:
        sensors_data: current sensor bundle (forwarded to the base agent).
        vehicle: current vehicle state (forwarded to the base agent).

    Returns:
        VehicleControl with fixed throttle and PID steering; empty
        control when no camera data or when line detection fails.
    """
    super().run_step(sensors_data=sensors_data, vehicle=vehicle)
    if self.front_rgb_camera.data is not None:
        try:
            # use vision to find line, and find the center point that we are supposed to follow
            img = self.clean_image(self.front_rgb_camera.data.copy())
            Xs, Ys = np.where(img == 255)
            next_point_in_pixel = (np.average(Ys).astype(int),
                                   img.shape[0] - np.average(Xs).astype(int))
            # robot's position is the mid, lower point of the image
            robot_point_in_pixel = (img.shape[1] // 2, img.shape[0])
            # PID on lateral (x-axis) difference only
            robot_x = robot_point_in_pixel[0]
            next_point_x = next_point_in_pixel[0]
            error = robot_x - next_point_x
            # BUG FIX: the derivative must compare against the *previous*
            # error. The original appended first and then subtracted the
            # queue's last element (i.e. the current error), so error_dt
            # was always 0 and the kD term never had any effect.
            error_dt = 0 if len(
                self.error_queue) == 0 else error - self.error_queue[-1]
            self.error_queue.append(error)
            error_it = sum(self.error_queue)
            e_p = self.kP * error
            e_d = self.kD * error_dt
            e_i = self.kI * error_it
            lat_control = np.clip(-1 * round((e_p + e_d + e_i), 3), -1, 1)
            if self.debug:
                cv2.circle(img, center=next_point_in_pixel, radius=10,
                           color=(0.5, 0.5), thickness=-1)
                cv2.circle(img, center=robot_point_in_pixel, radius=10,
                           color=(0.5, 0.5), thickness=-1)
                cv2.imshow("img", img)
                cv2.waitKey(1)
            control = VehicleControl(throttle=0.175, steering=lat_control)
            print(control)
            return control
        except Exception as e:
            # best-effort: unable to detect line this frame
            # self.logger.error("Unable to detect line")
            return VehicleControl()
    return VehicleControl()
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """End-to-end step: predict throttle/steering from the depth image."""
    super(DepthE2EAgent, self).run_step(sensors_data, vehicle)
    control = VehicleControl(throttle=0.5, steering=0)
    if self.front_depth_camera.data is None:
        # no frame yet: keep the default forward control
        return control
    depth_image = self.front_depth_camera.data.copy()
    # add channel then batch dimension: (H, W) -> (1, H, W, 1)
    batch = np.expand_dims(np.expand_dims(depth_image, 2), 0)
    predicted_throttle, predicted_steering = self.model.predict(batch)[0]
    control.throttle = float(predicted_throttle)
    control.steering = float(predicted_steering)
    return control
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """Queue log-depth frames; once a full batch exists, predict steering."""
    super().run_step(sensors_data=sensors_data, vehicle=vehicle)
    depth = self.front_depth_camera.data
    if depth is not None:
        self.image_queue.append(depthToLogDepth(depth))
        if len(self.image_queue) == self.batch_size:
            # stack the queued frames into one batch tensor
            obs = torch.tensor(np.array(self.image_queue)).float()
            predictions = self.model(obs).cpu().detach().numpy()
            # drive with the prediction for the newest frame
            steering = predictions[-1]
            print(steering)
            return VehicleControl(throttle=0.5, steering=steering)
    return VehicleControl(throttle=0.5, steering=0)
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """Visualize the occupancy map around the vehicle; issue no control."""
    super(OccuMapDemoDrivingAgent, self).run_step(sensors_data=sensors_data,
                                                  vehicle=vehicle)
    # 100x100-cell window centered on the current pose
    self.occupancy_map.visualize(transform=self.vehicle.transform,
                                 view_size=(100, 100))
    return VehicleControl()
def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
    """Run both PID controllers and dump steering/pose telemetry to stdout."""
    target_speed = kwargs.get("target_speed", self.max_speed)
    throttle = self.long_pid_controller.run_in_series(
        next_waypoint=next_waypoint, target_speed=target_speed)
    steering = self.lat_pid_controller.run_in_series(
        next_waypoint=next_waypoint)
    print('steering', steering)
    # debug telemetry: current pose
    location = self.agent.vehicle.transform.location
    rotation = self.agent.vehicle.transform.rotation
    print('pos x: ', location.x)
    print('pos y: ', location.y)
    print('pos z: ', location.z)
    print('yaw: ', rotation.yaw)
    return VehicleControl(throttle=throttle, steering=steering)
def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
    """Combine longitudinal and lateral PID outputs into one control."""
    speed_target = kwargs.get("target_speed", self.max_speed)
    return VehicleControl(
        throttle=self.long_pid_controller.run_in_series(
            next_waypoint=next_waypoint, target_speed=speed_target),
        steering=self.lat_pid_controller.run_in_series(
            next_waypoint=next_waypoint),
    )
def step(self, action: List[float]) -> Tuple[np.ndarray, float, bool, dict]:
    """
    Execute one environment step with [throttle, steering], tracking how
    much closer the vehicle got to its current target waypoint.

    Args:
        action: [throttle, steering] pair, wrapped into a VehicleControl
            and handed to the agent via kwargs.

    Returns:
        (observation, reward, is_done, info) per the gym step contract.
    """
    self._prev_speed = Vehicle.get_speed(self.agent.vehicle)
    control = VehicleControl(throttle=action[0], steering=action[1])
    self.agent.kwargs["control"] = control
    # distance to the current target waypoint before the world advances
    before_dist = self.agent.vehicle.transform.location.distance(
        self.agent.local_planner.way_points_queue[
            self.agent.local_planner.get_curr_waypoint_index()].location)
    agent, reward, is_done, other_info = super(OccuDebug, self).step(action=action)
    # NOTE(review): the waypoint index may have advanced during the step,
    # so this can measure against a *different* waypoint — confirm intent.
    after_dist = self.agent.vehicle.transform.location.distance(
        self.agent.local_planner.way_points_queue[
            self.agent.local_planner.get_curr_waypoint_index()].location)
    # positive when the vehicle moved toward the waypoint this step
    self.dist_diff = before_dist - after_dist
    obs = self._get_obs()
    return obs, reward, is_done, other_info
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """Record a pointcloud from RGB+depth every step, update the 2D
    occupancy map with the ground-filtered points, and display it.
    Always returns an empty (coast) control — this agent only records.
    """
    super(PointcloudRecordingAgent, self).run_step(sensors_data, vehicle)
    if self.front_rgb_camera.data is not None and self.front_depth_camera.data is not None:
        self.prev_steerings.append(self.vehicle.control.steering)
        try:
            pcd: o3d.geometry.PointCloud = self.depth2pointcloud.run_in_series(self.front_depth_camera.data,
                                                                               self.front_rgb_camera.data)
            # persist the raw pointcloud, timestamped to microseconds
            folder_name = Path("./data/pointcloud")
            folder_name.mkdir(parents=True, exist_ok=True)
            o3d.io.write_point_cloud((folder_name / f"{datetime.now().strftime('%m_%d_%Y_%H_%M_%S_%f')}.pcd").as_posix(),
                                     pcd, print_progress=True)
            pcd = self.filter_ground(pcd)
            points = np.asarray(pcd.points)
            new_points = np.copy(points)
            # keep axes 0 and 2 to get top-down 2D points; assumes axis 1
            # is "up" in this pointcloud frame — TODO confirm
            points = np.vstack([new_points[:, 0], new_points[:, 2]]).T
            self.occu_map.update(points, val=1)
            coord = self.occu_map.world_loc_to_occu_map_coord(loc=self.vehicle.transform.location)
            # paint occupied cells white and the vehicle as a red 4x4 square
            self.m[np.where(self.occu_map.map == 1)] = [255, 255, 255]
            self.m[coord[1] - 2:coord[1] + 2, coord[0] - 2:coord[0] + 2] = [0, 0, 255]
            cv2.imshow("m", self.m)
        except Exception as e:
            # best-effort: recording failures should not stop the agent
            print(e)
    return VehicleControl()
def step(self, action: Any) -> Tuple[Any, float, bool, dict]:
    """Validate the 2-element action, publish it as a control, delegate to super."""
    assert type(action) == list or type(action) == np.ndarray, f"Action is not recognizable"
    assert len(action) == 2, f"Action should be of length 2 but is of length [{len(action)}]."
    throttle_cmd, steering_cmd = action[0], action[1]
    control = VehicleControl(throttle=throttle_cmd, steering=steering_cmd)
    # expose the chosen control to the agent and to debugging consumers
    self.agent.kwargs["control"] = control
    self.curr_debug_info["control"] = control
    return super(DepthE2EEnv, self).step(action=action)
def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
    """Combine the two PID controllers' outputs for the given waypoint."""
    accel_cmd = self.long_pid_controller.run_in_series(next_waypoint=next_waypoint)
    steer_cmd = self.lat_pid_controller.run_in_series(next_waypoint=next_waypoint)
    return VehicleControl(throttle=accel_cmd, steering=steer_cmd)
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """Drive straight ahead at a fixed throttle, ignoring sensor input."""
    super().run_step(sensors_data=sensors_data, vehicle=vehicle)
    return VehicleControl(throttle=0.4, steering=0)
def start_game_loop(self, use_manual_control=False):
    """Main loop: read sensors, run the agent (and/or manual control), and
    forward throttle/steering to the Jetson vehicle at ~60 Hz.

    Args:
        use_manual_control: when True, pygame keyboard input overrides the
            agent's control every frame.
    """
    self.logger.info("Starting Game Loop")
    try:
        clock = pygame.time.Clock()
        should_continue = True
        while should_continue:
            clock.tick_busy_loop(60)
            # pass throttle and steering into the bridge
            sensors_data, vehicle = self.convert_data()
            # run a step of agent
            vehicle_control = VehicleControl()
            if self.auto_pilot:
                vehicle_control: VehicleControl = self.agent.run_step(
                    sensors_data=sensors_data, vehicle=vehicle)
            # manual control always take precedence
            if use_manual_control:
                should_continue, vehicle_control = self.update_pygame(
                    clock=clock)
            # self.logger.debug(f"Vehicle Control = [{vehicle_control}]")
            # pass the output into sender to send it
            self.jetson_vehicle.update_parts(
                new_throttle=vehicle_control.throttle,
                new_steering=vehicle_control.steering)
    except KeyboardInterrupt:
        self.logger.info("Keyboard Interrupt detected. Safely quitting")
        self.jetson_vehicle.stop()
    except Exception as e:
        self.logger.error(f"Something bad happened: [{e}]")
    finally:
        # always stop the physical vehicle, even after an error
        self.jetson_vehicle.stop()
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """Run SSD object detection (OpenCV DNN) on the RGB frame and draw
    boxes around small detections; always returns an empty control.

    Args:
        sensors_data: current sensor bundle (forwarded to the base agent).
        vehicle: current vehicle state (forwarded to the base agent).

    Returns:
        An empty VehicleControl — this agent only visualizes detections.
    """
    super(OpenCVTensorflowObjectDetectionAgent, self).run_step(sensors_data=sensors_data,
                                                               vehicle=vehicle)
    # BUG FIX: camera feed may not be ready on the first few ticks; every
    # sibling agent guards on None before .copy(), this one did not.
    if self.front_rgb_camera.data is None:
        return VehicleControl()
    image = self.front_rgb_camera.data.copy()
    rows, cols, _ = image.shape
    # Use the given image as input, which needs to be blob(s).
    self.tensorflowNet.setInput(cv2.dnn.blobFromImage(image, size=(300, 300),
                                                      swapRB=True, crop=False))
    # Runs a forward pass to compute the net output
    networkOutput = self.tensorflowNet.forward()
    for detection in networkOutput[0, 0]:
        score = float(detection[2])
        if score > 0.3:
            # box coordinates are normalized [0, 1]; scale to pixels
            left = detection[3] * cols
            top = detection[4] * rows
            right = detection[5] * cols
            bottom = detection[6] * rows
            area = (right - left) * (bottom - top)
            # only annotate small boxes — large boxes are presumably
            # treated as false positives here; TODO confirm intent
            if area < 10000:
                cv2.rectangle(image, (int(left), int(top)),
                              (int(right), int(bottom)), (0, 0, 255),
                              thickness=2)
                self.logger.debug(f"Detection confirmed. Score = {score}")
    # Show the image with a rectangle surrounding the detected objects
    cv2.imshow('Image', image)
    cv2.waitKey(1)
    return VehicleControl()
def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
    """Run both PID controllers, then apply hand-tuned, position-keyed
    steering/throttle overrides.

    The lateral controller returns (steering, x, y, z); the (x, z) pairs
    below look like world-position regions tuned for one specific map —
    NOTE(review): verify region bounds against the intended track before
    reuse.
    """
    throttle = self.long_pid_controller.run_in_series(next_waypoint=next_waypoint,
                                                      target_speed=kwargs.get("target_speed", self.max_speed))
    steering, x, y, z = self.lat_pid_controller.run_in_series(next_waypoint=next_waypoint)
    # near-straight zone: clamp steering to a tiny nudge in its own direction
    if y > 0.8 or (x > -390 and x < 0 and z < 91):
        if steering > 0:
            steering = 0.01
        else:
            steering = -0.01
    # module 1 38s:
    # if steering<0.4 and steering>0 and y<0.8 and x>0:
    #     throttle=throttle*(1-steering*1.6)
    # if steering>-0.4 and steering<-0.05 and y<0.8 and x<0:
    #     steering=steering*1.4
    # module 2 (unstable) 36s:
    # if steering>-0.4 and steering<-0.05 and y<0.8 and x<0:
    #     steering=steering*1.3
    # if x> 370 and x<390 and z>-60 and z<20:
    #     steering = steering *10
    # module 3:
    # amplify moderate left steering on the left half of the map
    if steering > -0.4 and steering < -0.05 and y < 0.8 and x < 0:
        steering = steering * 1.3
    # hard-coded corner: full throttle, hard right
    if x > 360 and x < 390 and z > -60 and z < 20:
        throttle = 0.9
        steering = 0.9
    # hard-coded corner: fixed left steer
    if x > -570 and x < -420 and z < 80 and z > 10:
        # throttle=0.9
        steering = -0.4
    # throttle=throttle*(1-steering*1.5)
    # if abs(steering)>0.01:
    #     throttle=0.9
    return VehicleControl(throttle=throttle, steering=steering)
def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
    """Run the lateral PID first, then feed its error magnitude into the
    longitudinal PID so the car slows down when it is off target."""
    steering, errBoi, posBoi, velBoi, way_dir = \
        self.lat_pid_controller.run_in_series(next_waypoint=next_waypoint)
    desired_speed = kwargs.get("target_speed", self.max_speed)
    throttle = self.long_pid_controller.run_in_series(
        next_waypoint=next_waypoint,
        target_speed=desired_speed,
        errBoi=np.abs(errBoi))
    return VehicleControl(throttle=throttle, steering=steering)
def run_step(self) -> float:
    """P-controller on the speed error, clamped to a [0, 1] throttle."""
    speed_error = self.target_speed - Vehicle.get_speed(self.agent.vehicle)
    clamped = VehicleControl.clamp(self.kp * speed_error, 0, 1)
    return float(clamped)
def follower_step(self):
    """One control step for the follower car: chase the lead car's last
    broadcast transform, overriding with obstacle avoidance when something
    *other than* the lead car blocks the path. Stops when the lead car has
    never been heard from.
    """
    # broadcast current state every 10th tick
    if self.time_counter % 10 == 0:
        self.udp_multicast.send_current_state()
    # msg payload: location x, y, z; rotation roll, pitch, yaw; velocity x, y, z; acceleration x, y, z
    if self.udp_multicast.msg_log.get(self.car_to_follow, None) is not None:
        # default action: steer toward the lead car's broadcast transform
        control = self.controller.run_in_series(
            next_waypoint=Transform.from_array(self.udp_multicast.msg_log[
                self.car_to_follow]))
        if self.front_depth_camera.data is not None and self.front_rgb_camera.data is not None:
            left, center, right = self.find_obstacles_via_depth_to_pcd()
            obstacle_detected = left[0] or center[0] or right[0]
            if obstacle_detected and Transform.from_array(
                    self.udp_multicast.msg_log[self.car_to_follow]
            ).location.distance(self.vehicle.transform.location
                                ) > self.controller.distance_to_keep:
                # if obstacle is detected and the obstacle is not the following vehicle, execute avoid sequence
                control = self.act_on_obstacle(left, center, right)
            return control
        return control
    else:
        # self.logger.info("No other cars found")
        return VehicleControl(throttle=0, steering=0)
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """Log the vehicle transform to stdout and the output file each step."""
    super(WaypointGeneratigAgent, self).run_step(sensors_data=sensors_data,
                                                 vehicle=vehicle)
    # skip the very first tick so the pose has settled
    if self.time_counter > 1:
        current_transform = self.vehicle.transform
        print(current_transform)
        self.output_file.write(current_transform.record() + "\n")
    return VehicleControl()
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """Run the lane detector, logging (rather than raising) any failure."""
    super().run_step(sensors_data=sensors_data, vehicle=vehicle)
    try:
        self.lane_detector.run_in_series()
    except Exception as exc:
        self.logger.error(exc)
    return VehicleControl()
def run_in_series(self) -> VehicleControl:
    """Follow the externally-supplied next waypoint, if one was provided.

    Reads ``next_waypoint`` from the agent's kwargs; when present, records
    it in the queue and hands it to the controller. Otherwise coasts.
    """
    next_waypoint: Transform = self.agent.kwargs.get("next_waypoint", None)
    if next_waypoint is None:
        return VehicleControl()
    self.way_points_queue.append(next_waypoint)
    return self.controller.run_in_series(next_waypoint=next_waypoint)
def parse_events(self, clock: pygame.time.Clock):
    """
    parse a keystoke event

    Args:
        clock: pygame clock

    Returns:
        Tuple bool, and vehicle control
        boolean states whether quit is pressed.
        VehicleControl by default has throttle = 0, steering = 0
    """
    for event in pygame.event.get():
        # quit event terminates the loop with a neutral control
        if event.type == pygame.QUIT:
            return False, VehicleControl()
    self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
    return True, VehicleControl(throttle=self.throttle,
                                steering=self.steering)
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
    """Record the current transform for later plotting; issue no control."""
    super(RealtimePlotterAgent, self).run_step(sensors_data=sensors_data,
                                               vehicle=vehicle)
    self.transform_history.append(self.vehicle.transform)
    return VehicleControl()
def smoothen_control(self, control: VehicleControl):
    """Low-pass filter throttle increases and steering changes.

    Throttle is only smoothed when it is increasing above a drivable
    baseline (0.15), so decreases stay immediate. Steering magnitude
    shrinking (returning toward center) and growing are smoothed with
    separate factors. The smoothed control becomes the new
    ``prev_control``.

    Args:
        control: the raw control to smooth (mutated in place).

    Returns:
        The smoothed control.
    """
    if abs(control.throttle) > abs(self.prev_control.throttle) and self.prev_control.throttle > 0.15:
        # ensure slower increase, faster decrease. 0.15 barely drives the car
        control.throttle = (self.prev_control.throttle * self.throttle_smoothen_factor + control.throttle) / \
                           (self.throttle_smoothen_factor + 1)
    if abs(control.steering) < abs(self.prev_control.steering):
        # steering magnitude shrinking: slowly turn back toward center
        control.steering = (self.prev_control.steering * self.steering_smoothen_factor_backward + control.steering) / \
                           (self.steering_smoothen_factor_backward + 1)
    elif abs(control.steering) > abs(self.prev_control.steering):
        # BUG FIX: the original elif repeated the "<" comparison (making
        # this branch unreachable) and divided by the *backward* factor
        # while multiplying by the forward one. Smooth increasing steering
        # with the forward factor consistently.
        control.steering = (self.prev_control.steering * self.steering_smoothen_factor_forward + control.steering) / \
                           (self.steering_smoothen_factor_forward + 1)
    self.prev_control = control
    return control
def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
    """
    Execute one step of control invoking both lateral and longitudinal
    PID controllers to reach a target waypoint at a given target_speed.

    Args:
        next_waypoint: target location encoded as a waypoint
        **kwargs: unused

    Returns:
        Next Vehicle Control
    """
    self.update_lon_control_k_values()
    self.update_lat_control_k_values()
    acceleration = self._lon_controller.run_step(self.target_speed)
    raw_steering = self._lat_controller.run_step(next_waypoint)

    control = VehicleControl()
    # forward acceleration maps to throttle (capped); negative coasts
    control.throttle = min(acceleration, self.max_throttle) if acceleration >= 0.0 else 0

    # Steering regulation: limit the change to 0.1 per step, then cap magnitude
    rate_limited = max(self.past_steering - 0.1,
                       min(self.past_steering + 0.1, raw_steering))
    if rate_limited >= 0:
        final_steering = min(self.max_steer, rate_limited)
    else:
        final_steering = max(-self.max_steer, rate_limited)
    control.steering = final_steering
    self.past_steering = final_steering

    # finally clamp throttle magnitude to 0.2 via the regularizer
    throttle_cap = 0.2
    return self.throttle_regularization(control=control,
                                        min_throttle=-throttle_cap,
                                        max_throttle=throttle_cap)