def default_config(cls) -> PGConfig:
    config = PGDriveEnv.default_config()

    # Set the internal environment to run at a 0.02s interval.
    config["decision_repeat"] = 1

    # Speed reward is given for the current state, so its magnitude needs to be reduced.
    original_action_repeat = PGDriveEnv.default_config()["decision_repeat"]
    config["speed_reward"] = config["speed_reward"] / original_action_repeat

    # Set the interval from 0.02s to 1s.
    config.update(
        {
            "fixed_action_repeat": 0,
            "max_action_repeat": 50,
            "min_action_repeat": 1,
            "horizon": 5000,
        },
    )

    # Default gamma is for the ORIGINAL primitive step!
    # Note that we will change this term since an ORIGINAL primitive step is not the internal step!
    # It still contains ORIGINAL_ACTION_REPEAT internal steps!
    # So we will modify this gamma to make sure it behaves like the one applied to ORIGINAL primitive steps.
    # config.add("gamma", 0.99)
    return config
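# Illustration of the speed-reward rescaling above (assumed numbers, not values
# from the source): with the stock decision_repeat the physics advances several
# 0.02s ticks per reward-emitting step, so running at decision_repeat = 1
# multiplies the number of reward steps per simulated second, and the per-step
# speed reward is shrunk by the same factor to compensate.
_assumed_original_repeat = 5      # hypothetical stock decision_repeat
_assumed_speed_reward = 0.1       # hypothetical stock speed_reward
_rescaled = _assumed_speed_reward / _assumed_original_repeat
assert abs(_rescaled * _assumed_original_repeat - _assumed_speed_reward) < 1e-9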
def default_config() -> PGConfig: config = PGDriveEnv.default_config() config.add("change_friction", True) config.add("friction_min", 0.8) config.add("friction_max", 1.2) config.update({"vehicle_config": {"wheel_friction": 1.0}}) return config
def default_config() -> PGConfig: config = PGDriveEnv.default_config() config.update({ "change_density": True, "density_min": 0.0, "density_max": 0.4, }) return config
def default_config() -> PGConfig: config = PGDriveEnv.default_config() config.update({ "change_friction": True, "friction_min": 0.8, "friction_max": 1.2, "vehicle_config": { "wheel_friction": 1.0 } }) return config
def default_config(cls) -> PGConfig: config = PGDriveEnv.default_config() config["vehicle_config"]["lidar"].update({"num_lasers": 0, "distance": 0}) # Remove lidar config.update( { "frame_skip": 5, "frame_stack": 3, "post_stack": 5, "rgb_clip": True, "resolution_size": 84, "distance": 30 } ) return config
def default_config(cls) -> PGConfig:
    config = PGDriveEnv.default_config()

    # Set the internal environment to run at a 0.02s interval.
    config["decision_repeat"] = 1

    # Speed reward is given for the current state, so its magnitude needs to be reduced.
    config["speed_reward"] = config["speed_reward"] / cls.ORIGINAL_ACTION_REPEAT

    # Set the interval from 0.02s to 1s.
    config.add("fixed_action_repeat", 0)  # 0 stands for using a varying action repeat.
    config.add("max_action_repeat", 50)
    config.add("min_action_repeat", 1)
    config.add("horizon", 5000)  # How many primitive steps within one episode.

    # Default gamma is for the ORIGINAL primitive step!
    # Note that we will change this term since an ORIGINAL primitive step is not the internal step!
    # It still contains ORIGINAL_ACTION_REPEAT internal steps!
    # So we will modify this gamma to make sure it behaves like the one applied to ORIGINAL primitive steps.
    # config.add("gamma", 0.99)
    return config
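# Sketch of the gamma adjustment hinted at in the commented-out line above (an
# assumption about the intent, not code from the source): if gamma = 0.99 is
# tuned for ORIGINAL primitive steps and each of those contains
# ORIGINAL_ACTION_REPEAT internal steps, then a per-internal-step discount of
# gamma ** (1 / ORIGINAL_ACTION_REPEAT) reproduces the same discounting over
# one original step.
_ORIGINAL_ACTION_REPEAT = 5            # assumed value for illustration
_gamma_per_original_step = 0.99
_gamma_per_internal_step = _gamma_per_original_step ** (1 / _ORIGINAL_ACTION_REPEAT)
assert abs(_gamma_per_internal_step ** _ORIGINAL_ACTION_REPEAT - _gamma_per_original_step) < 1e-9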
@staticmethod
def argoverse_position(pos):
    pos[1] *= -1
    return pos

@staticmethod
def pgdrive_position(pos):
    pos[1] *= -1
    return pos


if __name__ == "__main__":
    from pgdrive.engine.engine_utils import initialize_engine
    from pgdrive.envs.pgdrive_env import PGDriveEnv

    default_config = PGDriveEnv.default_config()
    default_config["use_render"] = True
    default_config["debug"] = True
    engine = initialize_engine(default_config)

    # In argoverse coordinates.
    xcenter, ycenter = 2599.5505965123866, 1200.0214763629717
    xmin = xcenter - 80  # 150
    xmax = xcenter + 80  # 150
    ymin = ycenter - 80  # 150
    ymax = ycenter + 80  # 150
    map = ArgoverseMap({
        "city": "PIT",
        # "draw_map_resolution": 1024,
        "center":
def default_config() -> Config:
    config = PGDriveEnv.default_config()
    config.update(MULTI_AGENT_PGDRIVE_DEFAULT_CONFIG)
    return config
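# Usage sketch for the multi-agent variant configured above (assumptions: the
# surrounding class behaves like a MultiAgentPGDrive-style env, and agent ids
# such as "agent0" plus a "__all__" key in dones follow the common multi-agent
# gym convention; none of this is confirmed by the source).
# env = MultiAgentPGDrive(dict(num_agents=4))
# obs = env.reset()                                   # {"agent0": ..., "agent1": ..., ...}
# actions = {aid: env.action_space[aid].sample() for aid in obs}
# obs, rewards, dones, infos = env.step(actions)      # all dicts keyed by agent id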
def default_config() -> PGConfig: config = PGDriveEnv.default_config() config.add("change_density", True) config.add("density_min", 0.0) config.add("density_max", 0.4) return config
def default_config() -> PGConfig:
    config = PGDriveEnvV1.default_config()
    config.update(
        dict(
            # ===== Traffic =====
            traffic_density=0.1,
            traffic_mode=TrafficMode.Hybrid,  # "Respawn", "Trigger", "Hybrid"
            random_traffic=True,  # Traffic is randomized by default.

            # ===== Cost Scheme =====
            crash_vehicle_cost=1.,
            crash_object_cost=1.,
            out_of_road_cost=1.,

            # ===== Reward Scheme =====
            # See: https://github.com/decisionforce/pgdrive/issues/283
            success_reward=10.0,
            out_of_road_penalty=5.0,
            crash_vehicle_penalty=5.0,
            crash_object_penalty=5.0,
            acceleration_penalty=0.0,
            driving_reward=1.0,
            general_penalty=0.0,
            speed_reward=0.5,
            use_lateral=False,
            gaussian_noise=0.0,
            dropout_prob=0.0,
            vehicle_config=dict(
                wheel_friction=0.8,  # See: https://github.com/decisionforce/pgdrive/issues/297
                lidar=dict(num_lasers=240, distance=50, num_others=4, gaussian_noise=0.0, dropout_prob=0.0),
                side_detector=dict(num_lasers=0, distance=50, gaussian_noise=0.0, dropout_prob=0.0),
                lane_line_detector=dict(num_lasers=0, distance=50, gaussian_noise=0.0, dropout_prob=0.0),
                # Following the examples: https://docs.panda3d.org/1.10/python/programming/physics/bullet/vehicles
                max_engine_force=1000,
                max_brake_force=100,
                max_steering=40,
                max_speed=80,
            ),
            map_config=dict(block_type_version="v2"),
            # Disable map loading!
            auto_termination=False,
            load_map_from_json=False,
            _load_map_from_json="",
        )
    )
    config.remove_keys([])
    return config
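# Usage sketch (an assumption: the class owning this default_config is
# constructed like PGDriveEnv, i.e. it accepts a partial config dict that
# overrides the defaults above; PGDriveEnv is used here as a stand-in).
if __name__ == "__main__":
    from pgdrive.envs.pgdrive_env import PGDriveEnv

    env = PGDriveEnv(dict(traffic_density=0.2, use_render=False))
    obs = env.reset()
    for _ in range(10):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            obs = env.reset()
    env.close()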