def test_map_random_seeding():
    cfg_1 = {"environment_num": 1, "start_seed": 5}
    cfg_2 = {"environment_num": 10, "start_seed": 5}
    cfg_3 = {"environment_num": 100, "start_seed": 5}
    cfg_4 = {"environment_num": 10, "start_seed": 0}
    cfg_5 = {"environment_num": 3, "start_seed": 3}
    map_configs = []
    for cfg in [cfg_1, cfg_2, cfg_3, cfg_4, cfg_5]:
        env = PGDriveEnv(cfg)
        try:
            env.reset(force_seed=5)
            map_configs.append(env.current_map.save_map)
        finally:
            env.close()
    for idx, map_cfg in enumerate(map_configs[:-1]):
        nxt_map_cfg = map_configs[idx + 1]
        recursive_equal(map_cfg, nxt_map_cfg)
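# The recursive_equal helper used above comes from the repository's test utilities and is not
# defined in this file. As a rough, assumption-labeled sketch of the kind of deep comparison it
# performs (the real implementation may differ), it could look like:
import numpy as np


def recursive_equal_sketch(a, b, need_assert=False):
    # Hypothetical stand-in: recursively compare dicts, sequences and numbers.
    if isinstance(a, dict) and isinstance(b, dict):
        if set(a.keys()) != set(b.keys()):
            assert not need_assert, (a.keys(), b.keys())
            return False
        return all(recursive_equal_sketch(a[k], b[k], need_assert) for k in a)
    if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
        if len(a) != len(b):
            assert not need_assert, (a, b)
            return False
        return all(recursive_equal_sketch(x, y, need_assert) for x, y in zip(a, b))
    if isinstance(a, (int, float)) and isinstance(b, (int, float)):
        result = bool(np.isclose(a, b))
    else:
        result = a == b
    assert result or not need_assert, (a, b)
    return result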
def _evaluate(env_config, num_episode):
    s = time.time()
    np.random.seed(0)
    env = PGDriveEnv(env_config)
    obs = env.reset()
    success_list, reward_list, ep_reward, ep_len, ep_count = [], [], 0, 0, 0
    while ep_count < num_episode:
        action = expert(obs, deterministic=True)
        obs, reward, done, info = env.step(action)
        ep_reward += reward
        ep_len += 1
        if done:
            ep_count += 1
            success_list.append(1 if get_terminal_state(info) == "Success" else 0)
            reward_list.append(ep_reward)
            ep_reward = 0
            ep_len = 0
            obs = env.reset()
    env.close()
    t = time.time() - s
    ep_reward_mean = sum(reward_list) / len(reward_list)
    success_rate = sum(success_list) / len(success_list)
    print(
        f"Finish {ep_count} episodes in {t:.3f} s. Episode reward: {ep_reward_mean}, success rate: {success_rate}."
    )
    return ep_reward_mean, success_rate
def test_config_consistency():
    env = PGDriveEnv({"vehicle_config": {"lidar": {"num_lasers": 999}}})
    try:
        env.reset()
        assert env.vehicle.vehicle_config["lidar"]["num_lasers"] == 999
    finally:
        env.close()
def test_base_vehicle():
    env = PGDriveEnv()
    try:
        env.reset()
        engine = env.engine
        map = env.current_map
        # v_config = BaseVehicle.get_vehicle_config(dict())
        v_config = Config(BASE_DEFAULT_CONFIG["vehicle_config"]).update(
            PGDriveEnv_DEFAULT_CONFIG["vehicle_config"]
        )
        v = engine.spawn_object(DefaultVehicle, vehicle_config=v_config, random_seed=0)
        v.add_navigation()
        v.add_navigation()
        v.navigation.set_force_calculate_lane_index(True)
        v.update_map_info(map)
        for heading in [-1.0, 0.0, 1.0]:
            for pos in [[0., 0.], [-100., -100.], [100., 100.]]:
                v.reset(pos=pos, heading=heading)
                np.testing.assert_almost_equal(_get_heading_deg(v.heading_theta), heading, decimal=3)
                v_pos = v.position
                # v_pos[1] = -v_pos[1], this position is converted to pg_position in reset() now
                np.testing.assert_almost_equal(v_pos, pos)
                v.set_position(pos)
                v_pos = v.position
                np.testing.assert_almost_equal(v_pos, pos)
        v.after_step()
        v.reset(pos=np.array([10, 0]))
        for a_x in [-1, 0, 0.5, 1]:
            for a_y in [-1, 0, 0.5, 1]:
                v.before_step([a_x, a_y])
                v._set_action([a_x, a_y])
                _assert_vehicle(v)
                v._set_incremental_action([a_x, a_y])
                _assert_vehicle(v)
                state = v.get_state()
                v.set_state(state)
                assert _get_heading_deg(v.heading_theta) == _get_heading_deg(state["heading"])
                np.testing.assert_almost_equal(v.position, state["position"])
                v.projection([a_x, a_y])
        _nan_speed(env)
        v.destroy()
        del v
    finally:
        env.close()
def test_base_vehicle():
    env = PGDriveEnv()
    try:
        env.reset()
        pg_world = env.pg_world
        map = env.current_map
        # v_config = BaseVehicle.get_vehicle_config(dict())
        v_config = PGConfig(BASE_DEFAULT_CONFIG["vehicle_config"]).update(
            PGDriveEnvV1_DEFAULT_CONFIG["vehicle_config"]
        )
        v_config.update({"use_render": False, "use_image": False})
        v = BaseVehicle(pg_world, vehicle_config=v_config)
        v.add_lidar()
        v.add_routing_localization(True)
        v.add_routing_localization(False)
        v.routing_localization.set_force_calculate_lane_index(True)
        v.update_map_info(map)
        for heading in [-1.0, 0.0, 1.0]:
            for pos in [[0., 0.], [-100., -100.], [100., 100.]]:
                v.reset(map, pos=pos, heading=heading)
                np.testing.assert_almost_equal(_get_heading_deg(v.heading_theta), heading, decimal=3)
                v_pos = v.position
                # v_pos[1] = -v_pos[1], this position is converted to pg_position in reset() now
                np.testing.assert_almost_equal(v_pos, pos)
                v.set_position(pos)
                v_pos = v.position
                np.testing.assert_almost_equal(v_pos, pos)
        v.update_state(detector_mask=None)
        v.reset(map, pos=np.array([10, 0]))
        for a_x in [-1, 0, 0.5, 1]:
            for a_y in [-1, 0, 0.5, 1]:
                v.prepare_step([a_x, a_y])
                v.set_act([a_x, a_y])
                _assert_vehicle(v)
                v.set_incremental_action([a_x, a_y])
                _assert_vehicle(v)
                state = v.get_state()
                v.set_state(state)
                assert _get_heading_deg(v.heading_theta) == _get_heading_deg(state["heading"])
                np.testing.assert_almost_equal(v.position, state["position"])
                v.projection([a_x, a_y])
        _nan_speed(env)
        v.destroy()
        del v
    finally:
        env.close()
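# Both versions of test_base_vehicle rely on small helpers (_get_heading_deg, _assert_vehicle,
# _nan_speed) defined elsewhere in the test module. The sketches below are assumptions about
# their behavior, included only for readability; they are not the repository's implementations.
import numpy as np


def _get_heading_deg_sketch(heading_theta):
    # Hypothetical helper: convert a heading angle in radians to degrees.
    return np.rad2deg(heading_theta)


def _assert_vehicle_sketch(v):
    # Hypothetical sanity checks on the vehicle state after applying an action.
    assert not np.any(np.isnan(np.asarray(v.position, dtype=float)))
    assert not np.isnan(v.heading_theta)
    assert v.speed >= 0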
def test_map_buffering():
    env_config = {"environment_num": 5}
    e = PGDriveEnv(env_config)
    try:
        for i in range(10):
            e.reset()
        assert any([v is not None for v in e.engine.map_manager.pg_maps.values()])
    finally:
        e.close()
def test_seeding():
    env = PGDriveEnv({"environment_num": 1000})
    try:
        env.reset()
        env.seed(999)
        # assert env.engine is None
        assert env.current_seed == 999
        env.reset(force_seed=992)
        assert env.current_seed == 992
        # assert env.engine is not None
    finally:
        env.close()
def test_pgdrive_env_blackbox(config):
    env = PGDriveEnv(config=config)
    try:
        obs = env.reset()
        assert env.observation_space.contains(obs)
        _act(env, env.action_space.sample())
        for x in [-1, 0, 1]:
            env.reset()
            for y in [-1, 0, 1]:
                _act(env, [x, y])
    finally:
        env.close()
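# _act is a small helper shared by the blackbox tests and defined in the test module itself.
# A plausible sketch (treat it as an assumption, not the repo's exact helper) simply takes one
# step and validates the declared gym spaces:
import numpy as np


def _act_sketch(env, action):
    assert env.action_space.contains(action)
    obs, reward, done, info = env.step(action)
    assert env.observation_space.contains(obs)
    assert np.isscalar(reward)
    assert isinstance(info, dict)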
def test_random_traffic():
    env = PGDriveEnv({
        "random_traffic": True,
        "traffic_mode": "respawn",
        "traffic_density": 0.3,
        "start_seed": 5,
        # "fast": True, "use_render": True
    })
    has_traffic = False
    try:
        last_pos = None
        for i in range(20):
            obs = env.reset(force_seed=5)
            assert env.engine.traffic_manager.random_traffic
            new_pos = [v.position for v in env.engine.traffic_manager.traffic_vehicles]
            if len(new_pos) > 0:
                has_traffic = True
            if last_pos is not None and len(new_pos) == len(last_pos):
                assert sum([
                    norm(lastp[0] - newp[0], lastp[1] - newp[1]) >= 0.5
                    for lastp, newp in zip(last_pos, new_pos)
                ]), [(lastp, newp) for lastp, newp in zip(last_pos, new_pos)]
            last_pos = new_pos
        assert has_traffic
    finally:
        env.close()
def test_zombie():
    env = PGDriveEnv(pid_control_config)
    target = Target(0.375, 30)
    dest = [-288.88415527, -411.55871582]
    try:
        o = env.reset()
        steering_controller = PIDController(1.6, 0.0008, 27.3)
        acc_controller = PIDController(0.1, 0.001, 0.3)
        steering_error = o[0] - target.lateral
        steering = steering_controller.get_result(steering_error)
        acc_error = env.vehicle.speed - target.speed
        acc = acc_controller.get_result(acc_error)
        for i in range(1, 1000000):
            o, r, d, info = env.step([-steering, acc])
            steering_error = o[0] - target.lateral
            steering = steering_controller.get_result(steering_error)
            t_speed = target.speed if abs(o[12] - 0.5) < 0.01 else target.speed - 10
            acc_error = env.vehicle.speed - t_speed
            acc = acc_controller.get_result(acc_error)
            if d:
                assert info["arrive_dest"]
                assert abs(env.vehicle.position[0] - dest[0]) < 0.1 and \
                    abs(env.vehicle.position[1] - dest[1]) < 0.1
                break
    finally:
        env.close()
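# The PID tests (this one and the variant further below) drive the car with a Target container
# and a PIDController. Both come from the PGDrive codebase; the stand-ins below only illustrate
# the textbook control loop the tests rely on and should be read as assumptions.
from collections import namedtuple

TargetSketch = namedtuple("TargetSketch", ["lateral", "speed"])


class PIDControllerSketch:
    # Hypothetical discrete PID controller (the time step is folded into the gains).
    def __init__(self, k_p, k_i, k_d):
        self.k_p, self.k_i, self.k_d = k_p, k_i, k_d
        self.integral = 0.0
        self.last_error = 0.0

    def get_result(self, error):
        self.integral += error
        derivative = error - self.last_error
        self.last_error = error
        return self.k_p * error + self.k_i * self.integral + self.k_d * derivative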
def test_config_consistency_2():
    # env = PGDriveEnv({"map_config": {"config": "OO"}})
    the_config = 11
    env = PGDriveEnv({"map_config": {"config": the_config}})
    try:
        env.reset()
        assert env.current_map.config["config"] == the_config
        assert all([v.config["config"] == the_config for v in env.maps.values()])
        assert env.config["map_config"]["config"] == the_config
    finally:
        env.close()

    the_config = 11
    env = PGDriveEnv({"map": the_config})
    try:
        env.reset()
        assert env.current_map.config["config"] == the_config
        assert all([v.config["config"] == the_config for v in env.maps.values()])
        assert env.config["map_config"]["config"] == the_config
    finally:
        env.close()

    the_config = "OO"
    env = PGDriveEnv({"map": the_config})
    try:
        env.reset()
        assert env.current_map.config["config"] == the_config
        assert all([v.config["config"] == the_config for v in env.maps.values()])
        assert env.config["map_config"]["config"] == the_config
    finally:
        env.close()

    the_config = "OO"
    env = PGDriveEnv({"map_config": {"config": the_config, "type": "block_sequence"}})
    try:
        env.reset()
        assert env.current_map.config["config"] == the_config
        assert all([v.config["config"] == the_config for v in env.maps.values()])
        assert env.config["map_config"]["config"] == the_config
    finally:
        env.close()
class TestObsActionSpace(unittest.TestCase):
    def setUp(self):
        self.env = PGDriveEnv()

    def test_obs_space(self):
        obs = self.env.reset()
        assert self.env.observation_space.contains(obs), (self.env.observation_space, obs.shape)
        obs, _, _, _ = self.env.step(self.env.action_space.sample())
        assert self.env.observation_space.contains(obs), (self.env.observation_space, obs.shape)

    def tearDown(self):
        self.env.close()
def _evaluate(env_config, num_episode, has_traffic=True):
    s = time.time()
    np.random.seed(0)
    env = PGDriveEnv(env_config)
    try:
        obs = env.reset()
        lidar_success = False
        success_list, reward_list, ep_reward, ep_len, ep_count = [], [], 0, 0, 0
        while ep_count < num_episode:
            action = expert(obs, deterministic=True)
            obs, reward, done, info = env.step(action)
            # double check lidar
            lidar = [True if p == 1.0 else False for p in env.observations[DEFAULT_AGENT].cloud_points]
            if not all(lidar):
                lidar_success = True
            ep_reward += reward
            ep_len += 1
            if done:
                ep_count += 1
                success_list.append(1 if get_terminal_state(info) == "Success" else 0)
                reward_list.append(ep_reward)
                ep_reward = 0
                ep_len = 0
                obs = env.reset()
                if has_traffic:
                    assert lidar_success
                lidar_success = False
        env.close()
        t = time.time() - s
        ep_reward_mean = sum(reward_list) / len(reward_list)
        success_rate = sum(success_list) / len(success_list)
        print(
            f"Finish {ep_count} episodes in {t:.3f} s. Episode reward: {ep_reward_mean}, success rate: {success_rate}."
        )
    finally:
        env.close()
    return ep_reward_mean, success_rate
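# In the repository, these _evaluate helpers are driven by an expert-performance test. The
# caller below is only an illustrative sketch: the config values and thresholds are assumptions,
# not the actual numbers used in the test suite.
def test_expert_performance_sketch():
    ep_reward, success_rate = _evaluate(
        dict(environment_num=1, start_seed=0, load_map_from_json=False),
        num_episode=2,
    )
    assert ep_reward > 0
    assert 0.0 <= success_rate <= 1.0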
def test_zombie():
    conf = copy.deepcopy(pid_control_config)
    # conf["use_render"] = True
    # conf["fast"] = True
    env = PGDriveEnv(conf)
    env.seed(0)
    target = Target(0.375, 30)
    try:
        o = env.reset()
        steering_controller = PIDController(1.6, 0.0008, 27.3)
        acc_controller = PIDController(0.1, 0.001, 0.3)
        steering_error = o[0] - target.lateral
        steering = steering_controller.get_result(steering_error)
        acc_error = env.vehicles[env.DEFAULT_AGENT].speed - target.speed
        acc = acc_controller.get_result(acc_error)
        for i in range(1, 1000000):
            o, r, d, info = env.step([-steering, acc])
            # env.render(text={
            #     "o": o[0],
            #     "lat": env.vehicle.lane.local_coordinates(env.vehicle.position)[0],
            #     "tar": target.lateral
            # })
            steering_error = o[0] - target.lateral
            steering = steering_controller.get_result(steering_error)
            t_speed = target.speed if abs(o[12] - 0.5) < 0.01 else target.speed - 10
            acc_error = env.vehicles[env.DEFAULT_AGENT].speed - t_speed
            acc = acc_controller.get_result(acc_error)
            if d:
                # We assert that the vehicle arrives at the middle lane of the final block.
                assert info[TerminationState.SUCCESS]
                assert len(env.current_map.blocks[-1].positive_lanes) == 3
                middle_lane = env.vehicle.routing_localization.final_road.get_lanes(env.current_map.road_network)[1]
                # The currently recorded lane of the ego vehicle should be exactly the final middle lane.
                assert middle_lane == env.vehicle.lane
                # The ego vehicle should be laterally centered in the final middle lane.
                # The new result is 0.1; this has been visualized and everything works well.
                assert abs(middle_lane.local_coordinates(env.vehicle.position)[1]) < 0.5
                # The ego vehicle should be at the far end of the final middle lane.
                assert abs(middle_lane.local_coordinates(env.vehicle.position)[0] - middle_lane.length) < 10
                # The speed should also be perfectly controlled.
                assert abs(env.vehicle.speed - target.speed) < 1
                break
    finally:
        env.close()
def test_loaded_map_alignment():
    # Generate the second round
    # for seed in [0, 1, 2, 100, 200, 300, 9999]:
    for seed in [0, 1, 2, 99]:
        env_config = {"start_seed": seed, "environment_num": 1}
        generate_maps(PGDriveEnv, env_config.copy(), json_file_path="seed{}_v1.json".format(seed))
        # generate_maps(PGDriveEnv, env_config.copy(), json_file_path="seed{}_v2.json".format(seed))

        with open("seed{}_v1.json".format(seed), "r") as f:
            saved_v1 = json.load(f)
        # with open("seed{}_v2.json".format(seed), "r") as f:
        #     saved_v2 = json.load(f)

        e = PGDriveEnv(env_config.copy())
        e.reset()
        assert e.engine.global_config["load_map_from_json"] is True, (
            "If this assertion fails, it means you are not using the predefined maps. "
            "Please check the read_all_maps_from_json function in map_manager.py"
        )
        map_data_realtime_load = e.current_map.save_map()
        map_data_in_json = saved_v1["map_data"][str(seed)]
        e.close()

        e = PGDriveEnv({"start_seed": seed, "environment_num": 1, "load_map_from_json": False})
        e.reset()
        map_data_realtime_generate = e.current_map.save_map()
        e.close()

        e = PGDriveEnv({"start_seed": seed, "environment_num": 10, "load_map_from_json": False})
        e.reset(force_seed=seed)
        map_data_realtime_generate_in_multiple_maps = e.current_map.save_map()
        e.close()

        # Assert v1 and v2 have the same maps
        # recursive_equal(saved_v1, saved_v2, True)

        # Assert json and loaded maps are the same
        recursive_equal(map_data_in_json, map_data_realtime_load, True)

        # Assert json and generated maps are the same
        recursive_equal(map_data_in_json, map_data_realtime_generate, True)

        # Assert json and maps generated among multiple maps are the same
        recursive_equal(map_data_in_json, map_data_realtime_generate_in_multiple_maps, True)

        print(saved_v1)
def test_fixed_traffic():
    env = PGDriveEnv({
        "random_traffic": False,
        "traffic_mode": "respawn",
        # "fast": True, "use_render": True
    })
    try:
        last_pos = None
        for i in range(20):
            obs = env.reset()
            assert env.engine.traffic_manager.random_seed == env.current_seed
            new_pos = [v.position for v in env.engine.traffic_manager.vehicles]
            if last_pos is not None and len(new_pos) == len(last_pos):
                assert sum([
                    norm(lastp[0] - newp[0], lastp[1] - newp[1]) <= 1e-3
                    for lastp, newp in zip(last_pos, new_pos)
                ]), [(lastp, newp) for lastp, newp in zip(last_pos, new_pos)]
            last_pos = new_pos
    finally:
        env.close()
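# Both traffic tests above measure per-vehicle displacement with a two-argument norm(x, y).
# PGDrive ships such a utility in its math helpers; a behaviorally equivalent sketch (the exact
# import path is deliberately left unstated here) is simply the 2D Euclidean length:
import math


def norm_sketch(x, y):
    # Length of the 2D vector (x, y), as used for norm(dx, dy) above.
    return math.hypot(x, y)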
def test_random_vehicle_parameter():
    env = PGDriveEnv({
        "environment_num": 5,
        "traffic_density": .2,
        "traffic_mode": "trigger",
        "start_seed": 12,
        "random_agent_model": True
    })
    try:
        o = env.reset(force_seed=12)
        old_config_1 = env.vehicle.get_config(True)
        env.reset(force_seed=15)
        old_config_2 = env.vehicle.get_config(True)

        env.reset(force_seed=13)

        env.reset(force_seed=12)
        new_config = env.vehicle.get_config(True)
        assert recursive_equal(old_config_1, new_config)

        env.reset(force_seed=15)
        new_config = env.vehicle.get_config(True)
        assert recursive_equal(old_config_2, new_config)
    finally:
        env.close()
def test_random_lane_width():
    env = PGDriveEnv({
        "environment_num": 5,
        "traffic_density": .2,
        "traffic_mode": "trigger",
        "start_seed": 12,
        "random_lane_width": True,
        "load_map_from_json": False
    })
    try:
        o = env.reset(force_seed=12)
        old_config_1 = env.vehicle.lane.width
        env.reset(force_seed=15)
        old_config_2 = env.vehicle.lane.width

        env.reset(force_seed=13)

        env.reset(force_seed=12)
        new_config = env.vehicle.lane.width
        assert old_config_1 == new_config

        env.reset(force_seed=15)
        new_config = env.vehicle.lane.width
        assert old_config_2 == new_config

        assert old_config_1 != old_config_2
    finally:
        env.close()
import time

import numpy as np

from pgdrive import PGDriveEnv

if __name__ == '__main__':
    env = PGDriveEnv(
        dict(environment_num=1000, traffic_density=0.1, load_map_from_json=True, start_seed=5000)
    )
    obs = env.reset()
    start = time.time()
    vc = []
    for s in range(1000):
        env.reset(force_seed=s + 5000)
        print("We have {} vehicles in seed {} map!".format(len(env.engine.traffic_manager.vehicles), s))
        vc.append(len(env.engine.traffic_manager.vehicles))
        if (s + 1) % 1 == 0:
            print(f"{s + 1} | Time Elapse: {time.time() - start}")
    print(np.mean(vc), np.min(vc), np.max(vc), np.std(vc))
Note: This script requires rendering. Please follow the installation instructions to set up an
environment that allows popping up a window.
"""
import random

from pgdrive import PGDriveEnv
from pgdrive.examples import expert, get_terminal_state

if __name__ == '__main__':
    env = PGDriveEnv(dict(use_render=True, environment_num=100, start_seed=random.randint(0, 1000), map=7))
    obs = env.reset()
    success_list, reward_list, ep_reward, ep_len, ep_count = [], [], 0, 0, 0
    try:
        while True:
            action = expert(obs)
            obs, reward, done, info = env.step(action)
            ep_reward += reward
            ep_len += 1
            # env.render()
            if done:
                ep_count += 1
                success_list.append(1 if get_terminal_state(info) == "Success" else 0)
                reward_list.append(ep_reward)
                # NOTE: the original snippet was truncated after the format string below; the
                # format arguments, counter resets and cleanup are an assumed completion.
                print(
                    "{} episodes terminated! Length: {}, Reward: {:.4f}, Terminal state: {}.".format(
                        ep_count, ep_len, ep_reward, get_terminal_state(info)
                    )
                )
                ep_reward = 0
                ep_len = 0
                obs = env.reset()
    finally:
        env.close()
import random

import matplotlib.pyplot as plt

from pgdrive import PGDriveEnv

if __name__ == '__main__':
    env = PGDriveEnv(config=dict(environment_num=100, map=7, start_seed=random.randint(0, 1000)))
    fig, axs = plt.subplots(4, 4, figsize=(10, 10), dpi=100)
    for i in range(4):
        for j in range(4):
            env.reset()
            m = env.get_map()
            ax = axs[i][j]
            ax.imshow(m, cmap="bone")
            ax.set_xticks([])
            ax.set_yticks([])
    fig.suptitle("Bird's-eye view of generated maps")
    plt.show()
    env.close()
def test_random_lane_num():
    env = PGDriveEnv({
        "environment_num": 5,
        "traffic_density": .2,
        "traffic_mode": "trigger",
        "start_seed": 12,
        "load_map_from_json": False,
        "random_lane_num": True,
    })
    try:
        o = env.reset(force_seed=12)
        old_config_1 = env.vehicle.navigation.get_current_lane_num()
        env.reset(force_seed=15)
        old_config_2 = env.vehicle.navigation.get_current_lane_num()

        env.reset(force_seed=13)

        env.reset(force_seed=12)
        new_config = env.vehicle.navigation.get_current_lane_num()
        assert old_config_1 == new_config

        env.reset(force_seed=15)
        new_config = env.vehicle.navigation.get_current_lane_num()
        assert old_config_2 == new_config

        env.close()

        env.reset(force_seed=12)
        assert old_config_1 == env.vehicle.navigation.get_current_lane_num()
        env.reset(force_seed=15)
        assert old_config_2 == env.vehicle.navigation.get_current_lane_num()
    finally:
        env.close()
import time

from pgdrive import PGDriveEnv
from pgdrive.utils import setup_logger

if __name__ == '__main__':
    setup_logger(debug=False)
    env = PGDriveEnv(dict(
        environment_num=1000,
        # use_render=True,
        fast=True,
        start_seed=1010,
        pstats=True
    ))
    obs = env.reset()
    start = time.time()
    action = [0.0, 1.]
    for s in range(10000000):
        o, r, d, i = env.step(action)
        if d:
            env.reset()
        if (s + 1) % 100 == 0:
            print(
                "Finish {}/10000000 simulation steps. Time elapse: {:.4f}. Average FPS: {:.4f}".format(
                    s + 1, time.time() - start, (s + 1) / (time.time() - start)
                )
            )
    print(f"Total Time Elapse: {time.time() - start}")
def pg_env():
    env = PGDriveEnv()
    env.reset()
    return env
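# If pg_env is meant to back a pytest fixture, a hedged wrapper that also guarantees cleanup
# could look like the sketch below; the fixture name and the smoke test are assumptions, not
# existing code in the repository.
import pytest


@pytest.fixture
def pg_env_fixture():
    env = pg_env()
    yield env
    env.close()


def test_pg_env_fixture_smoke(pg_env_fixture):
    obs, reward, done, info = pg_env_fixture.step(pg_env_fixture.action_space.sample())
    assert pg_env_fixture.observation_space.contains(obs)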