class TestItRuns(unittest.TestCase):
    """
    Tests that the 1x3 grid experiment runs without raising errors.
    """

    def setUp(self):
        vehicles = Vehicles()
        vehicles.add(veh_id="idm",
                     acceleration_controller=(IDMController, {}),
                     routing_controller=(GridRouter, {}),
                     sumo_car_following_params=SumoCarFollowingParams(
                         min_gap=2.5, tau=1.1),
                     num_vehicles=16)

        self.env, self.scenario = grid_mxn_exp_setup(row_num=1, col_num=3,
                                                     vehicles=vehicles)

    def tearDown(self):
        # terminate the traci instance
        self.env.terminate()

        # free data used by the class
        self.env = None
        self.scenario = None

    def test_it_runs(self):
        self.exp = SumoExperiment(self.env, self.scenario)
        self.exp.run(5, 50)
class TestLoopMerges(unittest.TestCase):
    """
    Tests the loop_merges generator, scenario, and environment.
    """

    def setUp(self):
        # create the environment and scenario classes for a ring road
        self.env, scenario = two_loops_one_merging_exp_setup()

        # instantiate an experiment class
        self.exp = SumoExperiment(self.env, scenario)

    def tearDown(self):
        # terminate the traci instance
        try:
            self.env.terminate()
        except FileNotFoundError:
            pass

        # free up used memory
        self.env = None
        self.exp = None

    def test_it_runs(self):
        """
        Tests that the loop merges experiment runs, and vehicles do not exit
        the network.
        """
        self.exp.run(1, 10)

    def test_gen_custom_start_pos(self):
        """
        Tests that vehicles with the prefix "merge" are in the merge_in lane,
        and all other vehicles are in the ring road.
        """
        # reset the environment to ensure all vehicles are at their starting
        # positions
        self.env.reset()
        ids = self.env.vehicles.get_ids()

        # collect the starting edges of all vehicles
        merge_starting_edges = []
        other_starting_edges = []
        for veh_id in ids:
            if veh_id[:5] == "merge":
                merge_starting_edges.append(
                    self.env.vehicles.get_edge(veh_id))
            else:
                other_starting_edges.append(
                    self.env.vehicles.get_edge(veh_id))

        # ensure that all vehicles are starting in the edges they should be in
        expected_merge_starting_edges = ["right", "top", "bottom"]

        self.assertTrue(
            all(starting_edge in expected_merge_starting_edges
                for starting_edge in merge_starting_edges))

        self.assertTrue(
            all(starting_edge not in expected_merge_starting_edges
                for starting_edge in other_starting_edges))
class TestIndividualLights(unittest.TestCase):
    """
    Tests the functionality of the TrafficLights class in allowing for
    customization of specific nodes.
    """

    def setUp(self):
        tl_logic = TrafficLights(baseline=False)
        phases = [{"duration": "31", "minDur": "8", "maxDur": "45",
                   "state": "GGGrrrGGGrrr"},
                  {"duration": "6", "minDur": "3", "maxDur": "6",
                   "state": "yyyrrryyyrrr"},
                  {"duration": "31", "minDur": "8", "maxDur": "45",
                   "state": "rrrGGGrrrGGG"},
                  {"duration": "6", "minDur": "3", "maxDur": "6",
                   "state": "rrryyyrrryyy"}]
        tl_logic.add("center0", phases=phases, programID=1)
        tl_logic.add("center1", phases=phases, programID=1, offset=1)
        tl_logic.add("center2", tls_type="actuated", phases=phases,
                     programID=1)
        tl_logic.add("center3", tls_type="actuated", phases=phases,
                     programID=1, maxGap=3.0, detectorGap=0.8,
                     showDetectors=True,
                     file="testindividuallights.xml", freq=100)

        self.env, self.scenario = grid_mxn_exp_setup(row_num=1, col_num=4,
                                                     tl_logic=tl_logic)

    def tearDown(self):
        # terminate the traci instance
        self.env.terminate()

        # free data used by the class
        self.env = None
        self.scenario = None

    def test_it_runs(self):
        self.exp = SumoExperiment(self.env, self.scenario)
        self.exp.run(5, 50)
def merge_baseline(num_runs, flow_params, render=True):
    """Run script for all merge baselines.

    Parameters
    ----------
    num_runs : int
        number of rollouts the performance of the environment is evaluated
        over
    flow_params : dict
        the flow meta-parameters describing the structure of a benchmark.
        Must be one of the merge flow_params
    render : bool, optional
        specifies whether to use sumo's gui during execution

    Returns
    -------
    float
        the average speed of vehicles in the network across the simulation
        runs
    """
    exp_tag = flow_params['exp_tag']
    sumo_params = flow_params['sumo']
    vehicles = flow_params['veh']
    env_params = flow_params['env']
    net_params = flow_params['net']
    initial_config = flow_params.get('initial', InitialConfig())
    traffic_lights = flow_params.get('tls', TrafficLights())

    # modify the rendering to match what is requested
    sumo_params.render = render

    # set the evaluation flag to True
    env_params.evaluate = True

    # import the scenario class
    module = __import__('flow.scenarios', fromlist=[flow_params['scenario']])
    scenario_class = getattr(module, flow_params['scenario'])

    # create the scenario object
    scenario = scenario_class(name=exp_tag,
                              vehicles=vehicles,
                              net_params=net_params,
                              initial_config=initial_config,
                              traffic_lights=traffic_lights)

    # import the environment class
    module = __import__('flow.envs', fromlist=[flow_params['env_name']])
    env_class = getattr(module, flow_params['env_name'])

    # create the environment object
    env = env_class(env_params, sumo_params, scenario)

    exp = SumoExperiment(env, scenario)

    results = exp.run(num_runs, env_params.horizon)
    avg_speed = np.mean(results['mean_returns'])

    return avg_speed
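# Hedged usage sketch for the baseline above. The module path
# "flow.benchmarks.merge0" and its module-level "flow_params" dict are
# assumptions about how the merge benchmark is packaged and may differ in a
# given installation.
if __name__ == "__main__":
    from flow.benchmarks.merge0 import flow_params as merge_flow_params

    # evaluate the uncontrolled merge baseline over two rollouts, gui disabled
    avg_speed = merge_baseline(num_runs=2,
                               flow_params=merge_flow_params,
                               render=False)
    print("average speed of the merge baseline: {:.2f} m/s".format(avg_speed))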
class TestNumSteps(unittest.TestCase):
    """
    Tests that the experiment class runs for the number of steps requested.
    """

    def setUp(self):
        # create the environment and scenario classes for a ring road
        env, scenario = ring_road_exp_setup()

        # instantiate an experiment class
        self.exp = SumoExperiment(env, scenario)

    def tearDown(self):
        # free up used memory
        self.exp = None

    def test_steps(self):
        self.exp.run(num_runs=1, num_steps=10)

        self.assertEqual(self.exp.env.time_counter, 10)
def runTest(self):
    # run the experiment for 1 run and collect the final speeds of all
    # vehicles
    env, scenario = ring_road_exp_setup()
    exp = SumoExperiment(env, scenario)
    exp.run(num_runs=1, num_steps=10)

    vel1 = [exp.env.vehicles.get_speed(exp.env.vehicles.get_ids())]

    # run the experiment for 2 runs and collect the final speeds of all
    # vehicles
    env, scenario = ring_road_exp_setup()
    exp = SumoExperiment(env, scenario)
    exp.run(num_runs=2, num_steps=10)

    vel2 = [exp.env.vehicles.get_speed(exp.env.vehicles.get_ids())]

    # check that the final speeds are the same in both instances
    np.testing.assert_array_almost_equal(vel1, vel2)
def test_convert_to_csv(self):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    sumo_params = SumoParams(emission_path="{}/".format(dir_path))
    env, scenario = ring_road_exp_setup(sumo_params=sumo_params)
    exp = SumoExperiment(env, scenario)
    exp.run(num_runs=1, num_steps=10, convert_to_csv=True)

    time.sleep(0.1)

    # check that both the emission xml and csv files exist
    self.assertTrue(os.path.isfile(dir_path + "/{}-emission.xml".format(
        scenario.name)))
    self.assertTrue(os.path.isfile(dir_path + "/{}-emission.csv".format(
        scenario.name)))

    time.sleep(0.1)

    # delete the files
    os.remove(os.path.expanduser(dir_path + "/{}-emission.xml".format(
        scenario.name)))
    os.remove(os.path.expanduser(dir_path + "/{}-emission.csv".format(
        scenario.name)))
def test_rl_actions(self):
    def rl_actions(*_):
        return [1]  # actions are always an acceleration of 1 for one veh

    # create an environment using AccelEnv with 1 RL vehicle
    vehicles = Vehicles()
    vehicles.add(
        veh_id="rl",
        acceleration_controller=(RLController, {}),
        routing_controller=(ContinuousRouter, {}),
        speed_mode="aggressive",
        num_vehicles=1)

    env, scenario = ring_road_exp_setup(vehicles=vehicles)

    exp = SumoExperiment(env=env, scenario=scenario)

    exp.run(1, 10, rl_actions=rl_actions)

    # check that the acceleration of the RL vehicle was that specified by
    # the rl_actions method
    self.assertAlmostEqual(exp.env.vehicles.get_speed("rl_0"), 1, places=1)
    evaluate=True,  # Set to True to evaluate traffic metrics
    warmup_steps=40,
    sims_per_step=1,
    horizon=HORIZON,
    additional_params=additional_env_params,
)

initial_config = InitialConfig(
    spacing="uniform",
    min_gap=5,
    lanes_distribution=float("inf"),
    edges_distribution=["2", "3", "4", "5"],
)

scenario = BottleneckScenario(name="bay_bridge_toll",
                              generator_class=BottleneckGenerator,
                              vehicles=vehicles,
                              net_params=net_params,
                              initial_config=initial_config,
                              traffic_lights=traffic_lights)

env = DesiredVelocityEnv(env_params, sumo_params, scenario)

exp = SumoExperiment(env, scenario)

num_runs = 2
results = exp.run(num_runs, HORIZON)

avg_outflow = np.mean([outflow[-1]
                       for outflow in results["per_step_returns"]])
print('The average outflow over 500 seconds '
      'across {} runs is {}'.format(num_runs, avg_outflow))
def merge_baseline(num_runs, render=True):
    """Run script for all merge baselines.

    Parameters
    ----------
    num_runs : int
        number of rollouts the performance of the environment is evaluated
        over
    render : bool, optional
        specifies whether to use sumo's gui during execution

    Returns
    -------
    float
        the average speed of vehicles in the network across the simulation
        runs
    """
    # We consider a highway network with an upstream merging lane producing
    # shockwaves
    additional_net_params = ADDITIONAL_NET_PARAMS.copy()
    additional_net_params["merge_lanes"] = 1
    additional_net_params["highway_lanes"] = 1
    additional_net_params["pre_merge_length"] = 500

    # RL vehicles constitute 5% of the total number of vehicles
    vehicles = Vehicles()
    vehicles.add(veh_id="human",
                 acceleration_controller=(SumoCarFollowingController, {}),
                 speed_mode=9,
                 num_vehicles=5)

    # Vehicles are introduced from both sides of merge, with RL vehicles
    # entering from the highway portion as well
    inflow = InFlows()
    inflow.add(veh_type="human",
               edge="inflow_highway",
               vehs_per_hour=FLOW_RATE,
               departLane="free",
               departSpeed=10)
    inflow.add(veh_type="human",
               edge="inflow_merge",
               vehs_per_hour=100,
               departLane="free",
               departSpeed=7.5)

    sumo_params = SumoParams(
        restart_instance=True,
        sim_step=0.5,  # time step decreased to prevent occasional crashes
        render=render,
    )

    env_params = EnvParams(
        horizon=HORIZON,
        sims_per_step=5,  # value raised to ensure sec/step match experiment
        warmup_steps=0,
        evaluate=True,  # Set to True to evaluate traffic metric performance
        additional_params={
            "max_accel": 1.5,
            "max_decel": 1.5,
            "target_velocity": 20,
            "num_rl": NUM_RL,
        },
    )

    initial_config = InitialConfig()

    net_params = NetParams(
        inflows=inflow,
        no_internal_links=False,
        additional_params=additional_net_params,
    )

    scenario = MergeScenario(name="merge",
                             generator_class=MergeGenerator,
                             vehicles=vehicles,
                             net_params=net_params,
                             initial_config=initial_config)

    env = WaveAttenuationMergePOEnv(env_params, sumo_params, scenario)

    exp = SumoExperiment(env, scenario)

    results = exp.run(num_runs, HORIZON)

    avg_speed = np.mean(results["mean_returns"])

    return avg_speed
class TestInstantaneousFailsafe(unittest.TestCase):
    """
    Tests that the instantaneous failsafe of the base acceleration controller
    does not allow vehicles to crash under situations where they otherwise
    would. This is tested on two crash-prone controllers: OVM and LinearOVM.
    """

    def setUp_failsafe(self, vehicles):
        additional_env_params = {"target_velocity": 8, "max_accel": 3,
                                 "max_decel": 3}
        env_params = EnvParams(additional_params=additional_env_params)

        additional_net_params = {"length": 100, "lanes": 1,
                                 "speed_limit": 30, "resolution": 40}
        net_params = NetParams(additional_params=additional_net_params)

        initial_config = InitialConfig(bunching=10)

        # create the environment and scenario classes for a ring road
        env, scenario = ring_road_exp_setup(vehicles=vehicles,
                                            env_params=env_params,
                                            net_params=net_params,
                                            initial_config=initial_config)

        # instantiate an experiment class
        self.exp = SumoExperiment(env, scenario)

    def tearDown_failsafe(self):
        # free data used by the class
        self.exp = None

    def test_no_crash_OVM(self):
        vehicles = Vehicles()
        vehicles.add(
            veh_id="test",
            acceleration_controller=(OVMController,
                                     {"fail_safe": "instantaneous"}),
            routing_controller=(ContinuousRouter, {}),
            num_vehicles=10,
        )

        self.setUp_failsafe(vehicles=vehicles)

        # run the experiment, see if it fails
        self.exp.run(1, 200)

        self.tearDown_failsafe()

    def test_no_crash_LinearOVM(self):
        vehicles = Vehicles()
        vehicles.add(
            veh_id="test",
            acceleration_controller=(LinearOVM,
                                     {"fail_safe": "instantaneous"}),
            routing_controller=(ContinuousRouter, {}),
            num_vehicles=10
        )

        self.setUp_failsafe(vehicles=vehicles)

        # run the experiment, see if it fails
        self.exp.run(1, 200)

        self.tearDown_failsafe()
class TestCollisions(unittest.TestCase):
    """Tests that collisions do not cause the experiments to terminate
    prematurely."""

    def test_collide(self):
        """Tests collisions in the absence of inflows."""
        # create the environment and scenario classes for a 1x1 grid network
        sumo_params = SumoParams(sim_step=1, render=False)
        total_vehicles = 20
        vehicles = Vehicles()
        vehicles.add(
            veh_id="idm",
            acceleration_controller=(SumoCarFollowingController, {}),
            routing_controller=(GridRouter, {}),
            sumo_car_following_params=SumoCarFollowingParams(
                tau=0.1, carFollowModel="Krauss", minGap=2.5),
            num_vehicles=total_vehicles,
            speed_mode=0b00000)
        grid_array = {
            "short_length": 100,
            "inner_length": 100,
            "long_length": 100,
            "row_num": 1,
            "col_num": 1,
            "cars_left": int(total_vehicles / 4),
            "cars_right": int(total_vehicles / 4),
            "cars_top": int(total_vehicles / 4),
            "cars_bot": int(total_vehicles / 4)
        }

        additional_net_params = {
            "speed_limit": 35,
            "grid_array": grid_array,
            "horizontal_lanes": 1,
            "vertical_lanes": 1
        }

        net_params = NetParams(
            no_internal_links=False,
            additional_params=additional_net_params)

        self.env, self.scenario = grid_mxn_exp_setup(
            row_num=1, col_num=1, sumo_params=sumo_params,
            vehicles=vehicles, net_params=net_params)

        # go through the env and set all the lights to green
        for i in range(self.env.rows * self.env.cols):
            self.env.traci_connection.trafficlight.setRedYellowGreenState(
                'center' + str(i), "gggggggggggg")

        # instantiate an experiment class
        self.exp = SumoExperiment(self.env, self.scenario)

        self.exp.run(50, 50)

    def test_collide_inflows(self):
        """Tests collisions in the presence of inflows."""
        # create the environment and scenario classes for a 1x1 grid network
        sumo_params = SumoParams(sim_step=1, render=False)
        total_vehicles = 12
        vehicles = Vehicles()
        vehicles.add(
            veh_id="idm",
            acceleration_controller=(SumoCarFollowingController, {}),
            routing_controller=(GridRouter, {}),
            sumo_car_following_params=SumoCarFollowingParams(
                tau=0.1, carFollowModel="Krauss", minGap=2.5),
            num_vehicles=total_vehicles,
            speed_mode=0b00000)
        grid_array = {
            "short_length": 100,
            "inner_length": 100,
            "long_length": 100,
            "row_num": 1,
            "col_num": 1,
            "cars_left": 3,
            "cars_right": 3,
            "cars_top": 3,
            "cars_bot": 3
        }

        additional_net_params = {
            "speed_limit": 35,
            "grid_array": grid_array,
            "horizontal_lanes": 1,
            "vertical_lanes": 1
        }

        inflows = InFlows()
        inflows.add(veh_type="idm", edge="bot0_0", vehs_per_hour=1000)
        inflows.add(veh_type="idm", edge="top0_1", vehs_per_hour=1000)

        net_params = NetParams(
            no_internal_links=False,
            inflows=inflows,
            additional_params=additional_net_params)

        self.env, self.scenario = grid_mxn_exp_setup(
            row_num=1, col_num=1, sumo_params=sumo_params,
            vehicles=vehicles, net_params=net_params)

        # go through the env and set all the lights to green
        for i in range(self.env.rows * self.env.cols):
            self.env.traci_connection.trafficlight.setRedYellowGreenState(
                'center' + str(i), "gggggggggggg")

        # instantiate an experiment class
        self.exp = SumoExperiment(self.env, self.scenario)

        self.exp.run(50, 50)
def grid1_baseline(num_runs, render=True):
    """Run script for the grid1 baseline.

    Parameters
    ----------
    num_runs : int
        number of rollouts the performance of the environment is evaluated
        over
    render : bool, optional
        specifies whether to use sumo's gui during execution

    Returns
    -------
    float
        the mean return (total delay metric) across the simulation runs
    """
    # we place a sufficient number of vehicles to ensure they conform with the
    # total number specified above. We also use a "right_of_way" speed mode to
    # support traffic light compliance
    vehicles = Vehicles()
    vehicles.add(veh_id="human",
                 acceleration_controller=(SumoCarFollowingController, {}),
                 sumo_car_following_params=SumoCarFollowingParams(
                     min_gap=2.5,
                     max_speed=V_ENTER,
                 ),
                 routing_controller=(GridRouter, {}),
                 num_vehicles=(N_LEFT + N_RIGHT) * N_COLUMNS
                 + (N_BOTTOM + N_TOP) * N_ROWS,
                 speed_mode="right_of_way")

    # inflows of vehicles are placed on all outer edges (listed here)
    outer_edges = []
    outer_edges += ["left{}_{}".format(N_ROWS, i) for i in range(N_COLUMNS)]
    outer_edges += ["right0_{}".format(i) for i in range(N_ROWS)]
    outer_edges += ["bot{}_0".format(i) for i in range(N_ROWS)]
    outer_edges += ["top{}_{}".format(i, N_COLUMNS) for i in range(N_ROWS)]

    # equal inflows for each edge (as dictated by the EDGE_INFLOW constant)
    inflow = InFlows()
    for edge in outer_edges:
        inflow.add(veh_type="human",
                   edge=edge,
                   vehs_per_hour=EDGE_INFLOW,
                   departLane="free",
                   departSpeed="max")

    # define the traffic light logic
    tl_logic = TrafficLights(baseline=False)
    phases = [{"duration": "31", "minDur": "5", "maxDur": "45",
               "state": "GGGrrrGGGrrr"},
              {"duration": "2", "minDur": "2", "maxDur": "2",
               "state": "yyyrrryyyrrr"},
              {"duration": "31", "minDur": "5", "maxDur": "45",
               "state": "rrrGGGrrrGGG"},
              {"duration": "2", "minDur": "2", "maxDur": "2",
               "state": "rrryyyrrryyy"}]
    for i in range(N_ROWS * N_COLUMNS):
        tl_logic.add("center" + str(i),
                     tls_type="actuated",
                     phases=phases,
                     programID=1)

    net_params = NetParams(
        inflows=inflow,
        no_internal_links=False,
        additional_params={
            "speed_limit": V_ENTER + 5,
            "grid_array": {
                "short_length": SHORT_LENGTH,
                "inner_length": INNER_LENGTH,
                "long_length": LONG_LENGTH,
                "row_num": N_ROWS,
                "col_num": N_COLUMNS,
                "cars_left": N_LEFT,
                "cars_right": N_RIGHT,
                "cars_top": N_TOP,
                "cars_bot": N_BOTTOM,
            },
            "horizontal_lanes": 1,
            "vertical_lanes": 1,
        },
    )

    sumo_params = SumoParams(
        restart_instance=False,
        sim_step=1,
        render=render,
    )

    env_params = EnvParams(
        evaluate=True,  # Set to True to evaluate traffic metrics
        horizon=HORIZON,
        additional_params={
            "target_velocity": 50,
            "switch_time": 2,
            "num_observed": 2,
            "discrete": False,
            "tl_type": "actuated"
        },
    )

    initial_config = InitialConfig(shuffle=True)

    scenario = SimpleGridScenario(name="grid",
                                  generator_class=SimpleGridGenerator,
                                  vehicles=vehicles,
                                  net_params=net_params,
                                  initial_config=initial_config,
                                  traffic_lights=tl_logic)

    env = PO_TrafficLightGridEnv(env_params, sumo_params, scenario)

    exp = SumoExperiment(env, scenario)

    results = exp.run(num_runs, HORIZON)
    total_delay = np.mean(results["returns"])

    return total_delay
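# Hedged usage sketch: run the grid1 baseline defined above for a single
# rollout with the gui disabled and report the resulting delay metric. The
# choice of num_runs=1 is illustrative; averaging over more rollouts gives a
# more stable estimate.
if __name__ == "__main__":
    total_delay = grid1_baseline(num_runs=1, render=False)
    print("grid1 baseline delay metric: {:.2f}".format(total_delay))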
sumo_params = SumoParams(sumo_binary="sumo-gui")

vehicles = Vehicles()
vehicles.add_vehicles("idm", (IDMController, {}), (StaticLaneChanger, {}),
                      (ContinuousRouter, {}), 0, 14)

additional_env_params = {"target_velocity": 8, "num_steps": 500}
env_params = EnvParams(additional_params=additional_env_params)

additional_net_params = {
    "radius_ring": 30,
    "lanes": 1,
    "speed_limit": 30,
    "resolution": 40
}
net_params = NetParams(no_internal_links=False,
                       additional_params=additional_net_params)

scenario = Figure8Scenario("figure8", Figure8Generator, vehicles, net_params)

env = SimpleAccelerationEnvironment(env_params, sumo_params, scenario)

exp = SumoExperiment(env, scenario)

logging.info("Experiment Set Up complete")

exp.run(1, 1500)

exp.env.terminate()
def figure_eight_baseline(num_runs, render=True):
    """Run script for all figure eight baselines.

    Parameters
    ----------
    num_runs : int
        number of rollouts the performance of the environment is evaluated
        over
    render : bool, optional
        specifies whether to use sumo's gui during execution

    Returns
    -------
    float
        the average speed of vehicles in the network across the simulation
        runs
    """
    # We place 1 autonomous vehicle and 13 human-driven vehicles in the
    # network
    vehicles = Vehicles()
    vehicles.add(veh_id="human",
                 acceleration_controller=(IDMController, {"noise": 0.2}),
                 routing_controller=(ContinuousRouter, {}),
                 speed_mode="no_collide",
                 num_vehicles=14)

    sumo_params = SumoParams(
        sim_step=0.1,
        render=render,
    )

    env_params = EnvParams(
        horizon=HORIZON,
        evaluate=True,  # Set to True to evaluate traffic metrics
        additional_params={
            "target_velocity": 20,
            "max_accel": 3,
            "max_decel": 3,
        },
    )

    initial_config = InitialConfig()

    net_params = NetParams(
        no_internal_links=False,
        additional_params=ADDITIONAL_NET_PARAMS,
    )

    scenario = Figure8Scenario(name="figure_eight",
                               vehicles=vehicles,
                               net_params=net_params,
                               initial_config=initial_config)

    env = AccelEnv(env_params, sumo_params, scenario)

    exp = SumoExperiment(env, scenario)

    results = exp.run(num_runs, HORIZON)

    avg_speed = np.mean(results["mean_returns"])

    return avg_speed
def evaluate_policy(benchmark, _get_actions, _get_states=None):
    """Evaluate the performance of a controller on a predefined benchmark.

    Parameters
    ----------
    benchmark : str
        name of the benchmark, spelled exactly as it appears in the
        benchmarks folder; otherwise a ValueError will be raised
    _get_actions : method
        the mapping from states to actions for the RL agent(s)
    _get_states : method, optional
        a mapping from the environment object in Flow to some state, which
        overrides the _get_states method of the environment. Note that the
        same cannot be done for the actions.

    Returns
    -------
    float
        mean of the evaluation return of the benchmark from NUM_RUNS number
        of simulations
    float
        standard deviation of the evaluation return of the benchmark from
        NUM_RUNS number of simulations

    Raises
    ------
    ValueError
        If the specified benchmark is not available.
    """
    if benchmark not in AVAILABLE_BENCHMARKS.keys():
        raise ValueError(
            "benchmark {} is not available. Check spelling?".format(
                benchmark))

    # get the flow params from the benchmark
    flow_params = AVAILABLE_BENCHMARKS[benchmark]

    exp_tag = flow_params["exp_tag"]
    sumo_params = flow_params["sumo"]
    vehicles = flow_params["veh"]
    env_params = flow_params["env"]
    env_params.evaluate = True  # Set to true to get evaluation returns
    net_params = flow_params["net"]
    initial_config = flow_params.get("initial", InitialConfig())
    traffic_lights = flow_params.get("tls", TrafficLights())

    # import the environment and scenario classes
    module = __import__("flow.envs", fromlist=[flow_params["env_name"]])
    env_class = getattr(module, flow_params["env_name"])
    module = __import__("flow.scenarios", fromlist=[flow_params["scenario"]])
    scenario_class = getattr(module, flow_params["scenario"])

    # recreate the scenario and environment
    scenario = scenario_class(
        name=exp_tag,
        vehicles=vehicles,
        net_params=net_params,
        initial_config=initial_config,
        traffic_lights=traffic_lights)

    # make sure the _get_states method of the environment is the one
    # specified by the user
    if _get_states is not None:

        class _env_class(env_class):
            def get_state(self):
                return _get_states(self)

        env_class = _env_class

    env = env_class(
        env_params=env_params, sumo_params=sumo_params, scenario=scenario)

    # create a SumoExperiment object with the "rl_actions" method as
    # described in the inputs. Note that the state may not be that which is
    # specified by the environment.
    exp = SumoExperiment(env=env, scenario=scenario)

    # run the experiment and return the reward
    res = exp.run(
        num_runs=NUM_RUNS,
        num_steps=env.env_params.horizon,
        rl_actions=_get_actions)

    return np.mean(res["returns"]), np.std(res["returns"])
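# Hedged usage sketch for evaluate_policy. The benchmark name "grid0" is only
# illustrative (any key of AVAILABLE_BENCHMARKS works), and NUM_RL_VEHICLES is
# a hypothetical constant standing in for the action dimension of the chosen
# benchmark.
def constant_actions(state):
    # map every observed state to a fixed zero-acceleration action vector
    return [0.0] * NUM_RL_VEHICLES

mean_ret, std_ret = evaluate_policy("grid0", constant_actions)
print("evaluation return: {:.2f} +/- {:.2f}".format(mean_ret, std_ret))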
additional_net_params = {"ring_radius": 230/(2*np.pi), "lanes": 1, "speed_limit": 30, "resolution": 40} net_params = NetParams( no_internal_links=False, additional_params=additional_net_params ) initial_config = InitialConfig( spacing="custom", additional_params={"merge_bunching": 0} ) scenario = TwoLoopsOneMergingScenario( name="two-loop-one-merging", generator_class=TwoLoopOneMergingGenerator, vehicles=vehicles, net_params=net_params, initial_config=initial_config ) env = TwoLoopsOneMergingEnvironment(env_params, sumo_params, scenario) exp = SumoExperiment(env, scenario) logging.info("Experiment Set Up complete") exp.run(1, 3000) exp.env.terminate()
sumo_params = SumoParams(time_step=0.1, emission_path="./data/",
                         human_speed_mode="no_collide",
                         sumo_binary="sumo-gui")

vehicles = Vehicles()
vehicles.add_vehicles("idm", (IDMController, {}), (StaticLaneChanger, {}),
                      None, 0, 14)
vehicles.add_vehicles("merge-idm", (IDMController, {}),
                      (StaticLaneChanger, {}), None, 0, 14)

additional_env_params = {"target_velocity": 8, "fail-safe": "None"}
env_params = EnvParams(additional_params=additional_env_params)

additional_net_params = {"merge_in_length": 500, "merge_in_angle": pi / 9,
                         "merge_out_length": 500,
                         "merge_out_angle": pi * 17 / 9,
                         "ring_radius": 400 / (2 * pi), "resolution": 40,
                         "lanes": 1, "speed_limit": 30}
net_params = NetParams(no_internal_links=False,
                       additional_params=additional_net_params)

initial_config = InitialConfig(spacing="custom",
                               additional_params={"merge_bunching": 250})

scenario = LoopMergesScenario("loop-merges", LoopMergesGenerator, vehicles,
                              net_params, initial_config=initial_config)

env = SimpleLoopMergesEnvironment(env_params, sumo_params, scenario)

exp = SumoExperiment(env, scenario)

logging.info("Experiment Set Up complete")

exp.run(1, 550)

exp.env.terminate()
def bottleneck0_baseline(num_runs, sumo_binary="sumo-gui"):
    """Run script for the bottleneck0 baseline.

    Parameters
    ----------
    num_runs : int
        number of rollouts the performance of the environment is evaluated
        over
    sumo_binary : str, optional
        specifies whether to use sumo's gui during execution

    Returns
    -------
    float
        the average outflow over the final time step across the simulation
        runs
    """
    vehicles = Vehicles()
    vehicles.add(veh_id="human",
                 speed_mode=9,
                 routing_controller=(ContinuousRouter, {}),
                 lane_change_mode=0,
                 num_vehicles=1 * SCALING)

    controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True),
                           ("4", 2, True), ("5", 1, False)]
    num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3),
                             ("5", 1)]
    additional_env_params = {
        "target_velocity": 40,
        "disable_tb": True,
        "disable_ramp_metering": True,
        "controlled_segments": controlled_segments,
        "symmetric": False,
        "observed_segments": num_observed_segments,
        "reset_inflow": False,
        "lane_change_duration": 5,
        "max_accel": 3,
        "max_decel": 3,
        "inflow_range": [1000, 2000]
    }

    # flow rate
    flow_rate = 1900 * SCALING

    # percentage of flow coming out of each lane
    inflow = InFlows()
    inflow.add(veh_type="human",
               edge="1",
               vehs_per_hour=flow_rate,
               departLane="random",
               departSpeed=10)

    traffic_lights = TrafficLights()
    if not DISABLE_TB:
        traffic_lights.add(node_id="2")
    if not DISABLE_RAMP_METER:
        traffic_lights.add(node_id="3")

    additional_net_params = {"scaling": SCALING}
    net_params = NetParams(in_flows=inflow,
                           no_internal_links=False,
                           additional_params=additional_net_params)

    sumo_params = SumoParams(
        sim_step=0.5,
        sumo_binary=sumo_binary,
        print_warnings=False,
        restart_instance=False,
    )

    env_params = EnvParams(
        evaluate=True,  # Set to True to evaluate traffic metrics
        warmup_steps=40,
        sims_per_step=1,
        horizon=HORIZON,
        additional_params=additional_env_params,
    )

    initial_config = InitialConfig(
        spacing="uniform",
        min_gap=5,
        lanes_distribution=float("inf"),
        edges_distribution=["2", "3", "4", "5"],
    )

    scenario = BottleneckScenario(name="bay_bridge_toll",
                                  generator_class=BottleneckGenerator,
                                  vehicles=vehicles,
                                  net_params=net_params,
                                  initial_config=initial_config,
                                  traffic_lights=traffic_lights)

    env = DesiredVelocityEnv(env_params, sumo_params, scenario)

    exp = SumoExperiment(env, scenario)

    results = exp.run(num_runs, HORIZON)

    avg_outflow = np.mean([outflow[-1]
                           for outflow in results["per_step_returns"]])

    return avg_outflow
def figure_eight_baseline(num_runs, render=True):
    """Run script for all figure eight baselines.

    Parameters
    ----------
    num_runs : int
        number of rollouts the performance of the environment is evaluated
        over
    render : bool, optional
        specifies whether to use sumo's gui during execution

    Returns
    -------
    float
        the average speed of vehicles in the network across the simulation
        runs
    """
    exp_tag = flow_params['exp_tag']
    sumo_params = flow_params['sumo']
    env_params = flow_params['env']
    net_params = flow_params['net']
    initial_config = flow_params.get('initial', InitialConfig())
    traffic_lights = flow_params.get('tls', TrafficLights())

    # modify the rendering to match what is requested
    sumo_params.render = render

    # set the evaluation flag to True
    env_params.evaluate = True

    # we want no autonomous vehicles in the simulation
    vehicles = Vehicles()
    vehicles.add(veh_id='human',
                 acceleration_controller=(IDMController, {'noise': 0.2}),
                 routing_controller=(ContinuousRouter, {}),
                 speed_mode='no_collide',
                 num_vehicles=14)

    # import the scenario class
    module = __import__('flow.scenarios', fromlist=[flow_params['scenario']])
    scenario_class = getattr(module, flow_params['scenario'])

    # create the scenario object
    scenario = scenario_class(name=exp_tag,
                              vehicles=vehicles,
                              net_params=net_params,
                              initial_config=initial_config,
                              traffic_lights=traffic_lights)

    # import the environment class
    module = __import__('flow.envs', fromlist=[flow_params['env_name']])
    env_class = getattr(module, flow_params['env_name'])

    # create the environment object
    env = env_class(env_params, sumo_params, scenario)

    exp = SumoExperiment(env, scenario)

    results = exp.run(num_runs, env_params.horizon)
    avg_speed = np.mean(results['mean_returns'])

    return avg_speed
def bottleneck0_baseline(num_runs, render=True):
    """Run script for the bottleneck0 baseline.

    Parameters
    ----------
    num_runs : int
        number of rollouts the performance of the environment is evaluated
        over
    render : bool, optional
        specifies whether to use sumo's gui during execution

    Returns
    -------
    float
        mean of the returns across the simulation runs
    float
        standard deviation of the returns across the simulation runs
    """
    exp_tag = flow_params['exp_tag']
    sumo_params = flow_params['sumo']
    env_params = flow_params['env']
    net_params = flow_params['net']
    initial_config = flow_params.get('initial', InitialConfig())
    traffic_lights = flow_params.get('tls', TrafficLights())

    # we want no autonomous vehicles in the simulation
    vehicles = Vehicles()
    vehicles.add(veh_id='human',
                 speed_mode=9,
                 routing_controller=(ContinuousRouter, {}),
                 lane_change_mode=0,
                 num_vehicles=1 * SCALING)

    # only include human vehicles in inflows
    flow_rate = 1900 * SCALING
    inflow = InFlows()
    inflow.add(veh_type='human',
               edge='1',
               vehs_per_hour=flow_rate,
               departLane='random',
               departSpeed=10)
    net_params.inflows = inflow

    # modify the rendering to match what is requested
    sumo_params.render = render

    # set the evaluation flag to True
    env_params.evaluate = True

    # import the scenario class
    module = __import__('flow.scenarios', fromlist=[flow_params['scenario']])
    scenario_class = getattr(module, flow_params['scenario'])

    # create the scenario object
    scenario = scenario_class(name=exp_tag,
                              vehicles=vehicles,
                              net_params=net_params,
                              initial_config=initial_config,
                              traffic_lights=traffic_lights)

    # import the environment class
    module = __import__('flow.envs', fromlist=[flow_params['env_name']])
    env_class = getattr(module, flow_params['env_name'])

    # create the environment object
    env = env_class(env_params, sumo_params, scenario)

    exp = SumoExperiment(env, scenario)

    results = exp.run(num_runs, env_params.horizon)

    return np.mean(results['returns']), np.std(results['returns'])
from flow.envs.loop_accel import SimpleAccelerationEnvironment
from flow.scenarios.loop.loop_scenario import LoopScenario

logging.basicConfig(level=logging.INFO)

sumo_params = SumoParams(time_step=0.1, human_speed_mode="no_collide",
                         human_lane_change_mode="strategic",
                         sumo_binary="sumo-gui")

vehicles = Vehicles()
vehicles.add_vehicles("idm", (IDMController, {}), None,
                      (ContinuousRouter, {}), 0, 20)

env_params = EnvParams()

additional_net_params = {"length": 200, "lanes": 2, "speed_limit": 35,
                         "resolution": 40}
net_params = NetParams(additional_params=additional_net_params)

initial_config = InitialConfig()

scenario = LoopScenario("single-lane-one-contr", CircleGenerator, vehicles,
                        net_params, initial_config)

env = SimpleAccelerationEnvironment(env_params, sumo_params, scenario)

exp = SumoExperiment(env, scenario)

logging.info("Experiment Set Up complete")

exp.run(2, 1000)

exp.env.terminate()
def grid0_baseline(num_runs, render=True):
    """Run script for the grid0 baseline.

    Parameters
    ----------
    num_runs : int
        number of rollouts the performance of the environment is evaluated
        over
    render : bool, optional
        specifies whether to use sumo's gui during execution

    Returns
    -------
    float
        the mean return (total delay metric) across the simulation runs
    """
    exp_tag = flow_params['exp_tag']
    sumo_params = flow_params['sumo']
    vehicles = flow_params['veh']
    env_params = flow_params['env']
    net_params = flow_params['net']
    initial_config = flow_params.get('initial', InitialConfig())

    # define the traffic light logic
    tl_logic = TrafficLights(baseline=False)

    phases = [{'duration': '31', 'minDur': '5', 'maxDur': '45',
               'state': 'GGGrrrGGGrrr'},
              {'duration': '2', 'minDur': '2', 'maxDur': '2',
               'state': 'yyyrrryyyrrr'},
              {'duration': '31', 'minDur': '5', 'maxDur': '45',
               'state': 'rrrGGGrrrGGG'},
              {'duration': '2', 'minDur': '2', 'maxDur': '2',
               'state': 'rrryyyrrryyy'}]

    for i in range(N_ROWS * N_COLUMNS):
        tl_logic.add('center' + str(i),
                     tls_type='actuated',
                     phases=phases,
                     programID=1)

    # modify the rendering to match what is requested
    sumo_params.render = render

    # set the evaluation flag to True
    env_params.evaluate = True

    # import the scenario class
    module = __import__('flow.scenarios', fromlist=[flow_params['scenario']])
    scenario_class = getattr(module, flow_params['scenario'])

    # create the scenario object
    scenario = scenario_class(
        name=exp_tag,
        vehicles=vehicles,
        net_params=net_params,
        initial_config=initial_config,
        traffic_lights=tl_logic
    )

    # import the environment class
    module = __import__('flow.envs', fromlist=[flow_params['env_name']])
    env_class = getattr(module, flow_params['env_name'])

    # create the environment object
    env = env_class(env_params, sumo_params, scenario)

    exp = SumoExperiment(env, scenario)

    results = exp.run(num_runs, env_params.horizon)
    total_delay = np.mean(results['returns'])

    return total_delay
class TestBottleneck(unittest.TestCase):
    def test_it_runs(self):
        self.env, self.scenario = setup_bottlenecks()
        self.exp = SumoExperiment(self.env, self.scenario)
        self.exp.run(5, 50)