def test_reset_inflows(self): """Tests that the inflow change within the expected range when calling reset.""" # set a random seed for inflows to be the same every time np.random.seed(seed=123) sim_params = SumoParams(sim_step=0.5, restart_instance=True) vehicles = VehicleParams() vehicles.add(veh_id="human") vehicles.add(veh_id="followerstopper") # edge name, how many segments to observe/control, whether the segment # is controlled controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True), ("4", 2, True), ("5", 1, False)] num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] env_params = EnvParams( additional_params={ "target_velocity": 40, "disable_tb": True, "disable_ramp_metering": True, "controlled_segments": controlled_segments, "symmetric": False, "observed_segments": num_observed_segments, "reset_inflow": True, # this must be set to True for the test "lane_change_duration": 5, "max_accel": 3, "max_decel": 3, "inflow_range": [1000, 2000] # this is what we're testing } ) inflow = InFlows() inflow.add(veh_type="human", edge="1", vehs_per_hour=1500, # the initial inflow we're checking for departLane="random", departSpeed=10) net_params = NetParams( inflows=inflow, no_internal_links=False, additional_params={"scaling": 1, "speed_limit": 23}) scenario = BottleneckScenario( name="bay_bridge_toll", vehicles=vehicles, net_params=net_params) env = DesiredVelocityEnv(env_params, sim_params, scenario) # reset the environment and get a new inflow rate env.reset() expected_inflow = 1343.178 # just from checking the new inflow # check that the first inflow rate is approximately 1500 for _ in range(500): env.step(rl_actions=None) self.assertAlmostEqual( env.k.vehicle.get_inflow_rate(250)/expected_inflow, 1, 1) # reset the environment and get a new inflow rate env.reset() expected_inflow = 1729.050 # just from checking the new inflow # check that the new inflow rate is approximately as expected for _ in range(500): env.step(rl_actions=None) 
self.assertAlmostEqual( env.k.vehicle.get_inflow_rate(250)/expected_inflow, 1, 1)
# num_rl term (see ADDITIONAL_ENV_PARAMs) NUM_RL = [5, 13, 17][EXP_NUM] # We consider a highway network with an upstream merging lane producing # shockwaves additional_net_params = ADDITIONAL_NET_PARAMS.copy() additional_net_params["merge_lanes"] = 1 additional_net_params["highway_lanes"] = 1 additional_net_params["pre_merge_length"] = 500 # RL vehicles constitute 5% of the total number of vehicles vehicles = VehicleParams() vehicles.add(veh_id="human", acceleration_controller=(IDMController, { "noise": 0.2 }), car_following_params=SumoCarFollowingParams( speed_mode="all_checks", ), num_vehicles=0) vehicles.add(veh_id="rl", acceleration_controller=(RLController, {}), car_following_params=SumoCarFollowingParams( speed_mode="all_checks", ), num_vehicles=0) # Vehicles are introduced from both sides of merge, with RL vehicles entering # from the highway portion as well inflow = InFlows() inflow.add(veh_type="human", edge="inflow_highway", vehs_per_hour=(1 - RL_PENETRATION) * FLOW_RATE,
from flow.core.params import NetParams from flow.core.params import SumoLaneChangeParams from flow.core.params import VehicleParams from flow.core.params import InitialConfig from flow.core.params import InFlows import flow.config as config from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION # create the base vehicle type that will be used for inflows vehicles = VehicleParams() vehicles.add( "human", num_vehicles=0, lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic", ), acceleration_controller=(IDMController, { "a": 0.3, "b": 2.0, "noise": 0.5 }), ) inflow = InFlows() # main highway inflow.add(veh_type="human", edge="119257914", vehs_per_hour=8378, departLane="random", departSpeed=23) # on ramp # inflow.add( # veh_type="human",
# num_rl term (see ADDITIONAL_ENV_PARAMs) NUM_RL = 5 # We consider a highway network with an upstream merging lane producing # shockwaves additional_net_params = deepcopy(ADDITIONAL_NET_PARAMS) additional_net_params["merge_lanes"] = 1 additional_net_params["highway_lanes"] = 1 additional_net_params["pre_merge_length"] = 500 # RL vehicles constitute 5% of the total number of vehicles vehicles = VehicleParams() vehicles.add( veh_id="human", acceleration_controller=(IDMController, {}), car_following_params=SumoCarFollowingParams( speed_mode=9, ), num_vehicles=5) vehicles.add( veh_id="rl", acceleration_controller=(RLController, {}), car_following_params=SumoCarFollowingParams( speed_mode=9, ), num_vehicles=0) # Vehicles are introduced from both sides of merge, with RL vehicles entering # from the highway portion as well inflow = InFlows() inflow.add(
network-specific parameters used to generate the network """ additional_init_params = {'enter_speed': enter_speed} initial = InitialConfig(spacing='custom', additional_params=additional_init_params) net = NetParams(additional_params=add_net_params) return initial, net vehicles = VehicleParams() vehicles.add( veh_id="human", routing_controller=(GridRouter, {}), car_following_params=SumoCarFollowingParams( min_gap=2.5, decel=7.5, # avoid collisions at emergency stops ), #num_vehicles=tot_cars num_vehicles=0) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) tl_logic = TrafficLightParams(baseline=False) phases = [{ "duration": "31", "minDur": "8", "maxDur": "45", "state": "GrGrGrGrGrGr" }, { "duration": "6",
SumoCarFollowingParams from flow.core.params import VehicleParams from flow.controllers import IDMController, ContinuousRouter, RLController from flow.scenarios.figure_eight import ADDITIONAL_NET_PARAMS # time horizon of a single rollout HORIZON = 1500 # We place 8 autonomous vehicle and 8 human-driven vehicles in the network vehicles = VehicleParams() for i in range(7): vehicles.add(veh_id="human{}".format(i), acceleration_controller=(IDMController, { "noise": 0.2 }), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams( speed_mode="obey_safe_speed", decel=1.5, ), num_vehicles=1) vehicles.add(veh_id="rl{}".format(i), acceleration_controller=(RLController, {}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams( speed_mode="obey_safe_speed", ), num_vehicles=1) flow_params = dict( # name of the experiment exp_tag="figure_eight_1",
# length of final edge in route LONG_LENGTH = 100 # length of edges that vehicles start on SHORT_LENGTH = 300 # number of vehicles originating in the left, right, top, and bottom edges N_LEFT, N_RIGHT, N_TOP, N_BOTTOM = 1, 1, 1, 1 # we place a sufficient number of vehicles to ensure they confirm with the # total number specified above. We also use a "right_of_way" speed mode to # support traffic light compliance vehicles = VehicleParams() vehicles.add( veh_id="human", acceleration_controller=(SimCarFollowingController, {}), car_following_params=SumoCarFollowingParams( min_gap=2.5, max_speed=V_ENTER, speed_mode="right_of_way", ), routing_controller=(GridRouter, {}), num_vehicles=(N_LEFT + N_RIGHT) * N_COLUMNS + (N_BOTTOM + N_TOP) * N_ROWS) # inflows of vehicles are place on all outer edges (listed here) outer_edges = [] outer_edges += ["left{}_{}".format(N_ROWS, i) for i in range(N_COLUMNS)] outer_edges += ["right0_{}".format(i) for i in range(N_ROWS)] outer_edges += ["bot{}_0".format(i) for i in range(N_ROWS)] outer_edges += ["top{}_{}".format(i, N_COLUMNS) for i in range(N_ROWS)] # equal inflows for each edge (as dictate by the EDGE_INFLOW constant) inflow = InFlows() for edge in outer_edges:
MERGE_HUMAN = 30 VEHICLE_NUMBER = MAIN_HUMAN + MAIN_RL + MERGE_HUMAN # We consider a highway network with an upstream merging lane producing # shockwaves additional_net_params = deepcopy(ADDITIONAL_NET_PARAMS) additional_net_params["merge_lanes"] = 1 additional_net_params["highway_lanes"] = 1 additional_net_params["pre_merge_length"] = 4000 additional_net_params["angle"] = pi / 36 additional_net_params["merge_length"] = 4500 additional_net_params["post_merge_length"] = 1000 additional_net_params["INFLOW_EDGE_LEN"] = 1000 # RL vehicles constitute 5% of the total number of vehicles vehicles = VehicleParams() vehicles.add(veh_id="human", acceleration_controller=(SimCarFollowingController, {}), car_following_params=SumoCarFollowingParams(speed_mode=9, ), num_vehicles=MAIN_HUMAN + MERGE_HUMAN) vehicles.add(veh_id="rl", acceleration_controller=(RLController, {}), car_following_params=SumoCarFollowingParams(speed_mode=9, ), num_vehicles=MAIN_RL) # Vehicles are introduced from both sides of merge, with RL vehicles entering # from the highway portion as well inflow = InFlows() ''' inflow.add( veh_type="human", edge="inflow_highway", vehs_per_hour=(1 - RL_PENETRATION) * FLOW_RATE, number = 0,#MAIN_HUMAN,#round(FLOW_RATE/(FLOW_RATE+MERGE_RATE)*(1-RL_PENETRATION) * VEHICLE_NUMBER),
def reset(self):
    """Reset the environment with a new inflow rate.

    The diverse set of inflows are used to generate a policy that is more
    robust with respect to the inflow rate. The inflow rate is update by
    creating a new scenario similar to the previous one, but with a new
    Inflow object with a rate within the additional environment parameter
    "inflow_range", which is a list consisting of the smallest and largest
    allowable inflow rates.

    **WARNING**: The inflows assume there are vehicles of type
    "followerstopper" and "human" within the VehicleParams object.

    Returns
    -------
    observation
        the initial observation produced by the parent class's ``reset``
    """
    add_params = self.env_params.additional_params
    if add_params.get("reset_inflow"):
        inflow_range = add_params.get("inflow_range")
        # draw a new total inflow rate uniformly from the allowed range,
        # scaled by the network scaling factor
        flow_rate = np.random.uniform(
            min(inflow_range), max(inflow_range)) * self.scaling

        # We try this for 100 trials in case unexpected errors during
        # instantiation.
        for _ in range(100):
            try:
                # introduce new inflows within the pre-defined inflow range
                # (10% autonomous "followerstopper", 90% "human")
                inflow = InFlows()
                inflow.add(
                    veh_type="followerstopper",  # FIXME: make generic
                    edge="1",
                    vehs_per_hour=flow_rate * .1,
                    departLane="random",
                    departSpeed=10)
                inflow.add(
                    veh_type="human",
                    edge="1",
                    vehs_per_hour=flow_rate * .9,
                    departLane="random",
                    departSpeed=10)

                # all other network parameters should match the previous
                # environment (we only want to change the inflow)
                additional_net_params = {
                    "scaling": self.scaling,
                    "speed_limit":
                        self.net_params.additional_params['speed_limit']
                }
                net_params = NetParams(
                    inflows=inflow,
                    additional_params=additional_net_params)

                # rebuild the vehicle types the new scenario needs
                vehicles = VehicleParams()
                vehicles.add(
                    veh_id="human",  # FIXME: make generic
                    car_following_params=SumoCarFollowingParams(
                        speed_mode=9,
                    ),
                    lane_change_controller=(SimLaneChangeController, {}),
                    routing_controller=(ContinuousRouter, {}),
                    lane_change_params=SumoLaneChangeParams(
                        lane_change_mode=0,  # 1621,#0b100000101,
                    ),
                    num_vehicles=1 * self.scaling)
                vehicles.add(
                    veh_id="followerstopper",
                    acceleration_controller=(RLController, {}),
                    lane_change_controller=(SimLaneChangeController, {}),
                    routing_controller=(ContinuousRouter, {}),
                    car_following_params=SumoCarFollowingParams(
                        speed_mode=9,
                    ),
                    lane_change_params=SumoLaneChangeParams(
                        lane_change_mode=0,
                    ),
                    num_vehicles=1 * self.scaling)

                # recreate the scenario object with the new inflows
                self.scenario = self.scenario.__class__(
                    name=self.scenario.orig_name,
                    vehicles=vehicles,
                    net_params=net_params,
                    initial_config=self.initial_config,
                    traffic_lights=self.scenario.traffic_lights)

                observation = super().reset()

                # reset the timer to zero
                self.time_counter = 0

                return observation

            # deliberate best-effort retry: any failure during scenario
            # instantiation is reported and the next trial is attempted
            except Exception as e:
                print('error on reset ', e)

    # perform the generic reset function (also the fallback if all 100
    # trials above failed)
    observation = super().reset()

    # reset the timer to zero
    self.time_counter = 0

    return observation
def bottleneck2_baseline(num_runs, render=True):
    """Run script for the bottleneck2 baseline.

    Parameters
    ----------
    num_runs : int
        number of rollouts the performance of the environment is evaluated
        over
    render : bool, optional
        specifies whether to use the gui during execution

    Returns
    -------
    flow.core.experiment.Experiment
        class needed to run simulations
    """
    # unpack the module-level experiment configuration
    tag = flow_params['exp_tag']
    sim_cfg = flow_params['sim']
    env_cfg = flow_params['env']
    net_cfg = flow_params['net']
    init_cfg = flow_params.get('initial', InitialConfig())
    tls_cfg = flow_params.get('tls', TrafficLightParams())

    # the baseline contains no autonomous vehicles; only one human type
    humans = VehicleParams()
    humans.add(
        veh_id='human',
        car_following_params=SumoCarFollowingParams(speed_mode=9, ),
        routing_controller=(ContinuousRouter, {}),
        lane_change_params=SumoLaneChangeParams(lane_change_mode=0, ),
        num_vehicles=1 * SCALING)

    # attach a purely human inflow to the network configuration
    demand = InFlows()
    demand.add(
        veh_type='human',
        edge='1',
        vehs_per_hour=2300 * SCALING,
        departLane='random',
        departSpeed=10)
    net_cfg.inflows = demand

    # honor the requested rendering mode and force evaluation rewards
    sim_cfg.render = render
    env_cfg.evaluate = True

    # resolve the network and environment classes from their names
    net_module = __import__('flow.networks',
                            fromlist=[flow_params['network']])
    network_cls = getattr(net_module, flow_params['network'])
    env_module = __import__('flow.envs', fromlist=[flow_params['env_name']])
    env_cls = getattr(env_module, flow_params['env_name'])

    # instantiate the network, then the environment on top of it
    network = network_cls(
        name=tag,
        vehicles=humans,
        net_params=net_cfg,
        initial_config=init_cfg,
        traffic_lights=tls_cfg)
    env = env_cls(env_cfg, sim_cfg, network)

    # run the rollouts and report the mean/std of the episode returns
    results = Experiment(env).run(num_runs, env_cfg.horizon)
    return np.mean(results['returns']), np.std(results['returns'])
def run_task(*_):
    """Implement the run_task method needed to run experiments with rllab.

    Builds a single-lane ring-road (Sugiyama-style) scenario with one RL
    vehicle and 21 IDM vehicles, wraps it in a rllab ``GymEnv``, and trains
    a GRU policy with TRPO.
    """
    sim_params = SumoParams(sim_step=0.1, render=False, seed=0)

    # one trainable vehicle plus 21 human-model (IDM) followers
    vehicles = VehicleParams()
    vehicles.add(veh_id="rl",
                 acceleration_controller=(RLController, {}),
                 routing_controller=(ContinuousRouter, {}),
                 num_vehicles=1)
    vehicles.add(veh_id="idm",
                 acceleration_controller=(IDMController, {}),
                 car_following_params=SumoCarFollowingParams(min_gap=0, ),
                 routing_controller=(ContinuousRouter, {}),
                 num_vehicles=21)

    # ring length is re-sampled per episode from this range
    additional_env_params = {
        "ring_length": [220, 270],
        "max_accel": 1,
        "max_decel": 1
    }
    env_params = EnvParams(horizon=HORIZON,
                           additional_params=additional_env_params,
                           warmup_steps=750,
                           clip_actions=False)

    additional_net_params = {
        "length": 260,
        "lanes": 1,
        "speed_limit": 30,
        "resolution": 40
    }
    net_params = NetParams(additional_params=additional_net_params)

    initial_config = InitialConfig(spacing="uniform", bunching=50)

    # `exp_tag` is taken from the enclosing module scope
    print("XXX name", exp_tag)
    scenario = LoopScenario(exp_tag,
                            vehicles,
                            net_params,
                            initial_config=initial_config)

    env_name = "WaveAttenuationPOEnv"
    pass_params = (env_name, sim_params, vehicles, env_params, net_params,
                   initial_config, scenario)

    env = GymEnv(env_name, record_video=False, register_params=pass_params)
    horizon = env.horizon
    env = normalize(env)

    policy = GaussianGRUPolicy(
        env_spec=env.spec,
        hidden_sizes=(5, ),
    )

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=60000,
        max_path_length=horizon,
        n_itr=500,
        # whole_paths=True,
        discount=0.999,
        # step_size=v["step_size"],
    )
    # NOTE(review): the trailing comma makes this a throwaway 1-tuple
    # expression — harmless, but likely unintended
    algo.train(),
def bottleneck_example(flow_rate, horizon, restart_instance=False,
                       render=None):
    """
    Perform a simulation of vehicles on a bottleneck.

    Parameters
    ----------
    flow_rate : float
        total inflow rate of vehicles into the bottleneck
    horizon : int
        time horizon
    restart_instance: bool, optional
        whether to restart the instance upon reset
    render: bool, optional
        specifies whether to use the gui during execution

    Returns
    -------
    exp: flow.core.experiment.Experiment
        A non-rl experiment demonstrating the performance of human-driven
        vehicles on a bottleneck.
    """
    if render is None:
        render = False

    sim_params = SumoParams(
        sim_step=0.5,
        render=render,
        overtake_right=False,
        restart_instance=restart_instance)

    # a single human-driven vehicle type with simulator-driven lane changes
    vehicles = VehicleParams()
    vehicles.add(
        veh_id="human",
        lane_change_controller=(SimLaneChangeController, {}),
        routing_controller=(ContinuousRouter, {}),
        car_following_params=SumoCarFollowingParams(speed_mode=25, ),
        lane_change_params=SumoLaneChangeParams(lane_change_mode=1621, ),
        num_vehicles=1)

    additional_env_params = {
        "target_velocity": 40,
        "max_accel": 1,
        "max_decel": 1,
        "lane_change_duration": 5,
        "add_rl_if_exit": False,
        "disable_tb": DISABLE_TB,
        "disable_ramp_metering": DISABLE_RAMP_METER
    }
    env_params = EnvParams(
        horizon=horizon, additional_params=additional_env_params)

    inflow = InFlows()
    # NOTE(review): `vehsPerHour` is the legacy camelCase spelling; other
    # scripts in this project use `vehs_per_hour` — confirm this version of
    # InFlows.add accepts both before normalizing
    inflow.add(
        veh_type="human",
        edge="1",
        vehsPerHour=flow_rate,
        departLane="random",
        departSpeed=10)

    # only add the toll booth / ramp meter traffic lights when enabled
    traffic_lights = TrafficLightParams()
    if not DISABLE_TB:
        traffic_lights.add(node_id="2")
    if not DISABLE_RAMP_METER:
        traffic_lights.add(node_id="3")

    additional_net_params = {"scaling": SCALING, "speed_limit": 23}
    net_params = NetParams(
        inflows=inflow,
        no_internal_links=False,
        additional_params=additional_net_params)

    initial_config = InitialConfig(
        spacing="random",
        min_gap=5,
        lanes_distribution=float("inf"),
        edges_distribution=["2", "3", "4", "5"])

    scenario = BottleneckScenario(
        name="bay_bridge_toll",
        vehicles=vehicles,
        net_params=net_params,
        initial_config=initial_config,
        traffic_lights=traffic_lights)

    env = BottleneckEnv(env_params, sim_params, scenario)

    return BottleneckDensityExperiment(env)
def dissipating_waves(render=None):
    """Build a merge experiment with human (IDM) and RL vehicle types."""
    sim = SumoParams(sim_step=0.1, render=True, restart_instance=True)
    if render is not None:
        sim.render = render

    # Two vehicle types: noisy IDM humans and (initially absent) RL cars
    veh = VehicleParams()
    veh.add(veh_id="human",
            acceleration_controller=(IDMController, {"noise": 0.2}),
            car_following_params=SumoCarFollowingParams(
                speed_mode="obey_safe_speed"),
            num_vehicles=5)
    veh.add(veh_id="rl",
            acceleration_controller=(RLController, {}),
            car_following_params=SumoCarFollowingParams(
                speed_mode="obey_safe_speed"),
            num_vehicles=0)

    # Vehicles enter from both sides of the merge; the RL share of the
    # highway demand is governed by RL_PENETRATION
    demand = InFlows()
    for kind, edge, rate, speed in (
            ("human", "inflow_highway", (1 - RL_PENETRATION) * FLOW_RATE, 10),
            ("rl", "inflow_highway", RL_PENETRATION * FLOW_RATE, 10),
            ("human", "inflow_merge", 100, 7.5)):
        demand.add(veh_type=kind,
                   edge=edge,
                   vehs_per_hour=rate,
                   depart_lane="free",
                   depart_speed=speed)

    # Network geometry: single-lane highway and merge
    net_extra = ADDITIONAL_NET_PARAMS.copy()
    net_extra.update(pre_merge_length=600,
                     post_merge_length=100,
                     merge_lanes=1,
                     highway_lanes=1)
    net = NetParams(inflows=demand, additional_params=net_extra)

    # Assemble the scenario and the (non-trained) environment
    scenario = MergeScenario(name='testing',
                             vehicles=veh,
                             net_params=net,
                             initial_config=InitialConfig())
    env = MergePOEnv(
        EnvParams(additional_params=ADDITIONAL_ENV_PARAMS), sim, scenario)
    return Experiment(env)
} additional_net_params = { 'speed_limit': 35, 'grid_array': grid_array, 'horizontal_lanes': 1, 'vertical_lanes': 1 } vehicles = VehicleParams() vehicles.add( veh_id='idm', acceleration_controller=(SimCarFollowingController, {}), car_following_params=SumoCarFollowingParams( minGap=2.5, decel=7.5, # avoid collisions at emergency stops max_speed=V_ENTER, speed_mode="all_checks", ), routing_controller=(GridRouter, {}), num_vehicles=tot_cars) flow_params = dict( # name of the experiment exp_tag='traffic_light_grid', # name of the flow environment the experiment is running on env_name='TrafficLightGridPOEnv', # name of the network class the experiment is running on network='TrafficLightGridNetwork',
# make sure (sample_batch_size * num_workers ~= train_batch_size) # time horizon of a single rollout HORIZON = 3000 # Number of rings NUM_RINGS = 1 # number of rollouts per training iteration N_ROLLOUTS = 20 # int(20/NUM_RINGS) # number of parallel workers N_CPUS = 2 # int(20/NUM_RINGS) # We place one autonomous vehicle and 21 human-driven vehicles in the network vehicles = VehicleParams() for i in range(NUM_RINGS): vehicles.add(veh_id='human_{}'.format(i), acceleration_controller=(IDMController, { 'noise': 0.2 }), routing_controller=(ContinuousRouter, {}), num_vehicles=21) vehicles.add(veh_id='rl_{}'.format(i), acceleration_controller=(RLController, {}), routing_controller=(ContinuousRouter, {}), num_vehicles=1) flow_params = dict( # name of the experiment exp_tag='lord_of_numrings{}'.format(NUM_RINGS), # name of the flow environment the experiment is running on env_name='MultiWaveAttenuationPOEnv', # name of the network class the experiment is running on
"""Example of modified minicity network with human-driven vehicles.""" from flow.controllers import IDMController from flow.controllers import RLController from flow.core.params import SumoParams, EnvParams, NetParams, InitialConfig from flow.core.params import SumoCarFollowingParams, SumoLaneChangeParams from flow.core.params import VehicleParams from flow.envs.ring.accel import AccelEnv, ADDITIONAL_ENV_PARAMS from flow.controllers.routing_controllers import MinicityRouter from flow.networks import MiniCityNetwork from flow.core.params import TrafficLightParams vehicles = VehicleParams() vehicles.add(veh_id="idm", acceleration_controller=(IDMController, {}), routing_controller=(MinicityRouter, {}), car_following_params=SumoCarFollowingParams(speed_mode=1, ), lane_change_params=SumoLaneChangeParams( lane_change_mode="no_lat_collide", ), initial_speed=0, num_vehicles=90) vehicles.add(veh_id="rl", acceleration_controller=(RLController, {}), routing_controller=(MinicityRouter, {}), car_following_params=SumoCarFollowingParams( speed_mode="obey_safe_speed", ), initial_speed=0, num_vehicles=10) ################################################ tl_logic = TrafficLightParams(baseline=False) phases = [{ "duration": "31", "minDur": "8",
# number of parallel workers N_CPUS = 2 RING_RADIUS = 100 NUM_MERGE_HUMANS = 9 NUM_MERGE_RL = 1 # note that the vehicles are added sequentially by the scenario, # so place the merging vehicles after the vehicles in the ring vehicles = VehicleParams() # Inner ring vehicles vehicles.add(veh_id='human', acceleration_controller=(IDMController, { 'noise': 0.2 }), lane_change_controller=(SimLaneChangeController, {}), routing_controller=(ContinuousRouter, {}), num_vehicles=6, car_following_params=SumoCarFollowingParams(minGap=0.0, tau=0.5), lane_change_params=SumoLaneChangeParams()) # A single learning agent in the inner ring vehicles.add(veh_id='rl', acceleration_controller=(RLController, {}), lane_change_controller=(SimLaneChangeController, {}), routing_controller=(ContinuousRouter, {}), num_vehicles=1, car_following_params=SumoCarFollowingParams( minGap=0.01, tau=0.5, speed_mode="no_collide", ),
"""Example of an open multi-lane network with human-driven vehicles.""" from flow.controllers import IDMController from flow.core.params import SumoParams, EnvParams, NetParams, InitialConfig, SumoLaneChangeParams from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS from flow.envs import LaneChangeAccelEnv vehicles = VehicleParams() vehicles.add(veh_id="human", acceleration_controller=(IDMController, {}), lane_change_params=SumoLaneChangeParams( model="SL2015", lc_sublane=2.0, ), num_vehicles=20) vehicles.add(veh_id="human2", acceleration_controller=(IDMController, {}), lane_change_params=SumoLaneChangeParams( model="SL2015", lc_sublane=2.0, ), num_vehicles=20) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) inflow = InFlows() inflow.add(veh_type="human", edge="highway_0", probability=0.25,
# number of parallel workers N_CPUS = 2 # number of rollouts per training iteration N_ROLLOUTS = N_CPUS * 4 SCALING = 1 NUM_LANES = 4 * SCALING # number of lanes in the widest highway DISABLE_TB = True DISABLE_RAMP_METER = True AV_FRAC = 0.10 vehicles = VehicleParams() vehicles.add(veh_id="human", lane_change_controller=(SimLaneChangeController, {}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams( speed_mode="all_checks", ), lane_change_params=SumoLaneChangeParams(lane_change_mode=0, ), num_vehicles=1 * SCALING) vehicles.add(veh_id="followerstopper", acceleration_controller=(RLController, {}), lane_change_controller=(SimLaneChangeController, {}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams(speed_mode=9, ), lane_change_params=SumoLaneChangeParams(lane_change_mode=0, ), num_vehicles=1 * SCALING) controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True), ("4", 2, True), ("5", 1, False)] num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] additional_env_params = {
def visualizer_rllib(args):
    """Visualizer for RLlib experiments.

    This function takes args (see function create_parser below for more
    detailed information on what information can be fed to this visualizer),
    and renders the experiment associated with it.
    """
    # normalize the result directory path (strip a trailing slash)
    result_dir = args.result_dir if args.result_dir[-1] != '/' \
        else args.result_dir[:-1]

    config = get_rllib_config(os.path.join(os.getcwd(), result_dir))
    # TODO(ev) backwards compatibility hack
    # NOTE(review): if get_rllib_pkl raises, `pkl` stays unbound and the
    # multiagent branch below will fail with NameError — confirm intended
    try:
        pkl = get_rllib_pkl(result_dir)
    except Exception:
        pass

    # check if we have a multiagent scenario but in a
    # backwards compatible way
    if config.get('multiagent', {}).get('policies', {}):
        multiagent = True
        config['multiagent'] = pkl['multiagent']
    else:
        multiagent = False

    # Run on only one cpu for rendering purposes
    config['num_workers'] = 0

    flow_params = get_flow_params(config)

    sim_params = flow_params['sim']

    # Determine agent and checkpoint; the CLI --run argument must agree
    # with the algorithm recorded in params.json when both are present
    config_run = config['env_config']['run'] if 'run' in config['env_config'] \
        else None
    if args.run and config_run:
        if args.run != config_run:
            print('visualizer_rllib.py: error: run argument ' +
                  '\'{}\' passed in '.format(args.run) +
                  'differs from the one stored in params.json ' +
                  '\'{}\''.format(config_run))
            sys.exit(1)
    if args.run:
        agent_cls = get_agent_class(args.run)
    elif config_run:
        agent_cls = get_agent_class(config_run)
    else:
        print('visualizer_rllib.py: error: could not find flow parameter '
              '\'run\' in params.json, '
              'add argument --run to provide the algorithm or model used '
              'to train the results\n e.g. '
              'python ./visualizer_rllib.py /tmp/ray/result_dir 1 --run PPO')
        sys.exit(1)

    sim_params.restart_instance = True
    dir_path = os.path.dirname(os.path.realpath(__file__))
    emission_path = '{0}/test_time_rollout/'.format(dir_path)
    sim_params.emission_path = emission_path if args.gen_emission else None

    # pick your rendering mode
    if args.render_mode == 'sumo_web3d':
        sim_params.num_clients = 2
        sim_params.render = False
    elif args.render_mode == 'drgb':
        sim_params.render = 'drgb'
        sim_params.pxpm = 4
    elif args.render_mode == 'sumo_gui':
        sim_params.render = True
        print('NOTE: With render mode {}, an extra instance of the SUMO GUI '
              'will display before the GUI for visualizing the result. Click '
              'the green Play arrow to continue.'.format(args.render_mode))
    elif args.render_mode == 'no_render':
        sim_params.render = False
    if args.save_render:
        sim_params.render = 'drgb'
        sim_params.pxpm = 4
        sim_params.save_render = True

    # Go through and switch out the IDM vehicles with Krauss if requested
    if args.test_transfer:
        from flow.core.params import VehicleParams
        temp_vehicles = VehicleParams()
        from flow.controllers.car_following_models import IDMController
        from flow.controllers.car_following_models import \
            SimCarFollowingController
        for veh in flow_params['veh'].initial:
            # swap the car following params with Krauss
            # NOTE(review): statement grouping reconstructed — the Krauss
            # swap is assumed to apply only to IDM vehicles, while every
            # vehicle is re-added to temp_vehicles; confirm against source
            if veh['acceleration_controller'][0] == IDMController:
                veh['acceleration_controller'] = (
                    SimCarFollowingController,
                    veh['acceleration_controller'][1])
                car_following_param = veh['car_following_params']
                car_following_param.car_follow_model = "Krauss"
                veh['car_following_params'] = car_following_param
            temp_vehicles.add(**veh)

    # Create and register a gym+rllib env
    create_env, env_name = make_create_env(params=flow_params, version=0)
    register_env(env_name, create_env)

    # check if the environment is a single or multiagent environment, and
    # get the right address accordingly
    # single_agent_envs = [env for env in dir(flow.envs)
    #                      if not env.startswith('__')]
    # if flow_params['env_name'] in single_agent_envs:
    #     env_loc = 'flow.envs'
    # else:
    #     env_loc = 'flow.multiagent_envs'

    # Start the environment with the gui turned on and a path for the
    # emission file
    env_params = flow_params['env']
    env_params.restart_instance = False
    # import ipdb; ipdb.set_trace()
    if args.evaluate:
        env_params.evaluate = True

    # lower the horizon if testing
    if args.horizon:
        config['horizon'] = args.horizon
        env_params.horizon = args.horizon

    # create the agent that will be used to compute the actions
    agent = agent_cls(env=env_name, config=config)
    checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num
    checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num
    agent.restore(checkpoint)

    # reuse the trainer's own env when available (except under test)
    if hasattr(agent, "local_evaluator") and \
            os.environ.get("TEST_FLAG") != 'True':
        env = agent.local_evaluator.env
    else:
        env = gym.make(env_name)

    if multiagent:
        rets = {}
        # map the agent id to its policy
        policy_map_fn = config['multiagent']['policy_mapping_fn']
        for key in config['multiagent']['policies'].keys():
            rets[key] = []
    else:
        rets = []

    # set up per-policy recurrent state when the model uses an LSTM
    if config['model']['use_lstm']:
        use_lstm = True
        if multiagent:
            state_init = {}
            # map the agent id to its policy
            policy_map_fn = config['multiagent']['policy_mapping_fn'].func
            size = config['model']['lstm_cell_size']
            for key in config['multiagent']['policies'].keys():
                state_init[key] = [
                    np.zeros(size, np.float32),
                    np.zeros(size, np.float32)
                ]
        else:
            state_init = [
                np.zeros(config['model']['lstm_cell_size'], np.float32),
                np.zeros(config['model']['lstm_cell_size'], np.float32)
            ]
    else:
        use_lstm = False

    env.restart_simulation(sim_params=sim_params, render=sim_params.render)

    # Simulate and collect metrics
    final_outflows = []
    final_inflows = []
    mean_speed = []
    std_speed = []

    # NOTE(review): this nested def shadows the policy_map_fn pulled from
    # the rllib config above — everything below uses this hard-coded
    # av/adversary mapping; confirm intended
    def policy_map_fn(agent_id):
        if agent_id != 'av':
            return 'action_adversary'
        else:
            return agent_id

    if args.evaluate:
        env.env_params.evaluate = True

    # hard-coded PPO controller loaded from the same checkpoint
    controller = RllibController(
        args.result_dir, args.checkpoint_num, algo='PPO')

    for i in range(args.num_rollouts):
        vel = []
        state = env.reset()
        if multiagent:
            ret = {key: [0] for key in rets.keys()}
        else:
            ret = 0
        for _ in range(env_params.horizon):
            vehicles = env.unwrapped.k.vehicle
            vel.append(np.mean(vehicles.get_speed(vehicles.get_ids())))
            # import ipdb; ipdb.set_trace()
            if multiagent:
                # import ipdb; ipdb.set_trace()
                action = {}
                for agent_id in state.keys():
                    if use_lstm:
                        # NOTE(review): `logits` is assigned but never used
                        action[agent_id], state_init[agent_id], logits = \
                            agent.compute_action(
                                state[agent_id],
                                state=state_init[agent_id],
                                policy_id=policy_map_fn(agent_id))
                    else:
                        # import ipdb; ipdb.set_trace()
                        if agent_id == 'av':
                            action[agent_id] = controller.get_action(
                                state['action_adversary'])
                        else:
                            action[agent_id] = agent.compute_action(
                                state[agent_id],
                                policy_id=policy_map_fn(agent_id))
            else:
                # action = agent.compute_action(state)
                action = controller.get_action(state)
            state, reward, done, _ = env.step(action)
            if multiagent:
                for actor, rew in reward.items():
                    ret[policy_map_fn(actor)][0] += rew
            else:
                ret += reward
            if multiagent and done['__all__']:
                break
            if not multiagent and done:
                break
        if multiagent:
            for key in rets.keys():
                rets[key].append(ret[key])
        else:
            rets.append(ret)
        outflow = vehicles.get_outflow_rate(500)
        final_outflows.append(outflow)
        inflow = vehicles.get_inflow_rate(500)
        final_inflows.append(inflow)
        if np.all(np.array(final_inflows) > 1e-5):
            throughput_efficiency = [
                x / y for x, y in zip(final_outflows, final_inflows)
            ]
        else:
            throughput_efficiency = [0] * len(final_inflows)
        mean_speed.append(np.mean(vel))
        std_speed.append(np.std(vel))
        if multiagent:
            # NOTE(review): prints `ret` (the whole dict) for every agent;
            # the loop variable `rew` is unused — likely meant rew here
            for agent_id, rew in rets.items():
                print('Round {}, Return: {} for agent {}'.format(
                    i, ret, agent_id))
        else:
            print('Round {}, Return: {}'.format(i, ret))
    # NOTE(review): debugger breakpoint left in — halts every run here
    import ipdb
    ipdb.set_trace()
    print('==== Summary of results ====')
    print("Return:")
    print(mean_speed)
    if multiagent:
        for agent_id, rew in rets.items():
            print('For agent', agent_id)
            print(rew)
            print('Average, std return: {}, {} for agent {}'.format(
                np.mean(rew), np.std(rew), agent_id))
    else:
        print(rets)
        print('Average, std: {}, {}'.format(np.mean(rets), np.std(rets)))
    print("\nSpeed, mean (m/s):")
    print(mean_speed)
    print('Average, std: {}, {}'.format(np.mean(mean_speed),
                                        np.std(mean_speed)))
    print("\nSpeed, std (m/s):")
    print(std_speed)
    print('Average, std: {}, {}'.format(np.mean(std_speed),
                                        np.std(std_speed)))

    # Compute arrival rate of vehicles in the last 500 sec of the run
    # NOTE(review): `vehicles` and `throughput_efficiency` are only bound
    # inside the rollout loop — num_rollouts == 0 raises NameError here
    print("\nOutflows (veh/hr):")
    print(final_outflows)
    print('Average, std: {}, {}'.format(np.mean(final_outflows),
                                        np.std(final_outflows)))
    # Compute departure rate of vehicles in the last 500 sec of the run
    print("Inflows (veh/hr):")
    print(final_inflows)
    print('Average, std: {}, {}'.format(np.mean(final_inflows),
                                        np.std(final_inflows)))
    # Compute throughput efficiency in the last 500 sec of the
    print("Throughput efficiency (veh/hr):")
    print(throughput_efficiency)
    print('Average, std: {}, {}'.format(np.mean(throughput_efficiency),
                                        np.std(throughput_efficiency)))

    # terminate the environment
    env.unwrapped.terminate()

    # if prompted, convert the emission file into a csv file
    if args.gen_emission:
        time.sleep(0.1)

        dir_path = os.path.dirname(os.path.realpath(__file__))
        emission_filename = '{0}-emission.xml'.format(env.scenario.name)

        emission_path = \
            '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename)

        # convert the emission file into a csv file
        emission_to_csv(emission_path)

        # delete the .xml version of the emission file
        os.remove(emission_path)

    # if we wanted to save the render, here we create the movie
    if args.save_render:
        dirs = os.listdir(os.path.expanduser('~') + '/flow_rendering')
        # Ignore hidden files
        dirs = [d for d in dirs if d[0] != '.']
        dirs.sort(key=lambda date: datetime.strptime(date, "%Y-%m-%d-%H%M%S"))
        recent_dir = dirs[-1]
        # create the movie from the most recent render directory
        movie_dir = os.path.expanduser('~') + '/flow_rendering/' + recent_dir
        save_dir = os.path.expanduser('~') + '/flow_movies'
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        # NOTE(review): shell command built by string concatenation and run
        # via os.system — paths with spaces/special characters will break
        os_cmd = "cd " + movie_dir + " && ffmpeg -i frame_%06d.png"
        os_cmd += " -pix_fmt yuv420p " + dirs[-1] + ".mp4"
        os_cmd += "&& cp " + dirs[-1] + ".mp4 " + save_dir + "/"
        os.system(os_cmd)
from flow.core.params import VehicleParams from flow.controllers import RLController, ContinuousRouter # time horizon of a single rollout HORIZON = 1500 SCALING = 1 NUM_LANES = 4 * SCALING # number of lanes in the widest highway DISABLE_TB = True DISABLE_RAMP_METER = True AV_FRAC = 0.25 vehicles = VehicleParams() vehicles.add(veh_id="human", routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams(speed_mode=9, ), lane_change_params=SumoLaneChangeParams(lane_change_mode=1621, ), num_vehicles=1 * SCALING) vehicles.add(veh_id="rl", acceleration_controller=(RLController, {}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams(speed_mode=9, ), lane_change_params=SumoLaneChangeParams(lane_change_mode=0, ), num_vehicles=1 * SCALING) controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True), ("4", 2, True), ("5", 1, False)] num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] additional_env_params = { "target_velocity": 40, "disable_tb": True,
# that's the baseline scenario for this learning agent from examples.sumo import sugiyama_8 #import ADDITIONAL_ENV_PARAMS, ADDITIONAL_NET_PARAMS, SUMO_STEP # time horizon of a single rollout HORIZON = 3000 # number of rollouts per training iteration N_ROLLOUTS = 20 # number of parallel workers N_CPUS = 4 # We place one autonomous vehicle and 22 human-driven vehicles in the network vehicles = VehicleParams() vehicles.add(veh_id="human", acceleration_controller=(IDMController, { "noise": 0.2 }), routing_controller=(ContinuousRouter, {}), num_vehicles=7) vehicles.add(veh_id="rl", acceleration_controller=(RLController, {}), routing_controller=(ContinuousRouter, {}), num_vehicles=1) # import from baseline test ADDITIONAL_ENV_PARAMS = sugiyama_8.ADDITIONAL_ENV_PARAMS ADDITIONAL_ENV_PARAMS["ring_length"] = [100, 125] # ADDITIONAL_NET_PARAMS = sugiyama_8.ADDITIONAL_NET_PARAMS flow_params = dict( # name of the experiment
], } # ## Ajoute les flux de voiture # `IDMController` : The Intelligent Driver Model is a car-following model specifying vehicle dynamics by a differential equation for acceleration $\dot{v}$. # # `RLController` : a trainable autuonomous vehicle whose actions are dictated by an RL agent. # In[4]: n_veh = 12 vehicles = VehicleParams() vehicles.add("human", acceleration_controller=(IDMController, {}), num_vehicles=n_veh, color='red') vehicles.add("flow", acceleration_controller=(IDMController, {}), num_vehicles=n_veh, color='white') # - `vehs_per_hour`: nombre de vehicule par heure, uniformément espacés. Par exemple, comme il y a $60 \times 60 = 3600$ secondes dans une heure, le parametre $\frac{3600}{5}=720$ va faire rentrer des vehicules dans le network toutes les $5$ secondes. # # - `probability`: c'est la probabilité qu'un véhicule entre dans le network toutes les secondes. Par exemple, si on la fixe à $0.2$, alors chaque seconde de la simulation un véhicule aura $\frac{1}{5}$ chance d'entrer dans le network # # - `period`: C'est le temps en secondes entre 2 véhicules qui sont insérés. Par exemple, le fixer à $5$ ferait rentrer des véhicules dans le network toutes les $5$ secondes (ce qui équivaut à mettre `vehs_per_hour` à $720$). # # <font color='red'> # $\rightarrow$ Exactement 1 seul de ces 3 paramètres doit être configurer ! # </font>
def bay_bridge_example(render=None,
                       use_inflows=False,
                       use_traffic_lights=False):
    """
    Perform a simulation of vehicles on the Oakland-San Francisco Bay Bridge.

    Parameters
    ----------
    render: bool, optional
        specifies whether to use the gui during execution
    use_inflows: bool, optional
        whether to activate inflows from the peripheries of the network
    use_traffic_lights: bool, optional
        whether to activate the traffic lights in the scenario

    Returns
    -------
    exp: flow.core.experiment.Experiment
        A non-rl experiment demonstrating the performance of human-driven
        vehicles simulated by sumo on the Bay Bridge.

    Notes
    -----
    This function downloads the network file from S3 on every call and
    overwrites the local NETFILE next to this module, so it requires
    network access.
    """
    sim_params = SumoParams(sim_step=0.6, overtake_right=True)

    # only override the constructor default when the caller explicitly
    # passed a render preference
    if render is not None:
        sim_params.render = render

    car_following_params = SumoCarFollowingParams(
        speedDev=0.2,
        speed_mode="all_checks",
    )
    lane_change_params = SumoLaneChangeParams(
        lc_assertive=20,
        lc_pushy=0.8,
        lc_speed_gain=4.0,
        model="LC2013",
        lane_change_mode="no_lat_collide",
        # lcKeepRight=0.8
    )

    vehicles = VehicleParams()
    vehicles.add(veh_id="human",
                 acceleration_controller=(SimCarFollowingController, {}),
                 routing_controller=(BayBridgeRouter, {}),
                 car_following_params=car_following_params,
                 lane_change_params=lane_change_params,
                 num_vehicles=1400)

    additional_env_params = {}
    env_params = EnvParams(additional_params=additional_env_params)

    traffic_lights = TrafficLightParams()

    inflow = InFlows()

    if use_inflows:
        # south
        inflow.add(veh_type="human",
                   edge="183343422",
                   vehsPerHour=528,
                   departLane="0",
                   departSpeed=20)
        inflow.add(veh_type="human",
                   edge="183343422",
                   vehsPerHour=864,
                   departLane="1",
                   departSpeed=20)
        inflow.add(veh_type="human",
                   edge="183343422",
                   vehsPerHour=600,
                   departLane="2",
                   departSpeed=20)
        inflow.add(veh_type="human",
                   edge="393649534",
                   probability=0.1,
                   departLane="0",
                   departSpeed=20)  # no data for this

        # west
        inflow.add(veh_type="human",
                   edge="11189946",
                   vehsPerHour=1752,
                   departLane="0",
                   departSpeed=20)
        inflow.add(veh_type="human",
                   edge="11189946",
                   vehsPerHour=2136,
                   departLane="1",
                   departSpeed=20)
        inflow.add(veh_type="human",
                   edge="11189946",
                   vehsPerHour=576,
                   departLane="2",
                   departSpeed=20)

        # north
        inflow.add(veh_type="human",
                   edge="28413687#0",
                   vehsPerHour=2880,
                   departLane="0",
                   departSpeed=20)
        inflow.add(veh_type="human",
                   edge="28413687#0",
                   vehsPerHour=2328,
                   departLane="1",
                   departSpeed=20)
        inflow.add(veh_type="human",
                   edge="28413687#0",
                   vehsPerHour=3060,
                   departLane="2",
                   departSpeed=20)
        inflow.add(veh_type="human",
                   edge="11198593",
                   probability=0.1,
                   departLane="0",
                   departSpeed=20)  # no data for this
        inflow.add(veh_type="human",
                   edge="11197889",
                   probability=0.1,
                   departLane="0",
                   departSpeed=20)  # no data for this

        # midway through bridge
        inflow.add(veh_type="human",
                   edge="35536683",
                   probability=0.1,
                   departLane="0",
                   departSpeed=20)  # no data for this

    net_params = NetParams(inflows=inflow, no_internal_links=False)
    net_params.netfile = NETFILE

    # download the netfile from AWS. Fix over the original: the HTTP
    # response returned by urlopen() was never closed, leaking the
    # underlying socket; a with-block closes it deterministically.
    if use_traffic_lights:
        my_url = "https://s3-us-west-1.amazonaws.com/flow.netfiles/" \
                 "bay_bridge_TL_all_green.net.xml"
    else:
        my_url = "https://s3-us-west-1.amazonaws.com/flow.netfiles/" \
                 "bay_bridge_junction_fix.net.xml"

    with urllib.request.urlopen(my_url) as my_file:
        data_to_write = my_file.read()

    # write the downloaded bytes next to this module under NETFILE
    with open(
            os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         NETFILE), "wb+") as f:
        f.write(data_to_write)

    initial_config = InitialConfig(
        spacing="uniform",
        min_gap=15,
        edges_distribution=EDGES_DISTRIBUTION.copy())

    scenario = BayBridgeScenario(name="bay_bridge",
                                 vehicles=vehicles,
                                 traffic_lights=traffic_lights,
                                 net_params=net_params,
                                 initial_config=initial_config)

    env = BayBridgeEnv(env_params, sim_params, scenario)

    return Experiment(env)
from copy import deepcopy from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ SumoCarFollowingParams from flow.core.params import VehicleParams from flow.controllers import ContinuousRouter, RLController from flow.scenarios.figure_eight import ADDITIONAL_NET_PARAMS # time horizon of a single rollout HORIZON = 1500 # We place 16 autonomous vehicle and 0 human-driven vehicles in the network vehicles = VehicleParams() vehicles.add(veh_id="rl", acceleration_controller=(RLController, {}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams( speed_mode="no_collide", ), num_vehicles=14) flow_params = dict( # name of the experiment exp_tag="figure_eight_2", # name of the flow environment the experiment is running on env_name="AccelEnv", # name of the scenario class the experiment is running on scenario="Figure8Scenario", # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams(
from gym.spaces import Tuple from constructionRouter import ConstructionRouter from constructionEnv_simplifiedV2 import myEnv # time horizon of a single rollout HORIZON = 5000 #5000 # number of rollouts per training iteration N_ROLLOUTS = 10 # number of parallel workers N_CPUS = 10 vehicles = VehicleParams() vehicles.add("rl", acceleration_controller=(RLController, {}), routing_controller=(ConstructionRouter, {}), car_following_params=SumoCarFollowingParams( speed_mode="obey_safe_speed", ), num_vehicles=8) vehicles.add("human", acceleration_controller=(IDMController, {}), lane_change_controller=(SimLaneChangeController, {}), car_following_params=SumoCarFollowingParams(speed_mode=25), lane_change_params=SumoLaneChangeParams(lane_change_mode=1621), num_vehicles=0) # specify the edges vehicles can originate on initial_config = InitialConfig(edges_distribution=["edge4"]) # specify the routes for vehicles in the network class Network(Network):
EDGE_INFLOW = 300 # inflow rate of vehicles at every edge N_ROWS = 2 # number of row of bidirectional lanes N_COLUMNS = 2 # number of columns of bidirectional lanes # we place a sufficient number of vehicles to ensure they confirm with the # total number specified above. We also use a "right_of_way" speed mode to # support traffic light compliance vehicles = VehicleParams() num_vehicles = (N_LEFT + N_RIGHT) * N_COLUMNS + (N_BOTTOM + N_TOP) * N_ROWS vehicles.add( veh_id="human", acceleration_controller=(SimCarFollowingController, {}), car_following_params=SumoCarFollowingParams( min_gap=2.5, max_speed=V_ENTER, decel=7.5, # avoid collisions at emergency stops speed_mode="right_of_way", ), routing_controller=(GridRouter, {}), num_vehicles=num_vehicles) # inflows of vehicles are place on all outer edges (listed here) outer_edges = [] outer_edges += ["left{}_{}".format(N_ROWS, i) for i in range(N_COLUMNS)] outer_edges += ["right0_{}".format(i) for i in range(N_COLUMNS)] outer_edges += ["bot{}_0".format(i) for i in range(N_ROWS)] outer_edges += ["top{}_{}".format(i, N_COLUMNS) for i in range(N_ROWS)] # equal inflows for each edge (as dictate by the EDGE_INFLOW constant) inflow = InFlows()
def merge_example(render=None): """ Perform a simulation of vehicles on a merge. Parameters ---------- render: bool, optional specifies whether to use the gui during execution Returns ------- exp: flow.core.experiment.Experiment A non-rl experiment demonstrating the performance of human-driven vehicles on a merge. """ sim_params = SumoParams(render=True, emission_path="./data/", sim_step=0.2, restart_instance=False) if render is not None: sim_params.render = render vehicles = VehicleParams() vehicles.add(veh_id="human", acceleration_controller=(IDMController, { "noise": 0.2 }), car_following_params=SumoCarFollowingParams( speed_mode="obey_safe_speed", ), num_vehicles=5) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS, sims_per_step=5, warmup_steps=0) inflow = InFlows() inflow.add(veh_type="human", edge="inflow_highway", vehs_per_hour=FLOW_RATE, departLane="free", departSpeed=10) inflow.add(veh_type="human", edge="inflow_merge", vehs_per_hour=100, departLane="free", departSpeed=7.5) additional_net_params = ADDITIONAL_NET_PARAMS.copy() additional_net_params["merge_lanes"] = 1 additional_net_params["highway_lanes"] = 1 additional_net_params["pre_merge_length"] = 500 net_params = NetParams(inflows=inflow, additional_params=additional_net_params) initial_config = InitialConfig(spacing="uniform", perturbation=5.0) scenario = MergeScenario(name="merge-baseline", vehicles=vehicles, net_params=net_params, initial_config=initial_config) env = WaveAttenuationMergePOEnv(env_params, sim_params, scenario) return Experiment(env)
vehicles.add( veh_id="human", acceleration_controller=( IDMController, { #"noise": 0.2 #"fail_safe":"instantaneous", }), lane_change_controller=(SimLaneChangeController, {}), #routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams( # Define speed mode that will minimize collisions: https://sumo.dlr.de/wiki/TraCI/Change_Vehicle_State#speed_mode_.280xb3.29 speed_mode= "obey_safe_speed", #"right_of_way", #"all_checks", #no_collide", decel=7.5, # avoid collisions at emergency stops # desired time-gap from leader tau=1.5, #7, min_gap=2.5, speed_factor=1, speed_dev=0.1, #fail_safe= "instantaneous", ), lane_change_params=SumoLaneChangeParams( model="SL2015", # Define a lane changing mode that will allow lane changes # See: https://sumo.dlr.de/wiki/TraCI/Change_Vehicle_State#lane_change_mode_.280xb6.29 # and: ~/local/flow_2019_07/flow/core/params.py, see LC_MODES = {"aggressive": 0 /*bug, 0 is no lane-changes*/, "no_lat_collide": 512, "strategic": 1621}, where "strategic" is the default behavior lane_change_mode= 1621, #0b011000000001, # (like default 1621 mode, but no lane changes other than strategic to follow route, # 512, #(collision avoidance and safety gap enforcement) # "strategic", lc_speed_gain=1000000, lc_pushy=0, #0.5, #1, lc_assertive=5, #20, # the following two replace default values which are not read well by xml parser lc_impatience=1e-8, lc_time_to_impatience=1e12), num_vehicles=0)
# ramps additional_net_params["on_ramps_pos"] = [400] additional_net_params["off_ramps_pos"] = [800] # probability of exiting at the next off-ramp additional_net_params["next_off_ramp_proba"] = 0.25 # inflow rates in vehs/hour HIGHWAY_INFLOW_RATE = 4000 ON_RAMPS_INFLOW_RATE = 350 vehicles = VehicleParams() vehicles.add( veh_id="human", car_following_params=SumoCarFollowingParams( speed_mode="obey_safe_speed", # for safer behavior at the merges tau=1.5 # larger distance between cars ), lane_change_params=SumoLaneChangeParams(lane_change_mode=1621)) inflows = InFlows() inflows.add(veh_type="human", edge="highway_0", vehs_per_hour=HIGHWAY_INFLOW_RATE, depart_lane="free", depart_speed="max", name="highway_flow") for i in range(len(additional_net_params["on_ramps_pos"])): inflows.add(veh_type="human", edge="on_ramp_{}".format(i), vehs_per_hour=ON_RAMPS_INFLOW_RATE,