Example #1
def two_loops_one_merging_exp_setup(vehicles=None):
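    """Set up a two-loops-one-merging experiment for a single RL vehicle.

    Parameters
    ----------
    vehicles : Vehicles, optional
        vehicles to place in the network; if None, one RL vehicle and five
        IDM vehicles are added to the inner ring, plus five IDM vehicles on
        the merging (outer) ring

    Returns
    -------
    env : TwoLoopsMergePOEnv
        the partially observable two-loops-merge environment
    scenario : TwoLoopsOneMergingScenario
        the two-ring merge scenario the environment runs on
    """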
    sumo_params = SumoParams(sim_step=0.1, render=False)

    if vehicles is None:
        vehicles = Vehicles()
        vehicles.add(
            veh_id="rl",
            acceleration_controller=(RLController, {}),
            lane_change_controller=(StaticLaneChanger, {}),
            sumo_car_following_params=SumoCarFollowingParams(
                speed_mode="no_collide",
            ),
            num_vehicles=1)
        vehicles.add(
            veh_id="idm",
            acceleration_controller=(IDMController, {}),
            lane_change_controller=(StaticLaneChanger, {}),
            sumo_car_following_params=SumoCarFollowingParams(
                speed_mode="no_collide",
            ),
            num_vehicles=5)
        vehicles.add(
            veh_id="merge-idm",
            acceleration_controller=(IDMController, {}),
            lane_change_controller=(StaticLaneChanger, {}),
            sumo_car_following_params=SumoCarFollowingParams(
                speed_mode="no_collide",
            ),
            num_vehicles=5)

    env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS)

    additional_net_params = {
        "ring_radius": 50,
        "lane_length": 75,
        "inner_lanes": 3,
        "outer_lanes": 2,
        "speed_limit": 30,
        "resolution": 40
    }

    net_params = NetParams(
        no_internal_links=False, additional_params=additional_net_params)

    initial_config = InitialConfig(
        spacing="custom",
        lanes_distribution=1,
        additional_params={"merge_bunching": 0})

    scenario = TwoLoopsOneMergingScenario(
        "loop-merges",
        vehicles,
        net_params,
        initial_config=initial_config)

    env = TwoLoopsMergePOEnv(env_params, sumo_params, scenario)

    return env, scenario
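
A minimal usage sketch for the function above (hedged: it assumes the Flow
imports used inside the function are in scope and that the environment exposes
the usual Gym-style reset/step interface plus Flow's terminate() cleanup):

env, scenario = two_loops_one_merging_exp_setup()

state = env.reset()                        # start SUMO and get the initial observation
for _ in range(100):
    action = env.action_space.sample()     # random acceleration for the single RL vehicle
    state, reward, done, _ = env.step(action)
    if done:
        break
env.terminate()                            # shut down the SUMO process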
Example #2
def loop_merge_example(render=None):
    """
    Perform a simulation of vehicles on a loop merge.

    Parameters
    ----------
    render : bool, optional
        specifies whether to use sumo's gui during execution

    Returns
    -------
    exp: flow.core.SumoExperiment type
        A non-rl experiment demonstrating the performance of human-driven
        vehicles on a loop merge.
    """
    sumo_params = SumoParams(sim_step=0.1,
                             emission_path="./data/",
                             render=True)

    if render is not None:
        sumo_params.render = render

    # note that the vehicles are added sequentially by the scenario,
    # so place the merging vehicles after the vehicles in the ring
    vehicles = Vehicles()
    vehicles.add(veh_id="idm",
                 acceleration_controller=(IDMController, {}),
                 lane_change_controller=(SumoLaneChangeController, {}),
                 routing_controller=(ContinuousRouter, {}),
                 num_vehicles=7,
                 speed_mode="no_collide",
                 sumo_car_following_params=SumoCarFollowingParams(minGap=0.0,
                                                                  tau=0.5),
                 sumo_lc_params=SumoLaneChangeParams())
    vehicles.add(veh_id="merge-idm",
                 acceleration_controller=(IDMController, {}),
                 lane_change_controller=(SumoLaneChangeController, {}),
                 routing_controller=(ContinuousRouter, {}),
                 num_vehicles=10,
                 speed_mode="no_collide",
                 sumo_car_following_params=SumoCarFollowingParams(minGap=0.01,
                                                                  tau=0.5),
                 sumo_lc_params=SumoLaneChangeParams())

    env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS)

    additional_net_params = ADDITIONAL_NET_PARAMS.copy()
    additional_net_params["ring_radius"] = 50
    additional_net_params["inner_lanes"] = 1
    additional_net_params["outer_lanes"] = 1
    additional_net_params["lane_length"] = 75
    net_params = NetParams(no_internal_links=False,
                           additional_params=additional_net_params)

    initial_config = InitialConfig(x0=50,
                                   spacing="uniform",
                                   additional_params={"merge_bunching": 0})

    scenario = TwoLoopsOneMergingScenario(name="two-loop-one-merging",
                                          vehicles=vehicles,
                                          net_params=net_params,
                                          initial_config=initial_config)

    env = AccelEnv(env_params, sumo_params, scenario)

    return SumoExperiment(env, scenario)
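
A hedged usage sketch for the example above, assuming the
SumoExperiment.run(num_runs, num_steps) interface of older Flow releases:

if __name__ == "__main__":
    # one rollout of 1500 steps; emission output lands in ./data/ because
    # emission_path is set inside loop_merge_example
    exp = loop_merge_example(render=False)
    exp.run(1, 1500)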
Example #3
def run_task(*_):
    """Implement the run_task method needed to run experiments with rllab."""
    sumo_params = SumoParams(sim_step=0.2, render=True)

    # note that the vehicles are added sequentially by the scenario,
    # so place the merging vehicles after the vehicles in the ring
    vehicles = Vehicles()
    # Inner ring vehicles
    vehicles.add(veh_id="human",
                 acceleration_controller=(IDMController, {
                     "noise": 0.2
                 }),
                 lane_change_controller=(SumoLaneChangeController, {}),
                 routing_controller=(ContinuousRouter, {}),
                 num_vehicles=6,
                 sumo_car_following_params=SumoCarFollowingParams(minGap=0.0,
                                                                  tau=0.5),
                 sumo_lc_params=SumoLaneChangeParams())

    # A single learning agent in the inner ring
    vehicles.add(veh_id="rl",
                 acceleration_controller=(RLController, {}),
                 lane_change_controller=(SumoLaneChangeController, {}),
                 routing_controller=(ContinuousRouter, {}),
                 num_vehicles=1,
                 sumo_car_following_params=SumoCarFollowingParams(
                     minGap=0.01, tau=0.5, speed_mode="no_collide"),
                 sumo_lc_params=SumoLaneChangeParams())

    # Outer ring vehicles
    vehicles.add(veh_id="merge-human",
                 acceleration_controller=(IDMController, {
                     "noise": 0.2
                 }),
                 lane_change_controller=(SumoLaneChangeController, {}),
                 routing_controller=(ContinuousRouter, {}),
                 num_vehicles=10,
                 sumo_car_following_params=SumoCarFollowingParams(minGap=0.0,
                                                                  tau=0.5),
                 sumo_lc_params=SumoLaneChangeParams())

    env_params = EnvParams(horizon=HORIZON,
                           additional_params={
                               "max_accel": 3,
                               "max_decel": 3,
                               "target_velocity": 10,
                               "n_preceding": 2,
                               "n_following": 2,
                               "n_merging_in": 2,
                           })

    additional_net_params = ADDITIONAL_NET_PARAMS.copy()
    additional_net_params["ring_radius"] = 50
    additional_net_params["inner_lanes"] = 1
    additional_net_params["outer_lanes"] = 1
    additional_net_params["lane_length"] = 75
    net_params = NetParams(no_internal_links=False,
                           additional_params=additional_net_params)

    initial_config = InitialConfig(x0=50,
                                   spacing="uniform",
                                   additional_params={"merge_bunching": 0})

    scenario = TwoLoopsOneMergingScenario(name=exp_tag,
                                          vehicles=vehicles,
                                          net_params=net_params,
                                          initial_config=initial_config)

    env_name = "TwoLoopsMergePOEnv"
    pass_params = (env_name, sumo_params, vehicles, env_params, net_params,
                   initial_config, scenario)

    env = GymEnv(env_name, record_video=False, register_params=pass_params)
    horizon = env.horizon
    env = normalize(env)

    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(100, 50, 25))

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=64 * 3 * horizon,
        max_path_length=horizon,
        # whole_paths=True,
        n_itr=1000,
        discount=0.999,
        # step_size=0.01,
    )
    algo.train()
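
run_task does not launch training on its own; with rllab it is typically handed
to run_experiment_lite. A hedged sketch (exp_tag is defined at module level in
the original script; the value and settings below are illustrative placeholders,
not taken from the source):

from rllab.misc.instrument import run_experiment_lite

exp_tag = "two-loop-one-merging"  # placeholder; run_task reads this module-level name

run_experiment_lite(
    run_task,
    n_parallel=1,            # number of sampling workers
    snapshot_mode="last",    # keep only the final policy snapshot
    exp_prefix=exp_tag,
    seed=5,
)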